From 6917255adc9dd3f90535bbee6b0beca7d64ee032 Mon Sep 17 00:00:00 2001
From: LIU Hao
Date: Mon, 4 Nov 2024 02:05:55 +0800
Subject: [PATCH] fwd: Use native immediate value format for Intel and ARM
 syntaxes

In Intel syntax immediates have no prefix. In ARM syntax immediates are
prefixed by `#`. `%c` is only ever necessary in the pathological AT&T
syntax when we want to interpret something as an address outside
parentheses.
---
 mcfgthread/fwd.h | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/mcfgthread/fwd.h b/mcfgthread/fwd.h
index a8325c4ef..013c4ee1d 100644
--- a/mcfgthread/fwd.h
+++ b/mcfgthread/fwd.h
@@ -159,42 +159,42 @@ __MCF_CXX(extern "C" {)
 
 # define __MCF_TEB_LOAD_32_IMMEDIATE(out, offset)  \
     __asm__ volatile (  \
-      "{ mov %%gs:%c1, %k0 | mov %k0, gs:[%c1] }"  \
+      "{ mov %%gs:%c1, %k0 | mov %k0, gs:[%1] }"  \
       : "=r"(*(out)) : "e"(offset))
 
 # define __MCF_TEB_STORE_32_IMMEDIATE(offset, value)  \
     __asm__ volatile (  \
-      "{ mov %k1, %%gs:%c0 | mov gs:[%c0], %k1 }"  \
+      "{ mov %k1, %%gs:%c0 | mov gs:[%0], %k1 }"  \
       : : "e"(offset), "r"(value))
 
 # define __MCF_TEB_LOAD_32_INDEXED(out, offset, index)  \
     __asm__ volatile (  \
-      "{ mov %%gs:%c1(,%2,4), %k0 | mov %k0, gs:[%c1+%2*4] }"  \
+      "{ mov %%gs:%c1(,%2,4), %k0 | mov %k0, gs:[%1+%2*4] }"  \
       : "=r"(*(out)) : "e"(offset), "r"(index))
 
 # define __MCF_TEB_STORE_32_INDEXED(offset, index, value)  \
     __asm__ volatile (  \
-      "{ mov %k2, %%gs:%c0(,%1,4) | mov gs:[%c0+%1*4], %k2 }"  \
+      "{ mov %k2, %%gs:%c0(,%1,4) | mov gs:[%0+%1*4], %k2 }"  \
       : : "e"(offset), "r"(index), "r"(value))
 
 # define __MCF_TEB_LOAD_PTR_IMMEDIATE(out, offset)  \
     __asm__ volatile (  \
-      "{ mov %%gs:%c1, %0 | mov %0, gs:[%c1] }"  \
+      "{ mov %%gs:%c1, %0 | mov %0, gs:[%1] }"  \
       : "=r"(*(out)) : "e"(offset))
 
 # define __MCF_TEB_STORE_PTR_IMMEDIATE(offset, value)  \
     __asm__ volatile (  \
-      "{ mov %1, %%gs:%c0 | mov gs:[%c0], %1 }"  \
+      "{ mov %1, %%gs:%c0 | mov gs:[%0], %1 }"  \
       : : "e"(offset), "r"(value))
 
 # define __MCF_TEB_LOAD_PTR_INDEXED(out, offset, index)  \
     __asm__ volatile (  \
-      "{ mov %%gs:%c1(,%2,8), %0 | mov %0, gs:[%c1+%2*8] }"  \
+      "{ mov %%gs:%c1(,%2,8), %0 | mov %0, gs:[%1+%2*8] }"  \
       : "=r"(*(out)) : "e"(offset), "r"(index))
 
 # define __MCF_TEB_STORE_PTR_INDEXED(offset, index, value)  \
     __asm__ volatile (  \
-      "{ mov %2, %%gs:%c0(,%1,8) | mov gs:[%c0+%1*8], %2 }"  \
+      "{ mov %2, %%gs:%c0(,%1,8) | mov gs:[%0+%1*8], %2 }"  \
       : : "e"(offset), "r"(index), "r"(value))
 
 # define __MCF_64_32(x, y)  x
@@ -231,42 +231,42 @@ __MCF_CXX(extern "C" {)
 
 # define __MCF_TEB_LOAD_32_IMMEDIATE(out, offset)  \
     __asm__ volatile (  \
-      "{ mov %%fs:%c1, %k0 | mov %k0, fs:[%c1] }"  \
+      "{ mov %%fs:%c1, %k0 | mov %k0, fs:[%1] }"  \
       : "=r"(*(out)) : "e"(offset))
 
 # define __MCF_TEB_STORE_32_IMMEDIATE(offset, value)  \
     __asm__ volatile (  \
-      "{ mov %k1, %%fs:%c0 | mov fs:[%c0], %k1 }"  \
+      "{ mov %k1, %%fs:%c0 | mov fs:[%0], %k1 }"  \
       : : "e"(offset), "r"(value))
 
 # define __MCF_TEB_LOAD_32_INDEXED(out, offset, index)  \
     __asm__ volatile (  \
-      "{ mov %%fs:%c1(,%2,4), %k0 | mov %k0, fs:[%c1+%2*4] }"  \
+      "{ mov %%fs:%c1(,%2,4), %k0 | mov %k0, fs:[%1+%2*4] }"  \
       : "=r"(*(out)) : "e"(offset), "r"(index))
 
 # define __MCF_TEB_STORE_32_INDEXED(offset, index, value)  \
     __asm__ volatile (  \
-      "{ mov %k1, %%fs:%c0(,%1,4) | mov fs:[%c0+%1*4], %k2 }"  \
+      "{ mov %k1, %%fs:%c0(,%1,4) | mov fs:[%0+%1*4], %k2 }"  \
       : : "e"(offset), "r"(index), "r"(value))
 
 # define __MCF_TEB_LOAD_PTR_IMMEDIATE(out, offset)  \
     __asm__ volatile (  \
-      "{ mov %%fs:%c1, %0 | mov %0, fs:[%c1] }"  \
+      "{ mov %%fs:%c1, %0 | mov %0, fs:[%1] }"  \
       : "=r"(*(out)) : "e"(offset))
 
 # define __MCF_TEB_STORE_PTR_IMMEDIATE(offset, value)  \
     __asm__ volatile (  \
-      "{ mov %1, %%fs:%c0 | mov fs:[%c0], %1 }"  \
+      "{ mov %1, %%fs:%c0 | mov fs:[%0], %1 }"  \
       : : "e"(offset), "r"(value))
 
 # define __MCF_TEB_LOAD_PTR_INDEXED(out, offset, index)  \
     __asm__ volatile (  \
-      "{ mov %%fs:%c1(,%2,4), %0 | mov %0, fs:[%c1+%2*4] }"  \
+      "{ mov %%fs:%c1(,%2,4), %0 | mov %0, fs:[%1+%2*4] }"  \
       : "=r"(*(out)) : "e"(offset), "r"(index))
 
 # define __MCF_TEB_STORE_PTR_INDEXED(offset, index, value)  \
     __asm__ volatile (  \
-      "{ mov %2, %%fs:%c0(,%1,4) | mov fs:[%c0+%1*4], %2 }"  \
+      "{ mov %2, %%fs:%c0(,%1,4) | mov fs:[%0+%1*4], %2 }"  \
       : : "e"(offset), "r"(index), "r"(value))
 
 # define __MCF_64_32(x, y)  y
@@ -303,12 +303,12 @@ __MCF_CXX(extern "C" {)
 
 # define __MCF_TEB_LOAD_32_IMMEDIATE(out, offset)  \
     __asm__ volatile (  \
-      " ldr %w0, [x18, %c1] "  \
+      " ldr %w0, [x18, %1] "  \
       : "=r"(*(out)) : "M"(offset))
 
 # define __MCF_TEB_STORE_32_IMMEDIATE(offset, value)  \
     __asm__ volatile (  \
-      " str %w0, [x18, %c1] "  \
+      " str %w0, [x18, %1] "  \
       : : "r"(value), "M"(offset))
 
 # define __MCF_TEB_LOAD_32_INDEXED(out, offset, index)  \
@@ -323,12 +323,12 @@ __MCF_CXX(extern "C" {)
 
 # define __MCF_TEB_LOAD_PTR_IMMEDIATE(out, offset)  \
     __asm__ volatile (  \
-      " ldr %0, [x18, %c1] "  \
+      " ldr %0, [x18, %1] "  \
       : "=r"(*(out)) : "M"(offset))
 
 # define __MCF_TEB_STORE_PTR_IMMEDIATE(offset, value)  \
     __asm__ volatile (  \
-      " str %0, [x18, %c1] "  \
+      " str %0, [x18, %1] "  \
       : : "r"(value), "M"(offset))
 
 # define __MCF_TEB_LOAD_PTR_INDEXED(out, offset, index)  \
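
For reference, a minimal standalone sketch (not part of the patch itself) of the operand
rendering the commit message describes, assuming GCC's `{ AT&T | Intel }` multi-dialect
asm templates on x86. The file name demo.c, the function name demo and the constant 42
are illustrative only and do not appear in fwd.h:

    /* demo.c (hypothetical): `gcc -S demo.c` emits AT&T output,
       `gcc -S -masm=intel demo.c` emits Intel output.            */
    int
    demo(void)
      {
        int r;
        /* A plain `%1` renders the "i"-constrained operand natively in either
           dialect: `$42` under AT&T, bare `42` under Intel. The `%c1` modifier
           always prints the bare constant, which AT&T needs only when the
           constant serves as an address or displacement outside parentheses,
           as in the `%%gs:%c1` forms kept on the AT&T side of the patch.      */
        __asm__ volatile (
          "{ mov %1, %k0 | mov %k0, %1 }"
          : "=r"(r) : "i"(42));
        return r;
      }

Under `-masm=intel` the Intel alternatives are selected, so the bare `[%1]` displacements
introduced by this patch assemble in the syntax's native immediate form.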