@@ -342,6 +342,44 @@ static size_t roundSize(size_t size)
     return size;
 }

+#ifdef DYNAREC
+#define GET_PROT_WAIT(A, B)     \
+    uint32_t A;                 \
+    do {                        \
+        A = native_lock_xchg_b(&block[B], PROT_WAIT); \
+    } while(A==PROT_WAIT)
+#define GET_PROT(A, B)          \
+    uint32_t A;                 \
+    do {                        \
+        A = native_lock_get_b(&block[B]); \
+    } while(A==PROT_WAIT)
+
+#define SET_PROT(A, B)      native_lock_storeb(&block[A], B)
+#define LOCK_NODYNAREC()
+#define UNLOCK_DYNAREC()    UNLOCK_PROT()
+#define UNLOCK_NODYNAREC()
+#else
+#define GET_PROT_WAIT(A, B) uint32_t A = block[B]
+#define GET_PROT(A, B)      uint32_t A = block[B]
+#define SET_PROT(A, B)      block[A] = B
+#define LOCK_NODYNAREC()    LOCK_PROT()
+#define UNLOCK_DYNAREC()
+#define UNLOCK_NODYNAREC()  UNLOCK_PROT()
+#endif
+static uintptr_t defered_prot_p = 0;
+static size_t    defered_prot_sz = 0;
+static uint32_t  defered_prot_prot = 0;
+#define LOCK_PROT()         mutex_lock(&mutex_prot)
+#define LOCK_PROT_READ()    mutex_lock(&mutex_prot)
+#define UNLOCK_PROT()       if(defered_prot_p) { \
+        uintptr_t p = defered_prot_p; size_t sz = defered_prot_sz; uint32_t prot = defered_prot_prot; \
+        defered_prot_p = 0; \
+        mutex_unlock(&mutex_prot); \
+        setProtection(p, sz, prot); \
+    } else mutex_unlock(&mutex_prot)
+#define UNLOCK_PROT_READ()  mutex_unlock(&mutex_prot)
+
+
 #ifdef TRACE_MEMSTAT
 static uint64_t customMalloc_allocated = 0;
 #endif
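Aside: under DYNAREC, `GET_PROT_WAIT` claims a per-page protection byte by atomically swapping in the `PROT_WAIT` sentinel and spinning while another thread holds it, and `SET_PROT` releases the slot by storing the final value. A minimal, self-contained sketch of that claim/release pattern using C11 atomics instead of box64's `native_lock_*` primitives (the sentinel value and names below are illustrative, not box64's actual definitions):

```c
#include <stdatomic.h>
#include <stdint.h>

#define PROT_WAIT_SENTINEL 0xFF   /* assumed marker: "entry is being updated" */

/* Claim: swap the sentinel in; whatever comes back is the previous value.
 * If we read the sentinel itself, another thread owns the entry: spin. */
static uint8_t prot_claim(_Atomic uint8_t *slot)
{
    uint8_t old;
    do {
        old = atomic_exchange(slot, PROT_WAIT_SENTINEL);
    } while (old == PROT_WAIT_SENTINEL);
    return old;
}

/* Release: publish the (possibly updated) protection value. */
static void prot_release(_Atomic uint8_t *slot, uint8_t prot)
{
    atomic_store(slot, prot);
}
```

The exchange doubles as both lock acquisition and read, which is why `GET_PROT_WAIT` declares its output variable itself.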
@@ -399,8 +437,13 @@ void* customMalloc(size_t size)
     void* ret = allocBlock(p_blocks[i].block, p, size, &p_blocks[i].first);
     p_blocks[i].maxfree = getMaxFreeBlock(p_blocks[i].block, p_blocks[i].size, p_blocks[i].first);
     mutex_unlock(&mutex_blocks);
-    if(mapallmem)
-        setProtection((uintptr_t)p, allocsize, PROT_READ | PROT_WRITE);
+    if(mapallmem) {
+        // defer the setProtection...
+        //setProtection((uintptr_t)p, allocsize, PROT_READ | PROT_WRITE);
+        defered_prot_p = (uintptr_t)p;
+        defered_prot_sz = allocsize;
+        defered_prot_prot = PROT_READ|PROT_WRITE;
+    }
     return ret;
 }
 void* customCalloc(size_t n, size_t size)
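This hunk is the core of the change: `customMalloc` no longer calls `setProtection()` directly, presumably because it can be reached while `mutex_prot` is already held, and `setProtection()` takes that same non-recursive mutex. The request is instead parked in the `defered_prot_*` globals and replayed by `UNLOCK_PROT()` after the lock is released. A function-shaped sketch of what the macro pair amounts to, assuming a plain pthread mutex (names reused from the diff, body illustrative):

```c
#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

static pthread_mutex_t mutex_prot = PTHREAD_MUTEX_INITIALIZER;
static uintptr_t defered_prot_p    = 0;   /* 0 means "nothing pending" */
static size_t    defered_prot_sz   = 0;
static uint32_t  defered_prot_prot = 0;

/* Toy stand-in: the real setProtection() updates the rb-trees; what matters
 * here is that it locks mutex_prot itself, so calling it with the mutex
 * already held would deadlock. */
void setProtection(uintptr_t p, size_t sz, uint32_t prot)
{
    pthread_mutex_lock(&mutex_prot);
    /* ... record [p, p+sz) -> prot ... */
    (void)p; (void)sz; (void)prot;
    pthread_mutex_unlock(&mutex_prot);
}

/* Equivalent of UNLOCK_PROT(): drop the lock first, then replay the
 * deferred request outside the critical section. */
static void unlock_prot(void)
{
    if (defered_prot_p) {
        uintptr_t p   = defered_prot_p;
        size_t   sz   = defered_prot_sz;
        uint32_t prot = defered_prot_prot;
        defered_prot_p = 0;                 /* consume while still locked */
        pthread_mutex_unlock(&mutex_prot);
        setProtection(p, sz, prot);
    } else {
        pthread_mutex_unlock(&mutex_prot);
    }
}
```

Note the single pending slot: a second deferral before the next `UNLOCK_PROT()` would overwrite the first, so the diff evidently assumes at most one deferred request per critical section.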
@@ -461,31 +504,6 @@ void customFree(void* p)
         dynarec_log(LOG_NONE, "Warning, block %p not found in p_blocks for Free\n", (void*)addr);
 }

-#ifdef DYNAREC
-#define GET_PROT_WAIT(A, B)     \
-    uint32_t A;                 \
-    do {                        \
-        A = native_lock_xchg_b(&block[B], PROT_WAIT); \
-    } while(A==PROT_WAIT)
-#define GET_PROT(A, B)          \
-    uint32_t A;                 \
-    do {                        \
-        A = native_lock_get_b(&block[B]); \
-    } while(A==PROT_WAIT)
-
-#define SET_PROT(A, B)      native_lock_storeb(&block[A], B)
-#define LOCK_NODYNAREC()
-#define UNLOCK_DYNAREC()    mutex_unlock(&mutex_prot)
-#define UNLOCK_NODYNAREC()
-#else
-#define GET_PROT_WAIT(A, B) uint32_t A = block[B]
-#define GET_PROT(A, B)      uint32_t A = block[B]
-#define SET_PROT(A, B)      block[A] = B
-#define LOCK_NODYNAREC()    mutex_lock(&mutex_prot)
-#define UNLOCK_DYNAREC()
-#define UNLOCK_NODYNAREC()  mutex_unlock(&mutex_prot)
-#endif
-
 #ifdef DYNAREC
 #define NCHUNK          64
 typedef struct mmaplist_s {
@@ -999,7 +1017,7 @@ void protectDBJumpTable(uintptr_t addr, size_t size, void* jump, void* ref)
     uintptr_t cur = addr&~(box64_pagesize-1);
     uintptr_t end = ALIGN(addr+size);

-    mutex_lock(&mutex_prot);
+    LOCK_PROT();
     while(cur!=end) {
         uint32_t prot = 0, oprot;
         uintptr_t bend = 0;
@@ -1025,7 +1043,7 @@ void protectDBJumpTable(uintptr_t addr, size_t size, void* jump, void* ref)
     }
     if(jump)
         setJumpTableIfRef64((void*)addr, jump, ref);
-    mutex_unlock(&mutex_prot);
+    UNLOCK_PROT();
 }

 // Remove the Write flag from an address range, so DB can be executed safely
@@ -1036,7 +1054,7 @@ void protectDB(uintptr_t addr, uintptr_t size)
     uintptr_t cur = addr&~(box64_pagesize-1);
     uintptr_t end = ALIGN(addr+size);

-    mutex_lock(&mutex_prot);
+    LOCK_PROT();
     while(cur!=end) {
         uint32_t prot = 0, oprot;
         uintptr_t bend = 0;
@@ -1060,7 +1078,7 @@ void protectDB(uintptr_t addr, uintptr_t size)
         rb_set(memprot, cur, bend, prot);
         cur = bend;
     }
-    mutex_unlock(&mutex_prot);
+    UNLOCK_PROT();
 }

 // Add the Write flag to an address range, and mark all blocks as dirty
@@ -1071,7 +1089,7 @@ void unprotectDB(uintptr_t addr, size_t size, int mark)
     uintptr_t cur = addr&~(box64_pagesize-1);
     uintptr_t end = ALIGN(addr+size);

-    mutex_lock(&mutex_prot);
+    LOCK_PROT();
     while(cur!=end) {
         uint32_t prot = 0, oprot;
         uintptr_t bend = 0;
@@ -1098,27 +1116,27 @@ void unprotectDB(uintptr_t addr, size_t size, int mark)
         rb_set(memprot, cur, bend, prot);
         cur = bend;
     }
-    mutex_unlock(&mutex_prot);
+    UNLOCK_PROT();
 }

 int isprotectedDB(uintptr_t addr, size_t size)
 {
     dynarec_log(LOG_DEBUG, "isprotectedDB %p -> %p => ", (void*)addr, (void*)(addr+size-1));
     addr &= ~(box64_pagesize-1);
     uintptr_t end = ALIGN(addr+size);
-    mutex_lock(&mutex_prot);
+    LOCK_PROT_READ();
     while(addr<end) {
         uint32_t prot;
         uintptr_t bend;
         if(!rb_get_end(memprot, addr, &prot, &bend) || !(prot&PROT_DYN)) {
             dynarec_log(LOG_DEBUG, "0\n");
-            mutex_unlock(&mutex_prot);
+            UNLOCK_PROT_READ();
             return 0;
         } else {
             addr = bend;
         }
     }
-    mutex_unlock(&mutex_prot);
+    UNLOCK_PROT_READ();
     dynarec_log(LOG_DEBUG, "1\n");
     return 1;
 }
@@ -1128,7 +1146,7 @@ int isprotectedDB(uintptr_t addr, size_t size)
 void updateProtection(uintptr_t addr, size_t size, uint32_t prot)
 {
     dynarec_log(LOG_DEBUG, "updateProtection %p:%p 0x%hhx\n", (void*)addr, (void*)(addr+size-1), prot);
-    mutex_lock(&mutex_prot);
+    LOCK_PROT();
     uintptr_t cur = addr & ~(box64_pagesize-1);
     uintptr_t end = ALIGN(cur+size);
     rb_set(mapallmem, cur, cur+size, 1);
@@ -1149,18 +1167,18 @@ void updateProtection(uintptr_t addr, size_t size, uint32_t prot)
         rb_set(memprot, cur, bend, prot|dyn);
         cur = bend;
     }
-    mutex_unlock(&mutex_prot);
+    UNLOCK_PROT();
 }

 void setProtection(uintptr_t addr, size_t size, uint32_t prot)
 {
     size = ALIGN(size);
-    mutex_lock(&mutex_prot);
+    LOCK_PROT();
     uintptr_t cur = addr & ~(box64_pagesize-1);
     uintptr_t end = ALIGN(cur+size);
     rb_set(mapallmem, cur, end, 1);
     rb_set(memprot, cur, end, prot);
-    mutex_unlock(&mutex_prot);
+    UNLOCK_PROT();
 }

 void setProtection_mmap(uintptr_t addr, size_t size, uint32_t prot)
@@ -1169,15 +1187,15 @@ void setProtection_mmap(uintptr_t addr, size_t size, uint32_t prot)
         return;
     addr &= ~(box64_pagesize-1);
     size = ALIGN(size);
-    mutex_lock(&mutex_prot);
+    LOCK_PROT();
     rb_set(mmapmem, addr, addr+size, 1);
-    mutex_unlock(&mutex_prot);
+    UNLOCK_PROT();
     if(prot)
         setProtection(addr, size, prot);
     else {
-        mutex_lock(&mutex_prot);
+        LOCK_PROT();
         rb_set(mapallmem, addr, addr+size, 1);
-        mutex_unlock(&mutex_prot);
+        UNLOCK_PROT();
     }
 }

@@ -1188,32 +1206,32 @@ void setProtection_elf(uintptr_t addr, size_t size, uint32_t prot)
     if(prot)
         setProtection(addr, size, prot);
     else {
-        mutex_lock(&mutex_prot);
+        LOCK_PROT();
         rb_set(mapallmem, addr, addr+size, 1);
-        mutex_unlock(&mutex_prot);
+        UNLOCK_PROT();
     }
 }

 void refreshProtection(uintptr_t addr)
 {
-    mutex_lock(&mutex_prot);
+    LOCK_PROT();
     uint32_t prot;
     uintptr_t bend;
     if(rb_get_end(memprot, addr, &prot, &bend)) {
         int ret = mprotect((void*)(addr&~(box64_pagesize-1)), box64_pagesize, prot&~PROT_CUSTOM);
         dynarec_log(LOG_DEBUG, "refreshProtection(%p): %p/0x%x (ret=%d/%s)\n", (void*)addr, (void*)(addr&~(box64_pagesize-1)), prot, ret, ret?strerror(errno):"ok");
     }
-    mutex_unlock(&mutex_prot);
+    UNLOCK_PROT();
 }

 void allocProtection(uintptr_t addr, size_t size, uint32_t prot)
 {
     dynarec_log(LOG_DEBUG, "allocProtection %p:%p 0x%x\n", (void*)addr, (void*)(addr+size-1), prot);
     size = ALIGN(size);
     addr &= ~(box64_pagesize-1);
-    mutex_lock(&mutex_prot);
+    LOCK_PROT();
     rb_set(mapallmem, addr, addr+size, 1);
-    mutex_unlock(&mutex_prot);
+    UNLOCK_PROT();
     // don't need to add precise tracking probably
 }

@@ -1254,18 +1272,18 @@ void freeProtection(uintptr_t addr, size_t size)
     size = ALIGN(size);
     addr &= ~(box64_pagesize-1);
     dynarec_log(LOG_DEBUG, "freeProtection %p:%p\n", (void*)addr, (void*)(addr+size-1));
-    mutex_lock(&mutex_prot);
+    LOCK_PROT();
     rb_unset(mapallmem, addr, addr+size);
     rb_unset(mmapmem, addr, addr+size);
     rb_unset(memprot, addr, addr+size);
-    mutex_unlock(&mutex_prot);
+    UNLOCK_PROT();
 }

 uint32_t getProtection(uintptr_t addr)
 {
-    mutex_lock(&mutex_prot);
+    LOCK_PROT_READ();
     uint32_t ret = rb_get(memprot, addr);
-    mutex_unlock(&mutex_prot);
+    UNLOCK_PROT_READ();
     return ret;
 }

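Read-only paths such as `getProtection` and `isprotectedDB` now go through `LOCK_PROT_READ`/`UNLOCK_PROT_READ`. Today these expand to the same `mutex_prot` lock, but the split keeps the deferred-`setProtection` replay off pure-read paths and leaves room to swap in a reader-writer lock later without touching call sites. A hypothetical variant, not part of this commit:

```c
#include <pthread.h>

static pthread_rwlock_t rwlock_prot = PTHREAD_RWLOCK_INITIALIZER;

/* Writers would still need the deferred-setProtection replay (omitted);
 * readers take the shared lock and can proceed in parallel. */
#define LOCK_PROT()         pthread_rwlock_wrlock(&rwlock_prot)
#define LOCK_PROT_READ()    pthread_rwlock_rdlock(&rwlock_prot)
#define UNLOCK_PROT()       pthread_rwlock_unlock(&rwlock_prot)
#define UNLOCK_PROT_READ()  pthread_rwlock_unlock(&rwlock_prot)
```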
@@ -1280,7 +1298,7 @@ int getMmapped(uintptr_t addr)

 void* find31bitBlockNearHint(void* hint, size_t size, uintptr_t mask)
 {
-    int prot;
+    uint32_t prot;
     if(hint<LOWEST) hint = LOWEST;
     uintptr_t bend = 0;
     uintptr_t cur = (uintptr_t)hint;
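The remaining hunks change `int prot` to `uint32_t prot` in the block-search helpers. `rb_get_end` returns the stored protection through an out-parameter, so `&prot` must be a `uint32_t*`; with `int prot`, the calls passed an `int*`, an incompatible pointer type. A compilable sketch of the shape involved (the `rb_get_end` prototype is inferred from its call sites here, not copied from box64's headers):

```c
#include <stdint.h>

typedef struct rbtree rbtree;   /* opaque here; defined elsewhere in box64 */

/* Assumed shape: on a hit, writes the stored value into *val and the end
 * of the containing range into *end, returning non-zero. */
extern int rb_get_end(rbtree *tree, uintptr_t addr, uint32_t *val, uintptr_t *end);

static int probe(rbtree *mapallmem, uintptr_t cur)
{
    uint32_t  prot;   /* was 'int prot': &prot then had type int*, not uint32_t* */
    uintptr_t bend;
    return rb_get_end(mapallmem, cur, &prot, &bend) ? (int)prot : -1;
}
```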
@@ -1313,7 +1331,7 @@ void* find47bitBlock(size_t size)
 }
 void* find47bitBlockNearHint(void* hint, size_t size, uintptr_t mask)
 {
-    int prot;
+    uint32_t prot;
     if(hint<LOWEST) hint = LOWEST;
     uintptr_t bend = 0;
     uintptr_t cur = (uintptr_t)hint;
@@ -1347,7 +1365,7 @@ void* find47bitBlockElf(size_t size, int mainbin, uintptr_t mask)

 int isBlockFree(void* hint, size_t size)
 {
-    int prot;
+    uint32_t prot;
     uintptr_t bend = 0;
     uintptr_t cur = (uintptr_t)hint;
     if(!rb_get_end(mapallmem, cur, &prot, &bend)) {
@@ -1425,7 +1443,7 @@ void reserveHighMem()
         return; // don't reserve by default
     uintptr_t cur = 1ULL<<47;
     uintptr_t bend = 0;
-    int prot;
+    uint32_t prot;
     while(bend!=0xffffffffffffffffLL) {
         if(!rb_get_end(mapallmem, cur, &prot, &bend)) {
             void* ret = internal_mmap((void*)cur, bend-cur, 0, MAP_ANONYMOUS|MAP_FIXED|MAP_PRIVATE|MAP_NORESERVE, -1, 0);