@@ -196,19 +196,6 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin)
     }
     if(!offs && !head->vaddr)
         offs = (uintptr_t)find47bitBlockElf(head->memsz+head->align, mainbin, max_align); // limit to 47bits...
-#if defined(PAGE8K) || defined(PAGE16K) || defined(PAGE64K)
-    // Will not try anything smart on pagesize != 4k....
-    size_t sz = head->memsz;
-    void* raw = NULL;
-    void* image = NULL;
-    if(!head->vaddr) {
-        sz += head->align;
-        raw = mmap64((void*)offs, sz, 0, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
-        image = mmap64((void*)(((uintptr_t)raw+max_align)&~max_align), head->memsz, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
-    } else {
-        image = raw = mmap64((void*)head->vaddr, sz, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
-    }
-#else // PAGE4K
     // prereserve the whole elf image, without populating
     size_t sz = head->memsz;
     void* raw = NULL;
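Note on the retained generic path: the code above reserves the whole ELF image as plain address space ("without populating") and only later maps each PT_LOAD segment on top of it. A minimal stand-alone sketch of that reserve-then-overmap idea, assuming Linux mmap with MAP_NORESERVE; the helper name reserve_image is illustrative and not part of box64:

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    // Reserve 'memsz' bytes of address space aligned to 'align' (a power of
    // two) without committing memory: over-allocate with PROT_NONE and
    // MAP_NORESERVE, then round the returned pointer up to the alignment.
    void* reserve_image(size_t memsz, uintptr_t align)
    {
        void* raw = mmap(NULL, memsz + align, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        if (raw == MAP_FAILED)
            return NULL;
        uintptr_t image = ((uintptr_t)raw + align - 1) & ~(align - 1);
        return (void*)image; // segments are later mapped here with MAP_FIXED
    }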
@@ -220,7 +207,6 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin)
     } else {
         image = raw = mmap64((void*)head->vaddr, sz, 0, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
     }
-#endif
     if(image!=MAP_FAILED && !head->vaddr && image!=(void*)offs) {
         printf_log(LOG_INFO, "%s: Mmap64 for (@%p 0x%zx) for elf \"%s\" returned %p(%p/0x%zx) instead\n", (((uintptr_t)image)&max_align)?"Error":"Warning", (void*)(head->vaddr?head->vaddr:offs), head->memsz, head->name, image, raw, head->align);
         offs = (uintptr_t)image;
@@ -240,17 +226,14 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin)
             return 1;
         offs = (uintptr_t)image - head->vaddr;
     }
-    printf_log(log_level, "Pre-allocated 0x%zx byte at %p for %s\n", head->memsz, image, head->name);
+    printf_dump(log_level, "Pre-allocated 0x%zx byte at %p for %s\n", head->memsz, image, head->name);
     head->delta = offs;
-    printf_log(log_level, "Delta of %p (vaddr=%p) for Elf \"%s\"\n", (void*)offs, (void*)head->vaddr, head->name);
+    printf_dump(log_level, "Delta of %p (vaddr=%p) for Elf \"%s\"\n", (void*)offs, (void*)head->vaddr, head->name);
 
     head->image = image;
     head->raw = raw;
     head->raw_size = sz;
     setProtection_elf((uintptr_t)raw, sz, 0);
-#if defined(PAGE8K) || defined(PAGE16K) || defined(PAGE64K)
-    setProtection_elf((uintptr_t)image, head->memsz, PROT_READ|PROT_WRITE|PROT_EXEC);
-#endif
 
     head->multiblocks = (multiblock_t*)box_calloc(head->multiblock_n, sizeof(multiblock_t));
     head->tlsbase = AddTLSPartition(context, head->tlssize);
@@ -267,20 +250,10 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin)
             head->multiblocks[n].size = e->p_filesz;
             head->multiblocks[n].align = e->p_align;
             uint8_t prot = ((e->p_flags & PF_R)?PROT_READ:0)|((e->p_flags & PF_W)?PROT_WRITE:0)|((e->p_flags & PF_X)?PROT_EXEC:0);
-#if defined(PAGE8K) || defined(PAGE16K) || defined(PAGE64K)
-            head->multiblocks[n].p = NULL;
-            if(e->p_filesz) {
-                fseeko64(head->file, head->multiblocks[n].offs, SEEK_SET);
-                if(fread((void*)head->multiblocks[n].paddr, head->multiblocks[n].size, 1, head->file)!=1) {
-                    printf_log(LOG_NONE, "Cannot read elf block (@%p 0x%zx) for elf \"%s\"\n", (void*)head->multiblocks[n].offs, head->multiblocks[n].asize, head->name);
-                    return 1;
-                }
-            }
-#else //PAGE4K
             // check if alignment is correct
             uintptr_t balign = head->multiblocks[n].align-1;
-            if(balign<(box64_pagesize-1)) balign = (box64_pagesize-1);
-            head->multiblocks[n].asize = ALIGN(e->p_memsz+(e->p_paddr&balign));
+            if(balign<4095) balign = 4095;
+            head->multiblocks[n].asize = (e->p_memsz+(e->p_paddr&balign)+4095)&~4095;
             int try_mmap = 1;
             if(e->p_paddr&balign)
                 try_mmap = 0;
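For reference, the rounding expression introduced above, (x + 4095) & ~4095, rounds x up to the next multiple of 4096, i.e. to the guest's fixed 4 KiB page, independent of the host page size (which the rest of the patch appears to handle through ALIGN() and box64_pagesize). A tiny self-check; the helper name round_up_4k is illustrative:

    #include <assert.h>
    #include <stddef.h>

    // Round x up to the next multiple of 4096 (guest 4 KiB pages).
    size_t round_up_4k(size_t x)
    {
        return (x + 4095) & ~(size_t)4095;
    }

    int main(void)
    {
        assert(round_up_4k(0)    == 0);
        assert(round_up_4k(1)    == 4096);
        assert(round_up_4k(4096) == 4096);
        assert(round_up_4k(4097) == 8192);
        return 0;
    }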
@@ -290,8 +263,10 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin)
                 try_mmap = 0;
             if(!e->p_filesz)
                 try_mmap = 0;
+            if(e->p_align<box64_pagesize)
+                try_mmap = 0;
             if(try_mmap) {
-                printf_log(log_level, "Mmaping 0x%lx(0x%lx) bytes @%p for Elf \"%s\"\n", head->multiblocks[n].size, head->multiblocks[n].asize, (void*)head->multiblocks[n].paddr, head->name);
+                printf_dump(log_level, "Mmaping 0x%lx(0x%lx) bytes @%p for Elf \"%s\"\n", head->multiblocks[n].size, head->multiblocks[n].asize, (void*)head->multiblocks[n].paddr, head->name);
                 void* p = mmap64(
                     (void*)head->multiblocks[n].paddr,
                     head->multiblocks[n].size,
@@ -302,7 +277,7 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin)
                 );
                 if(p==MAP_FAILED || p!=(void*)head->multiblocks[n].paddr) {
                     try_mmap = 0;
-                    printf_log(log_level, "Mapping failed, using regular mmap+read");
+                    printf_dump(log_level, "Mapping failed, using regular mmap+read");
                 } else {
                     if(e->p_memsz>e->p_filesz)
                         memset((void*)((uintptr_t)p+e->p_filesz), 0, e->p_memsz-e->p_filesz);
@@ -314,15 +289,43 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin)
             if(!try_mmap) {
                 uintptr_t paddr = head->multiblocks[n].paddr&~balign;
                 size_t asize = head->multiblocks[n].asize;
-                printf_log(log_level, "Allocating 0x%zx (0x%zx) bytes @%p, will read 0x%zx @%p for Elf \"%s\"\n", asize, e->p_memsz, (void*)paddr, e->p_filesz, (void*)head->multiblocks[n].paddr, head->name);
-                void* p = mmap64(
-                    (void*)paddr,
-                    asize,
-                    prot|PROT_WRITE,
-                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
-                    -1,
-                    0
-                );
+                void* p = MAP_FAILED;
+                if(paddr==(paddr&~(box64_pagesize-1)) && (asize==ALIGN(asize))) {
+                    printf_dump(log_level, "Allocating 0x%zx (0x%zx) bytes @%p, will read 0x%zx @%p for Elf \"%s\"\n", asize, e->p_memsz, (void*)paddr, e->p_filesz, (void*)head->multiblocks[n].paddr, head->name);
+                    p = mmap64(
+                        (void*)paddr,
+                        asize,
+                        prot|PROT_WRITE,
+                        MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                        -1,
+                        0
+                    );
+                } else {
+                    // difference in pagesize, so need to mmap only what needed to be...
+                    //check startint point
+                    uintptr_t new_addr = paddr;
+                    ssize_t new_size = asize;
+                    while(getProtection(new_addr) && (new_size>0)) {
+                        new_size -= ALIGN(new_addr) - new_addr;
+                        new_addr = ALIGN(new_addr);
+                    }
+                    if(new_size>0) {
+                        printf_dump(log_level, "Allocating 0x%zx (0x%zx) bytes @%p, will read 0x%zx @%p for Elf \"%s\"\n", ALIGN(new_size), e->p_memsz, (void*)new_addr, e->p_filesz, (void*)head->multiblocks[n].paddr, head->name);
+                        p = mmap64(
+                            (void*)new_addr,
+                            ALIGN(new_size),
+                            prot|PROT_WRITE,
+                            MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                            -1,
+                            0
+                        );
+                        if(p==(void*)new_addr)
+                            p = (void*)paddr;
+                    } else {
+                        p = (void*)paddr;
+                        printf_dump(log_level, "Will read 0x%zx @%p for Elf \"%s\"\n", e->p_filesz, (void*)head->multiblocks[n].paddr, head->name);
+                    }
+                }
                 if(p==MAP_FAILED || p!=(void*)paddr) {
                     printf_log(LOG_NONE, "Cannot create memory map (@%p 0x%zx/0x%zx) for elf \"%s\"", (void*)paddr, asize, balign, head->name);
                     if(p==MAP_FAILED) {
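The new else branch above is the fallback for hosts whose page size differs from the guest's 4 KiB: when the block's start or size is not aligned to the host page, it first skips host pages that are seemingly already mapped (getProtection() returning non-zero), then mmaps only the host-page-aligned remainder, or skips the mmap entirely and just reads from the file once everything is covered. A stand-alone sketch of that skip-then-map computation, assuming the caller supplies the host page size and an is_mapped() predicate; the function and parameter names are illustrative, not box64 APIs:

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/types.h>

    // Round addr up to the next multiple of page_size (a power of two).
    uintptr_t align_up(uintptr_t addr, uintptr_t page_size)
    {
        return (addr + page_size - 1) & ~(page_size - 1);
    }

    // For a segment [start, start+size), skip leading host pages that
    // is_mapped() reports as already mapped. Returns the address where a
    // fresh mmap should begin; *remaining receives the bytes still to map
    // (0 means the range is fully covered and only a file read is needed).
    uintptr_t skip_mapped_prefix(uintptr_t start, size_t size,
                                 uintptr_t page_size,
                                 int (*is_mapped)(uintptr_t),
                                 size_t* remaining)
    {
        uintptr_t addr = start;
        ssize_t left = (ssize_t)size;
        while (left > 0 && is_mapped(addr)) {
            uintptr_t next = align_up(addr + 1, page_size); // always advance
            left -= (ssize_t)(next - addr);
            addr = next;
        }
        *remaining = (left > 0) ? (size_t)left : 0;
        return addr;
    }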
@@ -341,10 +344,9 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin)
                         return 1;
                     }
                 }
-                if(!(prot&PROT_WRITE))
+                if(!(prot&PROT_WRITE) && (paddr==(paddr&(box64_pagesize-1)) && (asize==ALIGN(asize))))
                     mprotect((void*)paddr, asize, prot);
             }
-#endif //PAGE4K
 #ifdef DYNAREC
             if(box64_dynarec && (e->p_flags & PF_X)) {
                 dynarec_log(LOG_DEBUG, "Add ELF eXecutable Memory %p:%p\n", head->multiblocks[n].p, (void*)head->multiblocks[n].asize);
@@ -1430,20 +1432,13 @@ int IsAddressInElfSpace(const elfheader_t* h, uintptr_t addr)
 {
     if(!h)
         return 0;
-#if defined(PAGE8K) || defined(PAGE16K) || defined(PAGE64K)
-    uintptr_t base = (uintptr_t)h->image;
-    uintptr_t end = base + h->memsz;
-    if(base && addr>=base && addr<=end)
-        return 1;
-#else //PAGE4K
     for(int i=0; i<h->multiblock_n; ++i) {
         uintptr_t base = (uintptr_t)h->multiblocks[i].p;
         uintptr_t end = (uintptr_t)h->multiblocks[i].p + h->multiblocks[i].asize - 1;
         if(base && addr>=base && addr<=end)
             return 1;
 
     }
-#endif //PAGE4K
     return 0;
 }
 elfheader_t* FindElfAddress(box64context_t* context, uintptr_t addr)