Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion Include/internal/pycore_obmalloc.h
Original file line number Diff line number Diff line change
Expand Up @@ -691,7 +691,11 @@ struct _obmalloc_state {


/* Allocate memory directly from the O/S virtual memory system,
* where supported. Otherwise fallback on malloc */
 * where supported. Otherwise fall back on malloc.
*
* Large-page and huge-page backends may round the mapped size up
* internally, so pass the original requested size back to
* _PyObject_VirtualFree(). */
void *_PyObject_VirtualAlloc(size_t size);
void _PyObject_VirtualFree(void *, size_t size);

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
Fix a bug that could cause applications with specific allocation patterns to
leak memory via Huge Pages if compiled with Huge Page support. Patch by
Pablo Galindo.
60 changes: 57 additions & 3 deletions Objects/obmalloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
#include <stdlib.h> // malloc()
#include <stdbool.h>
#include <stdio.h> // fopen(), fgets(), sscanf()
#include <errno.h> // errno
#ifdef WITH_MIMALLOC
// Forward declarations of functions used in our mimalloc modifications
static void _PyMem_mi_page_clear_qsbr(mi_page_t *page);
Expand Down Expand Up @@ -572,6 +573,49 @@ _pymalloc_system_hugepage_size(void)
}
#endif

#if (defined(MS_WINDOWS) && defined(PYMALLOC_USE_HUGEPAGES)) || \
(defined(PYMALLOC_USE_HUGEPAGES) && defined(ARENAS_USE_MMAP) && defined(MAP_HUGETLB))
/* Round `size` up to the nearest multiple of `multiple`.
 *
 * A zero `size` or zero `multiple` is passed through unchanged.  If the
 * rounded value would not fit in a size_t, 0 is returned so the caller
 * can detect the overflow. */
static size_t
_pymalloc_round_up_to_multiple(size_t size, size_t multiple)
{
    if (size == 0 || multiple == 0) {
        return size;
    }

    size_t excess = size % multiple;
    if (excess == 0) {
        /* Already aligned. */
        return size;
    }

    size_t needed = multiple - excess;
    /* size + needed would wrap around: signal failure with 0. */
    return (needed > SIZE_MAX - size) ? 0 : size + needed;
}
#endif

/* Return the number of bytes the arena allocator will actually map for a
 * `size`-byte virtual allocation.
 *
 * When huge/large pages are enabled at runtime, the request is rounded up
 * to the platform's large-page granularity so alloc and free agree on the
 * mapped length.  A return of 0 (with size != 0) means the rounding
 * overflowed size_t.
 * NOTE(review): assumes _PyRuntime.allocators.use_hugepages is settled
 * before the first virtual allocation -- confirm initialization order. */
static size_t
_pymalloc_virtual_alloc_size(size_t size)
{
#if defined(MS_WINDOWS) && defined(PYMALLOC_USE_HUGEPAGES)
    if (_PyRuntime.allocators.use_hugepages) {
        size_t granularity = (size_t)GetLargePageMinimum();
        if (granularity != 0) {
            size = _pymalloc_round_up_to_multiple(size, granularity);
        }
    }
#elif defined(PYMALLOC_USE_HUGEPAGES) && defined(ARENAS_USE_MMAP) && defined(MAP_HUGETLB)
    if (_PyRuntime.allocators.use_hugepages) {
        size_t granularity = _pymalloc_system_hugepage_size();
        if (granularity != 0) {
            size = _pymalloc_round_up_to_multiple(size, granularity);
        }
    }
#endif
    return size;
}

void *
_PyMem_ArenaAlloc(void *Py_UNUSED(ctx), size_t size)
{
Expand Down Expand Up @@ -648,7 +692,11 @@ _PyMem_ArenaFree(void *Py_UNUSED(ctx), void *ptr,
if (ptr == NULL) {
return;
}
munmap(ptr, size);
if (munmap(ptr, size) < 0) {
_Py_FatalErrorFormat(__func__,
"munmap(%p, %zu) failed with errno %d",
ptr, size, errno);
}
#else
free(ptr);
#endif
Expand Down Expand Up @@ -1128,13 +1176,19 @@ PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
/* Allocate `size` bytes directly from the O/S virtual memory system via
 * the arena allocator.
 *
 * The request is first passed through _pymalloc_virtual_alloc_size() so
 * that, when huge/large pages are enabled, the mapped length is rounded
 * up to the page granularity and _PyObject_VirtualFree() can recompute
 * the same length from the caller's original `size`.
 *
 * Returns NULL if the rounding overflows size_t or the underlying
 * allocation fails. */
void *
_PyObject_VirtualAlloc(size_t size)
{
    size_t alloc_size = _pymalloc_virtual_alloc_size(size);
    if (alloc_size == 0 && size != 0) {
        /* Rounding overflowed size_t: treat as out of memory. */
        return NULL;
    }
    return _PyObject_Arena.alloc(_PyObject_Arena.ctx, alloc_size);
}

/* Release memory obtained from _PyObject_VirtualAlloc().
 *
 * `size` must be the size originally requested from
 * _PyObject_VirtualAlloc(); it is re-rounded here so the arena
 * allocator is handed the exact length that was actually mapped
 * (munmap() on huge-page mappings requires the full mapped length). */
void
_PyObject_VirtualFree(void *obj, size_t size)
{
    size_t alloc_size = _pymalloc_virtual_alloc_size(size);
    /* The rounding can only overflow for a size whose allocation already
     * failed, so it cannot trip for pointers we handed out. */
    assert(alloc_size != 0 || size == 0);
    _PyObject_Arena.free(_PyObject_Arena.ctx, obj, alloc_size);
}


Expand Down
Loading