PostgreSQL Source Code git master
shmem.c
/*-------------------------------------------------------------------------
 *
 * shmem.c
 *    create shared memory and initialize shared memory data structures.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/storage/ipc/shmem.c
 *
 *-------------------------------------------------------------------------
 */
/*
 * POSTGRES processes share one or more regions of shared memory.
 * The shared memory is created by a postmaster and is inherited
 * by each backend via fork() (or, in some ports, via other OS-specific
 * methods). The routines in this file are used for allocating and
 * binding to shared memory data structures.
 *
 * NOTES:
 *    (a) There are three kinds of shared memory data structures
 * available to POSTGRES: fixed-size structures, queues and hash
 * tables. Fixed-size structures contain things like global variables
 * for a module and should never be allocated after the shared memory
 * initialization phase. Hash tables have a fixed maximum size, but
 * their actual size can vary dynamically. When entries are added
 * to the table, more space is allocated. Queues link data structures
 * that have been allocated either within fixed-size structures or as hash
 * buckets. Each shared data structure has a string name to identify
 * it (assigned in the module that declares it).
 *
 *    (b) During initialization, each module looks for its
 * shared data structures in a hash table called the "Shmem Index".
 * If the data structure is not present, the caller can allocate
 * a new one and initialize it. If the data structure is present,
 * the caller "attaches" to the structure by initializing a pointer
 * in the local address space.
 * The shmem index has two purposes: first, it gives us
 * a simple model of how the world looks when a backend process
 * initializes. If something is present in the shmem index,
 * it is initialized. If it is not, it is uninitialized. Second,
 * the shmem index allows us to allocate shared memory on demand
 * instead of trying to preallocate structures and hard-wire the
 * sizes and locations in header files. If you are using a lot
 * of shared memory in a lot of different places (and changing
 * things during development), this is important.
 *
 *    (c) In standard Unix-ish environments, individual backends do not
 * need to re-establish their local pointers into shared memory, because
 * they inherit correct values of those variables via fork() from the
 * postmaster. However, this does not work in the EXEC_BACKEND case.
 * In ports using EXEC_BACKEND, new backends have to set up their local
 * pointers using the method described in (b) above.
 *
 *    (d) memory allocation model: shared memory can never be
 * freed, once allocated. Each hash table has its own free list,
 * so hash buckets can be reused when an item is deleted. However,
 * if one hash table grows very large and then shrinks, its space
 * cannot be redistributed to other tables. We could build a simple
 * hash bucket garbage collector if need be. Right now, it seems
 * unnecessary.
 */

#include "postgres.h"

#include "fmgr.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "port/pg_numa.h"
#include "storage/lwlock.h"
#include "storage/pg_shmem.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "utils/builtins.h"

static void *ShmemAllocRaw(Size size, Size *allocated_size);
static void *ShmemAllocUnlocked(Size size);

/* shared memory global variables */

static PGShmemHeader *ShmemSegHdr;  /* shared mem segment header */

static void *ShmemBase;             /* start address of shared memory */

static void *ShmemEnd;              /* end+1 address of shared memory */

slock_t *ShmemLock;                 /* spinlock for shared memory and LWLock
                                     * allocation */

static HTAB *ShmemIndex = NULL;     /* primary index hashtable for shmem */

/* To get reliable results for NUMA inquiry we need to "touch pages" once */
static bool firstNumaTouch = true;

/*
 * InitShmemAccess() --- set up basic pointers to shared memory.
 */
void
InitShmemAccess(PGShmemHeader *seghdr)
{
    ShmemSegHdr = seghdr;
    ShmemBase = seghdr;
    ShmemEnd = (char *) ShmemBase + seghdr->totalsize;
}

/*
 * InitShmemAllocation() --- set up shared-memory space allocation.
 *
 * This should be called only in the postmaster or a standalone backend.
 */
void
InitShmemAllocation(void)
{
    PGShmemHeader *shmhdr = ShmemSegHdr;
    char *aligned;

    Assert(shmhdr != NULL);

    /*
     * Initialize the spinlock used by ShmemAlloc. We must use
     * ShmemAllocUnlocked, since obviously ShmemAlloc can't be called yet.
     */
    ShmemLock = (slock_t *) ShmemAllocUnlocked(sizeof(slock_t));

    SpinLockInit(ShmemLock);

    /*
     * Allocations after this point should go through ShmemAlloc, which
     * expects to allocate everything on cache line boundaries. Make sure the
     * first allocation begins on a cache line boundary.
     */
    aligned = (char *)
        (CACHELINEALIGN((((char *) shmhdr) + shmhdr->freeoffset)));
    shmhdr->freeoffset = aligned - (char *) shmhdr;

    /* ShmemIndex can't be set up yet (need LWLocks first) */
    shmhdr->index = NULL;
    ShmemIndex = (HTAB *) NULL;
}

/*
 * ShmemAlloc -- allocate max-aligned chunk from shared memory
 *
 * Throws error if request cannot be satisfied.
 *
 * Assumes ShmemLock and ShmemSegHdr are initialized.
 */
void *
ShmemAlloc(Size size)
{
    void *newSpace;
    Size allocated_size;

    newSpace = ShmemAllocRaw(size, &allocated_size);
    if (!newSpace)
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of shared memory (%zu bytes requested)",
                        size)));
    return newSpace;
}

/*
 * ShmemAllocNoError -- allocate max-aligned chunk from shared memory
 *
 * As ShmemAlloc, but returns NULL if out of space, rather than erroring.
 */
void *
ShmemAllocNoError(Size size)
{
    Size allocated_size;

    return ShmemAllocRaw(size, &allocated_size);
}

/*
 * ShmemAllocRaw -- allocate align chunk and return allocated size
 *
 * Also sets *allocated_size to the number of bytes allocated, which will
 * be equal to the number requested plus any padding we choose to add.
 */
static void *
ShmemAllocRaw(Size size, Size *allocated_size)
{
    Size newStart;
    Size newFree;
    void *newSpace;

    /*
     * Ensure all space is adequately aligned. We used to only MAXALIGN this
     * space but experience has proved that on modern systems that is not good
     * enough. Many parts of the system are very sensitive to critical data
     * structures getting split across cache line boundaries. To avoid that,
     * attempt to align the beginning of the allocation to a cache line
     * boundary. The calling code will still need to be careful about how it
     * uses the allocated space - e.g. by padding each element in an array of
     * structures out to a power-of-two size - but without this, even that
     * won't be sufficient.
     */
    size = CACHELINEALIGN(size);
    *allocated_size = size;

    Assert(ShmemSegHdr != NULL);

    SpinLockAcquire(ShmemLock);

    newStart = ShmemSegHdr->freeoffset;

    newFree = newStart + size;
    if (newFree <= ShmemSegHdr->totalsize)
    {
        newSpace = (char *) ShmemBase + newStart;
        ShmemSegHdr->freeoffset = newFree;
    }
    else
        newSpace = NULL;

    SpinLockRelease(ShmemLock);

    /* note this assert is okay with newSpace == NULL */
    Assert(newSpace == (void *) CACHELINEALIGN(newSpace));

    return newSpace;
}
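
/*
 * Illustrative sketch, not part of shmem.c: the comment in ShmemAllocRaw()
 * above notes that callers carving an array out of a single shared
 * allocation should pad each element to a power-of-two size so that
 * elements do not straddle cache lines.  The struct and field names below
 * are hypothetical; 64 bytes is assumed as a typical cache line size.
 */
typedef struct MyPerBackendEntry
{
    int         someCounter;
    int         someFlag;
    char        padding[64 - 2 * sizeof(int)];  /* pad out to 64 bytes */
} MyPerBackendEntry;

StaticAssertDecl(sizeof(MyPerBackendEntry) == 64,
                 "MyPerBackendEntry must remain exactly one cache line");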

/*
 * ShmemAllocUnlocked -- allocate max-aligned chunk from shared memory
 *
 * Allocate space without locking ShmemLock. This should be used for,
 * and only for, allocations that must happen before ShmemLock is ready.
 *
 * We consider maxalign, rather than cachealign, sufficient here.
 */
static void *
ShmemAllocUnlocked(Size size)
{
    Size newStart;
    Size newFree;
    void *newSpace;

    /*
     * Ensure allocated space is adequately aligned.
     */
    size = MAXALIGN(size);

    Assert(ShmemSegHdr != NULL);

    newStart = ShmemSegHdr->freeoffset;

    newFree = newStart + size;
    if (newFree > ShmemSegHdr->totalsize)
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of shared memory (%zu bytes requested)",
                        size)));
    ShmemSegHdr->freeoffset = newFree;

    newSpace = (char *) ShmemBase + newStart;

    Assert(newSpace == (void *) MAXALIGN(newSpace));

    return newSpace;
}

/*
 * ShmemAddrIsValid -- test if an address refers to shared memory
 *
 * Returns true if the pointer points within the shared memory segment.
 */
bool
ShmemAddrIsValid(const void *addr)
{
    return (addr >= ShmemBase) && (addr < ShmemEnd);
}

/*
 * InitShmemIndex() --- set up or attach to shmem index table.
 */
void
InitShmemIndex(void)
{
    HASHCTL info;

    /*
     * Create the shared memory shmem index.
     *
     * Since ShmemInitHash calls ShmemInitStruct, which expects the ShmemIndex
     * hashtable to exist already, we have a bit of a circularity problem in
     * initializing the ShmemIndex itself. The special "ShmemIndex" hash
     * table name will tell ShmemInitStruct to fake it.
     */
    info.keysize = SHMEM_INDEX_KEYSIZE;
    info.entrysize = sizeof(ShmemIndexEnt);

    ShmemIndex = ShmemInitHash("ShmemIndex",
                               SHMEM_INDEX_SIZE, SHMEM_INDEX_SIZE,
                               &info,
                               HASH_ELEM | HASH_STRINGS);
}

/*
 * ShmemInitHash -- Create and initialize, or attach to, a
 *      shared memory hash table.
 *
 * We assume caller is doing some kind of synchronization
 * so that two processes don't try to create/initialize the same
 * table at once. (In practice, all creations are done in the postmaster
 * process; child processes should always be attaching to existing tables.)
 *
 * max_size is the estimated maximum number of hashtable entries. This is
 * not a hard limit, but the access efficiency will degrade if it is
 * exceeded substantially (since it's used to compute directory size and
 * the hash table buckets will get overfull).
 *
 * init_size is the number of hashtable entries to preallocate. For a table
 * whose maximum size is certain, this should be equal to max_size; that
 * ensures that no run-time out-of-shared-memory failures can occur.
 *
 * *infoP and hash_flags must specify at least the entry sizes and key
 * comparison semantics (see hash_create()). Flag bits and values specific
 * to shared-memory hash tables are added here, except that callers may
 * choose to specify HASH_PARTITION and/or HASH_FIXED_SIZE.
 *
 * Note: before Postgres 9.0, this function returned NULL for some failure
 * cases. Now, it always throws error instead, so callers need not check
 * for NULL.
 */
HTAB *
ShmemInitHash(const char *name,     /* table string name for shmem index */
              int64 init_size,      /* initial table size */
              int64 max_size,       /* max size of the table */
              HASHCTL *infoP,       /* info about key and bucket size */
              int hash_flags)       /* info about infoP */
{
    bool found;
    void *location;

    /*
     * Hash tables allocated in shared memory have a fixed directory; it can't
     * grow or other backends wouldn't be able to find it. So, make sure we
     * make it big enough to start with.
     *
     * The shared memory allocator must be specified too.
     */
    infoP->dsize = infoP->max_dsize = hash_select_dirsize(max_size);
    infoP->alloc = ShmemAllocNoError;
    hash_flags |= HASH_SHARED_MEM | HASH_ALLOC | HASH_DIRSIZE;

    /* look it up in the shmem index */
    location = ShmemInitStruct(name,
                               hash_get_shared_size(infoP, hash_flags),
                               &found);

    /*
     * if it already exists, attach to it rather than allocate and initialize
     * new space
     */
    if (found)
        hash_flags |= HASH_ATTACH;

    /* Pass location of hashtable header to hash_create */
    infoP->hctl = (HASHHDR *) location;

    return hash_create(name, init_size, infoP, hash_flags);
}
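
/*
 * Illustrative sketch, not part of shmem.c: creating (or attaching to) a
 * shared hash table with ShmemInitHash() as described above.  The entry
 * type, table name and sizes are hypothetical; HASH_ELEM | HASH_BLOBS
 * requests memcmp-style comparison of a fixed-size binary key.
 */
typedef struct MyHashEntry
{
    Oid         key;            /* hash key; must be the first field */
    int         value;
} MyHashEntry;

static HTAB *MyHash = NULL;

static void
MyHashShmemInit(void)
{
    HASHCTL     info;

    info.keysize = sizeof(Oid);
    info.entrysize = sizeof(MyHashEntry);

    /* postmaster creates the table; backends simply attach to it */
    MyHash = ShmemInitHash("My Shared Hash",
                           128,     /* init_size */
                           1024,    /* max_size */
                           &info,
                           HASH_ELEM | HASH_BLOBS);
}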

/*
 * ShmemInitStruct -- Create/attach to a structure in shared memory.
 *
 * This is called during initialization to find or allocate
 * a data structure in shared memory. If no other process
 * has created the structure, this routine allocates space
 * for it. If it exists already, a pointer to the existing
 * structure is returned.
 *
 * Returns: pointer to the object. *foundPtr is set true if the object was
 * already in the shmem index (hence, already initialized).
 *
 * Note: before Postgres 9.0, this function returned NULL for some failure
 * cases. Now, it always throws error instead, so callers need not check
 * for NULL.
 */
void *
ShmemInitStruct(const char *name, Size size, bool *foundPtr)
{
    ShmemIndexEnt *result;
    void *structPtr;

    LWLockAcquire(ShmemIndexLock, LW_EXCLUSIVE);

    if (!ShmemIndex)
    {
        PGShmemHeader *shmemseghdr = ShmemSegHdr;

        /* Must be trying to create/attach to ShmemIndex itself */
        Assert(strcmp(name, "ShmemIndex") == 0);

        if (IsUnderPostmaster)
        {
            /* Must be initializing a (non-standalone) backend */
            Assert(shmemseghdr->index != NULL);
            structPtr = shmemseghdr->index;
            *foundPtr = true;
        }
        else
        {
            /*
             * If the shmem index doesn't exist, we are bootstrapping: we must
             * be trying to init the shmem index itself.
             *
             * Notice that the ShmemIndexLock is released before the shmem
             * index has been initialized. This should be OK because no other
             * process can be accessing shared memory yet.
             */
            Assert(shmemseghdr->index == NULL);
            structPtr = ShmemAlloc(size);
            shmemseghdr->index = structPtr;
            *foundPtr = false;
        }
        LWLockRelease(ShmemIndexLock);
        return structPtr;
    }

    /* look it up in the shmem index */
    result = (ShmemIndexEnt *)
        hash_search(ShmemIndex, name, HASH_ENTER_NULL, foundPtr);

    if (!result)
    {
        LWLockRelease(ShmemIndexLock);
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("could not create ShmemIndex entry for data structure \"%s\"",
                        name)));
    }

    if (*foundPtr)
    {
        /*
         * Structure is in the shmem index so someone else has allocated it
         * already. The size better be the same as the size we are trying to
         * initialize to, or there is a name conflict (or worse).
         */
        if (result->size != size)
        {
            LWLockRelease(ShmemIndexLock);
            ereport(ERROR,
                    (errmsg("ShmemIndex entry size is wrong for data structure"
                            " \"%s\": expected %zu, actual %zu",
                            name, size, result->size)));
        }
        structPtr = result->location;
    }
    else
    {
        Size allocated_size;

        /* It isn't in the table yet. allocate and initialize it */
        structPtr = ShmemAllocRaw(size, &allocated_size);
        if (structPtr == NULL)
        {
            /* out of memory; remove the failed ShmemIndex entry */
            hash_search(ShmemIndex, name, HASH_REMOVE, NULL);
            LWLockRelease(ShmemIndexLock);
            ereport(ERROR,
                    (errcode(ERRCODE_OUT_OF_MEMORY),
                     errmsg("not enough shared memory for data structure"
                            " \"%s\" (%zu bytes requested)",
                            name, size)));
        }
        result->size = size;
        result->allocated_size = allocated_size;
        result->location = structPtr;
    }

    LWLockRelease(ShmemIndexLock);

    Assert(ShmemAddrIsValid(structPtr));

    Assert(structPtr == (void *) CACHELINEALIGN(structPtr));

    return structPtr;
}
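
/*
 * Illustrative sketch, not part of shmem.c: the create-or-attach idiom
 * that callers of ShmemInitStruct() typically follow (see also note (b) in
 * the header comment).  "MyModule", its state struct and the init function
 * are hypothetical; the first process to run this code allocates and
 * initializes the structure, while later processes merely attach to it.
 */
typedef struct MyModuleSharedState
{
    slock_t     mutex;
    int         counter;
} MyModuleSharedState;

static MyModuleSharedState *MyModuleShared = NULL;

static void
MyModuleShmemInit(void)
{
    bool        found;

    MyModuleShared = (MyModuleSharedState *)
        ShmemInitStruct("MyModule Shared State",
                        sizeof(MyModuleSharedState),
                        &found);
    if (!found)
    {
        /* First time through: initialize the freshly allocated structure */
        SpinLockInit(&MyModuleShared->mutex);
        MyModuleShared->counter = 0;
    }
}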


/*
 * Add two Size values, checking for overflow
 */
Size
add_size(Size s1, Size s2)
{
    Size result;

    result = s1 + s2;
    /* We are assuming Size is an unsigned type here... */
    if (result < s1 || result < s2)
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("requested shared memory size overflows size_t")));
    return result;
}

/*
 * Multiply two Size values, checking for overflow
 */
Size
mul_size(Size s1, Size s2)
{
    Size result;

    if (s1 == 0 || s2 == 0)
        return 0;
    result = s1 * s2;
    /* We are assuming Size is an unsigned type here... */
    if (result / s2 != s1)
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("requested shared memory size overflows size_t")));
    return result;
}
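
/*
 * Illustrative sketch, not part of shmem.c: shared-memory size estimates
 * are conventionally built up with add_size()/mul_size() so that any
 * overflow raises an error instead of silently wrapping.  The function
 * below and the struct types (from the sketches above) are hypothetical.
 */
static Size
MyModuleShmemSize(int nbackends)
{
    Size        size = 0;

    size = add_size(size, sizeof(MyModuleSharedState));
    size = add_size(size, mul_size(nbackends, sizeof(MyPerBackendEntry)));

    return size;
}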

/* SQL SRF showing allocated shared memory */
Datum
pg_get_shmem_allocations(PG_FUNCTION_ARGS)
{
#define PG_GET_SHMEM_SIZES_COLS 4
    ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    HASH_SEQ_STATUS hstat;
    ShmemIndexEnt *ent;
    Size named_allocated = 0;
    Datum values[PG_GET_SHMEM_SIZES_COLS];
    bool nulls[PG_GET_SHMEM_SIZES_COLS];

    InitMaterializedSRF(fcinfo, 0);

    LWLockAcquire(ShmemIndexLock, LW_SHARED);

    hash_seq_init(&hstat, ShmemIndex);

    /* output all allocated entries */
    memset(nulls, 0, sizeof(nulls));
    while ((ent = (ShmemIndexEnt *) hash_seq_search(&hstat)) != NULL)
    {
        values[0] = CStringGetTextDatum(ent->key);
        values[1] = Int64GetDatum((char *) ent->location - (char *) ShmemSegHdr);
        values[2] = Int64GetDatum(ent->size);
        values[3] = Int64GetDatum(ent->allocated_size);
        named_allocated += ent->allocated_size;

        tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
                             values, nulls);
    }

    /* output shared memory allocated but not counted via the shmem index */
    values[0] = CStringGetTextDatum("<anonymous>");
    nulls[1] = true;
    values[2] = Int64GetDatum(ShmemSegHdr->freeoffset - named_allocated);
    values[3] = values[2];
    tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);

    /* output as-of-yet unused shared memory */
    nulls[0] = true;
    values[1] = Int64GetDatum(ShmemSegHdr->freeoffset);
    nulls[1] = false;
    values[2] = Int64GetDatum(ShmemSegHdr->totalsize - ShmemSegHdr->freeoffset);
    values[3] = values[2];
    tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);

    LWLockRelease(ShmemIndexLock);

    return (Datum) 0;
}

/*
 * SQL SRF showing NUMA memory nodes for allocated shared memory
 *
 * Compared to pg_get_shmem_allocations(), this function does not return
 * information about shared anonymous allocations and unused shared memory.
 */
Datum
pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
{
#define PG_GET_SHMEM_NUMA_SIZES_COLS 3
    ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    HASH_SEQ_STATUS hstat;
    ShmemIndexEnt *ent;
    Datum values[PG_GET_SHMEM_NUMA_SIZES_COLS];
    bool nulls[PG_GET_SHMEM_NUMA_SIZES_COLS];
    Size os_page_size;
    void **page_ptrs;
    int *pages_status;
    uint64 shm_total_page_count,
           shm_ent_page_count,
           max_nodes;
    Size *nodes;

    if (pg_numa_init() == -1)
        elog(ERROR, "libnuma initialization failed or NUMA is not supported on this platform");

    InitMaterializedSRF(fcinfo, 0);

    max_nodes = pg_numa_get_max_node();
    nodes = palloc(sizeof(Size) * (max_nodes + 1));

    /*
     * Shared memory allocations can vary in size and may not align with OS
     * memory page boundaries, while NUMA queries work on pages.
     *
     * To correctly map each allocation to NUMA nodes, we need to: 1.
     * Determine the OS memory page size. 2. Align each allocation's start/end
     * addresses to page boundaries. 3. Query NUMA node information for all
     * pages spanning the allocation.
     */
    os_page_size = pg_get_shmem_pagesize();

    /*
     * Allocate memory for page pointers and status based on total shared
     * memory size. This simplified approach allocates enough space for all
     * pages in shared memory rather than calculating the exact requirements
     * for each segment.
     *
     * Add 1, because we don't know how exactly the segments align to OS
     * pages, so the allocation might use one more memory page. In practice
     * this is not very likely, and moreover we have more entries, each of
     * them using only fraction of the total pages.
     */
    shm_total_page_count = (ShmemSegHdr->totalsize / os_page_size) + 1;
    page_ptrs = palloc0(sizeof(void *) * shm_total_page_count);
    pages_status = palloc(sizeof(int) * shm_total_page_count);

    if (firstNumaTouch)
        elog(DEBUG1, "NUMA: page-faulting shared memory segments for proper NUMA readouts");

    LWLockAcquire(ShmemIndexLock, LW_SHARED);

    hash_seq_init(&hstat, ShmemIndex);

    /* output all allocated entries */
    memset(nulls, 0, sizeof(nulls));
    while ((ent = (ShmemIndexEnt *) hash_seq_search(&hstat)) != NULL)
    {
        int i;
        char *startptr,
             *endptr;
        Size total_len;

        /*
         * Calculate the range of OS pages used by this segment. The segment
         * may start / end half-way through a page, we want to count these
         * pages too. So we align the start/end pointers down/up, and then
         * calculate the number of pages from that.
         */
        startptr = (char *) TYPEALIGN_DOWN(os_page_size, ent->location);
        endptr = (char *) TYPEALIGN(os_page_size,
                                    (char *) ent->location + ent->allocated_size);
        total_len = (endptr - startptr);

        shm_ent_page_count = total_len / os_page_size;

        /*
         * If we ever get 0xff (-1) back from kernel inquiry, then we probably
         * have a bug in mapping buffers to OS pages.
         */
        memset(pages_status, 0xff, sizeof(int) * shm_ent_page_count);

        /*
         * Setup page_ptrs[] with pointers to all OS pages for this segment,
         * and get the NUMA status using pg_numa_query_pages.
         *
         * In order to get reliable results we also need to touch memory
         * pages, so that inquiry about NUMA memory node doesn't return -2
         * (ENOENT, which indicates unmapped/unallocated pages).
         */
        for (i = 0; i < shm_ent_page_count; i++)
        {
            page_ptrs[i] = startptr + (i * os_page_size);

            if (firstNumaTouch)
                pg_numa_touch_mem_if_required(page_ptrs[i]);

            CHECK_FOR_INTERRUPTS();
        }

        if (pg_numa_query_pages(0, shm_ent_page_count, page_ptrs, pages_status) == -1)
            elog(ERROR, "failed NUMA pages inquiry status: %m");

        /* Count number of NUMA nodes used for this shared memory entry */
        memset(nodes, 0, sizeof(Size) * (max_nodes + 1));

        for (i = 0; i < shm_ent_page_count; i++)
        {
            int s = pages_status[i];

            /* Ensure we are adding only valid index to the array */
            if (s < 0 || s > max_nodes)
            {
                elog(ERROR, "invalid NUMA node id outside of allowed range "
                     "[0, " UINT64_FORMAT "]: %d", max_nodes, s);
            }

            nodes[s]++;
        }

        /*
         * Add one entry for each NUMA node, including those without allocated
         * memory for this segment.
         */
        for (i = 0; i <= max_nodes; i++)
        {
            values[0] = CStringGetTextDatum(ent->key);
            values[1] = Int32GetDatum(i);
            values[2] = Int64GetDatum(nodes[i] * os_page_size);

            tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
                                 values, nulls);
        }
    }

    LWLockRelease(ShmemIndexLock);
    firstNumaTouch = false;

    return (Datum) 0;
}

/*
 * Determine the memory page size used for the shared memory segment.
 *
 * If the shared segment was allocated using huge pages, returns the size of
 * a huge page. Otherwise returns the size of regular memory page.
 *
 * This should be used only after the server is started.
 */
Size
pg_get_shmem_pagesize(void)
{
    Size os_page_size;
#ifdef WIN32
    SYSTEM_INFO sysinfo;

    GetSystemInfo(&sysinfo);
    os_page_size = sysinfo.dwPageSize;
#else
    os_page_size = sysconf(_SC_PAGESIZE);
#endif

    Assert(IsUnderPostmaster);
    Assert(huge_pages_status != HUGE_PAGES_UNKNOWN);

    if (huge_pages_status == HUGE_PAGES_ON)
        GetHugePageSize(&os_page_size, NULL);

    return os_page_size;
}

Datum
pg_numa_available(PG_FUNCTION_ARGS)
{
    PG_RETURN_BOOL(pg_numa_init() != -1);
}