2 * See the file LICENSE for redistribution information.
4 * Copyright (c) 1996, 1997
5 * Sleepycat Software. All rights reserved.
10 static const char sccsid[] = "@(#)mp_sync.c 10.15 (Sleepycat) 11/1/97";
13 #ifndef NO_SYSTEM_INCLUDES
14 #include <sys/types.h>
25 #include "common_ext.h"
27 static int __bhcmp __P((const void *, const void *));
/*
 * NOTE(review): this span is a fragmentary excerpt of memp_sync() -- the
 * function signature, declarations, braces, and many interior lines are
 * missing from this view (gaps in the embedded line numbering), so only
 * comments are added; every code token is left byte-identical.
 *
 * Contract (as visible here): checkpoint-style flush of all dirty buffers
 * in the shared pool, returning DB_INCOMPLETE while pinned buffers still
 * owe writes for the checkpoint LSN.
 */
31 * Mpool sync function.
/* Locals: write-candidate array bookkeeping plus per-write status flags. */
42 int ar_cnt, cnt, nalloc, next, notused, ret, wrote;
/* Syncing to an LSN is meaningless without logging configured. */
46 if (dbenv->lg_info == NULL) {
47 __db_err(dbenv, "memp_sync: requires logging");
52 * We try and write the buffers in page order so that the underlying
53 * filesystem doesn't have to seek and can write contiguous blocks,
54 * plus, we don't want to hold the region lock while we write the
55 * buffers. Get memory to hold the buffer pointers. Get a good-size
56 * block, too, because we realloc while holding the region lock if we
/* Initial capacity of 1024 BH pointers; grown via realloc further down. */
60 (BH **)__db_malloc((nalloc = 1024) * sizeof(BH *))) == NULL)
66 * If the application is asking about a previous call to memp_sync(),
67 * and we haven't found any buffers that the application holding the
68 * pin couldn't write, return yes or no based on the current count.
69 * Note, if the application is asking about a LSN *smaller* than one
70 * we've already handled or are currently handling, then we return a
71 * result based on the count for the larger LSN.
74 if (!F_ISSET(mp, MP_LSN_RETRY) && log_compare(lsnp, &mp->lsn) <= 0) {
/* lsn_cnt == 0: every buffer owed to the earlier checkpoint was written. */
75 if (mp->lsn_cnt == 0) {
83 /* Else, it's a new checkpoint. */
84 F_CLR(mp, MP_LSN_RETRY);
87 * Save the LSN. We know that it's a new LSN or larger than the one
88 * for which we were already doing a checkpoint. (BTW, I don't expect
89 * to see multiple LSN's from the same or multiple processes, but You
90 * Just Never Know. Responding as if they all called with the largest
91 * of the LSNs specified makes everything work.)
93 * We don't currently use the LSN we save. We could potentially save
94 * the last-written LSN in each buffer header and use it to determine
95 * what buffers need to be written. The problem with this is that it's
96 * sizeof(LSN) more bytes of buffer header. We currently write all the
97 * dirty buffers instead.
99 * Walk the list of shared memory segments clearing the count of
100 * buffers waiting to be written.
104 for (mfp = SH_TAILQ_FIRST(&dbmp->mp->mpfq, __mpoolfile);
105 mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
109 * Walk the list of buffers and mark all dirty buffers to be written
110 * and all pinned buffers to be potentially written (we can't know if
111 * we'll need to write them until the holding process returns them to
112 * the cache). We do this in one pass while holding the region locked
113 * so that processes can't make new buffers dirty, causing us to never
114 * finish. Since the application may have restarted the sync, clear
115 * any BH_WRITE flags that appear to be left over from previous calls.
117 * Keep a count of the total number of buffers we need to write in
118 * MPOOL->lsn_cnt, and for each file, in MPOOLFILE->lsn_count.
121 for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
122 bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh))
/* Dirty OR pinned (ref != 0): schedule for (potential) writing. */
123 if (F_ISSET(bhp, BH_DIRTY) || bhp->ref != 0) {
124 F_SET(bhp, BH_WRITE);
128 mfp = R_ADDR(dbmp, bhp->mf_offset);
132 * If the buffer isn't in use, we should be able to
133 * write it immediately, so save a reference to it.
/* Array full: realloc (done while holding the region lock, see above). */
136 if (ar_cnt == nalloc) {
139 (BH **)__db_realloc(bharray,
140 nalloc * sizeof(BH *))) == NULL) {
145 bharray[ar_cnt++] = bhp;
/* Not dirty and not pinned: clear any stale flag from a restarted sync. */
148 F_CLR(bhp, BH_WRITE);
150 /* If there no buffers we can write immediately, we're done. */
152 ret = mp->lsn_cnt ? DB_INCOMPLETE : 0;
156 /* Lock down the buffers and their contents. */
157 for (cnt = 0; cnt < ar_cnt; ++cnt)
162 /* Sort the buffers we're going to write. */
/* Page-ordered writes (per __bhcmp) favor contiguous file I/O. */
163 qsort(bharray, ar_cnt, sizeof(BH *), __bhcmp);
167 /* Walk the array, writing buffers. */
168 for (next = 0; next < ar_cnt; ++next) {
170 * It's possible for a thread to have gotten the buffer since
171 * we listed it for writing. If the reference count is still
172 * 1, we're the only ones using the buffer, go ahead and write.
173 * If it's >1, then skip the buffer and assume that it will be
174 * written when it's returned to the cache.
176 if (bharray[next]->ref > 1) {
177 --bharray[next]->ref;
181 /* Write the buffer. */
182 mfp = R_ADDR(dbmp, bharray[next]->mf_offset);
/*
 * NOTE(review): "¬used" below is mojibake -- almost certainly the
 * HTML entity "&not;" substituted for the text "&not", i.e. the real
 * argument is "&notused". Confirm against the upstream source; cannot
 * be corrected here without touching code text.
 */
184 __memp_bhwrite(dbmp, mfp, bharray[next], ¬used, &wrote);
186 /* Release the buffer. */
187 --bharray[next]->ref;
189 /* If there's an error, release the rest of the buffers. */
190 if (ret != 0 || !wrote) {
191 while (++next < ar_cnt)
192 --bharray[next]->ref;
198 * Any process syncing the shared memory buffer pool
199 * had better be able to write to any underlying file.
200 * Be understanding, but firm, on this point.
203 __db_err(dbenv, "%s: unable to flush page: %lu",
204 R_ADDR(dbmp, mfp->path_off),
205 (u_long)bharray[next]->pgno);
/* Pinned buffers may still owe writes: report DB_INCOMPLETE if so. */
211 ret = mp->lsn_cnt ? DB_INCOMPLETE : 0;
217 * MPOOL->lsn_cnt (the total sync count)
218 * MPOOLFILE->lsn_cnt (the per-file sync count)
219 * BH_WRITE flag (the scheduled for writing flag)
/* Error path (presumably): undo the per-file and per-buffer bookkeeping. */
222 for (mfp = SH_TAILQ_FIRST(&dbmp->mp->mpfq, __mpoolfile);
223 mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
225 for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
226 bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh))
227 F_CLR(bhp, BH_WRITE);
/*
 * NOTE(review): fragmentary excerpt of memp_fsync() -- the signature and
 * many interior lines are missing from this view; comments only, code
 * text untouched.
 *
 * Contract (as visible): flush all dirty buffers belonging to a single
 * MPOOLFILE, then fsync the underlying fd; returns DB_INCOMPLETE when
 * pinned/locked buffers (pincnt != 0) could not be written.
 */
236 * Mpool file sync function.
245 int ar_cnt, cnt, nalloc, next, pincnt, notused, ret, wrote;
248 * If this handle doesn't have a file descriptor that's open for
249 * writing, or if the file is a temporary, there's no reason to
/* Read-only or temporary file: nothing durable to sync. */
252 if (F_ISSET(dbmfp, MP_READONLY | MP_PATH_TEMP))
/* Region offset of this file's MPOOLFILE; used to match buffers below. */
257 mf_offset = R_OFFSET(dbmp, dbmfp->mfp);
260 * We try and write the buffers in page order so that the underlying
261 * filesystem doesn't have to seek and can write contiguous blocks,
262 * plus, we don't want to hold the region lock while we write the
263 * buffers. Get memory to hold the buffer pointers. Get a good-size
264 * block, too, because we realloc while holding the region lock if we
269 (BH **)__db_malloc((size_t)nalloc * sizeof(BH *))) == NULL)
275 * Walk the LRU list of buffer headers, and get a list of buffers to
276 * write for this MPOOLFILE.
279 for (bhp = SH_TAILQ_FIRST(&dbmp->mp->bhq, __bh);
280 bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
/* Skip clean buffers and buffers belonging to other files. */
281 if (!F_ISSET(bhp, BH_DIRTY) || bhp->mf_offset != mf_offset)
/* Pinned or locked: can't write now; presumably counted in pincnt. */
283 if (bhp->ref != 0 || F_ISSET(bhp, BH_LOCKED)) {
288 if (ar_cnt == nalloc) {
290 if ((bharray = (BH **)__db_realloc(bharray,
291 nalloc * sizeof(BH *))) == NULL) {
297 bharray[ar_cnt++] = bhp;
300 /* Lock down the buffers and their contents. */
301 for (cnt = 0; cnt < ar_cnt; ++cnt)
306 /* Sort the buffers we're going to write. */
/* Page order (per __bhcmp) so the filesystem can write contiguously. */
307 qsort(bharray, ar_cnt, sizeof(BH *), __bhcmp);
311 /* Walk the array, writing buffers. */
312 for (next = 0; next < ar_cnt; ++next) {
314 * It's possible for a thread to have gotten the buffer since
315 * we listed it for writing. If the reference count is still
316 * 1, we're the only ones using the buffer, go ahead and write.
317 * If it's >1, then skip the buffer and assume that it will be
318 * written when it's returned to the cache.
320 if (bharray[next]->ref > 1) {
323 --bharray[next]->ref;
327 /* Write the buffer. */
/*
 * NOTE(review): "¬used" below is mojibake for "&notused"
 * ("&not" -> HTML "&not;" -> U+00AC). Confirm against upstream.
 */
328 ret = __memp_pgwrite(dbmfp, bharray[next], ¬used, &wrote);
330 /* Release the buffer. */
331 --bharray[next]->ref;
333 /* If there's an error, release the rest of the buffers. */
335 while (++next < ar_cnt)
336 --bharray[next]->ref;
/* Common exit: drop the region lock before the (unlocked) fsync below. */
343 err: UNLOCKREGION(dbmp);
348 * Sync the underlying file as the last thing we do, so that the OS
349 * has maximal opportunity to flush buffers before we request it.
352 * Don't lock the region around the sync, fsync(2) has no atomicity
/* Only claim success-by-fsync when no buffer was left pinned/locked. */
356 return (pincnt == 0 ? __db_fsync(dbmfp->fd) : DB_INCOMPLETE);
/*
 * NOTE(review): fragmentary excerpt of memp_trickle() -- return type,
 * parameter declarations, and interior lines are missing from this view;
 * comments only, code text untouched.
 *
 * Contract (as visible): write dirty buffers until at least pct percent
 * of the pool's buffers are clean; nwrotep presumably reports the number
 * written -- confirm against the full source.
 */
363 * Keep a specified percentage of the buffers clean.
366 memp_trickle(dbmp, pct, nwrotep)
374 int notused, ret, wrote;
/* Validate the target percentage: must be in [1, 100]. */
380 if (pct < 1 || pct > 100)
386 * If there are sufficient clean buffers, or no buffers or no dirty
387 * buffers, we're done.
390 * Using st_page_clean and st_page_dirty is our only choice at the
391 * moment, but it's not as correct as we might like in the presence
392 * of pools with more than one buffer size, as a free 512-byte buffer
393 * isn't the same as a free 8K buffer.
/* total == 0 is checked first, guarding the division below. */
395 loop: total = mp->stat.st_page_clean + mp->stat.st_page_dirty;
396 if (total == 0 || mp->stat.st_page_dirty == 0 ||
397 (mp->stat.st_page_clean * 100) / total >= (u_long)pct) {
402 /* Loop until we write a buffer. */
403 for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
404 bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
/* Skip buffers that are clean or currently locked. */
406 !F_ISSET(bhp, BH_DIRTY) || F_ISSET(bhp, BH_LOCKED))
409 mfp = R_ADDR(dbmp, bhp->mf_offset);
/*
 * NOTE(review): "¬used" below is mojibake for "&notused"
 * ("&not" -> HTML "&not;" -> U+00AC). Confirm against upstream.
 */
411 __memp_bhwrite(dbmp, mfp, bhp, ¬used, &wrote)) != 0)
415 * Any process syncing the shared memory buffer pool
416 * had better be able to write to any underlying file.
417 * Be understanding, but firm, on this point.
420 __db_err(dbmp->dbenv, "%s: unable to flush page: %lu",
421 R_ADDR(dbmp, mfp->path_off), (u_long)bhp->pgno);
/* Successful trickle write: bump the statistic, then re-check the ratio. */
426 ++mp->stat.st_page_trickle;
432 /* No more buffers to write. */
435 err: UNLOCKREGION(dbmp);
/*
 * NOTE(review): tail fragment of __bhcmp(), the qsort comparator for BH
 * pointers -- the signature (and the casts of the const void * arguments
 * to bhp1/bhp2) is outside this view. Sorts first by owning file, then
 * by page number.
 */
448 /* Sort by file (shared memory pool offset). */
449 if (bhp1->mf_offset < bhp2->mf_offset)
451 if (bhp1->mf_offset > bhp2->mf_offset)
454 /* Sort by page in file. */
/*
 * NOTE(review): equal pgno yields 1, never 0 -- presumably safe because
 * the same file/page pair can't appear twice in the array, but a qsort
 * comparator is formally required to be a consistent total order; verify
 * the no-duplicates invariant holds for every caller.
 */
455 return (bhp1->pgno < bhp2->pgno ? -1 : 1);