2 * See the file LICENSE for redistribution information.
4 * Copyright (c) 1996, 1997, 1998
5 * Sleepycat Software. All rights reserved.
8 * Copyright (c) 1990, 1993, 1994, 1995, 1996
9 * Keith Bostic. All rights reserved.
12 * Copyright (c) 1990, 1993, 1994, 1995
13 * The Regents of the University of California. All rights reserved.
15 * This code is derived from software contributed to Berkeley by
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 * 3. All advertising materials mentioning features or use of this software
27 * must display the following acknowledgement:
28 * This product includes software developed by the University of
29 * California, Berkeley and its contributors.
30 * 4. Neither the name of the University nor the names of its contributors
31 * may be used to endorse or promote products derived from this software
32 * without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
35 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 static const char sccsid[] = "@(#)bt_delete.c 10.31 (Sleepycat) 5/6/98";
53 #ifndef NO_SYSTEM_INCLUDES
54 #include <sys/types.h>
63 static int __bam_dpages __P((DB *, BTREE *));
67 * Delete the items referenced by a key.
69 * PUBLIC: int __bam_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
/*
 * NOTE(review): this file is a non-contiguous excerpt -- the embedded
 * original line numbers jump (e.g. 72 -> 81, 84 -> 88), so declarations,
 * braces, goto targets and cleanup code are missing from view.  Comments
 * below describe only the statements that are visible here.
 *
 * K&R-style definition; per the PUBLIC prototype above the parameters
 * are (DB *argdbp, DB_TXN *txn, DBT *key, u_int32_t flags).  Returns 0
 * on success, a non-zero error code otherwise.
 */
72 __bam_delete(argdbp, txn, key, flags)
81 db_indx_t cnt, i, indx;
82 int dpage, exact, ret, stack;
/* Trace the operation when debug logging is compiled in. */
84 DEBUG_LWRITE(argdbp, txn, "bam_delete", key, NULL, flags);
88 /* Check for invalid flags. */
89 if ((ret = __db_delchk(argdbp,
90 key, flags, F_ISSET(argdbp, DB_AM_RDONLY))) != 0)
/* Acquire a per-thread DB handle into dbp -- presumably the macro
 * returns on error through ret; TODO confirm against db_int.h. */
93 GETHANDLE(argdbp, txn, &dbp, ret);
96 /* Search the tree for the key; delete only deletes exact matches. */
97 if ((ret = __bam_search(dbp, key, S_DELETE, 1, NULL, &exact)) != 0)
/* The search left the matching page on the cursor stack (t->bt_csp);
 * h is presumably assigned from t->bt_csp->page on a line not shown. */
101 indx = t->bt_csp->indx;
103 /* Delete the key/data pair, including any on-or-off page duplicates. */
/* Count on-page duplicates of the key: successive pairs whose key
 * index entry (h->inp[]) equals the found key's share one key item.
 * The loop's terminating "break" (original line 106) is not visible. */
104 for (cnt = 1, i = indx;; ++cnt)
105 if ((i += P_INDX) >= NUM_ENT(h) || h->inp[i] != h->inp[indx])
/* For each duplicate: physically delete it only if no cursor still
 * references it; each iteration bumps the bt_deleted statistic. */
107 for (; cnt > 0; --cnt, ++t->lstat.bt_deleted)
108 if (__bam_ca_delete(dbp, h->pgno, indx, NULL, 1) == 0) {
111 * Delete the key item first, otherwise the duplicate
112 * checks in __bam_ditem() won't work!
114 if ((ret = __bam_ditem(dbp, h, indx)) != 0)
/* Second call removes the data item, now shifted to the same index. */
116 if ((ret = __bam_ditem(dbp, h, indx)) != 0)
/* Otherwise (a cursor references the pair): just set the deleted bit
 * on the data item (indx + O_INDX) and leave it on the page. */
119 B_DSET(GET_BKEYDATA(h, indx + O_INDX)->type);
123 /* If we're using record numbers, update internal page record counts. */
124 if (F_ISSET(dbp, DB_BT_RECNUM) && (ret = __bam_adjust(dbp, t, -1)) != 0)
127 /* If the page is now empty, delete it. */
128 dpage = NUM_ENT(h) == 0 && h->pgno != PGNO_ROOT;
/* Page deletion happens only after the search stack/locks are released
 * (release code not visible in this excerpt). */
133 ret = dpage ? __bam_dpage(dbp, key) : 0;
143 * Delete the items referenced by a key.
145 * PUBLIC: int __ram_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
/*
 * NOTE(review): non-contiguous excerpt -- interior lines (declarations,
 * braces, error labels) are missing; comments cover visible code only.
 *
 * Recno flavor of delete: parameters per the PUBLIC prototype are
 * (DB *argdbp, DB_TXN *txn, DBT *key, u_int32_t flags), where key
 * holds a record number.  Returns 0 or a non-zero error code.
 */
148 __ram_delete(argdbp, txn, key, flags)
161 int exact, ret, stack;
165 /* Check for invalid flags. */
166 if ((ret = __db_delchk(argdbp,
167 key, flags, F_ISSET(argdbp, DB_AM_RDONLY))) != 0)
/* Acquire the per-thread DB handle (see GETHANDLE in db_int.h). */
170 GETHANDLE(argdbp, txn, &dbp, ret);
173 /* Check the user's record number and fill in as necessary. */
174 if ((ret = __ram_getno(argdbp, key, &recno, 0)) != 0)
177 /* Search the tree for the key; delete only deletes exact matches. */
178 if ((ret = __bam_rsearch(dbp, &recno, S_DELETE, 1, &exact)) != 0)
/* h is presumably the stack-top page; indx is the record's slot. */
186 indx = t->bt_csp->indx;
189 /* If the record has already been deleted, we couldn't have found it. */
190 if (B_DISSET(GET_BKEYDATA(h, indx)->type)) {
196 * If we're not renumbering records, replace the record with a marker
199 if (!F_ISSET(dbp, DB_RE_RENUMBER)) {
/* Remove the old item, then re-insert a zero-length B_KEYDATA item
 * with its deleted bit set (B_TSET ... 1) as the tombstone. */
200 if ((ret = __bam_ditem(dbp, h, indx)) != 0)
203 B_TSET(bk.type, B_KEYDATA, 1);
205 memset(&hdr, 0, sizeof(hdr));
/* hdr covers the BKEYDATA header only (up to the data member)... */
207 hdr.size = SSZA(BKEYDATA, data);
208 memset(&data, 0, sizeof(data));
/* ...and the data portion is an empty string. */
209 data.data = (char *)"";
211 if ((ret = __db_pitem(dbp,
212 h, indx, BKEYDATA_SIZE(0), &hdr, &data)) != 0)
215 ++t->lstat.bt_deleted;
/* Renumbering case follows (original lines not all visible). */
219 /* Delete the item. */
220 if ((ret = __bam_ditem(dbp, h, indx)) != 0)
223 ++t->lstat.bt_deleted;
/* Flag the associated recno tree as modified so it gets rewritten. */
224 if (t->bt_recno != NULL)
225 F_SET(t->bt_recno, RECNO_MODIFIED);
227 /* Adjust the counts. */
228 __bam_adjust(dbp, t, -1);
230 /* Adjust the cursors. */
231 __ram_ca(dbp, recno, CA_DELETE);
234 * If the page is now empty, delete it -- we have the whole tree
235 * locked, so there are no preparations to make. Else, release
238 if (NUM_ENT(h) == 0 && h->pgno != PGNO_ROOT) {
240 ret = __bam_dpages(dbp, t);
253 * Delete one or more entries from a page.
255 * PUBLIC: int __bam_ditem __P((DB *, PAGE *, u_int32_t));
/*
 * NOTE(review): non-contiguous excerpt -- the enclosing switch on the
 * page type (original line ~269) and several case labels/gotos are not
 * visible.  Parameters per the prototype: (DB *dbp, PAGE *h,
 * u_int32_t indx).  Computes nbytes, the on-page size of the item,
 * then removes it via __db_ditem() and dirties the page.
 */
258 __bam_ditem(dbp, h, indx)
/* Btree internal page: item size depends on the stored type. */
271 bi = GET_BINTERNAL(h, indx);
272 switch (B_TYPE(bi->type)) {
/* Overflow/duplicate internal item -- bo points at the BOVERFLOW so
 * the off-page chain can be freed (presumably falls into the offpage
 * code below; the goto is not visible here). */
275 nbytes = BINTERNAL_SIZE(bi->len);
276 bo = (BOVERFLOW *)bi->data;
/* Plain B_KEYDATA internal item. */
279 nbytes = BINTERNAL_SIZE(bi->len);
/* Unknown item type: page-format error. */
282 return (__db_pgfmt(dbp, h->pgno));
/* Recno internal page: entries are fixed size. */
286 nbytes = RINTERNAL_SIZE;
/* Leaf-page case follows. */
290 * If it's a duplicate key, discard the index and don't touch
291 * the actual page item.
294 * This works because no data item can have an index matching
295 * any other index so even if the data item is in a key "slot",
296 * it won't match any other index.
/* Even indx == a key slot (keys and data alternate on leaf pages). */
298 if ((indx % 2) == 0) {
300 * Check for a duplicate after us on the page. NOTE:
301 * we have to delete the key item before deleting the
302 * data item, otherwise the "indx + P_INDX" calculation
305 if (indx + P_INDX < (u_int32_t)NUM_ENT(h) &&
306 h->inp[indx] == h->inp[indx + P_INDX])
307 return (__bam_adjindx(dbp,
308 h, indx, indx + O_INDX, 0));
310 * Check for a duplicate before us on the page. It
311 * doesn't matter if we delete the key item before or
312 * after the data item for the purposes of this one.
314 if (indx > 0 && h->inp[indx] == h->inp[indx - P_INDX])
315 return (__bam_adjindx(dbp,
316 h, indx, indx - P_INDX, 0));
/* Leaf data/key item: size by stored type. */
320 bk = GET_BKEYDATA(h, indx);
321 switch (B_TYPE(bk->type)) {
324 nbytes = BOVERFLOW_SIZE;
325 bo = GET_BOVERFLOW(h, indx);
327 offpage: /* Delete duplicate/offpage chains. */
328 if (B_TYPE(bo->type) == B_DUPLICATE) {
330 __db_ddup(dbp, bo->pgno, __bam_free)) != 0)
334 __db_doff(dbp, bo->pgno, __bam_free)) != 0)
338 nbytes = BKEYDATA_SIZE(bk->len);
341 return (__db_pgfmt(dbp, h->pgno));
345 return (__db_pgfmt(dbp, h->pgno));
348 /* Delete the item. */
349 if ((ret = __db_ditem(dbp, h, indx, nbytes)) != 0)
352 /* Mark the page dirty. */
353 return (memp_fset(dbp->mpf, h, DB_MPOOL_DIRTY));
358 * Adjust an index on the page.
360 * PUBLIC: int __bam_adjindx __P((DB *, PAGE *, u_int32_t, u_int32_t, int));
/*
 * NOTE(review): non-contiguous excerpt -- the if/else split between
 * the insert and delete paths (original lines ~377-392) is only
 * partially visible.  Shifts the page's index (inp[]) array to insert
 * a copy of slot indx_copy at indx, or to remove slot indx, without
 * moving the actual page items.
 */
363 __bam_adjindx(dbp, h, indx, indx_copy, is_insert)
366 u_int32_t indx, indx_copy;
372 /* Log the change. */
373 if (DB_LOGGING(dbp) &&
374 (ret = __bam_adj_log(dbp->dbenv->lg_info, dbp->txn, &LSN(h),
375 0, dbp->log_fileid, PGNO(h), &LSN(h), indx, indx_copy,
376 (u_int32_t)is_insert)) != 0)
/* Insert path: shift inp[indx..] up one slot, then (on a line not
 * visible here) store the saved copy at inp[indx]. */
380 copy = h->inp[indx_copy];
381 if (indx != NUM_ENT(h))
382 memmove(&h->inp[indx + O_INDX], &h->inp[indx],
383 sizeof(db_indx_t) * (NUM_ENT(h) - indx));
/* Delete path: shift inp[indx+1..] down over the removed entry --
 * presumably NUM_ENT(h) has already been decremented by this point;
 * TODO confirm against the missing lines. */
388 if (indx != NUM_ENT(h))
389 memmove(&h->inp[indx], &h->inp[indx + O_INDX],
390 sizeof(db_indx_t) * (NUM_ENT(h) - indx));
393 /* Mark the page dirty. */
394 ret = memp_fset(dbp->mpf, h, DB_MPOOL_DIRTY);
396 /* Adjust the cursors. */
397 __bam_ca_di(dbp, h->pgno, indx, is_insert ? 1 : -1);
403 * Delete a page from the tree.
405 * PUBLIC: int __bam_dpage __P((DB *, const DBT *));
/*
 * NOTE(review): non-contiguous excerpt -- loop braces, goto targets
 * (release/err) and several statements are missing from view.
 *
 * Called after __bam_delete leaves a leaf page empty: finds the
 * highest internal page that would become empty, write-locks the
 * whole chain down to the leaf, and hands the stack to __bam_dpages().
 */
408 __bam_dpage(dbp, key)
416 int level; /* !!!: has to hold number of tree levels. */
423 * The locking protocol is that we acquire locks by walking down the
424 * tree, to avoid the obvious deadlocks.
426 * Call __bam_search to reacquire the empty leaf page, but this time
427 * get both the leaf page and it's parent, locked. Walk back up the
428 * tree, until we have the top pair of pages that we want to delete.
429 * Once we have the top page that we want to delete locked, lock the
430 * underlying pages and check to make sure they're still empty. If
431 * they are, delete them.
/* Retry the search one level higher each pass until the parent would
 * NOT become empty (NUM_ENT != 1) or we reach the root. */
433 for (level = LEAFLEVEL;; ++level) {
434 /* Acquire a page and its parent, locked. */
436 __bam_search(dbp, key, S_WRPAIR, level, NULL, &exact)) != 0)
440 * If we reach the root or the page isn't going to be empty
441 * when we delete one record, quit.
443 h = t->bt_csp[-1].page;
444 if (h->pgno == PGNO_ROOT || NUM_ENT(h) != 1)
447 /* Release the two locked pages. */
448 (void)memp_fput(dbp->mpf, t->bt_csp[-1].page, 0);
449 (void)__BT_TLPUT(dbp, t->bt_csp[-1].lock);
450 (void)memp_fput(dbp->mpf, t->bt_csp[0].page, 0);
451 (void)__BT_TLPUT(dbp, t->bt_csp[0].lock);
455 * Leave the stack pointer one after the last entry, we may be about
456 * to push more items on the stack.
461 * t->bt_csp[-2].page is the top page, which we're not going to delete,
462 * and t->bt_csp[-1].page is the first page we are going to delete.
464 * Walk down the chain, acquiring the rest of the pages until we've
465 * retrieved the leaf page. If we find any pages that aren't going
466 * to be emptied by the delete, someone else added something while we
467 * were walking the tree, and we discontinue the delete.
469 for (h = t->bt_csp[-1].page;;) {
479 * Get the next page, write lock it and push it onto the stack.
480 * We know it's index 0, because it can only have one element.
/* Child pgno lives in slot 0 of either a btree or recno internal page. */
482 pgno = TYPE(h) == P_IBTREE ?
483 GET_BINTERNAL(h, 0)->pgno : GET_RINTERNAL(h, 0)->pgno;
485 if ((ret = __bam_lget(dbp, 0, pgno, DB_LOCK_WRITE, &lock)) != 0)
487 if ((ret = __bam_pget(dbp, &h, &pgno, 0)) != 0)
489 BT_STK_PUSH(t, h, 0, lock, ret);
/* Entire deletable subtree is now locked on the stack. */
495 return (__bam_dpages(dbp, t));
/* Error label (original "release:"/"err:" target not visible). */
498 /* Discard any locked pages and return. */
506 * Delete a set of locked pages.
/*
 * NOTE(review): non-contiguous excerpt, and the function is truncated
 * at the end of this view (the final loop is not closed here).  The
 * definition header, epg initialization, and the loop over the stack
 * (original lines ~507-545) are not visible.  Deletes the locked
 * subtree left on t's cursor stack by __bam_dpage()/__ram_delete().
 */
527 * There is an interesting deadlock situation here. We have to relink
528 * the leaf page chain around the leaf page being deleted. Consider
529 * a cursor walking through the leaf pages, that has the previous page
530 * read-locked and is waiting on a lock for the page we're deleting.
531 * It will deadlock here. This is a problem, because if our process is
532 * selected to resolve the deadlock, we'll leave an empty leaf page
533 * that we can never again access by walking down the tree. So, before
534 * we unlink the subtree, we relink the leaf page chain.
536 if ((ret = __db_relink(dbp, t->bt_csp->page, NULL, 1)) != 0)
540 * We have the entire stack of deletable pages locked. Start from the
541 * top of the tree and move to the bottom, as it's better to release
542 * the inner pages as soon as possible.
/* Remove the parent's reference to the subtree being deleted. */
544 if ((ret = __bam_ditem(dbp, epg->page, epg->indx)) != 0)
548 * If we just deleted the last or next-to-last item from the root page,
549 * the tree can collapse a level. Write lock the last page referenced
550 * by the root page and copy it over the root page. If we can't get a
551 * write lock, that's okay, the tree just remains a level deeper than
/* h here is presumably the root page -- assignment not visible. */
555 if (h->pgno == PGNO_ROOT && NUM_ENT(h) <= 1) {
556 pgno = TYPE(epg->page) == P_IBTREE ?
557 GET_BINTERNAL(epg->page, 0)->pgno :
558 GET_RINTERNAL(epg->page, 0)->pgno;
559 if ((ret = __bam_lget(dbp, 0, pgno, DB_LOCK_WRITE, &lock)) != 0)
561 if ((ret = __bam_pget(dbp, &h, &pgno, 0)) != 0)
564 /* Log the change. */
565 if (DB_LOGGING(dbp)) {
/* a: full image of the page being promoted to root. */
566 memset(&a, 0, sizeof(a));
568 a.size = dbp->pgsize;
/* b: the root entry that referenced it. */
569 memset(&b, 0, sizeof(b));
570 b.data = P_ENTRY(epg->page, 0);
571 b.size = BINTERNAL_SIZE(((BINTERNAL *)b.data)->len);
572 __bam_rsplit_log(dbp->dbenv->lg_info, dbp->txn,
573 &h->lsn, 0, dbp->log_fileid, h->pgno, &a,
574 RE_NREC(epg->page), &b, &epg->page->lsn);
580 * One fixup -- if the tree has record numbers and we're not
581 * converting to a leaf page, we have to preserve the total
/* Save the record count before the copy clobbers it... */
584 if (TYPE(h) == P_IRECNO ||
585 (TYPE(h) == P_IBTREE && F_ISSET(dbp, DB_BT_RECNUM)))
586 rcnt = RE_NREC(epg->page);
/* Copy the child page over the root wholesale. */
587 memcpy(epg->page, h, dbp->pgsize);
588 epg->page->pgno = PGNO_ROOT;
/* ...and restore it afterward. */
589 if (TYPE(h) == P_IRECNO ||
590 (TYPE(h) == P_IBTREE && F_ISSET(dbp, DB_BT_RECNUM)))
591 RE_NREC_SET(epg->page, rcnt);
592 (void)memp_fset(dbp->mpf, epg->page, DB_MPOOL_DIRTY);
595 * Free the page copied onto the root page and discard its
596 * lock. (The call to __bam_free() discards our reference
599 * It's possible that the reverse split we're doing involves
600 * pages from the stack of pages we're deleting. Don't free
/* If h is also on the delete stack, just release it; it will be
 * freed by the stack loop below. */
603 if (h->pgno == (epg + 1)->page->pgno)
604 (void)memp_fput(dbp->mpf, h, 0);
606 (void)__bam_free(dbp, h);
609 (void)__BT_TLPUT(dbp, lock);
611 /* Adjust the cursors. */
612 __bam_ca_move(dbp, h->pgno, PGNO_ROOT);
615 /* Release the top page in the subtree. */
616 (void)memp_fput(dbp->mpf, epg->page, 0);
617 (void)__BT_TLPUT(dbp, epg->lock);
620 * Free the rest of the pages.
623 * Don't bother checking for errors. We've unlinked the subtree from
624 * the tree, and there's no possibility of recovery.
626 while (++epg <= t->bt_csp) {
629 * Why do we need to do this? Isn't the page already empty?
631 if (NUM_ENT(epg->page) != 0)
632 (void)__bam_ditem(dbp, epg->page, epg->indx);
634 (void)__bam_free(dbp, epg->page);
635 (void)__BT_TLPUT(dbp, epg->lock);
/* Error label (original "release:"/"err:" marker not visible). */
641 /* Discard any remaining pages and return. */
642 for (; epg <= t->bt_csp; ++epg) {
643 (void)memp_fput(dbp->mpf, epg->page, 0);
644 (void)__BT_TLPUT(dbp, epg->lock);