/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996, 1997
 *	Sleepycat Software.  All rights reserved.
 */
11 static const char copyright[] =
12 "@(#) Copyright (c) 1997\n\
13 Sleepycat Software Inc. All rights reserved.\n";
14 static const char sccsid[] = "@(#)lock_deadlock.c 10.26 (Sleepycat) 11/25/97";
17 #ifndef NO_SYSTEM_INCLUDES
18 #include <sys/types.h>
29 #include "common_ext.h"
31 #define ISSET_MAP(M, N) (M[(N) / 32] & (1 << (N) % 32))
33 #define CLEAR_MAP(M, N) { \
35 for (__i = 0; __i < (N); __i++) \
39 #define SET_MAP(M, B) (M[(B) / 32] |= (1 << ((B) % 32)))
40 #define CLR_MAP(M, B) (M[(B) / 32] &= ~(1 << ((B) % 32)))
42 #define OR_MAP(D, S, N) { \
44 for (__i = 0; __i < (N); __i++) \
47 #define BAD_KILLID 0xffffffff
56 static int __dd_abort __P((DB_ENV *, locker_info *));
58 __P((DB_ENV *, u_int32_t **, u_int32_t *, locker_info **));
60 *__dd_find __P((u_int32_t *, locker_info *, u_int32_t));
63 static void __dd_debug __P((DB_ENV *, locker_info *, u_int32_t *, u_int32_t));
67 lock_detect(lt, flags, atype)
73 u_int32_t *bitmap, *deadlock, i, killid, nentries, nlockers;
76 /* Validate arguments. */
78 __db_fchk(lt->dbenv, "lock_detect", flags, DB_LOCK_CONFLICT)) != 0)
81 /* Check if a detector run is necessary. */
83 if (LF_ISSET(DB_LOCK_CONFLICT)) {
84 /* Make a pass every time a lock waits. */
86 do_pass = dbenv->lk_info->region->need_dd != 0;
87 UNLOCK_LOCKREGION(lt);
93 /* Build the waits-for bitmap. */
94 if ((ret = __dd_build(dbenv, &bitmap, &nlockers, &idmap)) != 0)
100 if (dbenv->db_verbose != 0)
101 __dd_debug(dbenv, idmap, bitmap, nlockers);
103 /* Find a deadlock. */
104 deadlock = __dd_find(bitmap, idmap, nlockers);
105 nentries = ALIGN(nlockers, 32) / 32;
107 if (deadlock != NULL) {
112 * Find the first bit set in the current
113 * array and then look for a lower tid in
116 for (i = 0; i < nlockers; i++)
117 if (ISSET_MAP(deadlock, i))
120 if (killid == BAD_KILLID) {
122 "warning: could not find locker to abort");
127 * The oldest transaction has the lowest
130 for (i = killid + 1; i < nlockers; i++)
131 if (ISSET_MAP(deadlock, i) &&
132 idmap[i].id < idmap[killid].id)
135 case DB_LOCK_DEFAULT:
138 * We are trying to calculate the id of the
139 * locker whose entry is indicated by deadlock.
141 killid = (deadlock - bitmap) / nentries;
143 case DB_LOCK_YOUNGEST:
145 * Find the first bit set in the current
146 * array and then look for a lower tid in
149 for (i = 0; i < nlockers; i++)
150 if (ISSET_MAP(deadlock, i))
153 if (killid == BAD_KILLID) {
155 "warning: could not find locker to abort");
159 * The youngest transaction has the highest
162 for (i = killid + 1; i < nlockers; i++)
163 if (ISSET_MAP(deadlock, i) &&
164 idmap[i].id > idmap[killid].id)
172 /* Kill the locker with lockid idmap[killid]. */
173 if (dbenv->db_verbose != 0 && killid != BAD_KILLID)
174 __db_err(dbenv, "Aborting locker %lx",
175 (u_long)idmap[killid].id);
177 if (killid != BAD_KILLID &&
178 (ret = __dd_abort(dbenv, &idmap[killid])) != 0)
180 "warning: unable to abort locker %lx",
181 (u_long)idmap[killid].id);
190 * ========================================================================
194 __dd_build(dbenv, bmp, nlockers, idmap)
196 u_int32_t **bmp, *nlockers;
199 struct __db_lock *lp;
201 DB_LOCKOBJ *op, *lo, *lockerp;
203 locker_info *id_array;
204 u_int32_t *bitmap, count, *entryp, i, id, nentries, *tmpmap;
210 * We'll check how many lockers there are, add a few more in for
211 * good measure and then allocate all the structures. Then we'll
212 * verify that we have enough room when we go back in and get the
213 * mutex the second time.
216 retry: count = lt->region->nlockers;
217 lt->region->need_dd = 0;
218 UNLOCK_LOCKREGION(lt);
225 if (dbenv->db_verbose)
226 __db_err(dbenv, "%lu lockers", (u_long)count);
229 nentries = ALIGN(count, 32) / 32;
231 * Allocate enough space for a count by count bitmap matrix.
234 * We can probably save the malloc's between iterations just
235 * reallocing if necessary because count grew by too much.
237 if ((bitmap = (u_int32_t *)__db_calloc((size_t)count,
238 sizeof(u_int32_t) * nentries)) == NULL) {
239 __db_err(dbenv, "%s", strerror(ENOMEM));
244 (u_int32_t *)__db_calloc(sizeof(u_int32_t), nentries)) == NULL) {
245 __db_err(dbenv, "%s", strerror(ENOMEM));
250 if ((id_array = (locker_info *)__db_calloc((size_t)count,
251 sizeof(locker_info))) == NULL) {
252 __db_err(dbenv, "%s", strerror(ENOMEM));
259 * Now go back in and actually fill in the matrix.
262 if (lt->region->nlockers > count) {
270 * First we go through and assign each locker a deadlock detector id.
271 * Note that we fill in the idmap in the next loop since that's the
272 * only place where we conveniently have both the deadlock id and the
275 for (id = 0, i = 0; i < lt->region->table_size; i++)
276 for (op = SH_TAILQ_FIRST(<->hashtab[i], __db_lockobj);
277 op != NULL; op = SH_TAILQ_NEXT(op, links, __db_lockobj))
278 if (op->type == DB_LOCK_LOCKER)
281 * We go through the hash table and find each object. For each object,
282 * we traverse the waiters list and add an entry in the waitsfor matrix
283 * for each waiter/holder combination.
285 for (i = 0; i < lt->region->table_size; i++) {
286 for (op = SH_TAILQ_FIRST(<->hashtab[i], __db_lockobj);
287 op != NULL; op = SH_TAILQ_NEXT(op, links, __db_lockobj)) {
288 if (op->type != DB_LOCK_OBJTYPE)
290 CLEAR_MAP(tmpmap, nentries);
293 * First we go through and create a bit map that
294 * represents all the holders of this object.
296 for (lp = SH_TAILQ_FIRST(&op->holders, __db_lock);
298 lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
299 if (__lock_getobj(lt, lp->holder,
300 NULL, DB_LOCK_LOCKER, &lockerp) != 0) {
302 "warning unable to find object");
305 id_array[lockerp->dd_id].id = lp->holder;
306 id_array[lockerp->dd_id].valid = 1;
309 * If the holder has already been aborted, then
310 * we should ignore it for now.
312 if (lp->status == DB_LSTAT_HELD)
313 SET_MAP(tmpmap, lockerp->dd_id);
317 * Next, for each waiter, we set its row in the matrix
318 * equal to the map of holders we set up above.
321 lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
324 lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
325 if ((ret = __lock_getobj(lt, lp->holder,
326 NULL, DB_LOCK_LOCKER, &lockerp)) != 0) {
328 "warning unable to find object");
331 id_array[lockerp->dd_id].id = lp->holder;
332 id_array[lockerp->dd_id].valid = 1;
335 * If the transaction is pending abortion, then
336 * ignore it on this iteration.
338 if (lp->status != DB_LSTAT_WAITING)
341 entryp = bitmap + (nentries * lockerp->dd_id);
342 OR_MAP(entryp, tmpmap, nentries);
344 * If this is the first waiter on the queue,
345 * then we remove the waitsfor relationship
346 * with oneself. However, if it's anywhere
347 * else on the queue, then we have to keep
348 * it and we have an automatic deadlock.
351 CLR_MAP(entryp, lockerp->dd_id);
356 /* Now for each locker; record its last lock. */
357 for (id = 0; id < count; id++) {
358 if (!id_array[id].valid)
360 if ((ret = __lock_getobj(lt,
361 id_array[id].id, NULL, DB_LOCK_LOCKER, &lockerp)) != 0) {
363 "No locks for locker %lu", (u_long)id_array[id].id);
366 lp = SH_LIST_FIRST(&lockerp->heldby, __db_lock);
368 id_array[id].last_lock = LOCK_TO_OFFSET(lt, lp);
369 lo = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
370 pptr = SH_DBT_PTR(&lo->lockobj);
371 if (lo->lockobj.size >= sizeof(db_pgno_t))
372 memcpy(&id_array[id].pgno, pptr,
375 id_array[id].pgno = 0;
379 /* Pass complete, reset the deadlock detector bit. */
380 lt->region->need_dd = 0;
381 UNLOCK_LOCKREGION(lt);
384 * Now we can release everything except the bitmap matrix that we
395 __dd_find(bmp, idmap, nlockers)
396 u_int32_t *bmp, nlockers;
399 u_int32_t i, j, nentries, *mymap, *tmpmap;
402 * For each locker, OR in the bits from the lockers on which that
405 nentries = ALIGN(nlockers, 32) / 32;
406 for (mymap = bmp, i = 0; i < nlockers; i++, mymap += nentries) {
409 for (j = 0; j < nlockers; j++) {
410 if (ISSET_MAP(mymap, j)) {
411 /* Find the map for this bit. */
412 tmpmap = bmp + (nentries * j);
413 OR_MAP(mymap, tmpmap, nentries);
414 if (ISSET_MAP(mymap, i))
423 __dd_abort(dbenv, info)
427 struct __db_lock *lockp;
429 DB_LOCKOBJ *lockerp, *sh_obj;
435 /* Find the locker's last lock. */
437 __lock_getobj(lt, info->id, NULL, DB_LOCK_LOCKER, &lockerp)) != 0)
440 lockp = SH_LIST_FIRST(&lockerp->heldby, __db_lock);
441 if (LOCK_TO_OFFSET(lt, lockp) != info->last_lock ||
442 lockp == NULL || lockp->status != DB_LSTAT_WAITING)
445 /* Abort lock, take it off list, and wake up this lock. */
446 lockp->status = DB_LSTAT_ABORTED;
447 lt->region->ndeadlocks++;
448 SH_LIST_REMOVE(lockp, locker_links, __db_lock);
449 sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
450 SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock);
451 (void)__db_mutex_unlock(&lockp->mutex, lt->fd);
455 out: UNLOCK_LOCKREGION(lt);
461 __dd_debug(dbenv, idmap, bitmap, nlockers)
464 u_int32_t *bitmap, nlockers;
466 u_int32_t i, j, *mymap, nentries;
469 __db_err(dbenv, "Waitsfor array");
470 __db_err(dbenv, "waiter\twaiting on");
472 * Allocate space to print 10 bytes per item waited on.
474 if ((msgbuf = (char *)__db_malloc((nlockers + 1) * 10 + 64)) == NULL) {
475 __db_err(dbenv, "%s", strerror(ENOMEM));
479 nentries = ALIGN(nlockers, 32) / 32;
480 for (mymap = bitmap, i = 0; i < nlockers; i++, mymap += nentries) {
483 sprintf(msgbuf, /* Waiter. */
484 "%lx/%lu:\t", (u_long)idmap[i].id, (u_long)idmap[i].pgno);
485 for (j = 0; j < nlockers; j++)
486 if (ISSET_MAP(mymap, j))
487 sprintf(msgbuf, "%s %lx", msgbuf,
488 (u_long)idmap[j].id);
489 (void)sprintf(msgbuf,
490 "%s %lu", msgbuf, (u_long)idmap[i].last_lock);
491 __db_err(dbenv, msgbuf);