2 * See the file LICENSE for redistribution information.
4 * Copyright (c) 1996, 1997
5 * Sleepycat Software. All rights reserved.
11 static const char copyright[] =
12 "@(#) Copyright (c) 1997\n\
13 Sleepycat Software Inc. All rights reserved.\n";
14 static const char sccsid[] = "@(#)lock_deadlock.c 10.25 (Sleepycat) 11/1/97";
17 #ifndef NO_SYSTEM_INCLUDES
18 #include <sys/types.h>
29 #include "common_ext.h"
/*
 * ISSET_MAP --
 *	Test bit N in bitmap M, where M is an array of 32-bit words.
 *	(M) is parenthesized so any pointer expression may be passed,
 *	and the shift constant is unsigned: with a signed 1, testing
 *	bit 31 would left-shift into the sign bit, which is undefined
 *	behavior in C.
 */
#define ISSET_MAP(M, N) ((M)[(N) / 32] & (1U << ((N) % 32)))
/*
 * CLEAR_MAP --
 *	Zero the first N 32-bit words of bitmap M (used to reset the
 *	per-object holders map between iterations in __dd_build).
 *	NOTE(review): the macro body is elided in this view -- the
 *	declaration of __i and the loop body are not visible here.
 */
33 #define CLEAR_MAP(M, N) { \
35 for (__i = 0; __i < (N); __i++) \
/*
 * SET_MAP --
 *	Set bit B in bitmap M (array of 32-bit words).
 *	(M) is parenthesized so any pointer expression may be passed,
 *	and 1U avoids the signed-shift undefined behavior when
 *	B % 32 == 31.
 */
#define SET_MAP(M, B) ((M)[(B) / 32] |= (1U << ((B) % 32)))
/*
 * CLR_MAP --
 *	Clear bit B in bitmap M (array of 32-bit words).
 *	(M) is parenthesized so any pointer expression may be passed,
 *	and 1U avoids the signed-shift undefined behavior when
 *	B % 32 == 31.
 */
#define CLR_MAP(M, B) ((M)[(B) / 32] &= ~(1U << ((B) % 32)))
/*
 * OR_MAP --
 *	OR the first N 32-bit words of bitmap S into bitmap D
 *	(accumulates holder bits into a waiter's waits-for row).
 *	NOTE(review): the macro body is elided in this view -- the
 *	declaration of __i and the OR statement are not visible here.
 */
42 #define OR_MAP(D, S, N) { \
44 for (__i = 0; __i < (N); __i++) \
/*
 * Sentinel meaning "no victim locker selected"; assumed never to be a
 * valid deadlock-detector index (see the killid scans in lock_detect).
 */
47 #define BAD_KILLID 0xffffffff
56 static int __dd_abort __P((DB_ENV *, locker_info *));
58 __P((DB_ENV *, u_int32_t **, u_int32_t *, locker_info **));
60 *__dd_find __P((u_int32_t *, locker_info *, u_int32_t));
63 static void __dd_debug __P((DB_ENV *, locker_info *, u_int32_t *, u_int32_t));
/*
 * lock_detect --
 *	Public entry point: run one pass of the deadlock detector over
 *	the lock region. Builds the waits-for bitmap (__dd_build), looks
 *	for a cycle (__dd_find), and if one is found picks a victim
 *	according to `atype` (visible branches: an oldest-style scan,
 *	DB_LOCK_DEFAULT, DB_LOCK_YOUNGEST) and aborts it via __dd_abort.
 *	With DB_LOCK_CONFLICT set, a pass only runs if the region's
 *	need_dd flag indicates a lock is actually waiting.
 *	NOTE(review): this view of the file is elided -- declarations,
 *	`break` statements, braces and return paths are missing between
 *	many of the statements below.
 */
67 lock_detect(lt, flags, atype)
74 u_int32_t *bitmap, *deadlock, i, killid, nentries, nlockers;
77 /* Validate arguments. */
79 __db_fchk(lt->dbenv, "lock_detect", flags, DB_LOCK_CONFLICT)) != 0)
82 /* Check if a detector run is necessary. */
84 if (LF_ISSET(DB_LOCK_CONFLICT)) {
85 /* Make a pass every time a lock waits. */
87 do_pass = dbenv->lk_info->region->need_dd != 0;
88 UNLOCK_LOCKREGION(lt);
94 /* Build the waits-for bitmap. */
95 if ((ret = __dd_build(dbenv, &bitmap, &nlockers, &idmap)) != 0)
101 if (dbenv->db_verbose != 0)
102 __dd_debug(dbenv, idmap, bitmap, nlockers);
104 /* Find a deadlock. */
105 deadlock = __dd_find(bitmap, idmap, nlockers);
/* nentries = number of 32-bit words per bitmap row. */
106 nentries = ALIGN(nlockers, 32) / 32;
108 if (deadlock != NULL) {
113 * Find the first bit set in the current
114 * array and then look for a lower tid in
117 for (i = 0; i < nlockers; i++)
118 if (ISSET_MAP(deadlock, i))
121 if (killid == BAD_KILLID) {
123 "warning: could not find locker to abort");
128 * The oldest transaction has the lowest
131 for (i = killid + 1; i < nlockers; i++)
132 if (ISSET_MAP(deadlock, i) &&
133 idmap[i].id < idmap[killid].id)
136 case DB_LOCK_DEFAULT:
139 * We are trying to calculate the id of the
140 * locker whose entry is indicated by deadlock.
/* deadlock points into bitmap; row offset / row width = dd_id. */
142 killid = (deadlock - bitmap) / nentries;
144 case DB_LOCK_YOUNGEST:
146 * Find the first bit set in the current
147 * array and then look for a higher tid in
150 for (i = 0; i < nlockers; i++)
151 if (ISSET_MAP(deadlock, i))
154 if (killid == BAD_KILLID) {
156 "warning: could not find locker to abort");
160 * The youngest transaction has the highest
163 for (i = killid + 1; i < nlockers; i++)
164 if (ISSET_MAP(deadlock, i) &&
165 idmap[i].id > idmap[killid].id)
173 /* Kill the locker with lockid idmap[killid]. */
174 if (dbenv->db_verbose != 0 && killid != BAD_KILLID)
175 __db_err(dbenv, "Aborting locker %lx",
176 (u_long)idmap[killid].id);
/* Abort failure is reported as a warning but not fatal here. */
178 if (killid != BAD_KILLID &&
179 (ret = __dd_abort(dbenv, &idmap[killid])) != 0)
181 "warning: unable to abort locker %lx",
182 (u_long)idmap[killid].id);
191 * ========================================================================
/*
 * __dd_build --
 *	Snapshot the lock region into a count-by-count waits-for bit
 *	matrix (*bmp): row i has bit j set when locker i waits on a lock
 *	held by locker j. Also returns *idmap, an array mapping each
 *	deadlock-detector id to its locker info, and *nlockers, the
 *	locker count. The caller owns the returned allocations.
 *	NOTE(review): this view is elided -- cleanup on allocation
 *	failure, `continue`/`goto retry` statements and several braces
 *	are not visible below.
 */
195 __dd_build(dbenv, bmp, nlockers, idmap)
197 u_int32_t **bmp, *nlockers;
200 struct __db_lock *lp;
202 DB_LOCKOBJ *op, *lo, *lockerp;
204 locker_info *id_array;
205 u_int32_t *bitmap, count, *entryp, i, id, nentries, *tmpmap;
211 * We'll check how many lockers there are, add a few more in for
212 * good measure and then allocate all the structures. Then we'll
213 * verify that we have enough room when we go back in and get the
214 * mutex the second time.
217 retry: count = lt->region->nlockers;
218 lt->region->need_dd = 0;
219 UNLOCK_LOCKREGION(lt);
226 if (dbenv->db_verbose)
227 __db_err(dbenv, "%lu lockers", (u_long)count);
/* Words per bitmap row, rounded up to a multiple of 32 bits. */
230 nentries = ALIGN(count, 32) / 32;
232 * Allocate enough space for a count by count bitmap matrix.
235 * We can probably save the malloc's between iterations just
236 * reallocing if necessary because count grew by too much.
238 if ((bitmap = (u_int32_t *)__db_calloc((size_t)count,
239 sizeof(u_int32_t) * nentries)) == NULL) {
240 __db_err(dbenv, "%s", strerror(ENOMEM));
/* tmpmap: scratch row holding the current object's holders. */
245 (u_int32_t *)__db_calloc(sizeof(u_int32_t), nentries)) == NULL) {
246 __db_err(dbenv, "%s", strerror(ENOMEM));
251 if ((id_array = (locker_info *)__db_calloc((size_t)count,
252 sizeof(locker_info))) == NULL) {
253 __db_err(dbenv, "%s", strerror(ENOMEM));
260 * Now go back in and actually fill in the matrix.
/* If lockers appeared while unlocked, our allocations are too small. */
263 if (lt->region->nlockers > count) {
271 * First we go through and assign each locker a deadlock detector id.
272 * Note that we fill in the idmap in the next loop since that's the
273 * only place where we conveniently have both the deadlock id and the
/*
 * NOTE(review): "<->hashtab[i]" here and below appears to be
 * HTML-entity mangling of "&lt->hashtab[i]" -- confirm against the
 * original source before compiling.
 */
276 for (id = 0, i = 0; i < lt->region->table_size; i++)
277 for (op = SH_TAILQ_FIRST(<->hashtab[i], __db_lockobj);
278 op != NULL; op = SH_TAILQ_NEXT(op, links, __db_lockobj))
279 if (op->type == DB_LOCK_LOCKER)
282 * We go through the hash table and find each object. For each object,
283 * we traverse the waiters list and add an entry in the waitsfor matrix
284 * for each waiter/holder combination.
286 for (i = 0; i < lt->region->table_size; i++) {
287 for (op = SH_TAILQ_FIRST(<->hashtab[i], __db_lockobj);
288 op != NULL; op = SH_TAILQ_NEXT(op, links, __db_lockobj)) {
289 if (op->type != DB_LOCK_OBJTYPE)
291 CLEAR_MAP(tmpmap, nentries);
294 * First we go through and create a bit map that
295 * represents all the holders of this object.
297 for (lp = SH_TAILQ_FIRST(&op->holders, __db_lock);
299 lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
300 if (__lock_getobj(lt, lp->holder,
301 NULL, DB_LOCK_LOCKER, &lockerp) != 0) {
303 "warning unable to find object");
306 id_array[lockerp->dd_id].id = lp->holder;
307 id_array[lockerp->dd_id].valid = 1;
310 * If the holder has already been aborted, then
311 * we should ignore it for now.
313 if (lp->status == DB_LSTAT_HELD)
314 SET_MAP(tmpmap, lockerp->dd_id);
318 * Next, for each waiter, we set its row in the matrix
319 * equal to the map of holders we set up above.
322 lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
325 lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
326 if ((ret = __lock_getobj(lt, lp->holder,
327 NULL, DB_LOCK_LOCKER, &lockerp)) != 0) {
329 "warning unable to find object");
332 id_array[lockerp->dd_id].id = lp->holder;
333 id_array[lockerp->dd_id].valid = 1;
336 * If the transaction is pending abortion, then
337 * ignore it on this iteration.
339 if (lp->status != DB_LSTAT_WAITING)
/* This waiter's row starts at bitmap + row-index * row-width. */
342 entryp = bitmap + (nentries * lockerp->dd_id);
343 OR_MAP(entryp, tmpmap, nentries);
345 * If this is the first waiter on the queue,
346 * then we remove the waitsfor relationship
347 * with oneself. However, if it's anywhere
348 * else on the queue, then we have to keep
349 * it and we have an automatic deadlock.
352 CLR_MAP(entryp, lockerp->dd_id);
357 /* Now for each locker; record its last lock. */
358 for (id = 0; id < count; id++) {
359 if (!id_array[id].valid)
361 if ((ret = __lock_getobj(lt,
362 id_array[id].id, NULL, DB_LOCK_LOCKER, &lockerp)) != 0) {
364 "No locks for locker %lu", (u_long)id_array[id].id);
367 lp = SH_LIST_FIRST(&lockerp->heldby, __db_lock);
369 id_array[id].last_lock = LOCK_TO_OFFSET(lt, lp);
/* lp->obj is a byte offset from the lock to its DB_LOCKOBJ. */
370 lo = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
371 pptr = SH_DBT_PTR(&lo->lockobj);
/* Record a page number for diagnostics when the object is big enough. */
372 if (lo->lockobj.size >= sizeof(db_pgno_t))
373 memcpy(&id_array[id].pgno, pptr,
376 id_array[id].pgno = 0;
380 /* Pass complete, reset the deadlock detector bit. */
381 lt->region->need_dd = 0;
382 UNLOCK_LOCKREGION(lt);
385 * Now we can release everything except the bitmap matrix that we
/*
 * __dd_find --
 *	Look for a cycle in the waits-for matrix: for each locker i, OR
 *	into its row the row of every locker j that i waits on; if
 *	locker i's own bit becomes set, i transitively waits on itself
 *	and row i identifies a deadlock. Returns a pointer into bmp at
 *	the deadlocked row or, presumably, NULL when no cycle exists
 *	(the return statements are not visible in this elided view).
 */
396 __dd_find(bmp, idmap, nlockers)
397 u_int32_t *bmp, nlockers;
400 u_int32_t i, j, nentries, *mymap, *tmpmap;
403 * For each locker, OR in the bits from the lockers on which that
406 nentries = ALIGN(nlockers, 32) / 32;
407 for (mymap = bmp, i = 0; i < nlockers; i++, mymap += nentries) {
410 for (j = 0; j < nlockers; j++) {
411 if (ISSET_MAP(mymap, j)) {
412 /* Find the map for this bit. */
413 tmpmap = bmp + (nentries * j);
414 OR_MAP(mymap, tmpmap, nentries);
/* Own bit set => locker i waits on itself: deadlock found. */
415 if (ISSET_MAP(mymap, i))
/*
 * __dd_abort --
 *	Abort the deadlocked waiter described by info: mark its pending
 *	lock DB_LSTAT_ABORTED, bump the region's deadlock counter,
 *	unlink the lock from the locker's list and the object's waiter
 *	queue, and wake the waiter by releasing its mutex. Gives up
 *	(goto out) if the locker's situation changed since the detection
 *	pass recorded last_lock.
 */
424 __dd_abort(dbenv, info)
428 struct __db_lock *lockp;
430 DB_LOCKOBJ *lockerp, *sh_obj;
436 /* Find the locker's last lock. */
438 __lock_getobj(lt, info->id, NULL, DB_LOCK_LOCKER, &lockerp)) != 0)
441 lockp = SH_LIST_FIRST(&lockerp->heldby, __db_lock);
/*
 * NOTE(review): LOCK_TO_OFFSET(lt, lockp) is evaluated before the
 * lockp == NULL test below -- the NULL check should come first so a
 * NULL lockp is never used in the offset computation.
 */
442 if (LOCK_TO_OFFSET(lt, lockp) != info->last_lock ||
443 lockp == NULL || lockp->status != DB_LSTAT_WAITING)
446 /* Abort lock, take it off list, and wake up this lock. */
447 lockp->status = DB_LSTAT_ABORTED;
448 lt->region->ndeadlocks++;
449 SH_LIST_REMOVE(lockp, locker_links, __db_lock);
/* lockp->obj is a byte offset from the lock to its DB_LOCKOBJ. */
450 sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
451 SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock);
/* Releasing the lock's mutex wakes the blocked waiter. */
452 (void)__db_mutex_unlock(&lockp->mutex, lt->fd);
456 out: UNLOCK_LOCKREGION(lt);
462 __dd_debug(dbenv, idmap, bitmap, nlockers)
465 u_int32_t *bitmap, nlockers;
467 u_int32_t i, j, *mymap, nentries;
470 __db_err(dbenv, "Waitsfor array");
471 __db_err(dbenv, "waiter\twaiting on");
473 * Allocate space to print 10 bytes per item waited on.
475 if ((msgbuf = (char *)__db_malloc((nlockers + 1) * 10 + 64)) == NULL) {
476 __db_err(dbenv, "%s", strerror(ENOMEM));
480 nentries = ALIGN(nlockers, 32) / 32;
481 for (mymap = bitmap, i = 0; i < nlockers; i++, mymap += nentries) {
484 sprintf(msgbuf, /* Waiter. */
485 "%lx/%lu:\t", (u_long)idmap[i].id, (u_long)idmap[i].pgno);
486 for (j = 0; j < nlockers; j++)
487 if (ISSET_MAP(mymap, j))
488 sprintf(msgbuf, "%s %lx", msgbuf,
489 (u_long)idmap[j].id);
490 (void)sprintf(msgbuf,
491 "%s %lu", msgbuf, (u_long)idmap[i].last_lock);
492 __db_err(dbenv, msgbuf);