/* General filesystem local caching manager
 *
 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "internal.h"

MODULE_DESCRIPTION("FS Cache Manager");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

unsigned fscache_defer_lookup = 1;
module_param_named(defer_lookup, fscache_defer_lookup, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(fscache_defer_lookup,
		 "Defer cookie lookup to background thread");

unsigned fscache_defer_create = 1;
module_param_named(defer_create, fscache_defer_create, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(fscache_defer_create,
		 "Defer cookie creation to background thread");

unsigned fscache_debug;
module_param_named(debug, fscache_debug, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(fscache_debug,
		 "FS-Cache debugging mask");

struct kobject *fscache_root;
struct workqueue_struct *fscache_object_wq;
struct workqueue_struct *fscache_op_wq;

DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);

/* these values serve as lower bounds, will be adjusted in fscache_init() */
static unsigned fscache_object_max_active = 4;
static unsigned fscache_op_max_active = 2;

#ifdef CONFIG_SYSCTL
static struct ctl_table_header *fscache_sysctl_header;

static int fscache_max_active_sysctl(struct ctl_table *table, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos)
{
	struct workqueue_struct **wqp = table->extra1;
	unsigned int *datap = table->data;
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0)
		workqueue_set_max_active(*wqp, *datap);
	return ret;
}

static struct ctl_table fscache_sysctls[] = {
	{
		.procname	= "object_max_active",
		.data		= &fscache_object_max_active,
		.maxlen		= sizeof(unsigned),
		.mode		= 0644,
		.proc_handler	= fscache_max_active_sysctl,
		.extra1		= &fscache_object_wq,
	},
	{
		.procname	= "operation_max_active",
		.data		= &fscache_op_max_active,
		.maxlen		= sizeof(unsigned),
		.mode		= 0644,
		.proc_handler	= fscache_max_active_sysctl,
		.extra1		= &fscache_op_wq,
	},
	{}
};

static struct ctl_table fscache_sysctls_root[] = {
	{
		.procname	= "fscache",
		.mode		= 0555,
		.child		= fscache_sysctls,
	},
	{}
};
#endif

/*
 * initialise the fs caching module
 */
static int __init fscache_init(void)
{
	unsigned int nr_cpus = num_possible_cpus();
	unsigned int cpu;
	int ret;

	fscache_object_max_active =
		clamp_val(nr_cpus,
			  fscache_object_max_active, WQ_UNBOUND_MAX_ACTIVE);

	ret = -ENOMEM;
	fscache_object_wq = alloc_workqueue("fscache_object", WQ_UNBOUND,
					    fscache_object_max_active);
	if (!fscache_object_wq)
		goto error_object_wq;

	fscache_op_max_active =
		clamp_val(fscache_object_max_active / 2,
			  fscache_op_max_active, WQ_UNBOUND_MAX_ACTIVE);

	ret = -ENOMEM;
	fscache_op_wq = alloc_workqueue("fscache_operation", WQ_UNBOUND,
					fscache_op_max_active);
	if (!fscache_op_wq)
		goto error_op_wq;

	for_each_possible_cpu(cpu)
		init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu));

	ret = fscache_proc_init();
	if (ret < 0)
		goto error_proc;

#ifdef CONFIG_SYSCTL
	ret = -ENOMEM;
	fscache_sysctl_header = register_sysctl_table(fscache_sysctls_root);
	if (!fscache_sysctl_header)
		goto error_sysctl;
#endif

	fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
					       sizeof(struct fscache_cookie),
					       0, 0,
					       fscache_cookie_init_once);
	if (!fscache_cookie_jar) {
		pr_notice("Failed to allocate a cookie jar\n");
		ret = -ENOMEM;
		goto error_cookie_jar;
	}

	fscache_root = kobject_create_and_add("fscache", kernel_kobj);
	if (!fscache_root)
		goto error_kobj;

	pr_notice("Loaded\n");
	return 0;

error_kobj:
	kmem_cache_destroy(fscache_cookie_jar);
error_cookie_jar:
#ifdef CONFIG_SYSCTL
	unregister_sysctl_table(fscache_sysctl_header);
error_sysctl:
#endif
	fscache_proc_cleanup();
error_proc:
	destroy_workqueue(fscache_op_wq);
error_op_wq:
	destroy_workqueue(fscache_object_wq);
error_object_wq:
	return ret;
}

fs_initcall(fscache_init);

/*
 * clean up on module removal
 */
static void __exit fscache_exit(void)
{
	_enter("");

	kobject_put(fscache_root);
	kmem_cache_destroy(fscache_cookie_jar);
#ifdef CONFIG_SYSCTL
	unregister_sysctl_table(fscache_sysctl_header);
#endif
	fscache_proc_cleanup();
	destroy_workqueue(fscache_op_wq);
	destroy_workqueue(fscache_object_wq);
	pr_notice("Unloaded\n");
}
module_exit(fscache_exit);

/*
 * wait_on_atomic_t() sleep function for uninterruptible waiting
 */
int fscache_wait_atomic_t(atomic_t *p)
{
	schedule();
	return 0;
}
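The sysctl handler in the file above follows a common two-step pattern: proc_dointvec() validates and stores the new value into table->data, and only on success is the live workqueue resized with workqueue_set_max_active(). Below is a minimal, self-contained sketch of the same pattern as a standalone module, assuming the same era's sysctl API (register_sysctl_table() with a .child root table, as used above); the "wqdemo" names are hypothetical and not part of fs/fscache:

/*
 * Hypothetical "wqdemo" module: expose a workqueue's max_active limit
 * as /proc/sys/wqdemo/max_active, applying writes to the live queue.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wqdemo_wq;
static unsigned int wqdemo_max_active = 4;
static struct ctl_table_header *wqdemo_sysctl_header;

static int wqdemo_max_active_sysctl(struct ctl_table *table, int write,
				    void __user *buffer, size_t *lenp,
				    loff_t *ppos)
{
	/* parse/copy the value first; on a successful write, apply it */
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret == 0 && write)
		workqueue_set_max_active(wqdemo_wq, wqdemo_max_active);
	return ret;
}

static struct ctl_table wqdemo_sysctls[] = {
	{
		.procname	= "max_active",
		.data		= &wqdemo_max_active,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= wqdemo_max_active_sysctl,
	},
	{}
};

static struct ctl_table wqdemo_sysctls_root[] = {
	{
		.procname	= "wqdemo",
		.mode		= 0555,
		.child		= wqdemo_sysctls,
	},
	{}
};

static int __init wqdemo_init(void)
{
	wqdemo_wq = alloc_workqueue("wqdemo", WQ_UNBOUND, wqdemo_max_active);
	if (!wqdemo_wq)
		return -ENOMEM;

	wqdemo_sysctl_header = register_sysctl_table(wqdemo_sysctls_root);
	if (!wqdemo_sysctl_header) {
		destroy_workqueue(wqdemo_wq);
		return -ENOMEM;
	}
	return 0;
}

static void __exit wqdemo_exit(void)
{
	unregister_sysctl_table(wqdemo_sysctl_header);
	destroy_workqueue(wqdemo_wq);
}

module_init(wqdemo_init);
module_exit(wqdemo_exit);
MODULE_LICENSE("GPL");

With the module loaded, the limit would be tuned with a write such as "echo 8 > /proc/sys/wqdemo/max_active" (for fscache itself, /proc/sys/fscache/object_max_active and operation_max_active). One small difference: the sketch checks the write flag, whereas fscache_max_active_sysctl() re-applies the current value even on reads, which is harmless.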
Bluetooth: Fix lockdep warning with l2cap_chan_connect

The L2CAP connection's channel list lock (conn->chan_lock) must never be
taken while already holding a channel lock (chan->lock) in order to
avoid lock inversion and lockdep warnings. So far the l2cap_chan_connect
function has acquired the chan->lock early in the function and then
later called l2cap_chan_add(conn, chan), which will try to take the
conn->chan_lock. This violates the correct order of taking the locks
and may lead to the following type of lockdep warning:

-> #1 (&conn->chan_lock){+.+...}:
       [<c109324d>] lock_acquire+0x9d/0x140
       [<c188459c>] mutex_lock_nested+0x6c/0x420
       [<d0aab48e>] l2cap_chan_add+0x1e/0x40 [bluetooth]
       [<d0aac618>] l2cap_chan_connect+0x348/0x8f0 [bluetooth]
       [<d0cc9a91>] lowpan_control_write+0x221/0x2d0 [bluetooth_6lowpan]

-> #0 (&chan->lock){+.+.+.}:
       [<c10928d8>] __lock_acquire+0x1a18/0x1d20
       [<c109324d>] lock_acquire+0x9d/0x140
       [<c188459c>] mutex_lock_nested+0x6c/0x420
       [<d0ab05fd>] l2cap_connect_cfm+0x1dd/0x3f0 [bluetooth]
       [<d0a909c4>] hci_le_meta_evt+0x11a4/0x1260 [bluetooth]
       [<d0a910eb>] hci_event_packet+0x3ab/0x3120 [bluetooth]
       [<d0a7cb08>] hci_rx_work+0x208/0x4a0 [bluetooth]

       CPU0                    CPU1
       ----                    ----
  lock(&conn->chan_lock);
                               lock(&chan->lock);
                               lock(&conn->chan_lock);
  lock(&chan->lock);

Before calling l2cap_chan_add() the channel is not part of the
conn->chan_l list, and can therefore only be accessed by the L2CAP user
(such as l2cap_sock.c). We can therefore assume that it is the
responsibility of the user to handle mutual exclusion until this point
(which we can see is already the case in l2cap_sock.c, which in many
places touches chan members without holding chan->lock).

Since the hci_conn, and by extension the l2cap_conn, creation in the
l2cap_chan_connect() function depends on chan details, we cannot simply
add a mutex_lock(&conn->chan_lock) at the beginning of the function
(the conn object doesn't exist yet at that point). What we can do,
however, is move the taking of chan->lock later into the function, to a
point where we already have the conn object and can therefore take
conn->chan_lock first.

This patch implements the above strategy and makes some other necessary
changes, such as using __l2cap_chan_add(), which assumes conn->chan_lock
is held, and adding a second label so that unlocking happens as it
should.

Reported-by: Jukka Rissanen <jukka.rissanen@linux.intel.com>
Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
Tested-by: Jukka Rissanen <jukka.rissanen@linux.intel.com>
Acked-by: Jukka Rissanen <jukka.rissanen@linux.intel.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
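The diagram above is the classic ABBA inversion: each CPU holds one lock while waiting for the other, so neither can make progress. The fix makes every path take conn->chan_lock before chan->lock, matching what l2cap_connect_cfm() already does. Below is a minimal, runnable userspace analogue of that discipline using pthread mutexes (illustrative names only; this is not kernel code and not the actual patch): both workers follow the same order, so no deadlock is possible, and swapping the two lock calls in either worker recreates exactly the inversion lockdep complains about.

/* cc -o lockorder lockorder.c -pthread */
#include <pthread.h>
#include <stdio.h>

/* stand-ins for conn->chan_lock and chan->lock */
static pthread_mutex_t conn_chan_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;

/* like l2cap_connect_cfm(): conn lock first, then the channel lock */
static void *worker_a(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&conn_chan_lock);
	pthread_mutex_lock(&chan_lock);
	puts("A: took conn->chan_lock, then chan->lock");
	pthread_mutex_unlock(&chan_lock);
	pthread_mutex_unlock(&conn_chan_lock);
	return NULL;
}

/* like the fixed l2cap_chan_connect(): the SAME order, not chan first */
static void *worker_b(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&conn_chan_lock);	/* taking chan_lock first here
						 * would recreate the inversion */
	pthread_mutex_lock(&chan_lock);
	puts("B: took conn->chan_lock, then chan->lock");
	pthread_mutex_unlock(&chan_lock);
	pthread_mutex_unlock(&conn_chan_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker_a, NULL);
	pthread_create(&b, NULL, worker_b, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

This is also why the patch switches to __l2cap_chan_add(): once the caller takes conn->chan_lock itself, the helper must not try to take it again, so the lock-free variant that documents "caller holds conn->chan_lock" is the right building block.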