/* * procfs-based user access to knfsd statistics * * /proc/net/rpc/nfsd * * Format: * rc * Statistsics for the reply cache * fh * statistics for filehandle lookup * io * statistics for IO throughput * th <10%-20%> <20%-30%> ... <90%-100%> <100%> * time (seconds) when nfsd thread usage above thresholds * and number of times that all threads were in use * ra cache-size <10% <20% <30% ... <100% not-found * number of times that read-ahead entry was found that deep in * the cache. * plus generic RPC stats (see net/sunrpc/stats.c) * * Copyright (C) 1995, 1996, 1997 Olaf Kirch */ #include #include #include #include #include "nfsd.h" struct nfsd_stats nfsdstats; struct svc_stat nfsd_svcstats = { .program = &nfsd_program, }; static int nfsd_proc_show(struct seq_file *seq, void *v) { int i; seq_printf(seq, "rc %u %u %u\nfh %u %u %u %u %u\nio %u %u\n", nfsdstats.rchits, nfsdstats.rcmisses, nfsdstats.rcnocache, nfsdstats.fh_stale, nfsdstats.fh_lookup, nfsdstats.fh_anon, nfsdstats.fh_nocache_dir, nfsdstats.fh_nocache_nondir, nfsdstats.io_read, nfsdstats.io_write); /* thread usage: */ seq_printf(seq, "th %u %u", nfsdstats.th_cnt, nfsdstats.th_fullcnt); for (i=0; i<10; i++) { unsigned int jifs = nfsdstats.th_usage[i]; unsigned int sec = jifs / HZ, msec = (jifs % HZ)*1000/HZ; seq_printf(seq, " %u.%03u", sec, msec); } /* newline and ra-cache */ seq_printf(seq, "\nra %u", nfsdstats.ra_size); for (i=0; i<11; i++) seq_printf(seq, " %u", nfsdstats.ra_depth[i]); seq_putc(seq, '\n'); /* show my rpc info */ svc_seq_show(seq, &nfsd_svcstats); #ifdef CONFIG_NFSD_V4 /* Show count for individual nfsv4 operations */ /* Writing operation numbers 0 1 2 also for maintaining uniformity */ seq_printf(seq,"proc4ops %u", LAST_NFS4_OP + 1); for (i = 0; i <= LAST_NFS4_OP; i++) seq_printf(seq, " %u", nfsdstats.nfs4_opcount[i]); seq_putc(seq, '\n'); #endif return 0; } static int nfsd_proc_open(struct inode *inode, struct file *file) { return single_open(file, nfsd_proc_show, NULL); } static const struct 
file_operations nfsd_proc_fops = { .open = nfsd_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; void nfsd_stat_init(void) { svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_fops); } void nfsd_stat_shutdown(void) { svc_proc_unregister(&init_net, "nfsd"); } q' value=''/>
diff options
context:
space:
mode:
authorSteven Rostedt (VMware) <rostedt@goodmis.org>2017-01-30 19:27:10 -0500
committerSteven Rostedt (VMware) <rostedt@goodmis.org>2017-01-31 09:13:49 -0500
commit79c6f448c8b79c321e4a1f31f98194e4f6b6cae7 (patch)
tree370efda701f03cccf21e02bb1fdd3b852547d75c /tools/testing/selftests/powerpc/pmu/event.c
parent0c744ea4f77d72b3dcebb7a8f2684633ec79be88 (diff)
tracing: Fix hwlat kthread migration
The hwlat tracer creates a kernel thread at start of the tracer. It is pinned to a single CPU and will move to the next CPU after each period of running. If the user modifies the migration thread's affinity, it will not change after that happens. The original code created the thread at the first instance it was called, but it was later changed to destroy the thread after the tracer was finished, and the thread would not be created again until the next instance of the tracer was established. The code that initialized the affinity was only called on the initial instantiation of the tracer. After that, it was not initialized again, and the previous affinity did not match the current newly created one, making it appear that the user had modified the thread's affinity when they had not, and the thread failed to migrate again. Cc: stable@vger.kernel.org Fixes: 0330f7aa8ee6 ("tracing: Have hwlat trace migrate across tracing_cpumask CPUs") Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Diffstat (limited to 'tools/testing/selftests/powerpc/pmu/event.c')