path: root/fs/xfs/xfs_quotaops.c
/*
 * Copyright (c) 2008, Christoph Hellwig
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_qm.h"
#include <linux/quota.h>


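/*
 * Fill in the per-type quota state from the given quota inode.  If the
 * inode is not already in core, take a temporary reference so the block
 * and extent counts can be reported, and drop it when done.
 */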
static void
xfs_qm_fill_state(
	struct qc_type_state	*tstate,
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_ino_t		ino)
{
	struct xfs_quotainfo *q = mp->m_quotainfo;
	bool tempqip = false;

	tstate->ino = ino;
	if (!ip && ino == NULLFSINO)
		return;
	if (!ip) {
		if (xfs_iget(mp, NULL, ino, 0, 0, &ip))
			return;
		tempqip = true;
	}
	tstate->flags |= QCI_SYSFILE;
	tstate->blocks = ip->i_d.di_nblocks;
	tstate->nextents = ip->i_d.di_nextents;
	tstate->spc_timelimit = q->qi_btimelimit;
	tstate->ino_timelimit = q->qi_itimelimit;
	tstate->rt_spc_timelimit = q->qi_rtbtimelimit;
	tstate->spc_warnlimit = q->qi_bwarnlimit;
	tstate->ino_warnlimit = q->qi_iwarnlimit;
	tstate->rt_spc_warnlimit = q->qi_rtbwarnlimit;
	if (tempqip)
		IRELE(ip);
}

/*
 * Return quota status information, such as enforcements, quota file inode
 * numbers etc.
 */
static int
xfs_fs_get_quota_state(
	struct super_block	*sb,
	struct qc_state		*state)
{
	struct xfs_mount *mp = XFS_M(sb);
	struct xfs_quotainfo *q = mp->m_quotainfo;

	memset(state, 0, sizeof(*state));
	if (!XFS_IS_QUOTA_RUNNING(mp))
		return 0;
	state->s_incoredqs = q->qi_dquots;
	if (XFS_IS_UQUOTA_RUNNING(mp))
		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED;
	if (XFS_IS_UQUOTA_ENFORCED(mp))
		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
	if (XFS_IS_GQUOTA_RUNNING(mp))
		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED;
	if (XFS_IS_GQUOTA_ENFORCED(mp))
		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
	if (XFS_IS_PQUOTA_RUNNING(mp))
		state->s_state[PRJQUOTA].flags |= QCI_ACCT_ENABLED;
	if (XFS_IS_PQUOTA_ENFORCED(mp))
		state->s_state[PRJQUOTA].flags |= QCI_LIMITS_ENFORCED;

	xfs_qm_fill_state(&state->s_state[USRQUOTA], mp, q->qi_uquotaip,
			  mp->m_sb.sb_uquotino);
	xfs_qm_fill_state(&state->s_state[GRPQUOTA], mp, q->qi_gquotaip,
			  mp->m_sb.sb_gquotino);
	xfs_qm_fill_state(&state->s_state[PRJQUOTA], mp, q->qi_pquotaip,
			  mp->m_sb.sb_pquotino);
	return 0;
}

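/* Map a VFS quota type to the corresponding XFS dquot type. */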
STATIC int
xfs_quota_type(int type)
{
	switch (type) {
	case USRQUOTA:
		return XFS_DQ_USER;
	case GRPQUOTA:
		return XFS_DQ_GROUP;
	default:
		return XFS_DQ_PROJ;
	}
}

#define XFS_QC_SETINFO_MASK (QC_TIMER_MASK | QC_WARNS_MASK)

/*
 * Adjust quota timers & warnings
 */
static int
xfs_fs_set_info(
	struct super_block	*sb,
	int			type,
	struct qc_info		*info)
{
	struct xfs_mount *mp = XFS_M(sb);
	struct qc_dqblk newlim;

	if (sb->s_flags & MS_RDONLY)
		return -EROFS;
	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ENOSYS;
	if (!XFS_IS_QUOTA_ON(mp))
		return -ESRCH;
	if (info->i_fieldmask & ~XFS_QC_SETINFO_MASK)
		return -EINVAL;
	if ((info->i_fieldmask & XFS_QC_SETINFO_MASK) == 0)
		return 0;

	newlim.d_fieldmask = info->i_fieldmask;
	newlim.d_spc_timer = info->i_spc_timelimit;
	newlim.d_ino_timer = info->i_ino_timelimit;
	newlim.d_rt_spc_timer = info->i_rt_spc_timelimit;
	newlim.d_ino_warns = info->i_ino_warnlimit;
	newlim.d_spc_warns = info->i_spc_warnlimit;
	newlim.d_rt_spc_warns = info->i_rt_spc_warnlimit;

	return xfs_qm_scall_setqlim(mp, 0, xfs_quota_type(type), &newlim);
}

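/* Translate the generic FS_QUOTA_* flags into XFS quota flags. */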
static unsigned int
xfs_quota_flags(unsigned int uflags)
{
	unsigned int flags = 0;

	if (uflags & FS_QUOTA_UDQ_ACCT)
		flags |= XFS_UQUOTA_ACCT;
	if (uflags & FS_QUOTA_PDQ_ACCT)
		flags |= XFS_PQUOTA_ACCT;
	if (uflags & FS_QUOTA_GDQ_ACCT)
		flags |= XFS_GQUOTA_ACCT;
	if (uflags & FS_QUOTA_UDQ_ENFD)
		flags |= XFS_UQUOTA_ENFD;
	if (uflags & FS_QUOTA_GDQ_ENFD)
		flags |= XFS_GQUOTA_ENFD;
	if (uflags & FS_QUOTA_PDQ_ENFD)
		flags |= XFS_PQUOTA_ENFD;

	return flags;
}

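/* Ask the XFS quota manager to enable the requested quota flags. */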
STATIC int
xfs_quota_enable(
	struct super_block	*sb,
	unsigned int		uflags)
{
	struct xfs_mount	*mp = XFS_M(sb);

	if (sb->s_flags & MS_RDONLY)
		return -EROFS;
	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ENOSYS;

	return xfs_qm_scall_quotaon(mp, xfs_quota_flags(uflags));
}

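/* Ask the XFS quota manager to disable the requested quota flags. */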
STATIC int
xfs_quota_disable(
	struct super_block	*sb,
	unsigned int		uflags)
{
	struct xfs_mount	*mp = XFS_M(sb);

	if (sb->s_flags & MS_RDONLY)
		return -EROFS;
	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ENOSYS;
	if (!XFS_IS_QUOTA_ON(mp))
		return -EINVAL;

	return xfs_qm_scall_quotaoff(mp, xfs_quota_flags(uflags));
}

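/* Truncate the on-disk quota files; quotas must be off to do this. */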
STATIC int
xfs_fs_rm_xquota(
	struct super_block	*sb,
	unsigned int		uflags)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags = 0;

	if (sb->s_flags & MS_RDONLY)
		return -EROFS;

	if (XFS_IS_QUOTA_ON(mp))
		return -EINVAL;

	if (uflags & FS_USER_QUOTA)
		flags |= XFS_DQ_USER;
	if (uflags & FS_GROUP_QUOTA)
		flags |= XFS_DQ_GROUP;
	if (uflags & FS_PROJ_QUOTA)
		flags |= XFS_DQ_PROJ;

	return xfs_qm_scall_trunc_qfiles(mp, flags);
}

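/* Return limits and usage for the dquot matching the given qid. */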
STATIC int
xfs_fs_get_dqblk(
	struct super_block	*sb,
	struct kqid		qid,
	struct qc_dqblk		*qdq)
{
	struct xfs_mount	*mp = XFS_M(sb);
	xfs_dqid_t		id;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ENOSYS;
	if (!XFS_IS_QUOTA_ON(mp))
		return -ESRCH;

	id = from_kqid(&init_user_ns, qid);
	return xfs_qm_scall_getquota(mp, &id,
				      xfs_quota_type(qid.type), qdq, 0);
}

/* Return quota info for active quota >= this qid */
STATIC int
xfs_fs_get_nextdqblk(
	struct super_block	*sb,
	struct kqid		*qid,
	struct qc_dqblk		*qdq)
{
	int			ret;
	struct xfs_mount	*mp = XFS_M(sb);
	xfs_dqid_t		id;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ENOSYS;
	if (!XFS_IS_QUOTA_ON(mp))
		return -ESRCH;

	id = from_kqid(&init_user_ns, *qid);
	ret = xfs_qm_scall_getquota(mp, &id,
				    xfs_quota_type(qid->type), qdq,
				    XFS_QMOPT_DQNEXT);
	if (ret)
		return ret;

	/* ID may be different, so convert back what we got */
	*qid = make_kqid(current_user_ns(), qid->type, id);
	return 0;
}

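/* Set quota limits for the dquot matching the given qid. */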
STATIC int
xfs_fs_set_dqblk(
	struct super_block	*sb,
	struct kqid		qid,
	struct qc_dqblk		*qdq)
{
	struct xfs_mount	*mp = XFS_M(sb);

	if (sb->s_flags & MS_RDONLY)
		return -EROFS;
	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ENOSYS;
	if (!XFS_IS_QUOTA_ON(mp))
		return -ESRCH;

	return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
				     xfs_quota_type(qid.type), qdq);
}

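/* quotactl operations XFS registers with the VFS quota layer. */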
const struct quotactl_ops xfs_quotactl_operations = {
	.get_state		= xfs_fs_get_quota_state,
	.set_info		= xfs_fs_set_info,
	.quota_enable		= xfs_quota_enable,
	.quota_disable		= xfs_quota_disable,
	.rm_xquota		= xfs_fs_rm_xquota,
	.get_dqblk		= xfs_fs_get_dqblk,
	.get_nextdqblk		= xfs_fs_get_nextdqblk,
	.set_dqblk		= xfs_fs_set_dqblk,
};