/*
 * linux/fs/ocfs2/ioctl.c
 *
 * Copyright (C) 2006 Herbert Poetzl
 * adapted from Remy Card's ext2/ioctl.c
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/blkdev.h>
#include <linux/compat.h>

#include <cluster/masklog.h>

#include "ocfs2.h"
#include "alloc.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "journal.h"

#include "ocfs2_fs.h"
#include "ioctl.h"
#include "resize.h"
#include "refcounttree.h"
#include "sysfile.h"
#include "dir.h"
#include "buffer_head_io.h"
#include "suballoc.h"
#include "move_extents.h"

#define o2info_from_user(a, b)	\
		copy_from_user(&(a), (b), sizeof(a))
#define o2info_to_user(a, b)	\
		copy_to_user((typeof(a) __user *)b, &(a), sizeof(a))
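
/*
 * Like copy_from_user()/copy_to_user(), both helpers return the number of
 * bytes that could not be copied, so a non-zero result indicates failure.
 */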

/*
 * This call is void because we are already reporting an error that may
 * be -EFAULT.  The error will be returned from the ioctl(2) call.  It's
 * just a best-effort to tell userspace that this request caused the
 * error.
 */
static inline void o2info_set_request_error(struct ocfs2_info_request *kreq,
					    struct ocfs2_info_request __user *req)
{
	kreq->ir_flags |= OCFS2_INFO_FL_ERROR;
	(void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags));
}

static inline void o2info_set_request_filled(struct ocfs2_info_request *req)
{
	req->ir_flags |= OCFS2_INFO_FL_FILLED;
}

static inline void o2info_clear_request_filled(struct ocfs2_info_request *req)
{
	req->ir_flags &= ~OCFS2_INFO_FL_FILLED;
}

static inline int o2info_coherent(struct ocfs2_info_request *req)
{
	return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT));
}

static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
{
	int status = ocfs2_inode_lock(inode, NULL, 0);

	if (status < 0) {
		mlog_errno(status);
		return status;
	}
	ocfs2_get_inode_flags(OCFS2_I(inode));
	*flags = OCFS2_I(inode)->ip_attr;
	ocfs2_inode_unlock(inode, 0);

	return status;
}

static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
				unsigned mask)
{
	struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	struct buffer_head *bh = NULL;
	unsigned oldflags;
	int status;

	mutex_lock(&inode->i_mutex);

	status = ocfs2_inode_lock(inode, &bh, 1);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto bail_unlock;

	if (!S_ISDIR(inode->i_mode))
		flags &= ~OCFS2_DIRSYNC_FL;

	oldflags = ocfs2_inode->ip_attr;
	flags = flags & mask;
	flags |= oldflags & ~mask;

	/*
	 * The IMMUTABLE and APPEND_ONLY flags can only be changed with
	 * the relevant capability.
	 */
	status = -EPERM;
	if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) &
		(OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) {
		if (!capable(CAP_LINUX_IMMUTABLE))
			goto bail_unlock;
	}

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto bail_unlock;
	}

	ocfs2_inode->ip_attr = flags;
	ocfs2_set_inode_flags(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, bh);
	if (status < 0)
		mlog_errno(status);

	ocfs2_commit_trans(osb, handle);

bail_unlock:
	ocfs2_inode_unlock(inode, 1);
bail:
	mutex_unlock(&inode->i_mutex);

	brelse(bh);

	return status;
}

static int ocfs2_info_handle_blocksize(struct inode *inode,
				       struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_blocksize oib;

	if (o2info_from_user(oib, req))
		goto bail;

	oib.ib_blocksize = inode->i_sb->s_blocksize;

	o2info_set_request_filled(&oib.ib_req);

	if (o2info_to_user(oib, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oib.ib_req, req);

	return status;
}

static int ocfs2_info_handle_clustersize(struct inode *inode,
					 struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_clustersize oic;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oic, req))
		goto bail;

	oic.ic_clustersize = osb->s_clustersize;

	o2info_set_request_filled(&oic.ic_req);

	if (o2info_to_user(oic, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oic.ic_req, req);

	return status;
}

static int ocfs2_info_handle_maxslots(struct inode *inode,
				      struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_maxslots oim;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oim, req))
		goto bail;

	oim.im_max_slots = osb->max_slots;

	o2info_set_request_filled(&oim.im_req);

	if (o2info_to_user(oim, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oim.im_req, req);

	return status;
}

static int ocfs2_info_handle_label(struct inode *inode,
				   struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_label oil;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oil, req))
		goto bail;

	memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN);

	o2info_set_request_filled(&oil.il_req);

	if (o2info_to_user(oil, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oil.il_req, req);

	return status;
}

static int ocfs2_info_handle_uuid(struct inode *inode,
				  struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_uuid oiu;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oiu, req))
		goto bail;

	memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1);

	o2info_set_request_filled(&oiu.iu_req);

	if (o2info_to_user(oiu, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oiu.iu_req, req);

	return status;
}

static int ocfs2_info_handle_fs_features(struct inode *inode,
					 struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_fs_features oif;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oif, req))
		goto bail;

	oif.if_compat_features = osb->s_feature_compat;
	oif.if_incompat_features = osb->s_feature_incompat;
	oif.if_ro_compat_features = osb->s_feature_ro_compat;

	o2info_set_request_filled(&oif.if_req);

	if (o2info_to_user(oif, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oif.if_req, req);

	return status;
}

static int ocfs2_info_handle_journal_size(struct inode *inode,
					  struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_journal_size oij;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oij, req))
		goto bail;

	oij.ij_journal_size = i_size_read(osb->journal->j_inode);

	o2info_set_request_filled(&oij.ij_req);

	if (o2info_to_user(oij, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oij.ij_req, req);

	return status;
}

static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
				       struct inode *inode_alloc, u64 blkno,
				       struct ocfs2_info_freeinode *fi,
				       u32 slot)
{
	int status = 0, unlock = 0;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *dinode_alloc = NULL;

	if (inode_alloc)
		mutex_lock(&inode_alloc->i_mutex);

	if (o2info_coherent(&fi->ifi_req)) {
		status = ocfs2_inode_lock(inode_alloc, &bh, 0);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		unlock = 1;
	} else {
		status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	dinode_alloc = (struct ocfs2_dinode *)bh->b_data;

	fi->ifi_stat[slot].lfi_total =
		le32_to_cpu(dinode_alloc->id1.bitmap1.i_total);
	fi->ifi_stat[slot].lfi_free =
		le32_to_cpu(dinode_alloc->id1.bitmap1.i_total) -
		le32_to_cpu(dinode_alloc->id1.bitmap1.i_used);

bail:
	if (unlock)
		ocfs2_inode_unlock(inode_alloc, 0);

	if (inode_alloc)
		mutex_unlock(&inode_alloc->i_mutex);

	brelse(bh);

	return status;
}

static int ocfs2_info_handle_freeinode(struct inode *inode,
				       struct ocfs2_info_request __user *req)
{
	u32 i;
	u64 blkno = -1;
	char namebuf[40];
	int status = -EFAULT, type = INODE_ALLOC_SYSTEM_INODE;
	struct ocfs2_info_freeinode *oifi = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *inode_alloc = NULL;

	oifi = kzalloc(sizeof(struct ocfs2_info_freeinode), GFP_KERNEL);
	if (!oifi) {
		status = -ENOMEM;
		goto out_err;
	}

	if (o2info_from_user(*oifi, req))
		goto bail;

	oifi->ifi_slotnum = osb->max_slots;

	for (i = 0; i < oifi->ifi_slotnum; i++) {
		if (o2info_coherent(&oifi->ifi_req)) {
			inode_alloc = ocfs2_get_system_file_inode(osb, type, i);
			if (!inode_alloc) {
				mlog(ML_ERROR, "unable to get alloc inode in "
				     "slot %u\n", i);
				status = -EIO;
				goto bail;
			}
		} else {
			ocfs2_sprintf_system_inode_name(namebuf,
							sizeof(namebuf),
							type, i);
			status = ocfs2_lookup_ino_from_name(osb->sys_root_inode,
							    namebuf,
							    strlen(namebuf),
							    &blkno);
			if (status < 0) {
				status = -ENOENT;
				goto bail;
			}
		}

		status = ocfs2_info_scan_inode_alloc(osb, inode_alloc, blkno, oifi, i);
		if (status < 0)
			goto bail;

		iput(inode_alloc);
		inode_alloc = NULL;
	}

	o2info_set_request_filled(&oifi->ifi_req);

	if (o2info_to_user(*oifi, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oifi->ifi_req, req);

	kfree(oifi);
out_err:
	return status;
}

static void o2ffg_update_histogram(struct ocfs2_info_free_chunk_list *hist,
				   unsigned int chunksize)
{
	int index;

	index = __ilog2_u32(chunksize);
	if (index >= OCFS2_INFO_MAX_HIST)
		index = OCFS2_INFO_MAX_HIST - 1;

	hist->fc_chunks[index]++;
	hist->fc_clusters[index] += chunksize;
}
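
/*
 * Worked example (hypothetical numbers): a free chunk of 12 clusters gives
 * __ilog2_u32(12) == 3, so it lands in histogram bucket 3, the bucket that
 * collects all chunks of 8..15 clusters.
 */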

static void o2ffg_update_stats(struct ocfs2_info_freefrag_stats *stats,
			       unsigned int chunksize)
{
	if (chunksize > stats->ffs_max)
		stats->ffs_max = chunksize;

	if (chunksize < stats->ffs_min)
		stats->ffs_min = chunksize;

	stats->ffs_avg += chunksize;
	stats->ffs_free_chunks_real++;
}

static void ocfs2_info_update_ffg(struct ocfs2_info_freefrag *ffg,
				  unsigned int chunksize)
{
	o2ffg_update_histogram(&(ffg->iff_ffs.ffs_fc_hist), chunksize);
	o2ffg_update_stats(&(ffg->iff_ffs), chunksize);
}

static int ocfs2_info_freefrag_scan_chain(struct ocfs2_super *osb,
					  struct inode *gb_inode,
					  struct ocfs2_dinode *gb_dinode,
					  struct ocfs2_chain_rec *rec,
					  struct ocfs2_info_freefrag *ffg,
					  u32 chunks_in_group)
{
	int status = 0, used;
	u64 blkno;
	struct buffer_head *bh = NULL;
	struct ocfs2_group_desc *bg = NULL;
	unsigned int max_bits, num_clusters;
	unsigned int offset = 0, cluster, chunk;
	unsigned int chunk_free, last_chunksize = 0;

	if (!le32_to_cpu(rec->c_free))
		goto bail;

	do {
		if (!bg)
			blkno = le64_to_cpu(rec->c_blkno);
		else
			blkno = le64_to_cpu(bg->bg_next_group);

		if (bh) {
			brelse(bh);
			bh = NULL;
		}

		if (o2info_coherent(&ffg->iff_req))
			status = ocfs2_read_group_descriptor(gb_inode,
							     gb_dinode,
							     blkno, &bh);
		else
			status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);

		if (status < 0) {
			mlog(ML_ERROR, "Can't read the group descriptor # "
			     "%llu from device.", (unsigned long long)blkno);
			status = -EIO;
			goto bail;
		}

		bg = (struct ocfs2_group_desc *)bh->b_data;

		if (!le16_to_cpu(bg->bg_free_bits_count))
			continue;

		max_bits = le16_to_cpu(bg->bg_bits);
		offset = 0;

		for (chunk = 0; chunk < chunks_in_group; chunk++) {
			/*
			 * The last chunk may not be an entire one.
			 */
			if ((offset + ffg->iff_chunksize) > max_bits)
				num_clusters = max_bits - offset;
			else
				num_clusters = ffg->iff_chunksize;

			chunk_free = 0;
			for (cluster = 0; cluster < num_clusters; cluster++) {
				used = ocfs2_test_bit(offset,
						(unsigned long *)bg->bg_bitmap);
				/*
				 * - chunk_free counts the free clusters in
				 *   the current chunk.
				 * - last_chunksize records the size (in
				 *   clusters) of the last real free chunk
				 *   being counted.
				 */
				if (!used) {
					last_chunksize++;
					chunk_free++;
				}

				if (used && last_chunksize) {
					ocfs2_info_update_ffg(ffg,
							      last_chunksize);
					last_chunksize = 0;
				}

				offset++;
			}

			if (chunk_free == ffg->iff_chunksize)
				ffg->iff_ffs.ffs_free_chunks++;
		}

		/*
		 * We still need to update the info for the last free chunk.
		 */
		if (last_chunksize)
			ocfs2_info_update_ffg(ffg, last_chunksize);

	} while (le64_to_cpu(bg->bg_next_group));

bail:
	brelse(bh);

	return status;
}

static int ocfs2_info_freefrag_scan_bitmap(struct ocfs2_super *osb,
					   struct inode *gb_inode, u64 blkno,
					   struct ocfs2_info_freefrag *ffg)
{
	u32 chunks_in_group;
	int status = 0, unlock = 0, i;
	struct buffer_head *bh = NULL;
	struct ocfs2_chain_list *cl = NULL;
	struct ocfs2_chain_rec *rec = NULL;
	struct ocfs2_dinode *gb_dinode = NULL;

	if (gb_inode)
		mutex_lock(&gb_inode->i_mutex);

	if (o2info_coherent(&ffg->iff_req)) {
		status = ocfs2_inode_lock(gb_inode, &bh, 0);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		unlock = 1;
	} else {
		status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	gb_dinode = (struct ocfs2_dinode *)bh->b_data;
	cl = &(gb_dinode->id2.i_chain);

	/*
	 * The chunk size (in clusters) from userspace must not exceed the
	 * number of clusters in a group.
	 */
	if (ffg->iff_chunksize > le16_to_cpu(cl->cl_cpg)) {
		status = -EINVAL;
		goto bail;
	}

	memset(&ffg->iff_ffs, 0, sizeof(struct ocfs2_info_freefrag_stats));

	ffg->iff_ffs.ffs_min = ~0U;
	ffg->iff_ffs.ffs_clusters =
			le32_to_cpu(gb_dinode->id1.bitmap1.i_total);
	ffg->iff_ffs.ffs_free_clusters = ffg->iff_ffs.ffs_clusters -
			le32_to_cpu(gb_dinode->id1.bitmap1.i_used);

	chunks_in_group = le16_to_cpu(cl->cl_cpg) / ffg->iff_chunksize + 1;

	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
		rec = &(cl->cl_recs[i]);
		status = ocfs2_info_freefrag_scan_chain(osb, gb_inode,
							gb_dinode, rec, ffg,
							chunks_in_group);
		if (status)
			goto bail;
	}

	if (ffg->iff_ffs.ffs_free_chunks_real)
		ffg->iff_ffs.ffs_avg = (ffg->iff_ffs.ffs_avg /
					ffg->iff_ffs.ffs_free_chunks_real);
bail:
	if (unlock)
		ocfs2_inode_unlock(gb_inode, 0);

	if (gb_inode)
		mutex_unlock(&gb_inode->i_mutex);

	iput(gb_inode);
	brelse(bh);

	return status;
}

static int ocfs2_info_handle_freefrag(struct inode *inode,
				      struct ocfs2_info_request __user *req)
{
	u64 blkno = -1;
	char namebuf[40];
	int status = -EFAULT, type = GLOBAL_BITMAP_SYSTEM_INODE;
	struct ocfs2_info_freefrag *oiff;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *gb_inode = NULL;

	oiff = kzalloc(sizeof(struct ocfs2_info_freefrag), GFP_KERNEL);
	if (!oiff) {
		status = -ENOMEM;
		goto out_err;
	}

	if (o2info_from_user(*oiff, req))
		goto bail;
	/*
	 * The chunk size from userspace must be a power of 2.
	 */
	if ((oiff->iff_chunksize & (oiff->iff_chunksize - 1)) ||
	    (!oiff->iff_chunksize)) {
		status = -EINVAL;
		goto bail;
	}

	if (o2info_coherent(&oiff->iff_req)) {
		gb_inode = ocfs2_get_system_file_inode(osb, type,
						       OCFS2_INVALID_SLOT);
		if (!gb_inode) {
			mlog(ML_ERROR, "unable to get global_bitmap inode\n");
			status = -EIO;
			goto bail;
		}
	} else {
		ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type,
						OCFS2_INVALID_SLOT);
		status = ocfs2_lookup_ino_from_name(osb->sys_root_inode,
						    namebuf,
						    strlen(namebuf),
						    &blkno);
		if (status < 0) {
			status = -ENOENT;
			goto bail;
		}
	}

	status = ocfs2_info_freefrag_scan_bitmap(osb, gb_inode, blkno, oiff);
	if (status < 0)
		goto bail;

	o2info_set_request_filled(&oiff->iff_req);

	if (o2info_to_user(*oiff, req)) {
		status = -EFAULT;
		goto bail;
	}

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oiff->iff_req, req);

	kfree(oiff);
out_err:
	return status;
}

static int ocfs2_info_handle_unknown(struct inode *inode,
				     struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_request oir;

	if (o2info_from_user(oir, req))
		goto bail;

	o2info_clear_request_filled(&oir);

	if (o2info_to_user(oir, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oir, req);

	return status;
}

/*
 * Validate and distinguish OCFS2_IOC_INFO requests.
 *
 * - validate the magic number.
 * - distinguish different requests.
 * - validate size of different requests.
 */
static int ocfs2_info_handle_request(struct inode *inode,
				     struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_request oir;

	if (o2info_from_user(oir, req))
		goto bail;

	status = -EINVAL;
	if (oir.ir_magic != OCFS2_INFO_MAGIC)
		goto bail;

	switch (oir.ir_code) {
	case OCFS2_INFO_BLOCKSIZE:
		if (oir.ir_size == sizeof(struct ocfs2_info_blocksize))
			status = ocfs2_info_handle_blocksize(inode, req);
		break;
	case OCFS2_INFO_CLUSTERSIZE:
		if (oir.ir_size == sizeof(struct ocfs2_info_clustersize))
			status = ocfs2_info_handle_clustersize(inode, req);
		break;
	case OCFS2_INFO_MAXSLOTS:
		if (oir.ir_size == sizeof(struct ocfs2_info_maxslots))
			status = ocfs2_info_handle_maxslots(inode, req);
		break;
	case OCFS2_INFO_LABEL:
		if (oir.ir_size == sizeof(struct ocfs2_info_label))
			status = ocfs2_info_handle_label(inode, req);
		break;
	case OCFS2_INFO_UUID:
		if (oir.ir_size == sizeof(struct ocfs2_info_uuid))
			status = ocfs2_info_handle_uuid(inode, req);
		break;
	case OCFS2_INFO_FS_FEATURES:
		if (oir.ir_size == sizeof(struct ocfs2_info_fs_features))
			status = ocfs2_info_handle_fs_features(inode, req);
		break;
	case OCFS2_INFO_JOURNAL_SIZE:
		if (oir.ir_size == sizeof(struct ocfs2_info_journal_size))
			status = ocfs2_info_handle_journal_size(inode, req);
		break;
	case OCFS2_INFO_FREEINODE:
		if (oir.ir_size == sizeof(struct ocfs2_info_freeinode))
			status = ocfs2_info_handle_freeinode(inode, req);
		break;
	case OCFS2_INFO_FREEFRAG:
		if (oir.ir_size == sizeof(struct ocfs2_info_freefrag))
			status = ocfs2_info_handle_freefrag(inode, req);
		break;
	default:
		status = ocfs2_info_handle_unknown(inode, req);
		break;
	}

bail:
	return status;
}

static int ocfs2_get_request_ptr(struct ocfs2_info *info, int idx,
				 u64 *req_addr, int compat_flag)
{
	int status = -EFAULT;
	u64 __user *bp = NULL;

	if (compat_flag) {
#ifdef CONFIG_COMPAT
		/*
		 * bp stores the base address of the array of request
		 * pointers, i.e. the addresses of the separate requests.
		 */
		bp = (u64 __user *)(unsigned long)compat_ptr(info->oi_requests);
#else
		BUG();
#endif
	} else
		bp = (u64 __user *)(unsigned long)(info->oi_requests);

	if (o2info_from_user(*req_addr, bp + idx))
		goto bail;

	status = 0;
bail:
	return status;
}

/*
 * OCFS2_IOC_INFO handles an array of requests passed from userspace.
 *
 * ocfs2_info_handle() receives the aggregated info call, grabs and
 * validates the request count from the header, and then breaks it into
 * small pieces so that the specific handlers can handle them one by one.
 * A minimal userspace sketch of such an aggregation follows this
 * function below.
 *
 * The idea here is to make each separate request small enough to ensure
 * better backward and forward compatibility, since a small piece of
 * request is less likely to be broken if the disk layout changes.
 */
static int ocfs2_info_handle(struct inode *inode, struct ocfs2_info *info,
			     int compat_flag)
{
	int i, status = 0;
	u64 req_addr;
	struct ocfs2_info_request __user *reqp;

	if ((info->oi_count > OCFS2_INFO_MAX_REQUEST) ||
	    (!info->oi_requests)) {
		status = -EINVAL;
		goto bail;
	}

	for (i = 0; i < info->oi_count; i++) {

		status = ocfs2_get_request_ptr(info, i, &req_addr, compat_flag);
		if (status)
			break;

		reqp = (struct ocfs2_info_request __user *)(unsigned long)req_addr;
		if (!reqp) {
			status = -EINVAL;
			goto bail;
		}

		status = ocfs2_info_handle_request(inode, reqp);
		if (status)
			break;
	}

bail:
	return status;
}
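
/*
 * A minimal userspace sketch of an OCFS2_IOC_INFO call (not part of the
 * kernel build; structure and flag names are taken from ocfs2_ioctl.h,
 * error handling is omitted for brevity):
 *
 *	struct ocfs2_info_blocksize oib = {
 *		.ib_req = {
 *			.ir_magic = OCFS2_INFO_MAGIC,
 *			.ir_code  = OCFS2_INFO_BLOCKSIZE,
 *			.ir_size  = sizeof(oib),
 *		},
 *	};
 *	__u64 reqs[1] = { (__u64)(unsigned long)&oib };
 *	struct ocfs2_info info = {
 *		.oi_requests = (__u64)(unsigned long)reqs,
 *		.oi_count    = 1,
 *	};
 *
 *	if (ioctl(fd, OCFS2_IOC_INFO, &info) == 0 &&
 *	    (oib.ib_req.ir_flags & OCFS2_INFO_FL_FILLED))
 *		printf("block size: %u\n", oib.ib_blocksize);
 */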

long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int new_clusters;
	int status;
	struct ocfs2_space_resv sr;
	struct ocfs2_new_group_input input;
	struct reflink_arguments args;
	const char __user *old_path;
	const char __user *new_path;
	bool preserve;
	struct ocfs2_info info;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case OCFS2_IOC_GETFLAGS:
		status = ocfs2_get_inode_attr(inode, &flags);
		if (status < 0)
			return status;

		flags &= OCFS2_FL_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case OCFS2_IOC_SETFLAGS:
		if (get_user(flags, (int __user *) arg))
			return -EFAULT;

		status = mnt_want_write_file(filp);
		if (status)
			return status;
		status = ocfs2_set_inode_attr(inode, flags,
			OCFS2_FL_MODIFIABLE);
		mnt_drop_write_file(filp);
		return status;
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
		if (copy_from_user(&sr, (int __user *) arg, sizeof(sr)))
			return -EFAULT;

		return ocfs2_change_file_space(filp, cmd, &sr);
	case OCFS2_IOC_GROUP_EXTEND:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (get_user(new_clusters, (int __user *)arg))
			return -EFAULT;

		status = mnt_want_write_file(filp);
		if (status)
			return status;
		status = ocfs2_group_extend(inode, new_clusters);
		mnt_drop_write_file(filp);
		return status;
	case OCFS2_IOC_GROUP_ADD:
	case OCFS2_IOC_GROUP_ADD64:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (copy_from_user(&input, (int __user *) arg, sizeof(input)))
			return -EFAULT;

		status = mnt_want_write_file(filp);
		if (status)
			return status;
		status = ocfs2_group_add(inode, &input);
		mnt_drop_write_file(filp);
		return status;
	case OCFS2_IOC_REFLINK:
		if (copy_from_user(&args, argp, sizeof(args)))
			return -EFAULT;
		old_path = (const char __user *)(unsigned long)args.old_path;
		new_path = (const char __user *)(unsigned long)args.new_path;
		preserve = (args.preserve != 0);

		return ocfs2_reflink_ioctl(inode, old_path, new_path, preserve);
	case OCFS2_IOC_INFO:
		if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
			return -EFAULT;

		return ocfs2_info_handle(inode, &info, 0);
	case FITRIM:
	{
		struct super_block *sb = inode->i_sb;
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		struct fstrim_range range;
		int ret = 0;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;

		if (copy_from_user(&range, argp, sizeof(range)))
			return -EFAULT;

		range.minlen = max_t(u64, q->limits.discard_granularity,
				     range.minlen);
		ret = ocfs2_trim_fs(sb, &range);
		if (ret < 0)
			return ret;

		if (copy_to_user(argp, &range, sizeof(range)))
			return -EFAULT;

		return 0;
	}
	case OCFS2_IOC_MOVE_EXT:
		return ocfs2_ioctl_move_extents(filp, argp);
	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	bool preserve;
	struct reflink_arguments args;
	struct inode *inode = file_inode(file);
	struct ocfs2_info info;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case OCFS2_IOC32_GETFLAGS:
		cmd = OCFS2_IOC_GETFLAGS;
		break;
	case OCFS2_IOC32_SETFLAGS:
		cmd = OCFS2_IOC_SETFLAGS;
		break;
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
	case OCFS2_IOC_GROUP_EXTEND:
	case OCFS2_IOC_GROUP_ADD:
	case OCFS2_IOC_GROUP_ADD64:
		break;
	case OCFS2_IOC_REFLINK:
		if (copy_from_user(&args, argp, sizeof(args)))
			return -EFAULT;
		preserve = (args.preserve != 0);

		return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
					   compat_ptr(args.new_path), preserve);
	case OCFS2_IOC_INFO:
		if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
			return -EFAULT;

		return ocfs2_info_handle(inode, &info, 1);
	case OCFS2_IOC_MOVE_EXT:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return ocfs2_ioctl(file, cmd, arg);
}
#endif
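
/*
 * Note: these entry points are wired up in the file_operations tables in
 * fs/ocfs2/file.c (e.g. .unlocked_ioctl = ocfs2_ioctl and
 * .compat_ioctl = ocfs2_compat_ioctl), so they serve both regular files
 * and directories on an ocfs2 mount.
 */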