static unsigned long
nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
{
- __u64 tcno;
-
- BUG_ON(cno == 0); /* checkpoint number 0 is invalid */
- tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
+ __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
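+ /* do_div() leaves the quotient in tcno and returns the remainder */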
do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
return (unsigned long)tcno;
}
struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
unsigned int count;
- BUG_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
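+ /* a count below n would indicate corrupted metadata; warn and continue */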
+ WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
count = le32_to_cpu(cp->cp_checkpoints_count) - n;
cp->cp_checkpoints_count = cpu_to_le32(count);
return count;
* %-ENOMEM - Insufficient amount of memory available.
*
* %-ENOENT - No such checkpoint.
+ *
+ * %-EINVAL - invalid checkpoint.
*/
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
__u64 cno,
void *kaddr;
int ret;
- BUG_ON(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
- (cno < nilfs_mdt_cno(cpfile) && create));
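+ /* sanity check: out-of-range checkpoint numbers indicate a caller bug */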
+ if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
+ (cno < nilfs_mdt_cno(cpfile) && create)))
+ return -EINVAL;
down_write(&NILFS_MDT(cpfile)->mi_sem);
unsigned long tnicps;
int ret, ncps, nicps, count, i;
- if ((start == 0) || (start > end)) {
- printk(KERN_CRIT "%s: start = %llu, end = %llu\n",
- __func__,
- (unsigned long long)start,
- (unsigned long long)end);
- BUG();
+ if (unlikely(start == 0 || start > end)) {
+ printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
+ "[%llu, %llu)\n", __func__,
+ (unsigned long long)start, (unsigned long long)end);
+ return -EINVAL;
}
- /* cannot delete the latest checkpoint */
- if (start == nilfs_mdt_cno(cpfile) - 1)
- return -EPERM;
-
down_write(&NILFS_MDT(cpfile)->mi_sem);
ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
if (ret < 0) {
if (ret != -ENOENT)
- goto out_sem;
+ break;
/* skip hole */
ret = 0;
continue;
cpfile, cno, cp_bh, kaddr);
nicps = 0;
for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
- BUG_ON(nilfs_checkpoint_snapshot(cp));
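+ /* snapshots must not appear in a deletion range */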
+ WARN_ON(nilfs_checkpoint_snapshot(cp));
if (!nilfs_checkpoint_invalid(cp)) {
nilfs_checkpoint_set_invalid(cp);
nicps++;
continue;
printk(KERN_ERR "%s: cannot delete block\n",
__func__);
- goto out_sem;
+ break;
}
}
nilfs_mdt_mark_dirty(cpfile);
kunmap_atomic(kaddr, KM_USER0);
}
+
brelse(header_bh);
out_sem:
ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
}
-static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 cno,
- struct nilfs_cpinfo *ci, size_t nci)
+static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
+ void *buf, unsigned cisz, size_t nci)
{
struct nilfs_checkpoint *cp;
+ struct nilfs_cpinfo *ci = buf;
struct buffer_head *bh;
size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
- __u64 cur_cno = nilfs_mdt_cno(cpfile);
+ __u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
void *kaddr;
int n, ret;
int ncps, i;
+ if (cno == 0)
+ return -ENOENT; /* checkpoint number 0 is invalid */
down_read(&NILFS_MDT(cpfile)->mi_sem);
for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
kaddr = kmap_atomic(bh->b_page, KM_USER0);
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
- if (!nilfs_checkpoint_invalid(cp))
- nilfs_cpfile_checkpoint_to_cpinfo(
- cpfile, cp, &ci[n++]);
+ if (!nilfs_checkpoint_invalid(cp)) {
+ nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
+ ci);
+ ci = (void *)ci + cisz;
+ n++;
+ }
}
kunmap_atomic(kaddr, KM_USER0);
brelse(bh);
}
ret = n;
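+ /* on success, advance the caller's cursor past the last checkpoint returned */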
+ if (n > 0) {
+ ci = (void *)ci - cisz;
+ *cnop = ci->ci_cno + 1;
+ }
out:
up_read(&NILFS_MDT(cpfile)->mi_sem);
return ret;
}
-static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 cno,
- struct nilfs_cpinfo *ci, size_t nci)
+static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
+ void *buf, unsigned cisz, size_t nci)
{
struct buffer_head *bh;
struct nilfs_cpfile_header *header;
struct nilfs_checkpoint *cp;
- __u64 curr, next;
+ struct nilfs_cpinfo *ci = buf;
+ __u64 curr = *cnop, next;
unsigned long curr_blkoff, next_blkoff;
void *kaddr;
- int n, ret;
+ int n = 0, ret;
down_read(&NILFS_MDT(cpfile)->mi_sem);
- if (cno == 0) {
+ if (curr == 0) {
ret = nilfs_cpfile_get_header_block(cpfile, &bh);
if (ret < 0)
goto out;
ret = 0;
goto out;
}
- } else
- curr = cno;
+ } else if (unlikely(curr == ~(__u64)0)) {
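+ /* terminator from a previous call: the snapshot list is exhausted */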
+ ret = 0;
+ goto out;
+ }
+
curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
- if (ret < 0)
+ if (unlikely(ret < 0)) {
+ if (ret == -ENOENT)
+ ret = 0; /* No snapshots (started from a hole block) */
goto out;
+ }
kaddr = kmap_atomic(bh->b_page, KM_USER0);
- for (n = 0; n < nci; n++) {
- cp = nilfs_cpfile_block_get_checkpoint(
- cpfile, curr, bh, kaddr);
- nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, &ci[n]);
- next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
- if (next == 0) {
- curr = next;
- n++;
+ while (n < nci) {
+ cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
+ curr = ~(__u64)0; /* Terminator */
+ if (unlikely(nilfs_checkpoint_invalid(cp) ||
+ !nilfs_checkpoint_snapshot(cp)))
break;
- }
+ nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
+ ci = (void *)ci + cisz;
+ n++;
+ next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
+ if (next == 0)
+ break; /* reached the end of the snapshot list */
+
next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
if (curr_blkoff != next_blkoff) {
kunmap_atomic(kaddr, KM_USER0);
brelse(bh);
ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
0, &bh);
- if (ret < 0)
+ if (unlikely(ret < 0)) {
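+ /* blocks on the snapshot list must exist while the list continues */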
+ WARN_ON(ret == -ENOENT);
goto out;
+ }
kaddr = kmap_atomic(bh->b_page, KM_USER0);
}
curr = next;
}
kunmap_atomic(kaddr, KM_USER0);
brelse(bh);
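+ /* hand back either the next snapshot number or the terminator */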
+ *cnop = curr;
ret = n;
out:
- * @ci:
- * @nci:
+ * @buf: buffer to store checkpoint information
+ * @cisz: byte size of one checkpoint info item in the buffer
+ * @nci: maximum number of checkpoint info items to retrieve
*/
-ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile,
- __u64 cno, int mode,
- struct nilfs_cpinfo *ci, size_t nci)
+ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
+ void *buf, unsigned cisz, size_t nci)
{
switch (mode) {
case NILFS_CHECKPOINT:
- return nilfs_cpfile_do_get_cpinfo(cpfile, cno, ci, nci);
+ return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
case NILFS_SNAPSHOT:
- return nilfs_cpfile_do_get_ssinfo(cpfile, cno, ci, nci);
+ return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
default:
return -EINVAL;
}
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
struct nilfs_cpinfo ci;
+ __u64 tcno = cno;
ssize_t nci;
- int ret;
- /* checkpoint number 0 is invalid */
- if (cno == 0)
- return -ENOENT;
- nci = nilfs_cpfile_do_get_cpinfo(cpfile, cno, &ci, 1);
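+ /* look up exactly one checkpoint, using a scratch cursor */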
+ nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
if (nci < 0)
return nci;
else if (nci == 0 || ci.ci_cno != cno)
return -ENOENT;
-
- /* cannot delete the latest checkpoint nor snapshots */
- ret = nilfs_cpinfo_snapshot(&ci);
- if (ret < 0)
- return ret;
- else if (ret > 0 || cno == nilfs_mdt_cno(cpfile) - 1)
- return -EPERM;
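+ /* snapshots are in use and cannot be deleted directly */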
+ else if (nilfs_cpinfo_snapshot(&ci))
+ return -EBUSY;
return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}
void *kaddr;
int ret;
+ if (cno == 0)
+ return -ENOENT; /* checkpoint number 0 is invalid */
down_write(&NILFS_MDT(cpfile)->mi_sem);
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
void *kaddr;
int ret;
+ if (cno == 0)
+ return -ENOENT; /* checkpoint number 0 is invalid */
down_write(&NILFS_MDT(cpfile)->mi_sem);
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
void *kaddr;
int ret;
+ /*
+ * A checkpoint number is invalid if it is zero or larger than
+ * the largest existing one.
+ */
+ if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
+ return -ENOENT;
down_read(&NILFS_MDT(cpfile)->mi_sem);
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
goto out;
kaddr = kmap_atomic(bh->b_page, KM_USER0);
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
- ret = nilfs_checkpoint_snapshot(cp);
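+ /* an invalidated checkpoint no longer exists to callers */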
+ if (nilfs_checkpoint_invalid(cp))
+ ret = -ENOENT;
+ else
+ ret = nilfs_checkpoint_snapshot(cp);
kunmap_atomic(kaddr, KM_USER0);
brelse(bh);
case NILFS_CHECKPOINT:
/*
* Check for protecting existing snapshot mounts:
- * bd_mount_sem is used to make this operation atomic and
+ * ns_mount_mutex is used to make this operation atomic and
* exclusive with a new mount job. Though it doesn't cover
* umount, it's enough for the purpose.
*/
- down(&nilfs->ns_bdev->bd_mount_sem);
+ mutex_lock(&nilfs->ns_mount_mutex);
if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) {
/* Current implementation does not have to protect
plain read-only mounts since they are exclusive
ret = -EBUSY;
} else
ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
- up(&nilfs->ns_bdev->bd_mount_sem);
+ mutex_unlock(&nilfs->ns_mount_mutex);
return ret;
case NILFS_SNAPSHOT:
return nilfs_cpfile_set_snapshot(cpfile, cno);