Libcephfs : ceph_readdirplus_r() with ceph_ll_lookup_vino() : ceph version 17.2.5 (98318ae89f1a893a6ded3a640405cdbb33e08757) quincy (stable)

Hello All,

I found a weird issue with ceph_readdirplus_r() when used along with ceph_ll_lookup_vino(),
on ceph version 17.2.5 (98318ae89f1a893a6ded3a640405cdbb33e08757) quincy (stable).

Any help is really appreciated.

Thanks in advance,
-Joe

Test Scenario:

A. Create a CephFS subvolume "4" and create a directory "user_root" in the root of the subvolume.

root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23# ceph fs subvolume ls cephfs
[
    {
        "name": "4"
    }
]
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23#

root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23# ls -l
total 0
drwxrwxrwx 2 root root 0 Sep 22 09:16 user_root
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23#

B. In the "user_root" directory, create some files and directories.

root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root# mkdir dir1 dir2
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root# ls
dir1  dir2
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root# echo "Hello Worldls!" > file1
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root# echo "Hello Worldls!" > file2
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root# ls
dir1  dir2  file1  file2
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root# cat file*
Hello Worldls!
Hello Worldls!

C. Create a subvolume snapshot "sofs-4-5". Please ignore the older snapshots.
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23# ceph fs subvolume snapshot ls cephfs 4
[
    {
        "name": "sofs-4-1"
    },
    {
        "name": "sofs-4-2"
    },
    {
        "name": "sofs-4-3"
    },
    {
        "name": "sofs-4-4"
    },
    {
        "name": "sofs-4-5"
    }
]
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23#

Here "sofs-4-5" has snapshot id 6. 
Got this from libcephfs and have verified at Line snapshot_inode_lookup.cpp#L212. (Attached to the email)

# Content within the snapshot
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23# cd .snap/
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/.snap# ls
_sofs-4-1_1099511627778  _sofs-4-2_1099511627778  _sofs-4-3_1099511627778  _sofs-4-4_1099511627778  _sofs-4-5_1099511627778
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/.snap# cd _sofs-4-5_1099511627778/
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/.snap/_sofs-4-5_1099511627778# ls
user_root
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/.snap/_sofs-4-5_1099511627778# cd user_root/
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/.snap/_sofs-4-5_1099511627778/user_root# ls
dir1  dir2  file1  file2
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/.snap/_sofs-4-5_1099511627778/user_root# cat file*
Hello Worldls!
Hello Worldls!
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/.snap/_sofs-4-5_1099511627778/user_root#

D. Delete all the files and directories in "user_root"

root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root# rm -rf *
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root# ls
root@ss-joe-01(bash):/mnt/cephfs/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root#

E. Using libcephfs in a C++ program (attached to this email), do the following:
  1. Get the Inode of "user_root" using ceph_ll_walk().
  2. Open the directory using the Inode received from ceph_ll_walk() and do ceph_readdirplus_r().
     We don't see any dentries (except "." and "..") because we have deleted all files and directories in the active filesystem. This is expected and correct! (A condensed call sketch follows the output below.)


    =================================/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root/=====================================

    Path/Name        :"/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root/"
    Inode Address    : 0x7f5ce0009900
    Inode Number     : 1099511629282
    Snapshot Number  : 18446744073709551614
    Inode Number     : 1099511629282
    Snapshot Number  : 18446744073709551614
    . Ino: 1099511629282 SnapId: 18446744073709551614 Address: 0x7f5ce0009900
    .. Ino: 1099511627779 SnapId: 18446744073709551614 Address: 0x7f5ce00090f0
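
    For reference, steps 1-2 boil down to roughly the following (condensed from
    the attached snapshot_inode_lookup.cpp, error handling trimmed; readdir_all()
    is the helper defined there that does ceph_ll_opendir(), a ceph_readdirplus_r()
    loop, and ceph_ll_releasedir()):

    struct Inode *dir_inode = NULL;
    struct ceph_statx stx = {};
    UserPerm *perms = ceph_mount_perms(cmount);

    // Step 1: resolve the active (head) version of the directory by path.
    ceph_ll_walk(cmount,
                 "/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root/",
                 &dir_inode, &stx, CEPH_STATX_ALL_STATS, 0, perms);

    // Step 2: list it. Only "." and ".." come back, which is expected on the
    // head since everything was deleted in step D.
    readdir_all(cmount, dir_inode, perms);
    ceph_ll_put(cmount, dir_inode);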


  3. Using ceph_ll_lookup_vino(), get the Inode * of "user_root" for snapshot 6 ("sofs-4-5" has snapshot id 6,
     verified at snapshot_inode_lookup.cpp#L212, attached to this email).
  4. Open the directory using the Inode * received from ceph_ll_lookup_vino() and do ceph_readdirplus_r().
     We don't see any dentries (except "." and ".."). This is NOT expected and NOT correct, as there are files and directories in snapshot 6. (A condensed sketch follows the output below.)

    =================================1099511629282:6=====================================

    Path/Name        :"1099511629282:6"
    Inode Address    : 0x7f5ce000a110
    Inode Number     : 1099511629282
    Snapshot Number  : 6
    Inode Number     : 1099511629282
    Snapshot Number  : 6
    . Ino: 1099511629282 SnapId: 6 Address: 0x7f5ce000a110
    .. Ino: 1099511629282 SnapId: 6 Address: 0x7f5ce000a110
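
    Steps 3-4 are roughly the following (condensed from the attached program;
    vinodeno_t is the struct defined there to match Ceph's layout):

    vinodeno_t vino;
    vino.ino.val = 1099511629282;  // inode number of "user_root" (from step 1)
    vino.snapid.val = 6;           // snapshot id of "sofs-4-5"
    struct Inode *snap_dir_inode = NULL;

    // Step 3: look the same directory up by (ino, snapid) instead of by path.
    ceph_ll_lookup_vino(cmount, vino, &snap_dir_inode);

    // Step 4: same listing helper as before. At this point it returns only
    // "." and "..", even though the snapshot contains dir1, dir2, file1, file2.
    readdir_all(cmount, snap_dir_inode, perms);
    ceph_ll_put(cmount, snap_dir_inode);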


  5. Get the Inode of "user_root/.snap/_sofs-4-5_1099511627778/" using ceph_ll_walk().
  6. Open the directory using the Inode received from ceph_ll_walk() and do ceph_readdirplus_r().
     We see ALL the dentries of the files and directories in the snapshot. This is expected and correct! (A condensed sketch follows the output below.)

    =================================/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root/.snap/_sofs-4-5_1099511627778/=====================================

    Path/Name        :"/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root/.snap/_sofs-4-5_1099511627778/"
    Inode Address    : 0x7f5ce000a110
    Inode Number     : 1099511629282
    Snapshot Number  : 6
    Inode Number     : 1099511629282
    Snapshot Number  : 6
    . Ino: 1099511629282 SnapId: 6 Address: 0x7f5ce000a110
    .. Ino: 1099511629282 SnapId: 18446744073709551615 Address: 0x5630ab946340
    file1 Ino: 1099511628291 SnapId: 6 Address: 0x7f5ce000aa90
    dir1 Ino: 1099511628289 SnapId: 6 Address: 0x7f5ce000b180
    dir2 Ino: 1099511628290 SnapId: 6 Address: 0x7f5ce000b800
    file2 Ino: 1099511628292 SnapId: 6 Address: 0x7f5ce000be80
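
    Steps 5-6 are roughly the following (condensed from the attached program);
    note that the snapshot id can be read back from statx.stx_dev here, which is
    how the value 6 was confirmed:

    // Step 5: resolve the snapshot directory through its .snap path.
    struct Inode *snap_path_inode = NULL;
    ceph_ll_walk(cmount,
                 "/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root/"
                 ".snap/_sofs-4-5_1099511627778/",
                 &snap_path_inode, &stx, CEPH_STATX_ALL_STATS, 0, perms);
    // stx.stx_dev now holds the snapshot id (6 for "sofs-4-5").

    // Step 6: list it; all of the snapshot's dentries show up here.
    readdir_all(cmount, snap_path_inode, perms);
    ceph_ll_put(cmount, snap_path_inode);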

  7. Now, again using ceph_ll_lookup_vino(), get the Inode * of "user_root" for snapshot 6 ("sofs-4-5" has snapshot id 6).
  8. Open the directory using the Inode * received from ceph_ll_lookup_vino() and do ceph_readdirplus_r().
     Now we see all the files and directories in the snapshot!

    =================================1099511629282:6=====================================

    Path/Name        :"1099511629282:6"
    Inode Address    : 0x7f5ce000a110
    Inode Number     : 1099511629282
    Snapshot Number  : 6
    Inode Number     : 1099511629282
    Snapshot Number  : 6
    . Ino: 1099511629282 SnapId: 6 Address: 0x7f5ce000a110
    .. Ino: 1099511629282 SnapId: 18446744073709551615 Address: 0x5630ab946340
    file1 Ino: 1099511628291 SnapId: 6 Address: 0x7f5ce000aa90
    dir1 Ino: 1099511628289 SnapId: 6 Address: 0x7f5ce000b180
    dir2 Ino: 1099511628290 SnapId: 6 Address: 0x7f5ce000b800
    file2 Ino: 1099511628292 SnapId: 6 Address: 0x7f5ce000be80

Am I missing something when using these APIs?

Files attached to this email:
Full output of the program - snapshot_inode_lookup.cpp_output.txt <attached>
C++ program - snapshot_inode_lookup.cpp <attached>
/etc/ceph/ceph.conf - <attached>
Ceph client log during the run of this C++ program - client.log <attached>

Compile Command:
g++ -o snapshot_inode_lookup  ./snapshot_inode_lookup.cpp -g -ldl -ldw -lcephfs -lboost_filesystem --std=c++17

Linux Details,
root@ss-joe-01(bash):/home/hydrauser# uname -a
Linux ss-joe-01 5.10.0-23-amd64 #1 SMP Debian 5.10.179-1 (2023-05-12) x86_64 GNU/Linux
root@ss-joe-01(bash):/home/hydrauser#

Ceph Details,

root@ss-joe-01(bash):/home/hydrauser# ceph -v
ceph version 17.2.5 (98318ae89f1a893a6ded3a640405cdbb33e08757) quincy (stable)
root@ss-joe-01(bash):/home/hydrauser#
root@ss-joe-01(bash):/home/hydrauser# ceph -s
  cluster:
    id:     fb43d857-d165-4189-87fc-cf1debce9170
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ss-joe-01,ss-joe-02,ss-joe-03 (age 4d)
    mgr: ss-joe-01(active, since 4d), standbys: ss-joe-03, ss-joe-02
    mds: 1/1 daemons up
    osd: 3 osds: 3 up (since 4d), 3 in (since 4d)

  data:
    volumes: 1/1 healthy
    pools:   3 pools, 49 pgs
    objects: 39 objects, 1.0 MiB
    usage:   96 MiB used, 30 GiB / 30 GiB avail
    pgs:     49 active+clean

root@ss-joe-01(bash):/home/hydrauser#
root@ss-joe-01(bash):/home/hydrauser# dpkg -l | grep ceph
ii  ceph                                               17.2.5-1~bpo11+1               amd64        distributed storage and file system
ii  ceph-base                                          17.2.5-1~bpo11+1               amd64        common ceph daemon libraries and management tools
ii  ceph-base-dbg                                      17.2.5-1~bpo11+1               amd64        debugging symbols for ceph-base
ii  ceph-common                                        17.2.5-1~bpo11+1               amd64        common utilities to mount and interact with a ceph storage cluster
ii  ceph-common-dbg                                    17.2.5-1~bpo11+1               amd64        debugging symbols for ceph-common
ii  ceph-fuse                                          17.2.5-1~bpo11+1               amd64        FUSE-based client for the Ceph distributed file system
ii  ceph-fuse-dbg                                      17.2.5-1~bpo11+1               amd64        debugging symbols for ceph-fuse
ii  ceph-mds                                           17.2.5-1~bpo11+1               amd64        metadata server for the ceph distributed file system
ii  ceph-mds-dbg                                       17.2.5-1~bpo11+1               amd64        debugging symbols for ceph-mds
ii  ceph-mgr                                           17.2.5-1~bpo11+1               amd64        manager for the ceph distributed storage system
ii  ceph-mgr-cephadm                                   17.2.5-1~bpo11+1               all          cephadm orchestrator module for ceph-mgr
ii  ceph-mgr-dashboard                                 17.2.5-1~bpo11+1               all          dashboard module for ceph-mgr
ii  ceph-mgr-dbg                                       17.2.5-1~bpo11+1               amd64        debugging symbols for ceph-mgr
ii  ceph-mgr-diskprediction-local                      17.2.5-1~bpo11+1               all          diskprediction-local module for ceph-mgr
ii  ceph-mgr-k8sevents                                 17.2.5-1~bpo11+1               all          kubernetes events module for ceph-mgr
ii  ceph-mgr-modules-core                              17.2.5-1~bpo11+1               all          ceph manager modules which are always enabled
ii  ceph-mon                                           17.2.5-1~bpo11+1               amd64        monitor server for the ceph storage system
ii  ceph-mon-dbg                                       17.2.5-1~bpo11+1               amd64        debugging symbols for ceph-mon
ii  ceph-osd                                           17.2.5-1~bpo11+1               amd64        OSD server for the ceph storage system
ii  ceph-osd-dbg                                       17.2.5-1~bpo11+1               amd64        debugging symbols for ceph-osd
ii  ceph-volume                                        17.2.5-1~bpo11+1               all          tool to facilidate OSD deployment
ii  cephadm                                            17.2.5-1~bpo11+1               amd64        cephadm utility to bootstrap ceph daemons with systemd and containers
ii  libcephfs2                                         17.2.5-1~bpo11+1               amd64        Ceph distributed file system client library
ii  libcephfs2-dbg                                     17.2.5-1~bpo11+1               amd64        debugging symbols for libcephfs2
ii  libsqlite3-mod-ceph                                17.2.5-1~bpo11+1               amd64        SQLite3 VFS for Ceph
ii  libsqlite3-mod-ceph-dbg                            17.2.5-1~bpo11+1               amd64        debugging symbols for libsqlite3-mod-ceph
ii  python3-ceph-argparse                              17.2.5-1~bpo11+1               all          Python 3 utility libraries for Ceph CLI
ii  python3-ceph-common                                17.2.5-1~bpo11+1               all          Python 3 utility libraries for Ceph
ii  python3-cephfs                                     17.2.5-1~bpo11+1               amd64        Python 3 libraries for the Ceph libcephfs library
ii  python3-cephfs-dbg                                 17.2.5-1~bpo11+1               amd64        Python 3 libraries for the Ceph libcephfs library
root@ss-joe-01(bash):/home/hydrauser#


root@ss-joe-01(bash):/home/hydrauser# ./snapshot_inode_lookup
=================================/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root/=====================================

Path/Name        :"/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root/"
Inode Address    : 0x7fc1f0009990
Inode Number     : 1099511629282
Snapshot Number  : 18446744073709551614
Inode Number     : 1099511629282
Snapshot Number  : 18446744073709551614
. Ino: 1099511629282 SnapId: 18446744073709551614 Address: 0x7fc1f0009990
.. Ino: 1099511627779 SnapId: 18446744073709551614 Address: 0x7fc1f0009180

=================================1099511629282:6=====================================

Path/Name        :"1099511629282:6"
Inode Address    : 0x7fc1f000a1a0
Inode Number     : 1099511629282
Snapshot Number  : 6
Inode Number     : 1099511629282
Snapshot Number  : 6
. Ino: 1099511629282 SnapId: 6 Address: 0x7fc1f000a1a0
.. Ino: 1099511629282 SnapId: 6 Address: 0x7fc1f000a1a0

=================================/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root/.snap/_sofs-4-5_1099511627778/=====================================

Path/Name        :"/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root/.snap/_sofs-4-5_1099511627778/"
Inode Address    : 0x7fc1f000a1a0
Inode Number     : 1099511629282
Snapshot Number  : 6
Inode Number     : 1099511629282
Snapshot Number  : 6
. Ino: 1099511629282 SnapId: 6 Address: 0x7fc1f000a1a0
.. Ino: 1099511629282 SnapId: 18446744073709551615 Address: 0x55fda038cbe0
file1 Ino: 1099511628291 SnapId: 6 Address: 0x7fc1f000ab20
dir1 Ino: 1099511628289 SnapId: 6 Address: 0x7fc1f000b210
dir2 Ino: 1099511628290 SnapId: 6 Address: 0x7fc1f000b890
file2 Ino: 1099511628292 SnapId: 6 Address: 0x7fc1f000bf10

=================================1099511629282:6=====================================

Path/Name        :"1099511629282:6"
Inode Address    : 0x7fc1f000a1a0
Inode Number     : 1099511629282
Snapshot Number  : 6
Inode Number     : 1099511629282
Snapshot Number  : 6
. Ino: 1099511629282 SnapId: 6 Address: 0x7fc1f000a1a0
.. Ino: 1099511629282 SnapId: 18446744073709551615 Address: 0x55fda038cbe0
file1 Ino: 1099511628291 SnapId: 6 Address: 0x7fc1f000ab20
dir1 Ino: 1099511628289 SnapId: 6 Address: 0x7fc1f000b210
dir2 Ino: 1099511628290 SnapId: 6 Address: 0x7fc1f000b890
file2 Ino: 1099511628292 SnapId: 6 Address: 0x7fc1f000bf10

root@ss-joe-01(bash):/home/hydrauser#

// snapshot_inode_lookup.cpp
#define _LARGEFILE_SOURCE
#define _LARGEFILE64_SOURCE
#define _FILE_OFFSET_BITS 64

#include <assert.h>
#include <bits/stdc++.h>
#include <cephfs/libcephfs.h>
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <boost/algorithm/string.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/filesystem.hpp>
#include <boost/thread.hpp>
#include <boost/variant.hpp>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <ostream>
#include <regex>
#include <shared_mutex>
#include <string>

using namespace std;

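// Snapshot id of "sofs-4-5", obtained from libcephfs (statx.stx_dev after
// walking the snapshot's .snap path; see lookupsnapshot() below).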
#define SNAPSHOT_NUMBER 6

/*=============Types to match ceph vinodeno_t==============*/
typedef struct inodeno_t {
  uint64_t val;
} inodeno_t;

typedef struct _snapid_t {
  uint64_t val;
} snapid_t;

typedef struct vinodeno_t {
  inodeno_t ino;
  snapid_t snapid;
} vinodeno_t;
/*=====================================================*/

unsigned int attrmask2ceph_want() {
  unsigned int want = 0;

  want |= CEPH_STATX_MODE;
  want |= CEPH_STATX_UID;
  want |= CEPH_STATX_GID;
  want |= CEPH_STATX_SIZE;
  want |= CEPH_STATX_NLINK;
  want |= CEPH_STATX_BLOCKS;
  want |= CEPH_STATX_ATIME;
  want |= CEPH_STATX_CTIME;
  want |= CEPH_STATX_MTIME;
  want |= CEPH_STATX_BTIME;
  want |= CEPH_STATX_VERSION;

  return want;
}

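// Iterate an open directory with ceph_readdirplus_r(), printing each entry's
// name, inode number, snap id (stx_dev) and Inode*, and dropping the Inode ref.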
int readdirplus_(struct ceph_mount_info *cmount,
                 struct ceph_dir_result *dirpp) {
  int ret = -1;
  bool eof_flag = false;

  while (!eof_flag) {
    struct ceph_statx stx;
    struct dirent de;
    struct Inode *ceph_inode = NULL;
    ret = ceph_readdirplus_r(cmount, dirpp, &de, &stx, attrmask2ceph_want(), 0,
                             &ceph_inode);
    if (ret < 0) {
      printf("Failed ceph_readdirplus_r() %d\n", ret);
      goto out;
    } else if (ret == 1) {
      cout << de.d_name << " Ino: " << stx.stx_ino << " SnapId: " << stx.stx_dev
           << " Address: " << ceph_inode << endl;
      ceph_ll_put(cmount, ceph_inode);
    } else if (ret == 0) {
      eof_flag = true;
    }
  }
out:
  return ret;
}

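// Open dir_inode, list it with readdirplus_(), then release the directory handle.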
int readdir_all(struct ceph_mount_info *cmount, Inode *dir_inode,
                UserPerm *perms) {
  int ret = -1;
  struct ceph_dir_result *dirpp = NULL;
  ret = ceph_ll_opendir(cmount, dir_inode, &dirpp, perms);
  if (ret) {
    printf("Failed ceph_ll_opendir() %d\n", ret);
    goto out;
  }

  ret = readdirplus_(cmount, dirpp);
  if (ret) {
    printf("Failed readdir_() %d\n", ret);
    goto out;
  }
  cout << endl;

  ceph_ll_releasedir(cmount, dirpp);
  ret = 0;
out:
  return ret;
}

void printDetails(boost::filesystem::path &path, Inode *ceph_inode,
                  uint64_t inode_number, uint64_t snapshot_id,
                  struct ceph_statx &statx) {
  cout << endl;
  cout << "Path/Name        :" << path << endl;
  cout << "Inode Address    : " << ceph_inode << endl;
  cout << "Inode Number     : " << inode_number << endl;
  cout << "Snapshot Number  : " << snapshot_id << endl;
  cout << "Inode Number     : " << statx.stx_ino << endl;
  cout << "Snapshot Number  : " << statx.stx_dev << endl;
}

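// Reproduce steps E.1-E.8: list user_root via ceph_ll_walk(), via
// ceph_ll_lookup_vino() (snap 6), via its .snap path, and via
// ceph_ll_lookup_vino() again.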
int lookupsnapshot(struct ceph_mount_info *cmount,
                   boost::filesystem::path user_root_dir_path) {
  int ret = -1;
  boost::filesystem::path subvolume_snapshot_path =
      "/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root/"
      ".snap/_sofs-4-5_1099511627778/";
  Inode *user_root_dir_snapshot_inode = NULL;
  Inode *user_root_dir_inode = NULL;
  struct ceph_statx statx = {};
  UserPerm *perms = NULL;
  uint64_t snapshot_number = SNAPSHOT_NUMBER;
  vinodeno_t vino = {0, 0};
  perms = ceph_mount_perms(cmount);
  assert(perms);
  uint64_t user_root_dir_inode_number = 0;
  boost::filesystem::path temp_path;
  /**********************************************************
                user_root dir path Lookup and Readdir
  ***********************************************************/
  cout << "=================================" << user_root_dir_path.c_str()
       << "=====================================" << endl;
  ret = ceph_ll_walk(cmount, user_root_dir_path.c_str(), &user_root_dir_inode,
                     &statx, CEPH_STATX_ALL_STATS, 0, perms);
  if (ret != 0) {
    printf("Failed to get Subvolume root inode\n");
    goto out;
  }
  printDetails(user_root_dir_path, user_root_dir_inode, statx.stx_ino,
               statx.stx_dev, statx);
  ret = readdir_all(cmount, user_root_dir_inode, perms);
  if (ret) {
    cout << "Failed readdir_all for " << user_root_dir_path << endl;
    goto out;
  }
  // Grab the inode number of user_root for the vino lookups below.
  user_root_dir_inode_number = statx.stx_ino;
  ceph_ll_put(cmount, user_root_dir_inode);
  /**********************************************************
                First Snapshot Vinode Lookup and Readdir
  ***********************************************************/
  cout << "=================================" << user_root_dir_inode_number
       << ":" << snapshot_number
       << "=====================================" << endl;
  vino.ino.val = user_root_dir_inode_number;
  vino.snapid.val = snapshot_number;
  ret = ceph_ll_lookup_vino(cmount, vino, &user_root_dir_snapshot_inode);
  if (ret != 0) {
    printf("Failed to get Subvolume root inode\n");
    goto out;
  }
  ret = ceph_ll_getattr(cmount, user_root_dir_snapshot_inode, &statx,
                        CEPH_STATX_ALL_STATS, 0, perms);
  if (ret) {
    printf("Failed ceph_ll_getattr :%d\n", ret);
    goto out;
  }
  temp_path = "";
  temp_path.append(std::to_string(user_root_dir_inode_number) + ":" +
                   std::to_string(snapshot_number));
  printDetails(temp_path, user_root_dir_snapshot_inode, vino.ino.val,
               vino.snapid.val, statx);
  ret = readdir_all(cmount, user_root_dir_snapshot_inode, perms);
  if (ret) {
    cout << "Failed readdir_all for " << temp_path << endl;
    goto out;
  }
  ceph_ll_put(cmount, user_root_dir_snapshot_inode);
  /**********************************************************
                Snapshot path Lookup and Readdir
  ***********************************************************/
  cout << "=================================" << subvolume_snapshot_path.c_str()
       << "=====================================" << endl;
  ret = ceph_ll_walk(cmount, subvolume_snapshot_path.c_str(),
                     &user_root_dir_snapshot_inode, &statx,
                     CEPH_STATX_ALL_STATS, 0, perms);
  if (ret != 0) {
    printf("Failed to get Subvolume root inode\n");
    goto out;
  }
  // Extract the snapshot from statx
  snapshot_number = statx.stx_dev;
  // To ensure that snapshot numbers match!
  assert(snapshot_number == SNAPSHOT_NUMBER);

  printDetails(subvolume_snapshot_path, user_root_dir_snapshot_inode,
               statx.stx_ino, statx.stx_dev, statx);
  ret = readdir_all(cmount, user_root_dir_snapshot_inode, perms);
  if (ret) {
    cout << "Failed readdir_all for " << subvolume_snapshot_path << endl;
    goto out;
  }
  ceph_ll_put(cmount, user_root_dir_snapshot_inode);
  /**********************************************************
              Second Snapshot Vinode Lookup and Readdir
  ***********************************************************/
  cout << "=================================" << user_root_dir_inode_number
       << ":" << snapshot_number
       << "=====================================" << endl;
  vino.ino.val = user_root_dir_inode_number;
  vino.snapid.val = snapshot_number;
  ret = ceph_ll_lookup_vino(cmount, vino, &user_root_dir_snapshot_inode);
  if (ret != 0) {
    printf("Failed to get Subvolume root inode\n");
    goto out;
  }
  temp_path = "";
  temp_path.append(std::to_string(user_root_dir_inode_number) + ":" +
                   std::to_string(snapshot_number));
  printDetails(temp_path, user_root_dir_snapshot_inode, vino.ino.val,
               vino.snapid.val, statx);
  ret = readdir_all(cmount, user_root_dir_snapshot_inode, perms);
  if (ret) {
    cout << "Failed readdir_all for " << temp_path << endl;
    goto out;
  }
  ceph_ll_put(cmount, user_root_dir_snapshot_inode);
out:
  return ret;
}

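// Create and configure the libcephfs client, mount, resolve user_root with
// ceph_ll_walk(), then run lookupsnapshot() for the listings described above.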
int main() {
  int ret = -1;
  struct ceph_mount_info *cmount = NULL;
  const char id[] = "admin";
  Inode *user_root_dir_inode = NULL;
  struct ceph_statx statx = {};
  UserPerm *perms = NULL;
  boost::filesystem::path ceph_conf_path = "/etc/ceph/ceph.conf";
  boost::filesystem::path user_root_dir_path =
      "/volumes/_nogroup/4/f0fae76f-196d-4ebd-b8d0-528985505b23/user_root/";

  assert(ceph_conf_path.size() != 0);

  /* allocates ceph_mount_info */
  ret = ceph_create(&cmount, id);
  if (ret != 0) {
    printf("Unable to create Ceph handle for %d", id);
    goto out;
  }

  ret = ceph_conf_read_file(cmount, ceph_conf_path.c_str());
  if (ret != 0) {
    printf("Unable to read Ceph configuration for %s \n", ceph_conf_path);
    goto out;
  }

  ret =
      ceph_conf_set(cmount, "client_mountpoint", (user_root_dir_path.c_str()));
  if (ret) {
    printf("Unable to set Ceph client_mountpoint for %s, %d\n",
           user_root_dir_path.c_str(), " ", ret);
    goto out;
  }

  ret = ceph_conf_parse_env(cmount, NULL);
  if (ret != 0) {
    printf("Unable to ceph_conf_parse_env.\n");
    goto out;
  }

  ret = ceph_mount(cmount, NULL);
  if (ret != 0) {
    printf("Unable to ceph_mount\n");
    goto out;
  }
  perms = ceph_mount_perms(cmount);
  assert(perms);

  ret = ceph_ll_walk(cmount, user_root_dir_path.c_str(), &user_root_dir_inode,
                     &statx, CEPH_STATX_ALL_STATS, 0, perms);
  if (ret != 0) {
    printf("Failed to get Subvolume root inode\n");
    goto out;
  }

  ret = lookupsnapshot(cmount, user_root_dir_path);
out:
  if (cmount) {
    if (user_root_dir_inode) ceph_ll_put(cmount, user_root_dir_inode);
    ceph_shutdown(cmount);
  }
  return ret;
}


