Hi,

Thank you so much.
This all sounds good now, but I am still not sure, because the df output is different on the two nodes (see the /dev/xvdb1 line on each):

[root@srvhttp0 results]# df
Filesystem              1K-blocks    Used Available Use% Mounted on
/dev/mapper/fedora-root   2587248 2128160    307948  88% /
devtmpfs                   493056       0    493056   0% /dev
tmpfs                      506240   50648    455592  11% /dev/shm
tmpfs                      506240     236    506004   1% /run
tmpfs                      506240       0    506240   0% /sys/fs/cgroup
tmpfs                      506240      12    506228   1% /tmp
/dev/xvda1                 487652  106846    351110  24% /boot
/dev/xvdb1                2085888  551292   1534596  27% /gv
localhost:/gv_html        2085888  587776   1498112  29% /var/www/html
[root@srvhttp0 results]# cd /gv
[root@srvhttp0 gv]# ls -la
total 8
drwxr-xr-x   3 root root   17 Jan 28 14:43 .
dr-xr-xr-x. 19 root root 4096 Jan 26 10:10 ..
drwxr-xr-x   4 root root   37 Jan 28 14:43 html
[root@srvhttp0 gv]#

[root@srvhttp1 html]# df
Filesystem              1K-blocks    Used Available Use% Mounted on
/dev/mapper/fedora-root   2587248 2355180     80928  97% /
devtmpfs                   126416       0    126416   0% /dev
tmpfs                      139600   35252    104348  26% /dev/shm
tmpfs                      139600     208    139392   1% /run
tmpfs                      139600       0    139600   0% /sys/fs/cgroup
tmpfs                      139600       8    139592   1% /tmp
/dev/xvda1                 487652  106846    351110  24% /boot
/dev/xvdb1                2085888  587752   1498136  29% /gv
localhost:/gv_html        2085888  587776   1498112  29% /var/www/html
[root@srvhttp1 html]#
[root@srvhttp1 html]# cd /gv
[root@srvhttp1 gv]# ll -a
total 12
drwxr-xr-x   3 root root   17 Jan 28 14:42 .
dr-xr-xr-x. 19 root root 4096 Out 18 11:16 ..
drwxr-xr-x   4 root root   37 Jan 28 14:42 html
[root@srvhttp1 gv]#

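The brick itself (/dev/xvdb1) shows slightly different usage on each node. If that is just the replica catching up, I guess something like the following would show it. This is only a rough check, and it assumes gv_html is a two-node replicated volume, which is how it is mounted above:

gluster volume heal gv_html info   # list any files still pending self-heal
gluster peer status                # confirm the two peers see each other
du -s /gv/html                     # run on each node and compare the brick contents
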
    <div class="moz-cite-prefix">Em 28-01-2014 12:01, Franco Broi
      escreveu:<br>
    </div>
        <p dir="ltr">Every peer has a copy of the files but I'm not sure
          it's 100% safe to remove them entirely. I've never really got
          a definitive answer from the Gluster devs but if your files
          were trashed anyway you don't have anything to lose.</p>
        <p dir="ltr">This is what I did.</p>
        <p dir="ltr">On the bad node stop glusterd</p>
        <p dir="ltr">Make a copy of the /var/lib/glusterd dir, then
          remove it.</p>
        <p dir="ltr">Start glusterd</p>
        <p dir="ltr">peer probe the good node.</p>
        <p dir="ltr">Restart glusterd</p>
        <p dir="ltr">And that should be it. Check the files are there. </p>
        <p dir="ltr">If it doesn't work you can restore the files from
          the backup copy.<br>
        </p>
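Roughly, those steps as shell commands. This is only a sketch: it assumes the bad node is srvhttp1 and the good node is srvhttp0 (going by the prompts above), and that glusterd is managed as a systemd service on your Fedora nodes.

# on the bad node (assumed to be srvhttp1)
systemctl stop glusterd
cp -a /var/lib/glusterd /var/lib/glusterd.bak   # backup copy to restore if it goes wrong
rm -rf /var/lib/glusterd
systemctl start glusterd
gluster peer probe srvhttp0                     # probe the good node
systemctl restart glusterd
gluster volume info                             # check the volume configuration came back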
        <div class="x_quote">On 28 Jan 2014 21:48, Jefferson Carlos
          Machado <a class="moz-txt-link-rfc2396E" href="mailto:lista.linux@results.com.br">&lt;lista.linux@results.com.br&gt;</a> wrote:<br
            type="attribution">
        </div>
      </div>
      <font size="2"><span style="font-size:10pt">
          <div class="PlainText">Hi,<br>

I have only 2 nodes in this cluster.
So, can I remove the config files?

Regards,

On 28-01-2014 04:17, Franco Broi wrote:
> I think Jefferson's problem might have been due to corrupted config
> files, maybe because the /var partition was full as suggested by Paul
> Boven, but as has been pointed out before, the error messages don't make
> it obvious what's wrong.
>
> He got glusterd started but now the peers can't communicate, probably
> because a uuid is wrong. This is a weird problem to debug, because the
> clients can see the data but df may not show the full size, and you
> wouldn't know anything was wrong until, like Jefferson, you looked in the
> gluster log file.
>
> [2014-01-27 15:48:19.580353] E [socket.c:2788:socket_connect] 0-management: connection attempt failed (Connection refused)
> [2014-01-27 15:48:19.583374] I [glusterd-utils.c:1079:glusterd_volume_brickinfo_get] 0-management: Found brick
> [2014-01-27 15:48:22.584029] E [socket.c:2788:socket_connect] 0-management: connection attempt failed (Connection refused)
> [2014-01-27 15:48:22.607477] I [glusterd-utils.c:1079:glusterd_volume_brickinfo_get] 0-management: Found brick
> [2014-01-27 15:48:25.608186] E [socket.c:2788:socket_connect] 0-management: connection attempt failed (Connection refused)
> [2014-01-27 15:48:25.612032] I [glusterd-utils.c:1079:glusterd_volume_brickinfo_get] 0-management: Found brick
> [2014-01-27 15:48:28.612638] E [socket.c:2788:socket_connect] 0-management: connection attempt failed (Connection refused)
> [2014-01-27 15:48:28.615509] I [glusterd-utils.c:1079:glusterd_volume_brickinfo_get] 0-management: Found brick
>
> I think the advice should be: if you have a working peer, use a peer
> probe and a glusterd restart to restore the files, but in order for this
> to work you have to remove all the config files first, so that glusterd
> will start in the first place.
>
>
> On Tue, 2014-01-28 at 08:32 +0530, shwetha wrote:
>> Hi Jefferson,
>>
>> glusterd doesn't start because it's not able to find the brick path for
>> the volume, or the brick path doesn't exist any more.
>>
>> Please refer to the bug
>> https://bugzilla.redhat.com/show_bug.cgi?id=1036551
>>
>> Check if the brick path is available.
>>
>> -Shwetha
>>
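For reference, one way to check which brick path glusterd expects might be to look at the brick files it keeps under /var/lib/glusterd. This is only a rough check: it assumes the volume is named gv_html, as in the df output above, and the layout of those files can differ between versions.

grep -s path= /var/lib/glusterd/vols/gv_html/bricks/*   # path glusterd has recorded for each brick
ls -ld /gv/html                                         # confirm that directory actually exists
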
>> On 01/27/2014 05:23 PM, Jefferson Carlos Machado wrote:
>>
>>> Hi,
>>>
>>> Please, help me!!
>>>
>>> After rebooting my system, the glusterd service doesn't start.
>>>
>>> The /var/log/glusterfs/etc-glusterfs-glusterd.vol.log shows:
>>>
>>> [2014-01-27 09:27:02.898807] I [glusterfsd.c:1910:main] 0-/usr/sbin/glusterd: Started running /usr/sbin/glusterd version 3.4.2 (/usr/sbin/glusterd -p /run/glusterd.pid)
>>> [2014-01-27 09:27:02.909147] I [glusterd.c:961:init] 0-management: Using /var/lib/glusterd as working directory
>>> [2014-01-27 09:27:02.913247] I [socket.c:3480:socket_init] 0-socket.management: SSL support is NOT enabled
>>> [2014-01-27 09:27:02.913273] I [socket.c:3495:socket_init] 0-socket.management: using system polling thread
>>> [2014-01-27 09:27:02.914337] W [rdma.c:4197:__gf_rdma_ctx_create] 0-rpc-transport/rdma: rdma_cm event channel creation failed (No such device)
>>> [2014-01-27 09:27:02.914359] E [rdma.c:4485:init] 0-rdma.management: Failed to initialize IB Device
>>> [2014-01-27 09:27:02.914375] E [rpc-transport.c:320:rpc_transport_load] 0-rpc-transport: 'rdma' initialization failed
>>> [2014-01-27 09:27:02.914535] W [rpcsvc.c:1389:rpcsvc_transport_create] 0-rpc-service: cannot create listener, initing the transport failed
>>> [2014-01-27 09:27:05.337557] I [glusterd-store.c:1339:glusterd_restore_op_version] 0-glusterd: retrieved op-version: 2
>>> [2014-01-27 09:27:05.373853] E [glusterd-store.c:1858:glusterd_store_retrieve_volume] 0-: Unknown key: brick-0
>>> [2014-01-27 09:27:05.373927] E [glusterd-store.c:1858:glusterd_store_retrieve_volume] 0-: Unknown key: brick-1
>>> [2014-01-27 09:27:06.166721] I [glusterd.c:125:glusterd_uuid_init] 0-management: retrieved UUID: 28f232e9-564f-4866-8014-32bb020766f2
>>> [2014-01-27 09:27:06.169422] E [glusterd-store.c:2487:glusterd_resolve_all_bricks] 0-glusterd: resolve brick failed in restore
>>> [2014-01-27 09:27:06.169491] E [xlator.c:390:xlator_init] 0-management: Initialization of volume 'management' failed, review your volfile again
>>> [2014-01-27 09:27:06.169516] E [graph.c:292:glusterfs_graph_init] 0-management: initializing translator failed
>>> [2014-01-27 09:27:06.169532] E [graph.c:479:glusterfs_graph_activate] 0-graph: init failed
>>> [2014-01-27 09:27:06.169769] W [glusterfsd.c:1002:cleanup_and_exit] (-->/usr/sbin/glusterd(main+0x3df) [0x7f23c76588ef] (-->/usr/sbin/glusterd(glusterfs_volumes_init+0xb0) [0x7f23c765b6e0] (-->/usr/sbin/glusterd(glusterfs_process_volfp+0x103) [0x7f23c765b5f3]))) 0-: received signum (0), shutting down
>>>