<div style="line-height:1.7;color:#000000;font-size:14px;font-family:Arial"><div>hi all,<br><br>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; I add some codes in glusterfs 3.6.1, this codes is used to print log when a lock be hold for more than 20 seconds. <br><br>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Now I use two nodes to replay the problem, 192.168.51.115 and 
192.168.51.116. I create a replica volume with 3 bricks in 51.115, than 
mount the volume in 51.116 and 51.115 at the same time, I run "dd 
if=/dev/zero of=/mnt/ida/data1 bs=128k" in 51.116 and "dd if=/dev/zero 
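For reference, the setup is roughly the following; the brick paths and NFS mount options shown here are only an approximation, not copied from my nodes:

# on 192.168.51.115: create and start a 3-brick replica volume
# (brick paths below are placeholders)
gluster volume create ida replica 3 \
        192.168.51.115:/bricks/ida1 \
        192.168.51.115:/bricks/ida2 \
        192.168.51.115:/bricks/ida3 force
gluster volume start ida

# on 51.115 and 51.116: mount the volume over NFSv3 and write from both mounts
mount -t nfs -o vers=3,nolock 192.168.51.115:/ida /mnt/ida
dd if=/dev/zero of=/mnt/ida/data1 bs=128k      # data2 on the other node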
Several minutes later the problem shows up. Here is the instrumentation I added (output of git diff):

diff --git a/xlators/features/locks/src/inodelk.c b/xlators/features/locks/src/inodelk.c
index ef06531..cc46d21 100644
--- a/xlators/features/locks/src/inodelk.c
+++ b/xlators/features/locks/src/inodelk.c
@@ -192,6 +192,19 @@ __owner_has_lock (pl_dom_list_t *dom, pl_inode_lock_t *newlock)
         return 0;
 }
 
+void
+lock_timeout (void *p)
+{
+        pl_inode_lock_t *lock = (pl_inode_lock_t *)p;
+
+        gf_log (" ", GF_LOG_INFO,
+                "%s (pid=%d) lk-owner:%s %"PRId64" - %"PRId64" => 20s timeouted",
+                lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
+                lock->client_pid,
+                lkowner_utoa (&lock->owner),
+                lock->user_flock.l_start,
+                lock->user_flock.l_len);
+}
 
 /* Determines if lock can be granted and adds the lock. If the lock
  * is blocking, adds it to the blocked_inodelks list of the domain.
@@ -202,6 +215,7 @@ __lock_inodelk (xlator_t *this, pl_inode_t *pl_inode, pl_inode_lock_t *lock,
 {
         pl_inode_lock_t *conf = NULL;
         int ret = -EINVAL;
+        struct timespec tv = {0, };
 
         conf = __inodelk_grantable (dom, lock);
         if (conf) {
@@ -247,6 +261,8 @@ __lock_inodelk (xlator_t *this, pl_inode_t *pl_inode, pl_inode_lock_t *lock,
         }
         __pl_inodelk_ref (lock);
         gettimeofday (&lock->granted_time, NULL);
+        tv.tv_sec = 20;
+        lock->timer = gf_timer_call_after (this->ctx, tv, lock_timeout, lock);
         list_add (&lock->list, &dom->inodelk_list);
 
         ret = 0;
@@ -298,6 +314,7 @@ __inode_unlock_lock (xlator_t *this, pl_inode_lock_t *lock, pl_dom_list_t *dom)
                 goto out;
         }
         __delete_inode_lock (conf);
+        if (conf->timer) gf_timer_call_cancel (this->ctx, conf->timer);
         gf_log (this->name, GF_LOG_DEBUG,
                 " Matching lock found for unlock %llu-%llu, by %s on %p",
                 (unsigned long long)lock->fl_start,
@@ -460,6 +477,7 @@ pl_inodelk_client_cleanup (xlator_t *this, pl_ctx_t *ctx)
                                         list_add_tail (&l->client_list,
                                                        &unwind);
                                 }
+                                if (l->timer) gf_timer_call_cancel (this->ctx, l->timer);
                         }
                         pthread_mutex_unlock (&pl_inode->mutex);
                 }
diff --git a/xlators/features/locks/src/locks.h b/xlators/features/locks/src/locks.h
index f761b3d..863f810 100644
--- a/xlators/features/locks/src/locks.h
+++ b/xlators/features/locks/src/locks.h
@@ -22,6 +22,7 @@
 #include "client_t.h"
 
 #include "lkowner.h"
+#include "timer.h"
 
 struct __pl_fd;
 
@@ -82,6 +83,7 @@ struct __pl_inode_lock {
         char              *connection_id; /* stores the client connection id */
 
         struct list_head   client_list; /* list of all locks from a client */
+        gf_timer_t        *timer;
 };
 typedef struct __pl_inode_lock pl_inode_lock_t;
 
diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c
index af25a10..5b207e3 100644
--- a/xlators/features/locks/src/posix.c
+++ b/xlators/features/locks/src/posix.c
@@ -1745,6 +1745,7 @@ pl_forget (xlator_t *this,
                                         "Pending inode locks found, releasing.");
 
                                 list_for_each_entry_safe (ino_l, ino_tmp, &dom->inodelk_list, list) {
+                                        if (ino_l->timer) gf_timer_call_cancel (this->ctx, ino_l->timer);
                                         __delete_inode_lock (ino_l);
                                         __pl_inodelk_unref (ino_l);
                                 }
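Besides this instrumentation, the granted and blocked inode locks on each brick can also be cross-checked with a statedump; the directory below is the usual default and the dump file name is a placeholder:

# run on one of the server nodes; asks every brick of the volume to dump
# its state, including the inodelk entries of the locks xlator
gluster volume statedump ida

# dump files land in the statedump directory (/var/run/gluster by default)
ls /var/run/gluster/
grep -A3 inodelk /var/run/gluster/<brick-dump-file>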
[root@dcs115 d1]# ls -lh
total 21G
-rw-r--r-- 2 root root  14G Dec 10 14:18 data
-rw-r--r-- 2 root root 3.7G Dec 10 14:18 data2

dmesg:
nfs: server 192.168.100.115 not responding, still trying
nfs: server 192.168.100.115 not responding, still trying
nfs: server 192.168.100.115 not responding, still trying
nfs: server 192.168.100.115 not responding, still trying
nfs: server 192.168.100.115 not responding, still trying

nfs log:
[2014-12-10 06:15:44.109630] I [dht-common.c:1822:dht_lookup_cbk] 0-ida-dht: Entry /data missing on subvol ida-replicate-0
[2014-12-10 06:17:35.990049] I [dht-common.c:1822:dht_lookup_cbk] 0-ida-dht: Entry /data2 missing on subvol ida-replicate-0
[2014-12-10 06:19:41.234231] C [rpc-clnt-ping.c:109:rpc_clnt_ping_timer_expired] 0-ida-client-0: server 192.168.100.115:49152 has not responded in the last 42 seconds, disconnecting.
[2014-12-10 06:19:41.234347] C [rpc-clnt-ping.c:109:rpc_clnt_ping_timer_expired] 0-ida-client-1: server 192.168.100.115:49153 has not responded in the last 42 seconds, disconnecting.
[2014-12-10 06:19:42.234466] C [rpc-clnt-ping.c:109:rpc_clnt_ping_timer_expired] 0-ida-client-2: server 192.168.100.115:49154 has not responded in the last 42 seconds, disconnecting.
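(The 42 seconds above is the client-side network.ping-timeout, which I left at its default of 42; if needed it can be tuned per volume, for example:)

gluster volume set ida network.ping-timeout 42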
All bricks have the same logs:
[2014-12-10 06:16:05.196094] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=0) lk-owner:1c7f1b0200000000 0 - 0 => 20s timeouted
[2014-12-10 06:17:05.204617] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=0) lk-owner:1c7f1b0200000000 0 - 0 => 20s timeouted
[2014-12-10 06:17:41.207394] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=0) lk-owner:1c7f1b0200000000 0 - 0 => 20s timeouted
[2014-12-10 06:18:07.208998] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=1) lk-owner:1c7f1b0200000000 0 - 0 => 20s timeouted
[2014-12-10 06:18:07.209057] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=1) lk-owner:1c7f1b0200000000 0 - 0 => 20s timeouted
[2014-12-10 06:18:07.209070] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=1) lk-owner:1c7f1b0200000000 0 - 0 => 20s timeouted
[2014-12-10 06:18:07.209080] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=1) lk-owner:1c7f1b0200000000 0 - 0 => 20s timeouted
[2014-12-10 06:18:07.209090] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=0) lk-owner:887f1b0200000000 0 - 0 => 20s timeouted
[2014-12-10 06:18:07.209100] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=0) lk-owner:887f1b0200000000 0 - 0 => 20s timeouted
[2014-12-10 06:18:07.209111] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=0) lk-owner:887f1b0200000000 0 - 0 => 20s timeouted
[2014-12-10 06:18:07.209122] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=0) lk-owner:887f1b0200000000 0 - 0 => 20s timeouted
[2014-12-10 06:18:07.209132] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=0) lk-owner:887f1b0200000000 0 - 0 => 20s timeouted
[2014-12-10 06:18:07.209143] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=0) lk-owner:887f1b0200000000 0 - 0 => 20s timeouted
[2014-12-10 06:18:07.209153] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=0) lk-owner:887f1b0200000000 0 - 0 => 20s timeouted
[2014-12-10 06:19:03.224007] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=-6) lk-owner:80823be0627f0000 0 - 0 => 20s timeouted
[2014-12-10 06:19:07.224577] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=-6) lk-owner:a45e3be0627f0000 0 - 0 => 20s timeouted
[2014-12-10 06:19:41.234401] W [socket.c:1244:__socket_read_simple_msg] 0-tcp.ida-server: reading from socket failed. Error (No data available), peer (192.168.100.115:947)
[2014-12-10 06:19:41.234513] I [server.c:518:server_rpc_notify] 0-ida-server: disconnecting connection from dcs115-3200-2014/12/10-06:14:38:192810-ida-client-0-0-0
[2014-12-10 06:19:41.234560] W [inodelk.c:409:pl_inodelk_log_cleanup] 0-ida-server: releasing lock on 3c841f93-c637-4119-a7a8-0b39871a0956 held by {client=0x209d290, pid=1 lk-owner=1c7f1b0200000000}
[2014-12-10 06:19:41.234593] W [inodelk.c:409:pl_inodelk_log_cleanup] 0-ida-server: releasing lock on 3c841f93-c637-4119-a7a8-0b39871a0956 held by {client=0x209d290, pid=1 lk-owner=1c7f1b0200000000}
[2014-12-10 06:19:41.234619] W [inodelk.c:409:pl_inodelk_log_cleanup] 0-ida-server: releasing lock on 3c841f93-c637-4119-a7a8-0b39871a0956 held by {client=0x209d290, pid=1 lk-owner=1c7f1b0200000000}
[2014-12-10 06:19:41.234645] W [inodelk.c:409:pl_inodelk_log_cleanup] 0-ida-server: releasing lock on 3c841f93-c637-4119-a7a8-0b39871a0956 held by {client=0x209d290, pid=1 lk-owner=1c7f1b0200000000}
[2014-12-10 06:19:41.234670] W [inodelk.c:409:pl_inodelk_log_cleanup] 0-ida-server: releasing lock on be43df9a-b6dd-4745-aff4-e8162c5e9f6f held by {client=0x209d290, pid=0 lk-owner=887f1b0200000000}
[2014-12-10 06:19:41.234697] W [inodelk.c:409:pl_inodelk_log_cleanup] 0-ida-server: releasing lock on be43df9a-b6dd-4745-aff4-e8162c5e9f6f held by {client=0x209d290, pid=0 lk-owner=887f1b0200000000}
[2014-12-10 06:19:41.234722] W [inodelk.c:409:pl_inodelk_log_cleanup] 0-ida-server: releasing lock on be43df9a-b6dd-4745-aff4-e8162c5e9f6f held by {client=0x209d290, pid=0 lk-owner=887f1b0200000000}
[2014-12-10 06:19:41.234747] W [inodelk.c:409:pl_inodelk_log_cleanup] 0-ida-server: releasing lock on be43df9a-b6dd-4745-aff4-e8162c5e9f6f held by {client=0x209d290, pid=0 lk-owner=887f1b0200000000}
[2014-12-10 06:19:41.234779] W [inodelk.c:409:pl_inodelk_log_cleanup] 0-ida-server: releasing lock on be43df9a-b6dd-4745-aff4-e8162c5e9f6f held by {client=0x209d290, pid=0 lk-owner=887f1b0200000000}
[2014-12-10 06:19:41.234823] W [inodelk.c:409:pl_inodelk_log_cleanup] 0-ida-server: releasing lock on be43df9a-b6dd-4745-aff4-e8162c5e9f6f held by {client=0x209d290, pid=0 lk-owner=887f1b0200000000}
[2014-12-10 06:19:41.234845] W [inodelk.c:409:pl_inodelk_log_cleanup] 0-ida-server: releasing lock on be43df9a-b6dd-4745-aff4-e8162c5e9f6f held by {client=0x209d290, pid=0 lk-owner=887f1b0200000000}
[2014-12-10 06:19:41.235008] I [client_t.c:417:gf_client_unref] 0-ida-server: Shutting down connection dcs115-3200-2014/12/10-06:14:38:192810-ida-client-0-0-0
[2014-12-10 06:20:02.270229] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=-6) lk-owner:80823be0627f0000 9223372036854775805 - 1 => 20s timeouted
[2014-12-10 06:20:02.270267] I [inodelk.c:206:lock_timeout] 0- : Lock (pid=-6) lk-owner:a45e3be0627f0000 9223372036854775805 - 1 => 20s timeouted

On 2014-12-09 17:43:41, "Raghavendra Bhat" <rabhat@redhat.com> wrote:

> On Tuesday 09 December 2014 02:47 PM, shuau li wrote:
    <blockquote cite="mid:38f9fa5b.f5b5.14a2e57115f.Coremail.lishuai_ujs@126.com" type="cite">
      <div style="line-height:1.7;color:#000000;font-size:14px;font-family:Arial">
        <div>hi all,<br>
          <br>
          &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; In my environment, I meet a nfs problem.When I do dd
          write for a while, nfs client will report "nfs server no
          responding".I think this maybe a bug of nfs server, Is anybody
          meet similar problem? <br>
          <br>
          The details of my environment and test&nbsp; are as follow:<br>
          <br>
          glusterfs version:<br>
          &nbsp;&nbsp;&nbsp; 3.6.1<br>
          3 nodes:<br>
          &nbsp; cpu with 8 core 3.20 GHz, 16 GB memory, 10000baseT/Full
          network card<br>
          &nbsp;<br>
          I use two nodes create a replica volume with two bricks,
          another nodes act as client, client use protocol nfs3 to mount
          volume through the 10000baseT/Full network card, then use
          command "dd if=/dev/zero of=/mnt/ida/testdata bs=128k" to
          write data. <br>
          <br>
          <span id="result_box" class="short_text" lang="en"><span class="hps">Generally, </span></span><span id="result_box" class="short_text" lang="en"><span class="hps">only</span> <span class="hps">data</span> <span class="hps">about</span> <span class="hps">10G</span></span>
          can be write successfully. Then client will say "nfs server ip
          no responding".At the same time, I use gdb to trace nfs
          process in nfs server, the result is that nfs server keeps
          staying in epoll_wait, why nfs server&nbsp; can not listen requests
          from nfs client ?<br>
          <br>
          <span id="result_box" class="short_text" lang="en"><span class="hps">In addition, I noticed the difference between
              fuse and nfs. When access through fuse, two threads will
              work together, one </span></span><span id="result_box" class="short_text" lang="en"><span class="alt-edited">responsible</span></span>
          for fuse-bridge, another <span id="result_box" class="short_text" lang="en"><span class="alt-edited">responsible</span></span>
          for listening socket. But in nfs, only one thread work in
          background, both responsible for nfs server and listening
          socket. Will the cause process <span id="result_box" class="short_text" lang="en"><span class="hps">bottleneck</span></span>?<br>
          <br>
          <div id="gt-src-tools">
            <div id="gt-src-tools-l">
              <div style="display: inline-block;" id="gt-input-tool">
                <div id="itamenu"><span class="ita-kd-inputtools-div"></span></div>
              </div>
            </div>
          </div>
          <div id="gt-res-content" class="almost_half_cell">
            <div dir="ltr" style="zoom:1"><span id="result_box" class="short_text" lang="en"><span class="hps">Best
                  wishes£¡£¡</span></span></div>
          </div>
          <br>
          <span id="result_box" class="short_text" lang="en"><span class="hps">Looking forward to your</span> <span class="hps">reply£¡</span></span><br>
          &nbsp;<br>
          &nbsp;&nbsp;&nbsp; <br>
        </div>
      </div>
      <br>
    </blockquote>
>
> Hi,
>
> Can you please attach the nfs server and brick log files? You can
> find the log files in /var/log/glusterfs.
>
> Regards,
> Raghavendra Bhat
    <blockquote cite="mid:38f9fa5b.f5b5.14a2e57115f.Coremail.lishuai_ujs@126.com" type="cite"><br>
      <span title="neteasefooter"><span id="netease_mail_footer"></span></span><br>
      <fieldset class="mimeAttachmentHeader"></fieldset>
      <br>
      <pre wrap="">_______________________________________________
Gluster-users mailing list
<a class="moz-txt-link-abbreviated" href="mailto:Gluster-users@gluster.org">Gluster-users@gluster.org</a>
<a class="moz-txt-link-freetext" href="http://supercolony.gluster.org/mailman/listinfo/gluster-users">http://supercolony.gluster.org/mailman/listinfo/gluster-users</a></pre>
    </blockquote>
    <br>
  

</blockquote></div><br><br><span title="neteasefooter"><span id="netease_mail_footer"></span></span>