[GNUnet-SVN] r10641 - gnunet/src/fs


From: gnunet
Subject: [GNUnet-SVN] r10641 - gnunet/src/fs
Date: Thu, 18 Mar 2010 10:57:08 +0100

Author: grothoff
Date: 2010-03-18 10:57:08 +0100 (Thu, 18 Mar 2010)
New Revision: 10641

Modified:
   gnunet/src/fs/gnunet-service-fs.c
Log:
fix: only schedule forward_request_task when no retry task is already pending; in forward_request_task, return early ("already pending") when pr->irc is still non-NULL instead of asserting

Modified: gnunet/src/fs/gnunet-service-fs.c
===================================================================
--- gnunet/src/fs/gnunet-service-fs.c   2010-03-18 09:53:54 UTC (rev 10640)
+++ gnunet/src/fs/gnunet-service-fs.c   2010-03-18 09:57:08 UTC (rev 10641)
@@ -1178,10 +1178,11 @@
     {
       GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                  "Transmission of request failed, will try again later.\n");
-      pr->task = GNUNET_SCHEDULER_add_delayed (sched,
-                                              get_processing_delay (),
-                                              &forward_request_task,
-                                              pr); 
+      if (pr->task == GNUNET_SCHEDULER_NO_TASK)
+       pr->task = GNUNET_SCHEDULER_add_delayed (sched,
+                                                get_processing_delay (),
+                                                &forward_request_task,
+                                                pr); 
       return;    
     }
   GNUNET_STATISTICS_update (stats,
@@ -1194,10 +1195,11 @@
                       pr->used_pids_size,
                       pr->used_pids_size * 2 + 2);
   pr->used_pids[pr->used_pids_off++] = tpid;
-  pr->task = GNUNET_SCHEDULER_add_delayed (sched,
-                                          get_processing_delay (),
-                                          &forward_request_task,
-                                          pr);
+  if (pr->task == GNUNET_SCHEDULER_NO_TASK)
+    pr->task = GNUNET_SCHEDULER_add_delayed (sched,
+                                            get_processing_delay (),
+                                            &forward_request_task,
+                                            pr);
 }
 
 
@@ -1312,10 +1314,11 @@
       GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                  "Selected peer disconnected!\n");
 #endif
-      pr->task = GNUNET_SCHEDULER_add_delayed (sched,
-                                              get_processing_delay (),
-                                              &forward_request_task,
-                                              pr);
+      if (pr->task == GNUNET_SCHEDULER_NO_TASK)
+       pr->task = GNUNET_SCHEDULER_add_delayed (sched,
+                                                get_processing_delay (),
+                                                &forward_request_task,
+                                                pr);
       return;
     }
   no_route = GNUNET_NO;
@@ -1338,10 +1341,11 @@
                                    gettext_noop ("# reply bandwidth 
reservation requests failed"),
                                    1,
                                    GNUNET_NO);
-         pr->task = GNUNET_SCHEDULER_add_delayed (sched,
-                                                  get_processing_delay (),
-                                                  &forward_request_task,
-                                                  pr);
+         if (pr->task == GNUNET_SCHEDULER_NO_TASK)
+           pr->task = GNUNET_SCHEDULER_add_delayed (sched,
+                                                    get_processing_delay (),
+                                                    &forward_request_task,
+                                                    pr);
          return;  /* this target round failed */
        }
       /* FIXME: if we are "quite" busy, we may still want to skip
@@ -1497,7 +1501,8 @@
   struct ConnectedPeer *cp; 
 
   pr->task = GNUNET_SCHEDULER_NO_TASK;
-  GNUNET_assert (pr->irc == NULL);
+  if (pr->irc != NULL)
+    return; /* already pending */
   /* (1) select target */
   psc.pr = pr;
   psc.target_score = DBL_MIN;
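
Below is a minimal, self-contained sketch of the guard pattern the patch
introduces: a retry is scheduled only when no retry task is already pending,
so an existing task handle is never overwritten (which would otherwise leak
the old task and lead to duplicate retries). This is illustration only, not
GNUnet code; schedule_retry and NO_TASK are hypothetical stand-ins for
GNUNET_SCHEDULER_add_delayed and GNUNET_SCHEDULER_NO_TASK.

#include <stdio.h>

#define NO_TASK 0                /* stand-in for GNUNET_SCHEDULER_NO_TASK */

struct PendingRequest
{
  unsigned long task;            /* pending retry task, or NO_TASK */
};

static unsigned long next_task_id = 1;

/* Hypothetical stand-in for GNUNET_SCHEDULER_add_delayed: pretend to
   schedule 'cb (pr)' for later and return a task identifier. */
static unsigned long
schedule_retry (void (*cb) (struct PendingRequest *),
                struct PendingRequest *pr)
{
  (void) cb;
  (void) pr;
  printf ("scheduled retry task %lu\n", next_task_id);
  return next_task_id++;
}

static void
forward_request_task (struct PendingRequest *pr)
{
  pr->task = NO_TASK;            /* task is now running; clear the handle */
  /* ... attempt to forward the request, possibly calling retry_later ... */
}

/* Called from several failure paths; with the guard, calling it multiple
   times before the task runs schedules exactly one retry. */
static void
retry_later (struct PendingRequest *pr)
{
  if (pr->task == NO_TASK)
    pr->task = schedule_retry (&forward_request_task, pr);
}

int
main (void)
{
  struct PendingRequest pr = { NO_TASK };

  retry_later (&pr);             /* schedules task 1 */
  retry_later (&pr);             /* no-op: a retry is already pending */
  return 0;
}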




