Coverage Report - org.mule.transport.vm.VMMessageReceiver
 
Classes in this File                  Line Coverage      Branch Coverage    Complexity
VMMessageReceiver                     98%  (51/52)       83%  (15/18)       1.786
VMMessageReceiver$VMReceiverWorker    100% (5/5)         100% (2/2)         1.786
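
For orientation, the figures in the table follow directly from the per-line hit counts in the listing below: a line or branch counts as covered once its hit count is non-zero. A minimal sketch of the arithmetic (not part of the generated report; the class name is made up), noting that the only missed line is the never-executed debug statement at source line 76:

    // Sketch only: reproduces the VMMessageReceiver percentages from the table.
    public class CoverageArithmetic
    {
        public static void main(String[] args)
        {
            System.out.printf("line coverage:   %.0f%%%n", 100.0 * 51 / 52); // 98%
            System.out.printf("branch coverage: %.0f%%%n", 100.0 * 15 / 18); // 83%
        }
    }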
 
Line  Hits  Source

   1        /*
   2         * $Id: VMMessageReceiver.java 12181 2008-06-26 20:05:55Z dirk.olmes $
   3         * --------------------------------------------------------------------------------------
   4         * Copyright (c) MuleSource, Inc.  All rights reserved.  http://www.mulesource.com
   5         *
   6         * The software in this package is published under the terms of the CPAL v1.0
   7         * license, a copy of which has been included with this distribution in the
   8         * LICENSE.txt file.
   9         */
  10
  11        package org.mule.transport.vm;
  12
  13        import org.mule.DefaultMuleMessage;
  14        import org.mule.api.MuleException;
  15        import org.mule.api.MuleMessage;
  16        import org.mule.api.endpoint.InboundEndpoint;
  17        import org.mule.api.lifecycle.CreateException;
  18        import org.mule.api.service.Service;
  19        import org.mule.api.transport.Connector;
  20        import org.mule.transport.PollingReceiverWorker;
  21        import org.mule.transport.TransactedPollingMessageReceiver;
  22        import org.mule.util.queue.Queue;
  23        import org.mule.util.queue.QueueSession;
  24
  25        import java.util.ArrayList;
  26        import java.util.LinkedList;
  27        import java.util.List;
  28
  29        import edu.emory.mathcs.backport.java.util.concurrent.RejectedExecutionException;
  30
  31        /**
  32         * <code>VMMessageReceiver</code> is a listener for events from a Mule service which then simply passes
  33         * the events on to the target service.
  34         */
  35        public class VMMessageReceiver extends TransactedPollingMessageReceiver
  36        {
  37
  38            private VMConnector connector;
  39   348      private final Object lock = new Object();
  40
  41            public VMMessageReceiver(Connector connector, Service service, InboundEndpoint endpoint)
  42                throws CreateException
  43            {
  44   348          super(connector, service, endpoint);
  45   348          this.setReceiveMessagesInTransaction(endpoint.getTransactionConfig().isTransacted());
  46   348          this.connector = (VMConnector) connector;
  47   348      }
  48
  49            /*
  50             * We only need to start scheduling this receiver if event queueing is enabled on the connector; otherwise
  51             * events are delivered via onEvent/onCall.
  52             */
  53            // @Override
  54            protected void schedule() throws RejectedExecutionException, NullPointerException, IllegalArgumentException
  55            {
  56   344          if (connector.isQueueEvents())
  57                {
  58    70              super.schedule();
  59                }
  60   344      }
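
The comment at source lines 49-52 describes the two delivery modes this receiver supports. A minimal sketch of how that switch is typically driven, assuming VMConnector is a plain bean exposing a setQueueEvents(boolean) setter to match the isQueueEvents() getter used in schedule() and doConnect() (the setter itself does not appear in this report):

    // Sketch only: toggling the delivery mode that schedule() checks above.
    import org.mule.transport.vm.VMConnector;

    public class QueueEventsModeSketch
    {
        public static void main(String[] args)
        {
            VMConnector vmConnector = new VMConnector();

            // Push mode: the VM dispatcher calls onMessage()/onCall() directly,
            // so schedule() never starts a polling worker.
            vmConnector.setQueueEvents(false);

            // Queueing mode: messages land on a named VM queue and the receiver
            // schedules VMReceiverWorker instances to drain it.
            vmConnector.setQueueEvents(true);
        }
    }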
  61
  62            protected void doDispose()
  63            {
  64                // template method
  65   694      }
  66
  67            protected void doConnect() throws Exception
  68            {
  69   344          if (connector.isQueueEvents())
  70                {
  71                    // Ensure we can create a vm queue
  72    70              QueueSession queueSession = connector.getQueueSession();
  73    70              Queue q = queueSession.getQueue(endpoint.getEndpointURI().getAddress());
  74    70              if (logger.isDebugEnabled())
  75                    {
  76     0                  logger.debug("Current queue depth for queue: " + endpoint.getEndpointURI().getAddress() + " is: "
  77                                     + q.size());
  78                    }
  79                }
  80   344      }
  81
  82            protected void doDisconnect() throws Exception
  83            {
  84                // template method
  85   344      }
  86
  87            public void onMessage(MuleMessage message) throws MuleException
  88            {
  89                // Rewrite the message to treat it as a new message
  90  4044          MuleMessage newMessage = new DefaultMuleMessage(message.getPayload(), message);
  91
  92                /*
  93                 * TODO review: onEvent can only be called by the VMMessageDispatcher - why is
  94                 * this lock here and do we still need it? what can break if this receiver is run
  95                 * concurrently by multiple dispatchers, which are isolated?
  96                 */
  97  4044          synchronized (lock)
  98                {
  99  4044              routeMessage(newMessage);
 100  4044          }
 101  4044      }
 102
 103            public Object onCall(MuleMessage message, boolean synchronous) throws MuleException
 104            {
 105                // Rewrite the message to treat it as a new message
 106  4062          MuleMessage newMessage = new DefaultMuleMessage(message.getPayload(), message);
 107  4062          return routeMessage(newMessage, synchronous);
 108            }
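
Both onMessage() and onCall() rewrap the incoming payload through the two-argument DefaultMuleMessage constructor so the event is routed as a fresh message. A small sketch of that idiom, assuming the constructor carries the previous message's properties onto the new instance and that MuleMessage exposes setProperty()/getProperty(); the property name is invented for illustration:

    // Sketch only: the "rewrite the message" idiom from onMessage()/onCall().
    import org.mule.DefaultMuleMessage;
    import org.mule.api.MuleMessage;

    public class RewriteMessageSketch
    {
        public static void main(String[] args)
        {
            MuleMessage original = new DefaultMuleMessage("hello");
            original.setProperty("example.batchId", "42"); // hypothetical property

            // Same payload, new message identity; properties are assumed to travel with it.
            MuleMessage rewritten = new DefaultMuleMessage(original.getPayload(), original);
            System.out.println(rewritten.getProperty("example.batchId"));
        }
    }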
 109
 110            /**
 111             * It's impossible to process all queued messages within a single receive transaction, so at most one message is returned when receiving transactionally.
 112             */
 113            protected List getMessages() throws Exception
 114            {
 115    39          if (isReceiveMessagesInTransaction())
 116                {
 117     5              MuleMessage message = getFirstMessage();
 118     5              if (message == null)
 119                    {
 120     3                  return null;
 121                    }
 122
 123     2              List messages = new ArrayList(1);
 124     2              messages.add(message);
 125     2              return messages;
 126                }
 127                else
 128                {
 129    34              return getFirstMessages();
 130                }
 131            }
 132
 133            protected List getFirstMessages() throws Exception
 134            {
 135                // The queue from which to pull events
 136    34          QueueSession qs = connector.getQueueSession();
 137    34          Queue queue = qs.getQueue(endpoint.getEndpointURI().getAddress());
 138
 139                // The list of retrieved messages that will be returned
 140    34          List messages = new LinkedList();
 141
 142                /*
 143                 * Determine how many messages to batch in this poll: we need to drain the queue quickly, but not by
 144                 * slamming the workManager too hard. It is impossible to determine this more precisely without proper
 145                 * load statistics/feedback or some kind of "event cost estimate". Therefore we just try to use half
 146                 * of the receiver's workManager, since it is shared with receivers for other endpoints.
 147                 */
 148    34          int maxThreads = connector.getReceiverThreadingProfile().getMaxThreadsActive();
 149                // also make sure batchSize is always at least 1
 150    34          int batchSize = Math.max(1, Math.min(queue.size(), ((maxThreads / 2) - 1)));
 151
 152                // try to get the first event off the queue
 153    34          MuleMessage message = (MuleMessage) queue.poll(connector.getQueueTimeout());
 154
 155    16          if (message != null)
 156                {
 157                    // keep first dequeued event
 158    16              messages.add(message);
 159
 160                    // keep batching if more events are available
 161    36              for (int i = 0; i < batchSize && message != null; i++)
 162                    {
 163    20                  message = (MuleMessage) queue.poll(0);
 164    20                  if (message != null)
 165                        {
 166     4                      messages.add(message);
 167                        }
 168                    }
 169                }
 170
 171                // let our workManager handle the batch of events
 172    16          return messages;
 173            }
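
The batching comment above caps each poll at roughly half of the receiver's shared workManager. A worked example of the batchSize expression from source line 150, using invented queue sizes and thread counts:

    // Sketch only: the batch-size arithmetic from getFirstMessages().
    public class BatchSizeSketch
    {
        static int batchSize(int queueSize, int maxThreads)
        {
            // same expression as line 150: never below 1, never above (maxThreads / 2) - 1
            return Math.max(1, Math.min(queueSize, (maxThreads / 2) - 1));
        }

        public static void main(String[] args)
        {
            System.out.println(batchSize(100, 16)); // 7 -> capped by the thread pool, not the queue
            System.out.println(batchSize(3, 16));   // 3 -> capped by what is actually queued
            System.out.println(batchSize(100, 2));  // 1 -> the Math.max floor keeps at least one message per poll
        }
    }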
 174
 175            protected MuleMessage getFirstMessage() throws Exception
 176            {
 177                // The queue from which to pull events
 178     4          QueueSession qs = connector.getQueueSession();
 179     5          Queue queue = qs.getQueue(endpoint.getEndpointURI().getAddress());
 180                // try to get the first event off the queue
 181     5          return (MuleMessage) queue.poll(connector.getQueueTimeout());
 182            }
 183
 184            protected void processMessage(Object msg) throws Exception
 185            {
 186                // getMessages() returns UMOEvents
 187    22          MuleMessage message = (MuleMessage) msg;
 188
 189                // Rewrite the message to treat it as a new message
 190    22          MuleMessage newMessage = new DefaultMuleMessage(message.getPayload(), message);
 191    22          routeMessage(newMessage);
 192    22      }
 193
 194            /*
 195             * We create our own "polling" worker here since we need to evade the standard scheduler.
 196             */
 197            // @Override
 198            protected PollingReceiverWorker createWork()
 199            {
 200    70          return new VMReceiverWorker(this);
 201            }
 202
 203            /*
 204             * Even though the VM transport is "polling" for messages, the nonexistent cost of accessing the queue is
 205             * a good reason to not use the regular scheduling mechanism in order to both minimize latency and
 206             * maximize throughput.
 207             */
 208            protected static class VMReceiverWorker extends PollingReceiverWorker
 209            {
 210
 211                public VMReceiverWorker(VMMessageReceiver pollingMessageReceiver)
 212                {
 213    70              super(pollingMessageReceiver);
 214    70          }
 215
 216                public void run()
 217                {
 218                    /*
 219                     * We simply run our own polling loop all the time as long as the receiver is started. The
 220                     * blocking wait defined by VMConnector.getQueueTimeout() will prevent this worker's receiver
 221                     * thread from busy-waiting.
 222                     */
 223    61              while (this.getReceiver().isConnected())
 224                    {
 225    39                  super.run();
 226                    }
 227    22          }
 228            }
 229
 230        }
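
The run() comment in VMReceiverWorker relies on the blocking wait inside the queue poll to keep the loop from spinning. A self-contained sketch of that pattern using a plain java.util.concurrent queue rather than Mule's org.mule.util.queue API; the one-second timeout stands in for VMConnector.getQueueTimeout():

    // Sketch only: poll with a timeout instead of busy-waiting.
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class BlockingPollSketch
    {
        public static void main(String[] args) throws InterruptedException
        {
            BlockingQueue<String> queue = new LinkedBlockingQueue<String>();
            boolean connected = true; // stands in for getReceiver().isConnected()

            while (connected)
            {
                // Blocks for up to one second, so an empty queue parks the thread
                // instead of burning CPU in a tight loop.
                String message = queue.poll(1, TimeUnit.SECONDS);
                if (message == null)
                {
                    connected = false; // end the demo after one idle interval
                }
                else
                {
                    System.out.println("routing: " + message);
                }
            }
        }
    }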