File: RequestQueue.java | Doc: API Doc | Category: Android 5.1 API | Size: 10470 | Date: Thu Mar 12 22:22:56 GMT 2015 | Package: com.android.volley

RequestQueue

public class RequestQueue extends Object
A request dispatch queue with a thread pool of dispatchers. Calling {@link #add(Request)} will enqueue the given Request for dispatch, resolving from either cache or network on a worker thread, and then delivering a parsed response on the main thread.
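
A minimal usage sketch, assuming the toolbox classes DiskBasedCache, BasicNetwork, HurlStack and StringRequest from com.android.volley.toolbox in the same release; the cache directory name, the URL and the context variable are placeholders:

        // Build a queue from a disk cache and an HttpURLConnection-backed network,
        // start its dispatcher threads, then enqueue a request.
        File cacheDir = new File(context.getCacheDir(), "volley");
        RequestQueue queue = new RequestQueue(
                new DiskBasedCache(cacheDir),
                new BasicNetwork(new HurlStack()));
        queue.start();
        queue.add(new StringRequest(Request.Method.GET, "http://example.com/data",
                new Response.Listener<String>() {
                    @Override
                    public void onResponse(String response) {
                        // parsed response, delivered on the main thread
                    }
                },
                new Response.ErrorListener() {
                    @Override
                    public void onErrorResponse(VolleyError error) {
                        // cache/network or parsing failure
                    }
                }));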

Fields Summary
private AtomicInteger
mSequenceGenerator
Used for generating monotonically-increasing sequence numbers for requests.
private final Map<String, Queue<Request<?>>>
mWaitingRequests
Staging area for requests that already have a duplicate request in flight.
  • containsKey(cacheKey) indicates that there is a request in flight for the given cache key.
  • get(cacheKey) returns the waiting requests for the given cache key. The in-flight request is not contained in that list; the list is null if no requests are staged.
private final Set<Request<?>>
mCurrentRequests
The set of all requests currently being processed by this RequestQueue. A Request will be in this set if it is waiting in any queue or currently being processed by any dispatcher.
private final PriorityBlockingQueue<Request<?>>
mCacheQueue
The cache triage queue.
private final PriorityBlockingQueue<Request<?>>
mNetworkQueue
The queue of requests that are actually going out to the network.
private static final int
DEFAULT_NETWORK_THREAD_POOL_SIZE
Number of network request dispatcher threads to start.
private final Cache
mCache
Cache interface for retrieving and storing responses.
private final Network
mNetwork
Network interface for performing requests.
private final ResponseDelivery
mDelivery
Response delivery mechanism.
private NetworkDispatcher[]
mDispatchers
The network dispatchers.
private CacheDispatcher
mCacheDispatcher
The cache dispatcher.
Constructors Summary
public RequestQueue(Cache cache, Network network, int threadPoolSize, ResponseDelivery delivery)
Creates the worker pool. Processing will not begin until {@link #start()} is called.

param
cache A Cache to use for persisting responses to disk
param
network A Network interface for performing HTTP requests
param
threadPoolSize Number of network dispatcher threads to create
param
delivery A ResponseDelivery interface for posting responses and errors

        mCache = cache;
        mNetwork = network;
        mDispatchers = new NetworkDispatcher[threadPoolSize];
        mDelivery = delivery;
    
public RequestQueue(Cache cache, Network network, int threadPoolSize)
Creates the worker pool. Processing will not begin until {@link #start()} is called.

param
cache A Cache to use for persisting responses to disk
param
network A Network interface for performing HTTP requests
param
threadPoolSize Number of network dispatcher threads to create

        this(cache, network, threadPoolSize,
                new ExecutorDelivery(new Handler(Looper.getMainLooper())));
    
public RequestQueue(Cache cache, Network network)
Creates the worker pool. Processing will not begin until {@link #start()} is called.

param
cache A Cache to use for persisting responses to disk
param
network A Network interface for performing HTTP requests

        this(cache, network, DEFAULT_NETWORK_THREAD_POOL_SIZE);
    
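
For contrast, a hedged sketch of the fully explicit overload, using a ResponseDelivery that runs on the dispatcher thread itself; the inline Executor and the cache and network variables are assumptions of this example rather than defaults of the class:

        // Two network dispatcher threads; responses are delivered synchronously on
        // whichever dispatcher produced them instead of being posted to the main looper.
        ResponseDelivery inlineDelivery = new ExecutorDelivery(new Executor() {
            @Override
            public void execute(Runnable command) {
                command.run();
            }
        });
        RequestQueue testQueue = new RequestQueue(cache, network, 2, inlineDelivery);
        testQueue.start();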
Methods Summary
public Request add(Request request)
Adds a Request to the dispatch queue.

param
request The request to service
return
The passed-in request

        // Tag the request as belonging to this queue and add it to the set of current requests.
        request.setRequestQueue(this);
        synchronized (mCurrentRequests) {
            mCurrentRequests.add(request);
        }

        // Process requests in the order they are added.
        request.setSequence(getSequenceNumber());
        request.addMarker("add-to-queue");

        // If the request is uncacheable, skip the cache queue and go straight to the network.
        if (!request.shouldCache()) {
            mNetworkQueue.add(request);
            return request;
        }

        // Insert request into stage if there's already a request with the same cache key in flight.
        synchronized (mWaitingRequests) {
            String cacheKey = request.getCacheKey();
            if (mWaitingRequests.containsKey(cacheKey)) {
                // There is already a request in flight. Queue up.
                Queue<Request<?>> stagedRequests = mWaitingRequests.get(cacheKey);
                if (stagedRequests == null) {
                    stagedRequests = new LinkedList<Request<?>>();
                }
                stagedRequests.add(request);
                mWaitingRequests.put(cacheKey, stagedRequests);
                if (VolleyLog.DEBUG) {
                    VolleyLog.v("Request for cacheKey=%s is in flight, putting on hold.", cacheKey);
                }
            } else {
                // Insert 'null' queue for this cacheKey, indicating there is now a request in
                // flight.
                mWaitingRequests.put(cacheKey, null);
                mCacheQueue.add(request);
            }
            return request;
        }
    
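
As the shouldCache() branch above shows, a request that opts out of caching bypasses the cache triage queue entirely. A hedged sketch (queue, url and the listeners are placeholders):

        // Never looked up in, or written to, the cache: add() drops it
        // straight into the network queue.
        StringRequest req = new StringRequest(Request.Method.GET, url, listener, errorListener);
        req.setShouldCache(false);
        queue.add(req);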
public void cancelAll(RequestFilter filter)
Cancels all requests in this queue for which the given filter applies.

param
filter The filtering function to use

        synchronized (mCurrentRequests) {
            for (Request<?> request : mCurrentRequests) {
                if (filter.apply(request)) {
                    request.cancel();
                }
            }
        }
    
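
A hedged sketch of a custom filter, cancelling every GET request the queue currently tracks (queue is a placeholder):

        queue.cancelAll(new RequestQueue.RequestFilter() {
            @Override
            public boolean apply(Request<?> request) {
                // cancel only GETs; everything else keeps running
                return request.getMethod() == Request.Method.GET;
            }
        });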
public void cancelAll(Object tag)
Cancels all requests in this queue with the given tag. Tag must be non-null and equality is by identity.

        if (tag == null) {
            throw new IllegalArgumentException("Cannot cancelAll with a null tag");
        }
        cancelAll(new RequestFilter() {
            @Override
            public boolean apply(Request<?> request) {
                return request.getTag() == tag;
            }
        });
    
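
Because tag equality is by identity, the usual pattern is to keep one tag object per owning component and cancel with that same object. A hedged sketch (queue, url and the listeners are placeholders):

        // One identity tag per screen or component; the name is illustrative.
        final Object screenTag = new Object();

        StringRequest req = new StringRequest(Request.Method.GET, url, listener, errorListener);
        req.setTag(screenTag);
        queue.add(req);

        // Later, for example when the screen goes away, drop everything still pending for it.
        queue.cancelAll(screenTag);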
void finish(Request request)
Called from {@link Request#finish(String)}, indicating that processing of the given request has finished.

Releases waiting requests for request.getCacheKey() if request.shouldCache().

        // Remove from the set of requests currently being processed.
        synchronized (mCurrentRequests) {
            mCurrentRequests.remove(request);
        }

        if (request.shouldCache()) {
            synchronized (mWaitingRequests) {
                String cacheKey = request.getCacheKey();
                Queue<Request<?>> waitingRequests = mWaitingRequests.remove(cacheKey);
                if (waitingRequests != null) {
                    if (VolleyLog.DEBUG) {
                        VolleyLog.v("Releasing %d waiting requests for cacheKey=%s.",
                                waitingRequests.size(), cacheKey);
                    }
                    // Process all queued up requests. They won't be considered as in flight, but
                    // that's not a problem as the cache has been primed by 'request'.
                    mCacheQueue.addAll(waitingRequests);
                }
            }
        }
    
public Cache getCache()
Gets the {@link Cache} instance being used.

        return mCache;
    
public int getSequenceNumber()
Gets a sequence number.

        return mSequenceGenerator.incrementAndGet();
    
public void start()
Starts the dispatchers in this queue.

        stop();  // Make sure any currently running dispatchers are stopped.
        // Create the cache dispatcher and start it.
        mCacheDispatcher = new CacheDispatcher(mCacheQueue, mNetworkQueue, mCache, mDelivery);
        mCacheDispatcher.start();

        // Create network dispatchers (and corresponding threads) up to the pool size.
        for (int i = 0; i < mDispatchers.length; i++) {
            NetworkDispatcher networkDispatcher = new NetworkDispatcher(mNetworkQueue, mNetwork,
                    mCache, mDelivery);
            mDispatchers[i] = networkDispatcher;
            networkDispatcher.start();
        }
    
public void stop()
Stops the cache and network dispatchers.

        if (mCacheDispatcher != null) {
            mCacheDispatcher.quit();
        }
        for (int i = 0; i < mDispatchers.length; i++) {
            if (mDispatchers[i] != null) {
                mDispatchers[i].quit();
            }
        }
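
Because start() begins by calling stop(), restarting a queue simply replaces all dispatcher threads. A short hedged sketch (queue is a placeholder):

        queue.stop();   // quits the cache dispatcher and every network dispatcher
        queue.start();  // starts a fresh CacheDispatcher and NetworkDispatcher pool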