RequestQueue

public class RequestQueue extends Object

A request dispatch queue with a thread pool of dispatchers.
Calling {@link #add(Request)} will enqueue the given Request for dispatch,
resolving from either cache or network on a worker thread, and then delivering
a parsed response on the main thread.
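A typical end-to-end usage sketch (illustrative only; Volley.newRequestQueue and StringRequest are toolbox helpers outside this class, and the URL is a placeholder):

// Build and start a queue (Volley.newRequestQueue returns an already-started queue).
RequestQueue queue = Volley.newRequestQueue(context);

// Enqueue a request; listeners run on the main thread.
StringRequest request = new StringRequest(Request.Method.GET,
        "https://example.com/data",
        new Response.Listener<String>() {
            @Override
            public void onResponse(String response) {
                // Parsed response, delivered on the main thread.
            }
        },
        new Response.ErrorListener() {
            @Override
            public void onErrorResponse(VolleyError error) {
                // Errors are delivered on the main thread as well.
            }
        });
queue.add(request);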
Fields Summary
private AtomicInteger mSequenceGenerator
    Used for generating monotonically-increasing sequence numbers for requests.

private final Map mWaitingRequests
    Staging area for requests that already have a duplicate request in flight.
    - containsKey(cacheKey) indicates that there is a request in flight for the given cache key.
    - get(cacheKey) returns waiting requests for the given cache key. The in flight request
      is not contained in that list. Is null if no requests are staged.

private final Set mCurrentRequests
    The set of all requests currently being processed by this RequestQueue. A Request
    will be in this set if it is waiting in any queue or currently being processed by
    any dispatcher.

private final PriorityBlockingQueue mCacheQueue
    The cache triage queue.

private final PriorityBlockingQueue mNetworkQueue
    The queue of requests that are actually going out to the network.

private static final int DEFAULT_NETWORK_THREAD_POOL_SIZE
    Number of network request dispatcher threads to start.

private final Cache mCache
    Cache interface for retrieving and storing responses.

private final Network mNetwork
    Network interface for performing requests.

private final ResponseDelivery mDelivery
    Response delivery mechanism.

private NetworkDispatcher[] mDispatchers
    The network dispatchers.

private CacheDispatcher mCacheDispatcher
    The cache dispatcher.
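To illustrate the mWaitingRequests staging described above, a small sketch (buildRequest is a hypothetical helper that creates a cacheable request for the given URL):

Request<?> first = buildRequest("https://example.com/profile");   // hypothetical helper
Request<?> second = buildRequest("https://example.com/profile");  // same cache key

queue.add(first);   // mWaitingRequests now maps the cache key to null: a request is in flight
queue.add(second);  // staged in mWaitingRequests behind 'first'; dispatched only after it finishes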
Constructors Summary
public RequestQueue(Cache cache, Network network, int threadPoolSize, ResponseDelivery delivery)
Creates the worker pool. Processing will not begin until {@link #start()} is called.
mCache = cache;
mNetwork = network;
mDispatchers = new NetworkDispatcher[threadPoolSize];
mDelivery = delivery;
public RequestQueue(Cache cache, Network network, int threadPoolSize)
Creates the worker pool. Processing will not begin until {@link #start()} is called.
this(cache, network, threadPoolSize,
new ExecutorDelivery(new Handler(Looper.getMainLooper())));
public RequestQueue(Cache cache, Network network)
Creates the worker pool. Processing will not begin until {@link #start()} is called.
this(cache, network, DEFAULT_NETWORK_THREAD_POOL_SIZE);
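A construction sketch using the toolbox implementations DiskBasedCache, BasicNetwork, and HurlStack (assumptions here; any Cache and Network implementations will do):

File cacheDir = new File(context.getCacheDir(), "volley");
Cache cache = new DiskBasedCache(cacheDir, 1024 * 1024);   // 1 MB disk cache
Network network = new BasicNetwork(new HurlStack());       // HttpURLConnection-backed Network

RequestQueue queue = new RequestQueue(cache, network, 4,
        new ExecutorDelivery(new Handler(Looper.getMainLooper())));
queue.start(); // nothing is processed until start() is called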
Methods Summary
public Request add(Request request)
Adds a Request to the dispatch queue.
// Tag the request as belonging to this queue and add it to the set of current requests.
request.setRequestQueue(this);
synchronized (mCurrentRequests) {
mCurrentRequests.add(request);
}
// Process requests in the order they are added.
request.setSequence(getSequenceNumber());
request.addMarker("add-to-queue");
// If the request is uncacheable, skip the cache queue and go straight to the network.
if (!request.shouldCache()) {
mNetworkQueue.add(request);
return request;
}
// Insert request into stage if there's already a request with the same cache key in flight.
synchronized (mWaitingRequests) {
String cacheKey = request.getCacheKey();
if (mWaitingRequests.containsKey(cacheKey)) {
// There is already a request in flight. Queue up.
Queue<Request<?>> stagedRequests = mWaitingRequests.get(cacheKey);
if (stagedRequests == null) {
stagedRequests = new LinkedList<Request<?>>();
}
stagedRequests.add(request);
mWaitingRequests.put(cacheKey, stagedRequests);
if (VolleyLog.DEBUG) {
VolleyLog.v("Request for cacheKey=%s is in flight, putting on hold.", cacheKey);
}
} else {
// Insert 'null' queue for this cacheKey, indicating there is now a request in
// flight.
mWaitingRequests.put(cacheKey, null);
mCacheQueue.add(request);
}
return request;
}
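For example, disabling caching on a request routes it straight to the network queue, bypassing the staging logic above (a sketch; request is any previously built Request):

request.setShouldCache(false); // skip the cache triage queue
queue.add(request);            // goes directly to mNetworkQueue; add() returns the same request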
public void cancelAll(RequestQueue.RequestFilter filter)
Cancels all requests in this queue for which the given filter applies.
synchronized (mCurrentRequests) {
for (Request<?> request : mCurrentRequests) {
if (filter.apply(request)) {
request.cancel();
}
}
}
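A filter sketch that cancels every GET request still owned by the queue (Request.getMethod() and Request.Method are part of the Request API):

queue.cancelAll(new RequestQueue.RequestFilter() {
    @Override
    public boolean apply(Request<?> request) {
        return request.getMethod() == Request.Method.GET; // cancel only GETs
    }
});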
public void cancelAll(Object tag)
Cancels all requests in this queue with the given tag. Tag must be non-null
and equality is by identity.
if (tag == null) {
throw new IllegalArgumentException("Cannot cancelAll with a null tag");
}
cancelAll(new RequestFilter() {
@Override
public boolean apply(Request<?> request) {
return request.getTag() == tag;
}
});
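Typical use is to tag requests with their owning component and cancel them when it goes away (a sketch; TAG is an arbitrary object compared by identity):

private static final Object TAG = new Object();

// When issuing requests:
request.setTag(TAG);
queue.add(request);

// When the owning component stops (e.g. Activity.onStop):
queue.cancelAll(TAG);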
void finish(Request request)
Called from {@link Request#finish(String)}, indicating that processing of the given request
has finished. Releases waiting requests for request.getCacheKey() if
request.shouldCache().
// Remove from the set of requests currently being processed.
synchronized (mCurrentRequests) {
mCurrentRequests.remove(request);
}
if (request.shouldCache()) {
synchronized (mWaitingRequests) {
String cacheKey = request.getCacheKey();
Queue<Request<?>> waitingRequests = mWaitingRequests.remove(cacheKey);
if (waitingRequests != null) {
if (VolleyLog.DEBUG) {
VolleyLog.v("Releasing %d waiting requests for cacheKey=%s.",
waitingRequests.size(), cacheKey);
}
// Process all queued up requests. They won't be considered as in flight, but
// that's not a problem as the cache has been primed by 'request'.
mCacheQueue.addAll(waitingRequests);
}
}
}
public Cache getCache()
Gets the {@link Cache} instance being used.
return mCache;
public int getSequenceNumber()
Gets a sequence number.
return mSequenceGenerator.incrementAndGet();
public void start()
Starts the dispatchers in this queue.
stop(); // Make sure any currently running dispatchers are stopped.
// Create the cache dispatcher and start it.
mCacheDispatcher = new CacheDispatcher(mCacheQueue, mNetworkQueue, mCache, mDelivery);
mCacheDispatcher.start();
// Create network dispatchers (and corresponding threads) up to the pool size.
for (int i = 0; i < mDispatchers.length; i++) {
NetworkDispatcher networkDispatcher = new NetworkDispatcher(mNetworkQueue, mNetwork,
mCache, mDelivery);
mDispatchers[i] = networkDispatcher;
networkDispatcher.start();
}
public void stop()
Stops the cache and network dispatchers.
if (mCacheDispatcher != null) {
mCacheDispatcher.quit();
}
for (int i = 0; i < mDispatchers.length; i++) {
if (mDispatchers[i] != null) {
mDispatchers[i].quit();
}
}
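A lifecycle sketch tying start() and stop() to an owning object (the class and method names are assumptions, not part of Volley):

// A thin holder that owns the queue lifecycle.
public final class QueueHolder {
    private final RequestQueue mQueue;

    public QueueHolder(Cache cache, Network network) {
        mQueue = new RequestQueue(cache, network); // uses DEFAULT_NETWORK_THREAD_POOL_SIZE dispatchers
        mQueue.start();                            // spins up the cache dispatcher and network dispatchers
    }

    public RequestQueue getQueue() {
        return mQueue;
    }

    public void shutdown() {
        mQueue.stop();                             // quits all dispatcher threads
    }
}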