File: TrackerChecker.java (API Doc, Azureus 3.0.3.4)
Size: 14455 bytes
Date: Mon Jun 18 17:15:40 BST 2007
Package: org.gudy.azureus2.core3.tracker.client.impl.bt

TrackerChecker

public class TrackerChecker extends Object implements SystemTime.consumer, AEDiagnosticsEvidenceGenerator
author
Olivier

Fields Summary
private static final LogIDs LOGID
private HashMap trackers
	Map of trackers: key = tracker URL string, value = TrackerStatus object.
private AEMonitor trackers_mon
private TRTrackerBTScraperImpl scraper
	The TRTrackerBTScraperImpl object associated with this object.
private long nextScrapeCheckOn
Constructors Summary
protected TrackerChecker(TRTrackerBTScraperImpl _scraper)
Initialize TrackerChecker.

note
Since there is only one TRTrackerScraperImpl, there will only be one TrackerChecker instance.

    scraper   = _scraper;
       
    trackers  = new HashMap();
    
    Thread t = new AEThread("Tracker Scrape") {
       public void runSupport() {
        runScrapes();
      }
    };
    
    t.setDaemon(true);
    t.setPriority(Thread.MIN_PRIORITY);
    t.start();
    
    AEDiagnostics.addEvidenceGenerator( this );
    
    SystemTime.registerClockChangeListener( this );
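The constructor starts the scrape loop on a low-priority daemon thread. A minimal, self-contained sketch of the same start-up pattern, using a plain java.lang.Thread in place of Azureus's AEThread wrapper (class name and sleep timings here are illustrative):

    public class BackgroundLoopDemo {
        public static void main(String[] args) throws InterruptedException {
            Thread t = new Thread("Tracker Scrape") {
                public void run() {
                    while (true) {
                        // the periodic work (scraping) would go here
                        try {
                            Thread.sleep(1000);
                        } catch (InterruptedException e) {
                            return;
                        }
                    }
                }
            };
            t.setDaemon(true);                  // daemon thread: won't keep the JVM alive
            t.setPriority(Thread.MIN_PRIORITY); // background work runs at lowest priority
            t.start();

            Thread.sleep(3000);                 // when main exits, the daemon dies with it
        }
    }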
  
Methods Summary
private TRTrackerBTScraperResponseImpl checkForNextScrape()
Finds the torrent that will next need scraping.

		// search for the next scrape

		long earliestBlocked = Long.MAX_VALUE;
		TRTrackerBTScraperResponseImpl earliestBlockedResponse = null;
		long earliestNonBlocked = Long.MAX_VALUE;
		TRTrackerBTScraperResponseImpl earliestNonBlockedResponse = null;

		try {
			trackers_mon.enter();

			Iterator iter = trackers.values().iterator();

			while (iter.hasNext()) {

				TrackerStatus ts = (TrackerStatus) iter.next();

				if (!ts.isTrackerScrapeUrlValid()) {
					continue;
				}

				boolean hasActiveScrapes = ts.getNumActiveScrapes() > 0;

				Map hashmap = ts.getHashes();

				try {
					ts.getHashesMonitor().enter();

					Iterator iterHashes = hashmap.values().iterator();

					while (iterHashes.hasNext()) {

						TRTrackerBTScraperResponseImpl response = (TRTrackerBTScraperResponseImpl) iterHashes.next();

						if (response.getStatus() != TRTrackerScraperResponse.ST_SCRAPING) {
							long nextScrapeStartTime = response.getNextScrapeStartTime();

							if (hasActiveScrapes) {
								if (nextScrapeStartTime < earliestBlocked) {
									earliestBlocked = nextScrapeStartTime;
									earliestBlockedResponse = response;
								}
							} else {
								if (nextScrapeStartTime < earliestNonBlocked) {
									earliestNonBlocked = nextScrapeStartTime;
									earliestNonBlockedResponse = response;
								}
							}
						}
					}
				} finally {

					ts.getHashesMonitor().exit();
				}
			}
		} finally {

			trackers_mon.exit();
		}

		boolean hasEarlierBlockedScrape = earliestBlocked != Long.MAX_VALUE
				&& earliestBlocked < earliestNonBlocked;
		// If the earliest non-blocked scrape is more than 2 seconds away,
		// return the blocked scrape instead, in the hope that it gets unblocked soon
		if (hasEarlierBlockedScrape
				&& earliestNonBlocked - SystemTime.getCurrentTime() > 2000) {
			return earliestBlockedResponse;
		} else {
			return earliestNonBlockedResponse;
		}
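The final selection step can be read in isolation. Below is a standalone sketch of the rule, assuming a hypothetical Candidate type (not part of Azureus): prefer the earliest non-blocked scrape, but fall back to the earliest blocked one when it is due earlier and the non-blocked candidate is still more than two seconds away.

    public class NextScrapeDemo {
        // Hypothetical stand-in for a scrape response; illustration only.
        static class Candidate {
            long nextScrapeStart; // absolute time in ms
            Candidate(long nextScrapeStart) { this.nextScrapeStart = nextScrapeStart; }
        }

        static Candidate pick(Candidate earliestBlocked, Candidate earliestNonBlocked, long now) {
            boolean blockedIsEarlier = earliestBlocked != null
                    && (earliestNonBlocked == null
                            || earliestBlocked.nextScrapeStart < earliestNonBlocked.nextScrapeStart);

            // Only fall back to the blocked candidate when the non-blocked one
            // is more than 2s away, in the hope the tracker frees up by then.
            if (blockedIsEarlier
                    && (earliestNonBlocked == null
                            || earliestNonBlocked.nextScrapeStart - now > 2000)) {
                return earliestBlocked;
            }
            return earliestNonBlocked;
        }

        public static void main(String[] args) {
            long now = System.currentTimeMillis();
            Candidate blocked = new Candidate(now + 1000);
            Candidate nonBlocked = new Candidate(now + 10000);
            System.out.println(pick(blocked, nonBlocked, now) == blocked); // true
        }
    }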
	
public void consume(long offset)

	
  		if ( Math.abs( offset ) < 60*1000 ){
  			
  			return;
  		}
  		
	    try{
	    	trackers_mon.enter();
	    	
	    	Iterator iter = trackers.values().iterator();
	      
	    	while (iter.hasNext()) {
	    		
	    		TrackerStatus ts = (TrackerStatus) iter.next();
	    			    		
	    		Map hashmap = ts.getHashes();
	    		  
	    		try{
	    			ts.getHashesMonitor().enter();
	        	
	    			Iterator iterHashes = hashmap.values().iterator();
	    			
	    			while( iterHashes.hasNext() ) {
	            
	    				TRTrackerBTScraperResponseImpl response = (TRTrackerBTScraperResponseImpl)iterHashes.next();    				
	            
	    				long	time = response.getNextScrapeStartTime();
	
	    				if ( time > 0 ){
	    						    					
	    					response.setNextScrapeStartTime( time + offset );
	    				}
	    			}
	    		}finally{
	        	
	    			ts.getHashesMonitor().exit();
	    		}
	    	} 
	    }finally{
	    	
	    	trackers_mon.exit();
	    }
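consume() is the SystemTime.consumer callback: when the wall clock jumps by more than a minute, every stored absolute scrape deadline is shifted by the offset so the relative waits stay correct. A self-contained sketch of that principle, using a plain map of deadlines (names are illustrative, not Azureus API):

    import java.util.HashMap;
    import java.util.Map;

    public class ClockShiftDemo {
        // Shift every stored absolute deadline by the wall-clock offset so
        // that relative waits stay correct; jumps under a minute are ignored.
        static void onClockChange(long offsetMillis, Map<String, Long> deadlines) {
            if (Math.abs(offsetMillis) < 60 * 1000) {
                return; // small drift: leave the schedule alone
            }
            for (Map.Entry<String, Long> e : deadlines.entrySet()) {
                long t = e.getValue();
                if (t > 0) {                      // <= 0 is treated as "not scheduled"
                    e.setValue(t + offsetMillis); // preserve the relative delay
                }
            }
        }

        public static void main(String[] args) {
            Map<String, Long> deadlines = new HashMap<String, Long>();
            deadlines.put("tracker-a", System.currentTimeMillis() + 30000);
            onClockChange(2 * 60 * 60 * 1000L, deadlines); // clock jumped two hours ahead
            System.out.println(deadlines);
        }
    }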
  	
public void generate(IndentWriter writer)

		writer.println( "BTScraper - now = " + SystemTime.getCurrentTime());
		
		try{
			writer.indent();

		    try{
		    	trackers_mon.enter();
			    	
			    Iterator iter = trackers.entrySet().iterator();
			    
			    while (iter.hasNext()){
			    	
			    	Map.Entry	entry = (Map.Entry)iter.next();
			    	
			        TrackerStatus 	ts = (TrackerStatus)entry.getValue();
			    	
			    	writer.println( "Tracker: " + ts.getString());   	
			        
			        try{
			        	writer.indent();
			        	
			        	ts.getHashesMonitor().enter();
			        	
				        Map hashmap = 	ts.getHashes();
				        
				        Iterator iter_hashes = hashmap.entrySet().iterator();
	
				        while (iter_hashes.hasNext()){
				        	
					    	Map.Entry	hash_entry = (Map.Entry)iter_hashes.next();
					    	
					    	TRTrackerBTScraperResponseImpl	response = (TRTrackerBTScraperResponseImpl)hash_entry.getValue();
					    	
					    	writer.println( response.getString());
				        }
			        }finally{
			        	
			        	ts.getHashesMonitor().exit();
			        	
			        	writer.exdent();
			        }
			    }
		    }finally{
		    	
		    	trackers_mon.exit();
		    }
			    
		}finally{
			
			writer.exdent();
		}
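generate() pairs every indent() with an exdent() in a finally block so the evidence log stays balanced even if reading a tracker's data throws. A tiny stand-in for Azureus's IndentWriter (hypothetical TinyIndentWriter, illustration only) shows the resulting shape:

    import java.util.Arrays;
    import java.util.List;

    public class IndentDemo {
        // Minimal stand-in for IndentWriter: depth-based line prefixing.
        static class TinyIndentWriter {
            private int depth;
            void indent() { depth++; }
            void exdent() { if (depth > 0) depth--; }
            void println(String s) {
                StringBuilder b = new StringBuilder();
                for (int i = 0; i < depth; i++) b.append("  ");
                System.out.println(b.append(s));
            }
        }

        public static void main(String[] args) {
            TinyIndentWriter w = new TinyIndentWriter();
            List<String> trackers = Arrays.asList("tracker-a", "tracker-b");
            w.println("BTScraper - now = " + System.currentTimeMillis());
            w.indent();
            try {
                for (String t : trackers) {
                    w.println("Tracker: " + t);
                    w.indent();
                    try {
                        w.println("(per-hash response lines would go here)");
                    } finally {
                        w.exdent(); // mirror every indent() in a finally block
                    }
                }
            } finally {
                w.exdent();
            }
        }
    }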
	
protected org.gudy.azureus2.core3.tracker.client.impl.TRTrackerScraperResponseImpl getHashData(TRTrackerAnnouncer tracker_client)
Retrieves the last cached scrape response based on the TRTrackerAnnouncer's current URL (announce-list entry or announce) and its torrent's hash.

return
The cached scrape response. Can be null.

    try {
      return getHashData(tracker_client.getTrackerUrl(), 
                         tracker_client.getTorrent().getHashWrapper());

    } catch (TOTorrentException e) {
    	Debug.printStackTrace( e );
      return null;
    }
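A hedged usage sketch (the method is protected, so a real caller would live in the same package; checker and announcer are assumed variables, not taken from this source):

    // 'checker' is an existing TrackerChecker, 'announcer' a TRTrackerAnnouncer;
    // both are assumed here for illustration.
    TRTrackerScraperResponseImpl response = checker.getHashData(announcer);

    if (response == null) {
        // no cached scrape yet (e.g. the tracker URL is not set up)
    } else if (response.getStatus() != TRTrackerScraperResponse.ST_SCRAPING) {
        // no scrape currently in flight, so the cached figures are stable
    }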
  
protected org.gudy.azureus2.core3.tracker.client.impl.TRTrackerScraperResponseImpl getHashData(TOTorrent torrent, java.net.URL target_url)
Retrieves the last cached scrape response based on a TOTorrent's announce URL (not announce-list) and hash.

return
The cached scrape response. Can be null.

    try {
      return getHashData(target_url==null?torrent.getAnnounceURL():target_url, 
                         torrent.getHashWrapper());
      
    } catch(TOTorrentException e) {
    	Debug.printStackTrace( e );
      return null;
    }
  
protected org.gudy.azureus2.core3.tracker.client.impl.TRTrackerScraperResponseImpl getHashData(java.net.URL trackerUrl, HashWrapper hash)
Retrieves the last cached scrape response for the supplied tracker URL and hash. If no cache entry exists for the hash, one is created.

return
The cached scrape response. Can be null.

    // can be null when first called and url not yet set up...
    if ( trackerUrl == null ){
      return( null );
    }
  
    TRTrackerScraperResponseImpl data = null;

    	// DON'T USE URL as a key in the trackers map, use the string version. If you
    	// use a URL then the "containsKey" method does a URL.equals test. This does not
    	// simply check string equivalence: it tries to resolve the host name, which can
    	// cause significant hangs (several seconds) - see the standalone demo after this method
    
    String	url_str = trackerUrl.toString();
      
    TrackerStatus ts = null;
    
     try{
        trackers_mon.enter();
    	
        ts = (TrackerStatus) trackers.get(url_str);
    
        if ( ts != null ){
	      
	      data = ts.getHashData( hash );
	      
	    }else{
    
	    	//System.out.println( "adding hash for " + trackerUrl + " : " + ByteFormatter.nicePrint(hashBytes, true));
      
	    	ts = new TrackerStatus(this, scraper.getScraper(),trackerUrl);
      
	        trackers.put(url_str, ts);

	        if( !ts.isTrackerScrapeUrlValid() ) {
  
		      	if (Logger.isEnabled()){
							Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID,
									LogEvent.LT_ERROR, "Can't scrape using url '" + trackerUrl
											+ "' as it doesn't end in " + "'/announce', skipping."));
		      	}
	        }
	    }
    
    }finally{
      	
        trackers_mon.exit();
    }
    
    	// do this outside the monitor to avoid a deadlock, as ts.addHash invokes
		// listeners....

    if ( data == null ){
    	
    	 data = ts.addHash(hash);
    }
    
    return data;
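The warning above about URL keys is a general java.net pitfall: URL.equals() and URL.hashCode() may resolve host names, so URL-keyed maps can block on DNS. A self-contained demonstration of the string-key workaround (the tracker host is made up):

    import java.net.URL;
    import java.util.HashMap;
    import java.util.Map;

    public class UrlKeyDemo {
        public static void main(String[] args) throws Exception {
            URL url = new URL("http://tracker.example.com:6969/announce");

            // Risky: URL.equals()/hashCode() may resolve the host name, so every
            // put/containsKey on this map can block on DNS for seconds.
            Map<URL, String> byUrl = new HashMap<URL, String>();
            byUrl.put(url, "status"); // may trigger a DNS lookup right here

            // Safe: string comparison involves no network activity at all.
            Map<String, String> byString = new HashMap<String, String>();
            byString.put(url.toString(), "status");
            System.out.println(byString.containsKey(url.toString())); // true, no DNS
        }
    }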
  
public long getNextScrapeCheckOn()

		return nextScrapeCheckOn;
	
protected void removeHash(TOTorrent torrent)
Removes the scrape task and data associated with the TOTorrent's Announce URL, announce-list data and hash.

    try{
      removeHash(torrent.getAnnounceURL().toString(), torrent.getHashWrapper());
      
      TOTorrentAnnounceURLSet[] sets = torrent.getAnnounceURLGroup().getAnnounceURLSets();
      
      for (int i=0;i<sets.length;i++){
      	
      	URL[]	urls = sets[i].getAnnounceURLs();
      	
      	for (int j=0;j<urls.length;j++){
      		
      		removeHash(urls[j].toString(), torrent.getHashWrapper());
      	}
      }
      
      
    } catch (TOTorrentException e) {
    	Debug.printStackTrace( e );
    }
  
protected void removeHash(java.lang.String trackerUrl, HashWrapper hash)
Removes the scrape task and data associated with the supplied tracker URL and torrent hash.


    TrackerStatus ts = (TrackerStatus) trackers.get(trackerUrl);
    if (ts != null){
      //System.out.println( "removing hash for " + trackerUrl );
      ts.removeHash(hash);
    }
  
private void runScrapes()
Loops indefinitely, waiting for the next scheduled scrape and then performing it.

		TRTrackerBTScraperResponseImpl nextResponseScraping = null;

		while (true) {

			long delay;

			if (nextResponseScraping == null) {

				delay = 60000; // nothing going on, recheck in a min

			} else {

				long scrape_time = nextResponseScraping.getNextScrapeStartTime();

				long time_to_scrape = scrape_time - SystemTime.getCurrentTime()
						+ SystemTime.TIME_GRANULARITY_MILLIS;

				if (time_to_scrape <= 0) {

					if (nextResponseScraping.getTrackerStatus().getNumActiveScrapes() > 0) {
						// check if done scraping every 2 seconds, if no other
						// scrapes are scheduled. If other scrapes were scheduled,
						// we would have got them from checkForNextScrape()
						delay = 2000;
					} else {

						try {
							nextResponseScraping.getTrackerStatus().updateSingleHash(
									nextResponseScraping.getHash(), false);

							delay = 0; // pick up next scrape fairly quickly

						} catch (Throwable e) {

							Debug.printStackTrace(e);

							delay = 30000;
						}
					}
				} else {

					delay = time_to_scrape;

					if (delay > 30000) {
						delay = 30000; // don't sleep too long in case new hashes are added etc.
					}
				}
			}

			try {
				nextScrapeCheckOn = SystemTime.getCurrentTime() + delay;
				Thread.sleep(delay);

			} catch (Exception e) {
				// sleep interrupted - fall through and re-check the schedule
			}

			TRTrackerBTScraperResponseImpl oldResponse = nextResponseScraping;
			nextResponseScraping = checkForNextScrape();

			if (Logger.isEnabled() && nextResponseScraping != oldResponse && nextResponseScraping != null ) {
				Logger.log(new LogEvent(
						TorrentUtils.getDownloadManager(nextResponseScraping.getHash()),
						LOGID,
						LogEvent.LT_INFORMATION,
						"Next scrape will be "
								+ nextResponseScraping.getURL()
								+ " in "
								+ ((nextResponseScraping.getNextScrapeStartTime() - SystemTime.getCurrentTime())/1000)
								+ " sec,type="
								+ (nextResponseScraping.getTrackerStatus().getSupportsMultipeHashScrapes()
										? "multi" : "single")
										+ ",active="+nextResponseScraping.getTrackerStatus().getNumActiveScrapes()));
			}
		}
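The delay policy of this loop stands on its own: idle at 60 seconds when nothing is scheduled, poll a busy tracker every 2 seconds, otherwise sleep until the next due scrape but never more than 30 seconds so newly added hashes are noticed. A condensed, illustrative sketch (names are made up, and it omits the TIME_GRANULARITY adjustment):

    public class ScrapeDelayDemo {
        // Condensed version of the delay policy above; illustration only.
        static long computeDelay(Long nextScrapeStart, boolean trackerBusy, long now) {
            if (nextScrapeStart == null) {
                return 60000;                  // nothing scheduled: re-check in a minute
            }
            long untilDue = nextScrapeStart - now;
            if (untilDue <= 0) {
                return trackerBusy ? 2000 : 0; // due now: poll a busy tracker every 2s
            }
            return Math.min(untilDue, 30000);  // cap the sleep so new hashes are noticed
        }

        public static void main(String[] args) {
            long now = System.currentTimeMillis();
            System.out.println(computeDelay(null, false, now));                      // 60000
            System.out.println(computeDelay(Long.valueOf(now + 90000), false, now)); // 30000 (capped)
        }
    }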
	
protected void syncUpdate(TOTorrent torrent, java.net.URL target_url)

    if (torrent == null){
      return;
    }
    
    try {
      HashWrapper hash = torrent.getHashWrapper();
    
      TrackerStatus matched_ts = null;
      
      try{
      	trackers_mon.enter();
      	
        Iterator iter = trackers.values().iterator();
        
        while (iter.hasNext()){
        	
          TrackerStatus ts = (TrackerStatus) iter.next();

          if ( 	target_url == null ||
          		target_url.toString().equals( ts.getTrackerURL().toString())){
          	
	          Map hashmap = ts.getHashes();
	
		      try{
		    	  ts.getHashesMonitor().enter();

		          if ( hashmap.get( hash ) != null ){
		          	
		        	matched_ts	= ts;
		        	  
		        	break;
		          }
		      }finally{
		    	  
		    	  ts.getHashesMonitor().exit();
		      }
          }
        }
      }finally{
      	
      	trackers_mon.exit();
      }
      
      if ( matched_ts != null ){
    	  
    	  matched_ts.updateSingleHash( hash, true, false );
      }
    }
    catch (Throwable e) {
      Debug.out( "scrape syncUpdate() exception", e );
    }