// Constructor body: remember the owning scraper facade, then start the
// TrackerChecker that performs the actual scrape scheduling/fetching.
// NOTE(review): TrackerChecker is handed 'this' before construction has
// finished — assumes it doesn't call back into us re-entrantly; confirm.
scraper = _scraper; tracker_checker = new TrackerChecker( this );
// Return the singleton scraper implementation, lazily creating it for the
// given facade on first call (later callers' '_scraper' is ignored).
//
// Fix: enter the monitor *before* the try block. In the original form,
// if enter() threw, the finally clause would call exit() on a monitor we
// never acquired — the standard Java lock idiom is acquire-before-try.
class_mon.enter();

try{
	if ( singleton == null ){

		singleton = new TRTrackerBTScraperImpl( _scraper );
	}

	return( singleton );

}finally{

	class_mon.exit();
}
// Accessor for the owning scraper facade supplied at construction time.
return scraper;
// Delegate: stop tracking scrape state for this torrent's hash.
tracker_checker.removeHash( torrent );
// Return the cached scrape response for the torrent against the given
// tracker URL; a null torrent yields null. When 'force' is set, a
// synchronous update is pushed through the checker first so the returned
// data reflects a fresh scrape rather than the normal schedule.
if ( torrent == null ){

	return null;
}

if ( force ){

		// bypass the regular scrape schedule for this torrent/URL
	tracker_checker.syncUpdate( torrent, target_url );
}

return tracker_checker.getHashData( torrent, target_url );
TRTrackerScraperResponse res = tracker_checker.getHashData( tracker_client ); // System.out.println( "scrape: " + tracker_client + " -> " + (res==null?"null":""+res.getSeeds())); return( res );
if ( torrent != null && result != null ){ TRTrackerScraperResponseImpl resp = tracker_checker.getHashData( torrent, url ); // only override details if underlying scrape is failing if ( resp != null && resp.getStatus() == TRTrackerScraperResponse.ST_ERROR ){ resp.setSeedsPeers( result.getSeedCount(), result.getNonSeedCount()); resp.setScrapeStartTime( result.getScrapeStartTime()); // leave nextScrapeStartTime alone as we still want the existing // scraping mechanism to kick in and check the torrent's tracker resp.setStatus( result.getResponseType()==DownloadScrapeResult.RT_SUCCESS? TRTrackerScraperResponse.ST_ONLINE: TRTrackerScraperResponse.ST_ERROR, result.getStatus() + " (" + result.getURL() + ")"); scraper.scrapeReceived( resp ); } }