Usages of com.google.common.cache.Weigher (Guava's cache-entry weigher interface)
// Weighs each cache entry by the byte length of its value, so maximumWeight
// effectively caps the total number of cached bytes.
// NOTE(review): declared static but assigned in the instance constructor —
// every new HttpStorageCaching overwrites the shared field; confirm intended.
private static Weigher<String, byte[]> cacheWeighter; // code omitted ...

/**
 * Builds the HTTP storage cache: a fixed 100-thread pool backing an async
 * HTTP client, plus a Guava cache bounded by total value size (CACHE_WEIGHT).
 */
public HttpStorageCaching() {
    threadpool = Executors.newFixedThreadPool(100);
    asyncHttp = Async.newInstance().use(threadpool);
    cacheWeighter = new Weigher<String, byte[]>() {
        @Override
        public int weigh(String key, byte[] value) {
            // Entry weight = payload size in bytes.
            return value.length;
        }
    };
    cache = CacheBuilder.newBuilder()
            .weigher(cacheWeighter)
            .maximumWeight(CACHE_WEIGHT)
            .build();
}
/**
 * Verifies weight-based eviction: each value weighs its own length, the cap
 * is 15, and four 4-5 character values exceed it, so the oldest entry is
 * evicted and exactly three remain.
 */
@Test
public void testWeigher() throws Exception {
    CacheLoader<String, String> upperCaseLoader = new CacheLoader<String, String>() {
        public String load(String key) {
            return key.toUpperCase();
        }
    };
    Weigher<String, String> valueLengthWeigher = new Weigher<String, String>() {
        public int weigh(String key, String value) {
            return value.length();
        }
    };
    LoadingCache<String, String> cache = CacheBuilder.newBuilder()
            .maximumWeight(15)
            .weigher(valueLengthWeigher)
            .build(upperCaseLoader);

    cache.put("test", "TEST");
    cache.put("test1", "TEST1");
    cache.put("test2", "TEST2");
    cache.put("test3", "TEST3");

    assertThat(cache.size()).isEqualTo(3);
    assertThat(cache.asMap().toString()).isEqualTo("{test3=TEST3, test2=TEST2, test1=TEST1}");
}
/**
 * Creates a node store backed by the given MicroKernel with a memory-bounded
 * node-state cache, then loads the current head revision as the root state.
 *
 * @param kernel    the underlying MicroKernel (must not be null)
 * @param cacheSize maximum total cache weight, in units of
 *                  {@code KernelNodeState.getMemory()}
 */
public KernelNodeStore(final MicroKernel kernel, long cacheSize) {
    this.kernel = checkNotNull(kernel);
    // Each entry weighs its estimated in-memory footprint.
    Weigher<String, KernelNodeState> weigher = new Weigher<String, KernelNodeState>() {
        @Override
        public int weigh(String key, KernelNodeState state) {
            return state.getMemory();
        }
    };
    this.cache = CacheLIRS.newBuilder()
            .maximumWeight(cacheSize)
            .recordStats()
            .weigher(weigher)
            .build(new CacheLoader<String, KernelNodeState>() {
                @Override
                public KernelNodeState load(String key) {
                    // Keys are "<revision>/<path>"; split at the first '/'.
                    int slash = key.indexOf('/');
                    String revision = key.substring(0, slash);
                    String path = key.substring(slash);
                    // The new state is handed the cache itself so it can
                    // resolve child states through the same cache.
                    return new KernelNodeState(KernelNodeStore.this, path, revision, cache);
                }

                @Override
                public ListenableFuture<KernelNodeState> reload(
                        String key, KernelNodeState oldValue) {
                    // LoadingCache.reload() is only used to re-calculate the
                    // memory usage on KernelNodeState.init(). Therefore
                    // we simply return the old value as is (OAK-643)
                    SettableFuture<KernelNodeState> future = SettableFuture.create();
                    future.set(oldValue);
                    return future;
                }
            });
    cacheStats = new CacheStats(cache, "NodeStore", weigher, cacheSize);
    try {
        // Key "<headRevision>/" resolves the root path of the head revision.
        this.root = cache.get(kernel.getHeadRevision() + '/');
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    changeDispatcher = new ChangeDispatcher(root);
}
/**
 * Creates a parser over the given stream and sets up an interning cache for
 * strings, bounded at 8 MiB of key weight. When debug logging is enabled the
 * cache additionally records hit/miss statistics.
 *
 * @param s the input stream to parse
 */
protected OBOFormatParser(MyStream s) {
    stream = s;
    // The loader returns the key itself, so key length equals value length.
    Weigher<String, String> keyLengthWeigher = new Weigher<String, String>() {
        @Override
        public int weigh(String key, String value) {
            return key.length();
        }
    };
    CacheLoader<String, String> identityLoader = new CacheLoader<String, String>() {
        @Override
        public String load(String key) throws Exception {
            return key;
        }
    };
    CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder();
    if (LOG.isDebugEnabled()) {
        // Stats collection has a cost, so it is only enabled for debugging.
        builder.recordStats();
    }
    stringCache = builder
            .maximumWeight(8192 * 1024)
            .weigher(keyLengthWeigher)
            .build(identityLoader);
}
private QueryContext createQueryContext(Session session) { QueryContext newQueryContext = new QueryContext(context.getConf(), session); // Set default space uri and its root uri newQueryContext.setDefaultSpaceUri(TablespaceManager.getDefault().getUri()); newQueryContext.setDefaultSpaceRootUri(TablespaceManager.getDefault().getRootUri()); if (TajoConstants.IS_TEST_MODE) { newQueryContext.putAll(CommonTestingUtil.getSessionVarsForTest()); } // Set queryCache in session int queryCacheSize = context.getConf().getIntVar(TajoConf.ConfVars.QUERY_SESSION_QUERY_CACHE_SIZE); if (queryCacheSize > 0 && session.getQueryCache() == null) { Weigher<String, Expr> weighByLength = new Weigher<String, Expr>() { public int weigh(String key, Expr expr) { return key.length(); } }; LoadingCache<String, Expr> cache = CacheBuilder.newBuilder() .maximumWeight(queryCacheSize * 1024) .weigher(weighByLength) .expireAfterAccess(1, TimeUnit.HOURS) .build(new CacheLoader<String, Expr>() { public Expr load(String sql) throws SQLSyntaxError { return analyzer.parse(sql); } }); session.setQueryCache(cache); } return newQueryContext; }
/**
 * Wraps a candidate generator with a bounded LRU-style result cache. If the
 * generator is already cache-wrapped, it is returned unchanged (with a
 * warning) rather than double-wrapped.
 *
 * @param inner the generator to wrap; must not be null
 * @return a caching wrapper, or {@code inner} itself when already cached
 */
@Nonnull
public static <Q, L> CandidateGenerator<Q, L> wrap(@Nonnull final CandidateGenerator<Q, L> inner) {
    if (checkNotNull(inner, "inner") instanceof CachedCandidateGenerator) {
        LOG.warn("Ignoring attempt to cache wrap a KnowledgeBase that was already cached.");
        return inner;
    }

    // Weight grows with result-set size; the +1 keeps empty sets non-zero.
    final Weigher<Q, Set<L>> resultWeigher = new Weigher<Q, Set<L>>() {
        public int weigh(@Nonnull Q key, @Nonnull Set<L> values) {
            return 1 + values.size();
        }
    };

    // Delegate both single and batch lookups to the wrapped generator.
    final CacheLoader<Q, Set<L>> candidateLoader = new CacheLoader<Q, Set<L>>() {
        @Nullable
        @Override
        public Set<L> load(Q key) throws Exception {
            return inner.findCandidates(key);
        }

        @Override
        public Map<Q, Set<L>> loadAll(Iterable<? extends Q> keys) throws Exception {
            return inner.batchFindCandidates(Sets.newHashSet(keys));
        }
    };

    final LoadingCache<Q, Set<L>> candidateCache = CacheBuilder.newBuilder()
            .weigher(resultWeigher)
            .maximumWeight(1 << 16)
            .build(candidateLoader);

    return new CachedCandidateGenerator<Q, L>(candidateCache);
}
/**
 * Initializes the loaded-job cache from configuration. Two modes exist:
 * the legacy count-bounded cache (maximumSize), and — when the
 * "loaded tasks" property is set to a valid integer — a weight-bounded
 * cache where each job weighs its total task count.
 *
 * @param conf configuration supplying the cache-size properties
 */
@SuppressWarnings("serial")
private void createLoadedJobCache(Configuration conf) {
    // Set property for old "loaded jobs" cache
    loadedJobCacheSize = conf.getInt(
        JHAdminConfig.MR_HISTORY_LOADED_JOB_CACHE_SIZE,
        JHAdminConfig.DEFAULT_MR_HISTORY_LOADED_JOB_CACHE_SIZE);

    // Check property for new "loaded tasks" cache perform sanity checking
    useLoadedTasksCache = false;
    try {
        String taskSizeString = conf
            .get(JHAdminConfig.MR_HISTORY_LOADED_TASKS_CACHE_SIZE);
        if (taskSizeString != null) {
            // Clamp to at least 1 so maximumWeight is never zero.
            loadedTasksCacheSize = Math.max(Integer.parseInt(taskSizeString), 1);
            useLoadedTasksCache = true;
        }
    } catch (NumberFormatException nfe) {
        // Invalid value: log and fall back to the legacy job-count cache.
        LOG.error("The property " +
            JHAdminConfig.MR_HISTORY_LOADED_TASKS_CACHE_SIZE +
            " is not an integer value. Please set it to a positive" +
            " integer value.");
    }

    // Both cache flavors load jobs on demand through loadJob().
    CacheLoader<JobId, Job> loader;
    loader = new CacheLoader<JobId, Job>() {
        @Override
        public Job load(JobId key) throws Exception {
            return loadJob(key);
        }
    };

    if (!useLoadedTasksCache) {
        // Legacy mode: bound by number of jobs, not task weight.
        loadedJobCache = CacheBuilder.newBuilder()
            .maximumSize(loadedJobCacheSize)
            .initialCapacity(loadedJobCacheSize)
            .concurrencyLevel(1)
            .build(loader);
    } else {
        Weigher<JobId, Job> weightByTasks;
        weightByTasks = new Weigher<JobId, Job>() {
            /**
             * Method for calculating Job weight by total task count. If
             * the total task count is greater than the size of the tasks
             * cache, then cap it at the cache size. This allows the cache
             * to always hold one large job.
             * @param key JobId object
             * @param value Job object
             * @return Weight of the job as calculated by total task count
             */
            @Override
            public int weigh(JobId key, Job value) {
                int taskCount = Math.min(loadedTasksCacheSize,
                    value.getTotalMaps() + value.getTotalReduces());
                return taskCount;
            }
        };
        // Keep concurrencyLevel at 1. Otherwise, two problems:
        // 1) The largest job that can be initially loaded is
        //    cache size / 4.
        // 2) Unit tests are not deterministic.
        loadedJobCache = CacheBuilder.newBuilder()
            .maximumWeight(loadedTasksCacheSize)
            .weigher(weightByTasks)
            .concurrencyLevel(1)
            .build(loader);
    }
}
/**
 * Shows weight-based eviction: entries weigh their value's length against a
 * 16-unit cap, so loading four keys evicts the first ("first" -> "FIRST",
 * weight 5) and the cache settles at three entries.
 */
@Test
public void whenCacheReachMaxWeight_thenEviction() {
    final CacheLoader<String, String> upperCaser = new CacheLoader<String, String>() {
        @Override
        public final String load(final String key) {
            return key.toUpperCase();
        }
    };
    final Weigher<String, String> byValueLength = new Weigher<String, String>() {
        @Override
        public int weigh(final String key, final String value) {
            return value.length();
        }
    };
    final LoadingCache<String, String> cache = CacheBuilder.newBuilder()
            .maximumWeight(16)
            .weigher(byValueLength)
            .build(upperCaser);

    cache.getUnchecked("first");
    cache.getUnchecked("second");
    cache.getUnchecked("third");
    cache.getUnchecked("last");

    assertEquals(3, cache.size());
    assertNull(cache.getIfPresent("first"));
    assertEquals("LAST", cache.getIfPresent("last"));
}
private QueryContext createQueryContext(Session session) { QueryContext newQueryContext = new QueryContext(context.getConf(), session); String tajoTest = System.getProperty(CommonTestingUtil.TAJO_TEST_KEY); if (tajoTest != null && tajoTest.equalsIgnoreCase(CommonTestingUtil.TAJO_TEST_TRUE)) { newQueryContext.putAll(CommonTestingUtil.getSessionVarsForTest()); } // Set queryCache in session int queryCacheSize = context.getConf().getIntVar(TajoConf.ConfVars.QUERY_SESSION_QUERY_CACHE_SIZE); if (queryCacheSize > 0 && session.getQueryCache() == null) { Weigher<String, Expr> weighByLength = new Weigher<String, Expr>() { public int weigh(String key, Expr expr) { return key.length(); } }; LoadingCache<String, Expr> cache = CacheBuilder.newBuilder() .maximumWeight(queryCacheSize * 1024) .weigher(weighByLength) .expireAfterAccess(1, TimeUnit.HOURS) .build(new CacheLoader<String, Expr>() { public Expr load(String sql) throws SQLSyntaxError { return analyzer.parse(sql); } }); session.setQueryCache(cache); } return newQueryContext; }
/**
 * This method is used for creating a new cache object, from the defined
 * configuration: a blob-size-weighted tile cache with an optional
 * time-based eviction policy and a matching scheduled cleanup task.
 *
 * @param configuration source of concurrency level, memory limit,
 *                      eviction time, and eviction policy
 */
private void initCache(CacheConfiguration configuration) {
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Building new Cache");
    }
    // Initialization step
    int concurrency = configuration.getConcurrencyLevel();
    maxMemory = configuration.getHardMemoryLimit() * BYTES_TO_MB;
    long evictionTime = configuration.getEvictionTime();
    EvictionPolicy policy = configuration.getPolicy();

    // If Cache already exists, flush it
    if (cache != null) {
        cache.invalidateAll();
    }
    // Create the CacheBuilder
    CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder();
    // Add weigher
    // NOTE(review): weigh() also bumps currentSize as a side effect; Guava
    // may call the weigher on entries it never retains — confirm the
    // accounting tolerates that.
    Weigher<String, TileObject> weigher = new Weigher<String, TileObject>() {
        @Override
        public int weigh(String key, TileObject value) {
            currentSize.addAndGet(value.getBlobSize());
            return value.getBlobSize();
        }
    };
    // Create the builder
    CacheBuilder<String, TileObject> newBuilder = builder.maximumWeight(maxMemory)
            .recordStats().weigher(weigher).concurrencyLevel(concurrency)
            .removalListener(new RemovalListener<String, TileObject>() {

                @Override
                public void onRemoval(RemovalNotification<String, TileObject> notification) {
                    // TODO This operation is not atomic
                    TileObject obj = notification.getValue();
                    // Update the current size
                    currentSize.addAndGet(-obj.getBlobSize());
                    final String tileKey = generateTileKey(obj);
                    final String layerName = obj.getLayerName();
                    // Keep the layer->tile index in sync with the cache.
                    multimap.removeTile(layerName, tileKey);
                    if (LOGGER.isDebugEnabled()) {
                        LOGGER.debug("Removed tile " + tileKey + " for layer " + layerName
                                + " due to reason:" + notification.getCause().toString());
                        LOGGER.debug("Removed tile was evicted? " + notification.wasEvicted());
                    }
                }
            });
    // Handle eviction policy
    boolean configuredPolicy = false;
    if (policy != null && evictionTime > 0) {
        if (policy == EvictionPolicy.EXPIRE_AFTER_ACCESS) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Configuring Expire After Access eviction policy");
            }
            newBuilder.expireAfterAccess(evictionTime, TimeUnit.SECONDS);
            configuredPolicy = true;
        } else if (policy == EvictionPolicy.EXPIRE_AFTER_WRITE) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Configuring Expire After Write eviction policy");
            }
            newBuilder.expireAfterWrite(evictionTime, TimeUnit.SECONDS);
            configuredPolicy = true;
        }
    }
    // Build the cache
    cache = newBuilder.build();
    // Created a new multimap
    multimap = new LayerMap();
    // Configure a new scheduling task if needed
    if (configuredPolicy) {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Configuring Scheduled Task for cache eviction");
        }
        Runnable command = new Runnable() {

            @Override
            public void run() {
                if (configured.get()) {
                    // Increment the number of current operations
                    // This behavior is used in order to wait
                    // the end of all the operations after setting
                    // the configured parameter to false
                    actualOperations.incrementAndGet();
                    try {
                        cache.cleanUp();
                    } finally {
                        // Decrement the number of current operations.
                        actualOperations.decrementAndGet();
                    }
                }
            }
        };
        // Initialization of the internal Scheduler task for scheduling cache cleanup
        scheduledPool = Executors.newScheduledThreadPool(CORE_POOL_SIZE);
        scheduledPool.scheduleAtFixedRate(command, 10, evictionTime + 1, TimeUnit.SECONDS);
    }
    // Update the configured parameter
    configured.getAndSet(true);
}
Usage snippet has been bookmarked! Review your bookmarks
Thank you! Review your likes