package com.nimbusds.infinispan.persistence.sql;


import java.util.Properties;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import javax.sql.DataSource;

import static org.jooq.impl.DSL.table;

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.health.HealthCheckRegistry;
import com.nimbusds.common.monitor.MonitorRegistries;
import com.nimbusds.infinispan.persistence.common.InfinispanEntry;
import com.nimbusds.infinispan.persistence.common.InfinispanStore;
import com.nimbusds.infinispan.persistence.common.query.QueryExecutor;
import com.nimbusds.infinispan.persistence.sql.config.SQLStoreConfiguration;
import com.nimbusds.infinispan.persistence.sql.query.SQLQueryExecutor;
import com.nimbusds.infinispan.persistence.sql.query.SQLQueryExecutorInitContext;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import net.jcip.annotations.ThreadSafe;
import org.infinispan.commons.configuration.ConfiguredBy;
import org.infinispan.filter.KeyFilter;
import org.infinispan.marshall.core.MarshalledEntry;
import org.infinispan.marshall.core.MarshalledEntryFactory;
import org.infinispan.persistence.TaskContextImpl;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.PersistenceException;
import org.jooq.DSLContext;
import org.jooq.Merge;
import org.jooq.Record;
import org.jooq.SQLDialect;
import org.jooq.impl.DSL;
import org.kohsuke.MetaInfServices;


/**
 * SQL store for Infinispan 8.2+ caches and maps.
 */
@ThreadSafe
@MetaInfServices
@ConfiguredBy(SQLStoreConfiguration.class)
public class SQLStore<K,V> extends InfinispanStore<K,V> {


	/**
	 * The SQL store configuration.
	 */
	private SQLStoreConfiguration config;


	/**
	 * The SQL (JDBC) data source (with connection pool).
	 */
	private DataSource dataSource;


	/**
	 * Wrap the SQL data source with jOOQ.
	 * See http://stackoverflow.com/a/31389342/429425
	 */
	private DSLContext sql;


	/**
	 * The SQL record transformer (to / from Infinispan entries).
	 */
	private SQLRecordTransformer<K,V> sqlRecordTransformer;


	/**
	 * The optional SQL query executor.
	 */
	private SQLQueryExecutor<K,V> sqlQueryExecutor;


	/**
	 * The marshalled Infinispan entry factory.
	 */
	private MarshalledEntryFactory<K,V> marshalledEntryFactory;


	/**
	 * Purges expired entries found in the SQL store, as indicated by
	 * their persisted metadata (optional, may be ignored / not stored).
	 */
	private ExpiredEntryReaper<K,V> reaper;


	/**
	 * jOOQ query fixes.
	 */
	private JOOQFixes jooqFixes;


	/**
	 * Loads and instantiates an SQL record transformer with the specified
	 * class.
	 *
	 * @param clazz The class. Must have a public no-arg constructor and
	 *              must not be {@code null}.
	 *
	 * @return The SQL record transformer.
	 *
	 * @throws PersistenceException If instantiation failed.
	 */
	@SuppressWarnings("unchecked")
	private SQLRecordTransformer<K,V> loadRecordTransformerClass(final Class<?> clazz) {

		try {
			Class<SQLRecordTransformer<K,V>> genClazz = (Class<SQLRecordTransformer<K,V>>)clazz;
			// getDeclaredConstructor().newInstance() over the deprecated
			// Class.newInstance(); any reflective failure is still caught below
			return genClazz.getDeclaredConstructor().newInstance();
		} catch (Exception e) {
			throw new PersistenceException("Couldn't load SQL record transformer class: " + e.getMessage(), e);
		}
	}


	/**
	 * Loads and instantiates an SQL query executor with the specified
	 * class.
	 *
	 * @param clazz The class. Must have a public no-arg constructor and
	 *              must not be {@code null}.
	 *
	 * @return The SQL query executor.
	 *
	 * @throws PersistenceException If instantiation failed.
	 */
	@SuppressWarnings("unchecked")
	private SQLQueryExecutor<K,V> loadQueryExecutorClass(final Class<?> clazz) {

		try {
			Class<SQLQueryExecutor<K,V>> genClazz = (Class<SQLQueryExecutor<K,V>>)clazz;
			return genClazz.getDeclaredConstructor().newInstance();
		} catch (Exception e) {
			throw new PersistenceException("Couldn't load SQL query executor class: " + e.getMessage(), e);
		}
	}


	/**
	 * Returns the SQL store configuration.
	 *
	 * @return The SQL store configuration, {@code null} if not
	 *         initialised.
	 */
	public SQLStoreConfiguration getConfiguration() {

		return config;
	}


	/**
	 * Returns the underlying SQL data source.
	 *
	 * @return The underlying SQL data source, {@code null} if not
	 *         initialised.
	 */
	public DataSource getDataSource() {

		return dataSource;
	}


	@Override
	@SuppressWarnings("unchecked")
	public void init(final InitializationContext ctx) {

		// This method will be invoked by the PersistenceManager during initialization. The InitializationContext
		// contains:
		// - this CacheLoader's configuration
		// - the cache to which this loader is applied. Your loader might want to use the cache's name to construct
		//   cache-specific identifiers
		// - the StreamingMarshaller that needs to be used to marshall/unmarshall the entries
		// - a TimeService which the loader can use to determine expired entries
		// - a ByteBufferFactory which needs to be used to construct ByteBuffers
		// - a MarshalledEntryFactory which needs to be used to construct entries from the data retrieved by the loader

		super.init(ctx);

		this.config = ctx.getConfiguration();

		Loggers.MAIN_LOG.info("[IS0100] Infinispan SQL store configuration for cache {}:", getCacheName());
		config.log();

		// Load and initialise the SQL record transformer
		Loggers.MAIN_LOG.debug("[IS0101] Loading SQL record transformer class {} for cache {}...",
			config.getRecordTransformerClass(),
			getCacheName());

		sqlRecordTransformer = loadRecordTransformerClass(config.getRecordTransformerClass());
		sqlRecordTransformer.init(() -> config.getSQLDialect());

		jooqFixes = new JOOQFixes(config.getSQLDialect(), sqlRecordTransformer.getCreateTableStatement());

		// Load and initialise the optional SQL query executor
		if (config.getQueryExecutorClass() != null) {
			Loggers.MAIN_LOG.debug("[IS0201] Loading optional SQL query executor class {} for cache {}...",
				config.getQueryExecutorClass(),
				getCacheName());

			sqlQueryExecutor = loadQueryExecutorClass(config.getQueryExecutorClass());

			// NOTE(review): dataSource is still null here (created in start());
			// the executor must not call getDataSource() before start() runs
			sqlQueryExecutor.init(new SQLQueryExecutorInitContext<K, V>() {
				@Override
				public DataSource getDataSource() {
					return dataSource;
				}


				@Override
				public SQLRecordTransformer<K, V> getSQLRecordTransformer() {
					return sqlRecordTransformer;
				}


				@Override
				public SQLDialect getSQLDialect() {
					return config.getSQLDialect();
				}
			});
		}

		marshalledEntryFactory = (MarshalledEntryFactory<K, V>)ctx.getMarshalledEntryFactory();

		Loggers.MAIN_LOG.info("[IS0102] Initialized SQL external store for cache {} with table {}",
			getCacheName(),
			sqlRecordTransformer.getTableName());
	}


	@Override
	public QueryExecutor<K, V> getQueryExecutor() {

		return sqlQueryExecutor;
	}


	@Override
	public void start() {

		// This method will be invoked by the PersistenceManager to start the CacheLoader. At this stage configuration
		// is complete and the loader can perform operations such as opening a connection to the external storage,
		// initialize internal data structures, etc.

		Properties hikariProps = HikariConfigUtils.removeNonHikariProperties(config.properties());
		HikariConfigUtils.setDefaultPoolName(hikariProps, getCacheName());

		HikariConfig hikariConfig = new HikariConfig(hikariProps);

		MetricRegistry metricRegistry = MonitorRegistries.getMetricRegistry();
		if (HikariConfigUtils.metricsAlreadyRegistered(metricRegistry)) {
			Loggers.MAIN_LOG.warn("[IS0130] SQL store: Couldn't register Dropwizard metrics: Existing registered metrics for " + getCacheName());
		} else {
			hikariConfig.setMetricRegistry(metricRegistry);
		}

		HealthCheckRegistry healthCheckRegistry = MonitorRegistries.getHealthCheckRegistry();
		if (HikariConfigUtils.healthChecksAlreadyRegistered(healthCheckRegistry)) {
			Loggers.MAIN_LOG.warn("[IS0131] SQL store: Couldn't register Dropwizard health checks: Existing registered health checks for " + getCacheName());
		} else {
			hikariConfig.setHealthCheckRegistry(healthCheckRegistry);
		}

		dataSource = new HikariDataSource(hikariConfig);

		// Init jOOQ SQL context
		sql = DSL.using(dataSource, config.getSQLDialect());

		// Create table if missing
		try {

			int rows = sql.execute(sqlRecordTransformer.getCreateTableStatement());

			if (rows > 0) {
				Loggers.MAIN_LOG.info("[IS0129] SQL store: Created table {} for cache {}",
					sqlRecordTransformer.getTableName(), getCacheName());
			}

		} catch (Exception e) {
			// Fixed broken format string (was "... failed: {}: e"); the
			// throwable is passed last so the stack trace gets logged
			Loggers.MAIN_LOG.fatal("[IS0103] SQL store: Create table if not exists failed: {}", e.getMessage(), e);
			throw new PersistenceException(e.getMessage(), e);
		}

		Loggers.MAIN_LOG.info("[IS0104] Started SQL external store connector for cache {} with table {}", getCacheName(), sqlRecordTransformer.getTableName());

		reaper = new ExpiredEntryReaper<>(sqlRecordTransformer);
	}


	@Override
	public void stop() {

		super.stop();

		// HikariDataSource implements Closeable; close the pool to release
		// the pooled JDBC connections and the pool's background threads
		// (the previous code leaked the pool on shutdown)
		if (dataSource instanceof HikariDataSource) {
			((HikariDataSource) dataSource).close();
		}

		Loggers.MAIN_LOG.info("[IS0105] Stopped SQL store connector for cache {}", getCacheName());
	}


	/**
	 * Casts the specified key to its native type.
	 *
	 * @param key The key object. Must not be {@code null}.
	 *
	 * @return The key in its native type.
	 *
	 * @throws PersistenceException If the key is a {@code byte[]} (cache
	 *                              compatibility mode not enabled).
	 */
	@SuppressWarnings("unchecked")
	private K resolveKey(final Object key) {

		if (key instanceof byte[]) {
			throw new PersistenceException("Cannot resolve " + getCacheName() + " cache key from byte[], enable compatibility mode");
		}

		return (K)key;
	}


	@Override
	public boolean contains(final Object key) {

		// This method will be invoked by the PersistenceManager to determine if the loader contains the specified key.
		// The implementation should be as fast as possible, e.g. it should strive to transfer the least amount of data possible
		// from the external storage to perform the check. Also, if possible, make sure the field is indexed on the external storage
		// so that its existence can be determined as quickly as possible.
		//
		// Note that keys will be in the cache's native format, which means that if the cache is being used by a remoting protocol
		// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].

		Loggers.SQL_LOG.trace("[IS0106] SQL store: Checking {} cache key {}", getCacheName(), key);

		try {
			return sql.selectOne()
				.from(table(sqlRecordTransformer.getTableName()))
				.where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
				.fetchOne() != null;

		} catch (Exception e) {
			Loggers.SQL_LOG.error("[IS0107] {}: {}", e.getMessage(), e);
			throw new PersistenceException(e.getMessage(), e);
		}
	}


	@Override
	@SuppressWarnings("unchecked")
	public MarshalledEntry<K,V> load(final Object key) {

		// Fetches an entry from the storage using the specified key. The CacheLoader should retrieve from the external storage all
		// of the data that is needed to reconstruct the entry in memory, i.e. the value and optionally the metadata. This method
		// needs to return a MarshalledEntry which can be constructed as follows:
		//
		// ctx.getMarshalledEntryFactory().new MarshalledEntry(key, value, metadata);
		//
		// If the entry does not exist or has expired, this method should return null.
		// If an error occurs while retrieving data from the external storage, this method should throw a PersistenceException
		//
		// Note that keys and values will be in the cache's native format, which means that if the cache is being used by a remoting protocol
		// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
		// If the loader needs to have knowledge of the key/value data beyond their binary representation, then it needs access to the key's and value's
		// classes and the marshaller used to encode them.

		Loggers.SQL_LOG.trace("[IS0108] SQL store: Loading {} cache entry with key {}", getCacheName(), key);

		final Record record;

		try {
			record = sql.selectFrom(table(sqlRecordTransformer.getTableName()))
				.where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
				.fetchOne();

		} catch (Exception e) {
			Loggers.SQL_LOG.error("[IS0109] {}, {}", e.getMessage(), e);
			throw new PersistenceException(e.getMessage(), e);
		}

		if (record == null) {
			// Not found
			Loggers.SQL_LOG.trace("[IS0110] SQL store: Record with key {} not found", key);
			return null;
		}

		if (Loggers.SQL_LOG.isTraceEnabled()) {
			Loggers.SQL_LOG.trace("[IS0111] SQL store: Retrieved record: {}", record);
		}

		// Transform SQL record to Infinispan entry
		InfinispanEntry<K,V> infinispanEntry = sqlRecordTransformer.toInfinispanEntry(record);

		return marshalledEntryFactory.newMarshalledEntry(
			infinispanEntry.getKey(),
			infinispanEntry.getValue(),
			infinispanEntry.getMetadata());
	}


	@Override
	public boolean delete(final Object key) {

		// The CacheWriter should remove from the external storage the entry identified by the specified key.
		// Note that keys will be in the cache's native format, which means that if the cache is being used by a remoting protocol
		// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].

		Loggers.SQL_LOG.trace("[IS0112] SQL store: Deleting {} cache entry with key {}", getCacheName(), key);

		int deletedRows;

		try {
			deletedRows = sql.deleteFrom(table(sqlRecordTransformer.getTableName()))
				.where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
				.execute();

		} catch (Exception e) {
			Loggers.SQL_LOG.error("[IS0113] {}, {}", e.getMessage(), e);
			throw new PersistenceException(e.getMessage(), e);
		}

		Loggers.SQL_LOG.trace("[IS0113] SQL store: Deleted {} record with key {}", deletedRows, key);

		if (deletedRows == 1) {
			return true;
		} else if (deletedRows == 0) {
			return false;
		} else {
			// More than one row matched the key - the selection conditions
			// should identify at most a single record
			Loggers.SQL_LOG.error("[IS0114] Too many deleted rows ({}) for key {}", deletedRows, key);
			throw new PersistenceException("Too many deleted rows for key " + key);
		}
	}


	@Override
	public void write(final MarshalledEntry<? extends K, ? extends V> marshalledEntry) {

		// The CacheWriter should write the specified entry to the external storage.
		//
		// The PersistenceManager uses MarshalledEntry as the default format so that CacheWriters can efficiently store data coming
		// from a remote node, thus avoiding any additional transformation steps.
		//
		// Note that keys and values will be in the cache's native format, which means that if the cache is being used by a remoting protocol
		// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].

		Loggers.SQL_LOG.trace("[IS0115] SQL store: Writing {} cache entry {}", getCacheName(), marshalledEntry);

		try {
			SQLRecord sqlRecord = sqlRecordTransformer.toSQLRecord(
				new InfinispanEntry<>(
					marshalledEntry.getKey(),
					marshalledEntry.getValue(),
					marshalledEntry.getMetadata()));

			// Use H2 style MERGE, JOOQ will adapt it for the particular database
			// http://www.jooq.org/doc/3.8/manual/sql-building/sql-statements/merge-statement/
			Merge mergeStatement = sql.mergeInto(table(sqlRecordTransformer.getTableName()), sqlRecord.getFields().keySet())
				.key(sqlRecord.getKeyColumns())
				.values(sqlRecord.getFields().values());

			String sqlStatement = jooqFixes.fixMergeStatement(mergeStatement);

			int rows = sql.execute(sqlStatement);

			if (rows != 1) {

				if (SQLDialect.MYSQL.equals(config.getSQLDialect()) && rows == 2) {
					// MySQL indicates UPDATE on INSERT by returning 2 num rows
					return;
				}

				Loggers.SQL_LOG.error("[IS0116] SQL insert / update for key {} in table {} failed: Rows {}",
					marshalledEntry.getKey(), sqlRecordTransformer.getTableName(), rows);
				throw new PersistenceException("(Synthetic) SQL MERGE failed: Rows " + rows);
			}

		} catch (Exception e) {
			Loggers.SQL_LOG.error("[IS0117] {}: {}", e.getMessage(), e);
			throw new PersistenceException(e.getMessage(), e);
		}
	}


	@Override
	public void process(final KeyFilter<? super K> keyFilter,
			    final CacheLoaderTask<K, V> cacheLoaderTask,
			    final Executor executor,
			    final boolean fetchValue,
			    final boolean fetchMetadata) {

		// Fixed broken format string (second placeholder was missing, so
		// the fetchMetadata argument was silently dropped)
		Loggers.SQL_LOG.trace("[IS0118] SQL store: Processing key filter for {} cache: fetchValue={} fetchMetadata={}",
			getCacheName(), fetchValue, fetchMetadata);

		final TaskContext taskContext = new TaskContextImpl();

		// TODO consider multi-threaded SQL retrieval?
		executor.execute(() -> {

			try {
				// Retrieves entire entry, fetchValue / fetchMetadata params are ignored TODO reconsider
				sql.selectFrom(table(sqlRecordTransformer.getTableName()))
					.fetch()
					.forEach(record -> {

						if (taskContext.isStopped()) {
							return;
						}

						InfinispanEntry<K, V> infinispanEntry = sqlRecordTransformer.toInfinispanEntry(record);

						if (keyFilter.accept(infinispanEntry.getKey())) {

							MarshalledEntry<K, V> marshalledEntry = marshalledEntryFactory.newMarshalledEntry(
								infinispanEntry.getKey(),
								infinispanEntry.getValue(),
								infinispanEntry.getMetadata());

							try {
								cacheLoaderTask.processEntry(marshalledEntry, taskContext);

							} catch (InterruptedException e) {
								throw new PersistenceException(e.getMessage(), e);
							}
						}
					});

			} catch (Exception e) {
				Loggers.SQL_LOG.error("[IS0119] {}: {}", e.getMessage(), e);
				throw new PersistenceException(e.getMessage(), e);
			}
		});
	}


	@Override
	public int size() {

		// Infinispan code analysis on 8.2 shows that this method is never called in practice, and
		// is not wired to the data / cache container API

		Loggers.SQL_LOG.trace("[IS0120] SQL store: Counting {} records", getCacheName());

		final int count;

		try {
			count = sql.fetchCount(table(sqlRecordTransformer.getTableName()));

		} catch (Exception e) {
			Loggers.SQL_LOG.error("[IS0121] {}: {}", e.getMessage(), e);
			throw new PersistenceException(e.getMessage(), e);
		}

		Loggers.SQL_LOG.trace("[IS0122] SQL store: Counted {} {} records", count, getCacheName());

		return count;
	}


	@Override
	public void clear() {

		Loggers.SQL_LOG.trace("[IS0123] SQL store: Clearing {} records", getCacheName());

		int numDeleted;

		try {
			numDeleted = sql.deleteFrom(table(sqlRecordTransformer.getTableName())).execute();

		} catch (Exception e) {
			Loggers.SQL_LOG.error("[IS0124] {}: {}", e.getMessage(), e);
			throw new PersistenceException(e.getMessage(), e);
		}

		Loggers.SQL_LOG.info("[IS0125] SQL store: Cleared {} {} records", numDeleted, sqlRecordTransformer.getTableName());
	}


	@Override
	public void purge(final Executor executor, final PurgeListener<? super K> purgeListener) {

		Loggers.SQL_LOG.trace("[IS0126] SQL store: Purging {} cache entries", getCacheName());

		final AtomicInteger numPurged = new AtomicInteger();

		try {
			executor.execute(() -> numPurged.set(reaper.purge(sql, purgeListener)));

		} catch (Exception e) {
			Loggers.SQL_LOG.error("[IS0127] {}: {}", e.getMessage(), e);
			throw new PersistenceException("Purge exception: " + e.getMessage(), e);
		}

		// NOTE(review): the purge task runs asynchronously, so numPurged is
		// likely still 0 when this is logged - consider logging from within
		// the task; left unchanged to preserve behaviour
		Loggers.SQL_LOG.debug("[IS0128] SQL store: Purged {} expired {} cache entries", numPurged.get(), getCacheName());
	}
}