001package com.nimbusds.infinispan.persistence.sql;
002
003
004import java.util.Properties;
005import java.util.concurrent.Executor;
006import java.util.concurrent.atomic.AtomicInteger;
007import javax.sql.DataSource;
008
009import static org.jooq.impl.DSL.table;
010
011import com.codahale.metrics.MetricRegistry;
012import com.codahale.metrics.health.HealthCheckRegistry;
013import com.nimbusds.common.monitor.MonitorRegistries;
014import com.nimbusds.infinispan.persistence.common.InfinispanEntry;
015import com.nimbusds.infinispan.persistence.common.InfinispanStore;
016import com.nimbusds.infinispan.persistence.common.query.QueryExecutor;
017import com.nimbusds.infinispan.persistence.sql.config.SQLStoreConfiguration;
018import com.nimbusds.infinispan.persistence.sql.query.SQLQueryExecutor;
019import com.nimbusds.infinispan.persistence.sql.query.SQLQueryExecutorInitContext;
020import com.zaxxer.hikari.HikariConfig;
021import com.zaxxer.hikari.HikariDataSource;
022import net.jcip.annotations.ThreadSafe;
023import org.infinispan.commons.configuration.ConfiguredBy;
024import org.infinispan.filter.KeyFilter;
025import org.infinispan.marshall.core.MarshalledEntry;
026import org.infinispan.marshall.core.MarshalledEntryFactory;
027import org.infinispan.persistence.TaskContextImpl;
028import org.infinispan.persistence.spi.InitializationContext;
029import org.infinispan.persistence.spi.PersistenceException;
030import org.jooq.DSLContext;
031import org.jooq.Merge;
032import org.jooq.Record;
033import org.jooq.SQLDialect;
034import org.jooq.conf.RenderNameStyle;
035import org.jooq.conf.Settings;
036import org.jooq.impl.DSL;
037import org.kohsuke.MetaInfServices;
038
039
040/**
041 * SQL store for Infinispan 8.2+ caches and maps.
042 */
043@ThreadSafe
044@MetaInfServices
045@ConfiguredBy(SQLStoreConfiguration.class)
046public class SQLStore<K,V> extends InfinispanStore<K,V> {
047
048
049        /**
050         * The SQL store configuration.
051         */
052        private SQLStoreConfiguration config;
053        
054        
055        /**
056         * The Hikari SQL data source (with connection pool).
057         */
058        private HikariDataSource dataSource;
059        
060        
061        /**
062         * Wrap the SQL data source with jOOQ.
063         * See http://stackoverflow.com/a/31389342/429425
064         */
065        private DSLContext sql;
066
067
068        /**
069         * The SQL record transformer (to / from Infinispan entries).
070         */
071        private SQLRecordTransformer<K,V> sqlRecordTransformer;
072        
073        
074        /**
075         * The optional SQL query executor.
076         */
077        private SQLQueryExecutor<K,V> sqlQueryExecutor;
078
079
080        /**
081         * The marshalled Infinispan entry factory.
082         */
083        private MarshalledEntryFactory<K, V> marshalledEntryFactory;
084
085
086        /**
087         * Purges expired entries found in the SQL store, as indicated by
088         * their persisted metadata (optional, may be ignored / not stored).
089         */
090        private ExpiredEntryReaper<K,V> reaper;
091        
092        
093        /**
094         * jOOQ query fixes.
095         */
096        private JOOQFixes jooqFixes;
097
098
099        /**
100         * Loads an SQL record transformer with the specified class name.
101         *
102         * @param clazz The class. Must not be {@code null}.
103         *
104         * @return The SQL entry transformer.
105         */
106        @SuppressWarnings( "unchecked" )
107        private SQLRecordTransformer<K,V> loadRecordTransformerClass(final Class clazz) {
108
109                try {
110                        Class<SQLRecordTransformer<K,V>> genClazz = (Class<SQLRecordTransformer<K,V>>)clazz;
111                        return genClazz.newInstance();
112                } catch (Exception e) {
113                        throw new PersistenceException("Couldn't load SQL record transformer class: " + e.getMessage(), e);
114                }
115        }
116        
117        
118        /**
119         * Loads an SQL query executor with the specified class name.
120         *
121         * @param clazz The class. Must not be {@code null}.
122         *
123         * @return The SQL query executor.
124         */
125        @SuppressWarnings( "unchecked" )
126        private SQLQueryExecutor<K,V> loadQueryExecutorClass(final Class clazz) {
127                
128                try {
129                        Class<SQLQueryExecutor<K,V>> genClazz = (Class<SQLQueryExecutor<K,V>>)clazz;
130                        return genClazz.newInstance();
131                } catch (Exception e) {
132                        throw new PersistenceException("Couldn't load SQL query executor class: " + e.getMessage(), e);
133                }
134        }
135        
136        
137        /**
138         * Returns the SQL store configuration.
139         *
140         * @return The SQL store configuration, {@code null} if not
141         *         initialised.
142         */
143        public SQLStoreConfiguration getConfiguration() {
144                
145                return config;
146        }
147        
148        
149        /**
150         * Returns the underlying SQL data source.
151         *
152         * @return The underlying SQL data source, {@code null} if not
153         *         initialised.
154         */
155        public DataSource getDataSource() {
156                
157                return dataSource;
158        }
159
160
	/**
	 * Initialises the SQL store: records the store configuration, loads
	 * and initialises the SQL record transformer and the optional SQL
	 * query executor, and keeps the marshalled entry factory for
	 * constructing loaded entries. The data source is not opened here,
	 * see {@link #start}.
	 */
	@Override
	@SuppressWarnings("unchecked")
	public void init(final InitializationContext ctx) {

		// This method will be invoked by the PersistenceManager during initialization. The InitializationContext
		// contains:
		// - this CacheLoader's configuration
		// - the cache to which this loader is applied. Your loader might want to use the cache's name to construct
		//   cache-specific identifiers
		// - the StreamingMarshaller that needs to be used to marshall/unmarshall the entries
		// - a TimeService which the loader can use to determine expired entries
		// - a ByteBufferFactory which needs to be used to construct ByteBuffers
		// - a MarshalledEntryFactory which needs to be used to construct entries from the data retrieved by the loader

		super.init(ctx);
		
		this.config = ctx.getConfiguration();

		Loggers.MAIN_LOG.info("[IS0100] SQL store: Infinispan cache store configuration for {}:", getCacheName());
		config.log();
		
		Loggers.MAIN_LOG.info("[IS0140] SQL store: Expiration thread wake up interval for cache {}: {}", getCacheName(),
			ctx.getCache().getCacheConfiguration().expiration().wakeUpInterval());
		
		// Load and initialise the SQL record transformer
		Loggers.MAIN_LOG.debug("[IS0101] Loading SQL record transformer class {} for cache {}...",
			config.getRecordTransformerClass(),
			getCacheName());
		
		sqlRecordTransformer = loadRecordTransformerClass(config.getRecordTransformerClass());
		sqlRecordTransformer.init(() -> config.getSQLDialect());
		
		jooqFixes = new JOOQFixes(config.getSQLDialect(), sqlRecordTransformer.getCreateTableStatement());
		
		// Load and initialise the optional SQL query executor
		if (config.getQueryExecutorClass() != null) {
			Loggers.MAIN_LOG.debug("[IS0201] Loading optional SQL query executor class {} for cache {}...",
				config.getQueryExecutorClass(),
				getCacheName());
			
			sqlQueryExecutor = loadQueryExecutorClass(config.getQueryExecutorClass());
			
			// The init context exposes the data source via a getter
			// because the dataSource field is still null at this point;
			// it only becomes available after start()
			sqlQueryExecutor.init(new SQLQueryExecutorInitContext<K, V>() {
				@Override
				public DataSource getDataSource() {
					return dataSource;
				}
				
				
				@Override
				public SQLRecordTransformer<K, V> getSQLRecordTransformer() {
					return sqlRecordTransformer;
				}
				
				
				@Override
				public SQLDialect getSQLDialect() {
					return config.getSQLDialect();
				}
			});
		}

		marshalledEntryFactory = (MarshalledEntryFactory<K, V>)ctx.getMarshalledEntryFactory();

		Loggers.MAIN_LOG.info("[IS0102] Initialized SQL external store for cache {} with table {}",
			getCacheName(),
			sqlRecordTransformer.getTableName());
	}
229        
230        
231        /**
232         * Returns the underlying SQL record transformer.
233         *
234         * @return The SQL record transformer, {@code null} if not initialised.
235         */
236        public SQLRecordTransformer<K, V> getSQLRecordTransformer() {
237                return sqlRecordTransformer;
238        }
239        
240        
241        @Override
242        public QueryExecutor<K, V> getQueryExecutor() {
243                
244                return sqlQueryExecutor;
245        }
246        
247        
248        @Override
249        public void start() {
250
251                // This method will be invoked by the PersistenceManager to start the CacheLoader. At this stage configuration
252                // is complete and the loader can perform operations such as opening a connection to the external storage,
253                // initialize internal data structures, etc.
254                
255                Properties hikariProps = HikariConfigUtils.removeNonHikariProperties(config.properties());
256                HikariPoolName poolName = HikariPoolName.setDefaultPoolName(hikariProps, getCacheName());
257                
258                HikariConfig hikariConfig = new HikariConfig(hikariProps);
259                
260                MetricRegistry metricRegistry = MonitorRegistries.getMetricRegistry();
261                if (HikariConfigUtils.metricsAlreadyRegistered(poolName, metricRegistry)) {
262                        Loggers.MAIN_LOG.warn("[IS0130] SQL store: Couldn't register Dropwizard metrics: Existing registered metrics for " + getCacheName());
263                } else {
264                        hikariConfig.setMetricRegistry(metricRegistry);
265                }
266                
267                HealthCheckRegistry healthCheckRegistry = MonitorRegistries.getHealthCheckRegistry();
268                if (HikariConfigUtils.healthChecksAlreadyRegistered(poolName, healthCheckRegistry)) {
269                        Loggers.MAIN_LOG.warn("[IS0131] SQL store: Couldn't register Dropwizard health checks: Existing registered health checks for " + getCacheName());
270                } else {
271                        hikariConfig.setHealthCheckRegistry(healthCheckRegistry);
272                }
273                
274                dataSource = new HikariDataSource(hikariConfig);
275                
276                // Init jOOQ SQL context
277                Settings jooqSettings = new Settings();
278                if (config.getSQLDialect().equals(SQLDialect.H2)) {
279                        // Quoted column names occasionally cause problems in H2
280                        jooqSettings.setRenderNameStyle(RenderNameStyle.AS_IS);
281                }
282                sql = DSL.using(dataSource, config.getSQLDialect(), jooqSettings);
283                
284                if (config.createTableIfMissing()) {
285                        try {
286                                int rows = sql.execute(sqlRecordTransformer.getCreateTableStatement());
287                                
288                                if (rows > 0) {
289                                        Loggers.MAIN_LOG.info("[IS0129] SQL store: Created table {} for cache {}", sqlRecordTransformer.getTableName(), getCacheName());
290                                }
291                                
292                        } catch (Exception e) {
293                                Loggers.MAIN_LOG.fatal("[IS0103] SQL store: Create table if not exists failed: {}: e", e.getMessage(), e);
294                                throw new PersistenceException(e.getMessage(), e);
295                        }
296                } else {
297                        Loggers.MAIN_LOG.info("[IS0132] SQL store: Skipped create table if missing step");
298                }
299
300                Loggers.MAIN_LOG.info("[IS0104] Started SQL external store connector for cache {} with table {}", getCacheName(), sqlRecordTransformer.getTableName());
301
302                reaper = new ExpiredEntryReaper<>(sqlRecordTransformer);
303        }
304
305
306        @Override
307        public void stop() {
308
309                super.stop();
310                
311                dataSource.close();
312                
313                Loggers.MAIN_LOG.info("[IS0105] Stopped SQL store connector for cache {}",  getCacheName());
314        }
315
316
317        @SuppressWarnings("unchecked")
318        private K resolveKey(final Object key) {
319
320                if (key instanceof byte[]) {
321                        throw new PersistenceException("Cannot resolve " + getCacheName() + " cache key from byte[], enable compatibility mode");
322                }
323
324                return (K)key;
325        }
326
327
328        @Override
329        public boolean contains(final Object key) {
330
331                // This method will be invoked by the PersistenceManager to determine if the loader contains the specified key.
332                // The implementation should be as fast as possible, e.g. it should strive to transfer the least amount of data possible
333                // from the external storage to perform the check. Also, if possible, make sure the field is indexed on the external storage
334                // so that its existence can be determined as quickly as possible.
335                //
336                // Note that keys will be in the cache's native format, which means that if the cache is being used by a remoting protocol
337                // such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
338
339                Loggers.SQL_LOG.trace("[IS0106] SQL store: Checking {} cache key {}", getCacheName(), key);
340                
341                try {
342                        return sql.selectOne()
343                                .from(table(sqlRecordTransformer.getTableName()))
344                                .where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
345                                .fetchOne() != null;
346                        
347                } catch (Exception e) {
348                        Loggers.SQL_LOG.error("[IS0107] {}: {}", e.getMessage(), e);
349                        throw new PersistenceException(e.getMessage(), e);
350                }
351        }
352
353
354        @Override
355        @SuppressWarnings("unchecked")
356        public MarshalledEntry<K,V> load(final Object key) {
357
358                // Fetches an entry from the storage using the specified key. The CacheLoader should retrieve from the external storage all
359                // of the data that is needed to reconstruct the entry in memory, i.e. the value and optionally the metadata. This method
360                // needs to return a MarshalledEntry which can be constructed as follows:
361                //
362                // ctx.getMarshalledEntryFactory().new MarshalledEntry(key, value, metadata);
363                //
364                // If the entry does not exist or has expired, this method should return null.
365                // If an error occurs while retrieving data from the external storage, this method should throw a PersistenceException
366                //
367                // Note that keys and values will be in the cache's native format, which means that if the cache is being used by a remoting protocol
368                // such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
369                // If the loader needs to have knowledge of the key/value data beyond their binary representation, then it needs access to the key's and value's
370                // classes and the marshaller used to encode them.
371
372                Loggers.SQL_LOG.trace("[IS0108] SQL store: Loading {} cache entry with key {}", getCacheName(), key);
373                
374                final Record record;
375                
376                try {
377                        record = sql.selectFrom(table(sqlRecordTransformer.getTableName()))
378                                .where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
379                                .fetchOne();
380                        
381                } catch (Exception e) {
382                        Loggers.SQL_LOG.error("[IS0109] {}, {}", e.getMessage(), e);
383                        throw new PersistenceException(e.getMessage(), e);
384                }
385                
386                if (record == null) {
387                        // Not found
388                        Loggers.SQL_LOG.trace("[IS0110] SQL store: Record with key {} not found", key);
389                        return null;
390                }
391                
392                if (Loggers.SQL_LOG.isTraceEnabled()) {
393                        Loggers.SQL_LOG.trace("[IS0111] SQL store: Retrieved record: {}", record);
394                }
395
396                // Transform SQLg entry to Infinispan entry
397                InfinispanEntry<K,V> infinispanEntry = sqlRecordTransformer.toInfinispanEntry(record);
398
399                return marshalledEntryFactory.newMarshalledEntry(
400                        infinispanEntry.getKey(),
401                        infinispanEntry.getValue(),
402                        infinispanEntry.getMetadata());
403        }
404
405
406        @Override
407        public boolean delete(final Object key) {
408
409                // The CacheWriter should remove from the external storage the entry identified by the specified key.
410                // Note that keys will be in the cache's native format, which means that if the cache is being used by a remoting protocol
411                // such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
412
413                Loggers.SQL_LOG.trace("[IS0112] SQL store: Deleting {} cache entry with key {}", getCacheName(), key);
414                
415                int deletedRows;
416                
417                try {
418                        deletedRows = sql.deleteFrom(table(sqlRecordTransformer.getTableName()))
419                                .where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
420                                .execute();
421                        
422                } catch (Exception e) {
423                        Loggers.SQL_LOG.error("[IS0113] {}, {}", e.getMessage(), e);
424                        throw new PersistenceException(e.getMessage(), e);
425                }
426                
427                Loggers.SQL_LOG.trace("[IS0113] SQL store: Deleted {} record with key {}", deletedRows, key);
428                
429                if (deletedRows == 1) {
430                        return true;
431                } else if (deletedRows == 0) {
432                        return false;
433                } else {
434                        Loggers.SQL_LOG.error("[IS0114] Too many deleted rows ({}) for key {}", deletedRows, key);
435                        throw new PersistenceException("Too many deleted rows for key " + key);
436                }
437        }
438
439
	/**
	 * Writes (inserts or updates) the specified entry to the SQL store
	 * via a synthetic H2-style MERGE statement which jOOQ translates for
	 * the configured SQL dialect.
	 */
	@Override
	public void write(final MarshalledEntry<? extends K, ? extends V> marshalledEntry) {

		// The CacheWriter should write the specified entry to the external storage.
		//
		// The PersistenceManager uses MarshalledEntry as the default format so that CacheWriters can efficiently store data coming
		// from a remote node, thus avoiding any additional transformation steps.
		//
		// Note that keys and values will be in the cache's native format, which means that if the cache is being used by a remoting protocol
		// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].

		Loggers.SQL_LOG.trace("[IS0115] SQL store: Writing {} cache entry {}", getCacheName(), marshalledEntry);
		
		try {
			// Map the Infinispan entry to an SQL record (columns + key columns)
			SQLRecord sqlRecord = sqlRecordTransformer.toSQLRecord(
				new InfinispanEntry<>(
					marshalledEntry.getKey(),
					marshalledEntry.getValue(),
					marshalledEntry.getMetadata()));
			
			// Use H2 style MERGE, JOOQ will adapt it for the particular database
			// http://www.jooq.org/doc/3.8/manual/sql-building/sql-statements/merge-statement/
			Merge mergeStatement = sql.mergeInto(table(sqlRecordTransformer.getTableName()), sqlRecord.getFields().keySet())
				.key(sqlRecord.getKeyColumns())
				.values(sqlRecord.getFields().values());
			
			// Apply dialect-specific rewrites to the rendered MERGE
			String sqlStatement = jooqFixes.fixMergeStatement(mergeStatement);
			
			int rows = sql.execute(sqlStatement);
				
			// Exactly one affected row is the expected outcome
			if (rows != 1) {
				
				if (SQLDialect.MYSQL.equals(config.getSQLDialect()) && rows == 2) {
					// MySQL indicates UPDATE on INSERT by returning 2 num rows
					return;
				}
				
				Loggers.SQL_LOG.error("[IS0116] SQL insert / update for key {} in table {} failed: Rows {}",
					marshalledEntry.getKey(),sqlRecordTransformer.getTableName(),  rows);
				throw new PersistenceException("(Synthetic) SQL MERGE failed: Rows " + rows);
			}
			
		} catch (Exception e) {
			Loggers.SQL_LOG.error("[IS0117] {}: {}", e.getMessage(), e);
			throw new PersistenceException(e.getMessage(), e);
		}
	}
487
488
489        @Override
490        public void process(final KeyFilter<? super K> keyFilter,
491                            final CacheLoaderTask<K, V> cacheLoaderTask,
492                            final Executor executor,
493                            final boolean fetchValue,
494                            final boolean fetchMetadata) {
495
496                Loggers.SQL_LOG.trace("[IS0118] SQL store: Processing key filter for {} cache: fetchValue={} fetchMetadata=",
497                        getCacheName(), fetchValue, fetchMetadata);
498
499                final TaskContext taskContext = new TaskContextImpl();
500
501                // TODO consider multi-threaded SQL retrieval?
502                executor.execute(() -> {
503                        
504                        try {
505                                // Retrieves entire entry, fetchValue / fetchMetadata params are ignored TODO reconsider
506                                sql.selectFrom(table(sqlRecordTransformer.getTableName()))
507                                        .fetch()
508                                        .forEach(record -> {
509                                        
510                                        if (taskContext.isStopped()) {
511                                                return;
512                                        }
513                                        
514                                        InfinispanEntry<K, V> infinispanEntry = sqlRecordTransformer.toInfinispanEntry(record);
515                                        
516                                        if (keyFilter.accept(infinispanEntry.getKey())) {
517                                                
518                                                MarshalledEntry<K, V> marshalledEntry = marshalledEntryFactory.newMarshalledEntry(
519                                                        infinispanEntry.getKey(),
520                                                        infinispanEntry.getValue(),
521                                                        infinispanEntry.getMetadata());
522                                                
523                                                try {
524                                                        cacheLoaderTask.processEntry(marshalledEntry, taskContext);
525                                                        
526                                                } catch (InterruptedException e) {
527                                                        throw new PersistenceException(e.getMessage(), e);
528                                                }
529                                        }
530                                });
531                                
532                        } catch (Exception e) {
533                                Loggers.SQL_LOG.error("[IS0119] {}: {}", e.getMessage(), e);
534                                throw new PersistenceException(e.getMessage(), e);
535                        }
536                });
537        }
538
539
540        @Override
541        public int size() {
542
543                // Infinispan code analysis on 8.2 shows that this method is never called in practice, and
544                // is not wired to the data / cache container API
545
546                Loggers.SQL_LOG.trace("[IS0120] SQL store: Counting {} records", getCacheName());
547
548                final int count;
549                
550                try {
551                        count = sql.fetchCount(table(sqlRecordTransformer.getTableName()));
552                        
553                } catch (Exception e) {
554                        Loggers.SQL_LOG.error("[IS0121] {}: {}", e.getMessage(), e);
555                        throw new PersistenceException(e.getMessage(), e);
556                }
557
558                Loggers.SQL_LOG.trace("[IS0122] SQL store: Counted {} {} records", count, getCacheName());
559
560                return count;
561        }
562
563
564        @Override
565        public void clear() {
566
567                Loggers.SQL_LOG.trace("[IS0123] SQL store: Clearing {} records", getCacheName());
568
569                int numDeleted;
570                
571                try {
572                        numDeleted = sql.deleteFrom(table(sqlRecordTransformer.getTableName())).execute();
573                        
574                } catch (Exception e) {
575                        Loggers.SQL_LOG.error("[IS0124] {}: {}", e.getMessage(), e);
576                        throw new PersistenceException(e.getMessage(), e);
577                }
578
579                Loggers.SQL_LOG.info("[IS0125] SQL store: Cleared {} {} records", numDeleted, sqlRecordTransformer.getTableName());
580        }
581
582
583        @Override
584        public void purge(final Executor executor, final PurgeListener<? super K> purgeListener) {
585
586                Loggers.SQL_LOG.trace("[IS0126] SQL store: Purging {} cache entries", getCacheName());
587
588                final AtomicInteger numPurged = new AtomicInteger();
589                
590                try {
591                        executor.execute(() -> numPurged.set(reaper.purge(sql, purgeListener)));
592                        
593                } catch (Exception e) {
594                        Loggers.SQL_LOG.error("[IS0127] {}: {}", e.getMessage(), e);
595                        throw new PersistenceException("Purge exception: " + e.getMessage(), e);
596                }
597
598                Loggers.SQL_LOG.debug("[IS0128] SQL store: Purged {} expired {} cache entries", numPurged.get(), getCacheName());
599        }
600}