001package com.nimbusds.infinispan.persistence.sql;
002
003
004import java.sql.Connection;
005import java.util.Properties;
006import java.util.concurrent.Executor;
007import java.util.concurrent.atomic.AtomicInteger;
008import javax.sql.DataSource;
009
010import static org.jooq.impl.DSL.table;
011
012import com.nimbusds.infinispan.persistence.common.InfinispanEntry;
013import com.nimbusds.infinispan.persistence.common.InfinispanStore;
014import com.zaxxer.hikari.HikariConfig;
015import com.zaxxer.hikari.HikariDataSource;
016import net.jcip.annotations.ThreadSafe;
017import org.infinispan.commons.configuration.ConfiguredBy;
018import org.infinispan.filter.KeyFilter;
019import org.infinispan.marshall.core.MarshalledEntry;
020import org.infinispan.marshall.core.MarshalledEntryFactory;
021import org.infinispan.persistence.TaskContextImpl;
022import org.infinispan.persistence.spi.InitializationContext;
023import org.infinispan.persistence.spi.PersistenceException;
024import org.jooq.DSLContext;
025import org.jooq.Record;
026import org.jooq.SQLDialect;
027import org.jooq.impl.DSL;
028import org.kohsuke.MetaInfServices;
029
030
031/**
032 * SQL store for Infinispan 8.2+ caches and maps.
033 */
034@ThreadSafe
035@MetaInfServices
036@ConfiguredBy(SQLStoreConfiguration.class)
037public class SQLStore<K,V> extends InfinispanStore<K,V> {
038
039
040        /**
041         * The SQL store configuration.
042         */
043        private SQLStoreConfiguration config;
044        
045        
046        /**
047         * The SQL (JDBC) data source (with connection pool).
048         */
049        private DataSource dataSource;
050
051
052        /**
053         * The SQL record transformer (to / from Infinispan entries).
054         */
055        private SQLRecordTransformer<K,V> sqlRecordTransformer;
056
057
058        /**
059         * The marshalled Infinispan entry factory.
060         */
061        private MarshalledEntryFactory<K, V> marshalledEntryFactory;
062
063
064        /**
065         * Purges expired entries found in the SQL store, as indicated by
066         * their persisted metadata (optional, may be ignored / not stored).
067         */
068        private ExpiredEntryReaper<K,V> reaper;
069
070
071        /**
072         * Loads an SQL record transformer with the specified class name.
073         *
074         * @param className The class name. Must not be {@code null}.
075         *
076         * @return The SQL entry transformer.
077         */
078        @SuppressWarnings( "unchecked" )
079        private SQLRecordTransformer<K,V> loadRecordTransformerClass(final String className) {
080
081                try {
082                        Class<SQLRecordTransformer<K,V>> clazz = (Class<SQLRecordTransformer<K,V>>)Class.forName(className);
083                        return clazz.newInstance();
084                } catch (Exception e) {
085                        throw new PersistenceException("Couldn't load SQL record transformer class: " + e.getMessage(), e);
086                }
087        }
088        
089        
090        /**
091         * Returns the SQL store configuration.
092         *
093         * @return The SQL store configuration, {@code null} if not
094         *         initialised.
095         */
096        public SQLStoreConfiguration getConfiguration() {
097                
098                return config;
099        }
100        
101        
102        /**
103         * Returns the underlying SQL data source.
104         *
105         * @return The underlying SQL data source, {@code null} if not
106         *         initialised.
107         */
108        public DataSource getDataSource() {
109                
110                return dataSource;
111        }
112
113
	@Override
	@SuppressWarnings("unchecked")
	public void init(final InitializationContext ctx) {

		// Invoked by the PersistenceManager during initialisation. The
		// InitializationContext supplies:
		// - this store's configuration
		// - the cache to which this store is applied (its name is used in
		//   the log output below)
		// - the StreamingMarshaller for marshalling / unmarshalling entries
		// - a TimeService for determining expired entries
		// - a ByteBufferFactory for constructing ByteBuffers
		// - a MarshalledEntryFactory for constructing entries from the
		//   loaded data (captured below for use in load / process)

		super.init(ctx);
		
		this.config = ctx.getConfiguration();

		Loggers.MAIN_LOG.info("[IS0100] Infinispan SQL store configuration for cache {}:", getCacheName());
		config.log();

		Loggers.MAIN_LOG.debug("[IS0101] Loading SQL record transformer class {} for cache {}...",
			config.recordTransformer,
			getCacheName());
		
		// Load and initialise the SQL record transformer
		sqlRecordTransformer = loadRecordTransformerClass(config.recordTransformer);
		sqlRecordTransformer.init(() -> config.sqlDialect);

		marshalledEntryFactory = (MarshalledEntryFactory<K, V>)ctx.getMarshalledEntryFactory();

		Loggers.MAIN_LOG.info("[IS0102] Initialized SQL external store for cache {} with table {}",
			getCacheName(),
			sqlRecordTransformer.getTableName());
	}
149
150
151        @Override
152        public void start() {
153
154                // This method will be invoked by the PersistenceManager to start the CacheLoader. At this stage configuration
155                // is complete and the loader can perform operations such as opening a connection to the external storage,
156                // initialize internal data structures, etc.
157                
158                Properties hikariProps = new Properties();
159                
160                for (Object key: config.properties().keySet()) {
161                        if (key.toString().startsWith("sqlStore.")) {
162                                continue; // skip
163                        }
164                        // Skip non-Hikari props, else Hikari will throw config exception
165                        hikariProps.put(key, config.properties().get(key));
166                }
167                
168                dataSource = new HikariDataSource(new HikariConfig(hikariProps));
169                
170                // Create table if missing
171                try (Connection con = dataSource.getConnection()) {
172                        
173                        int rows = DSL.using(con, config.sqlDialect).execute(sqlRecordTransformer.getCreateTableStatement());
174                        
175                        if (rows > 0) {
176                                Loggers.MAIN_LOG.info("[IS0129] SQL store: Created table {} for cache {}", sqlRecordTransformer.getTableName(), getCacheName());
177                        }
178                        
179                } catch (Exception e) {
180                        Loggers.MAIN_LOG.fatal("[IS0103] SQL store: Create table if not exists failed: {}: e", e.getMessage(), e);
181                        throw new PersistenceException(e.getMessage(), e);
182                }
183
184                Loggers.MAIN_LOG.info("[IS0104] Started SQL external store connector for cache {} with table {}", getCacheName(), sqlRecordTransformer.getTableName());
185
186                reaper = new ExpiredEntryReaper<>(sqlRecordTransformer);
187        }
188
189
190        @Override
191        public void stop() {
192
193                super.stop();
194                
195                // DataSource has no explicit close method
196                Loggers.MAIN_LOG.info("[IS0105] Stopped SQL store connector for cache {}",  getCacheName());
197        }
198
199
200        @SuppressWarnings("unchecked")
201        private K resolveKey(final Object key) {
202
203                if (key instanceof byte[]) {
204                        throw new PersistenceException("Cannot resolve " + getCacheName() + " cache key from byte[], enable compatibility mode");
205                }
206
207                return (K)key;
208        }
209
210
211        @Override
212        public boolean contains(final Object key) {
213
214                // This method will be invoked by the PersistenceManager to determine if the loader contains the specified key.
215                // The implementation should be as fast as possible, e.g. it should strive to transfer the least amount of data possible
216                // from the external storage to perform the check. Also, if possible, make sure the field is indexed on the external storage
217                // so that its existence can be determined as quickly as possible.
218                //
219                // Note that keys will be in the cache's native format, which means that if the cache is being used by a remoting protocol
220                // such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
221
222                Loggers.SQL_LOG.trace("[IS0106] SQL store: Checking {} cache key {}", getCacheName(), key);
223                
224                try (Connection connection = dataSource.getConnection()) {
225                        
226                        return DSL.using(connection, config.sqlDialect)
227                                .selectOne()
228                                .from(table(sqlRecordTransformer.getTableName()))
229                                .where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
230                                .fetchOne() != null;
231                        
232                } catch (Exception e) {
233                        Loggers.SQL_LOG.error("[IS0107] {}: {}", e.getMessage(), e);
234                        throw new PersistenceException(e.getMessage(), e);
235                }
236        }
237
238
	@Override
	@SuppressWarnings("unchecked")
	public MarshalledEntry<K,V> load(final Object key) {

		// Fetches an entry from the storage using the specified key. The CacheLoader should retrieve from the external storage all
		// of the data that is needed to reconstruct the entry in memory, i.e. the value and optionally the metadata. This method
		// needs to return a MarshalledEntry which can be constructed as follows:
		//
		// ctx.getMarshalledEntryFactory().new MarshalledEntry(key, value, metadata);
		//
		// If the entry does not exist or has expired, this method should return null.
		// If an error occurs while retrieving data from the external storage, this method should throw a PersistenceException
		//
		// Note that keys and values will be in the cache's native format, which means that if the cache is being used by a remoting protocol
		// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
		// If the loader needs to have knowledge of the key/value data beyond their binary representation, then it needs access to the key's and value's
		// classes and the marshaller used to encode them.

		Loggers.SQL_LOG.trace("[IS0108] SQL store: Loading {} cache entry with key {}", getCacheName(), key);
		
		final Record record;
		
		try (Connection connection = dataSource.getConnection()) {
			
			// Single-row fetch by the transformer-resolved key condition(s)
			record = DSL.using(connection, config.sqlDialect)
				.selectFrom(table(sqlRecordTransformer.getTableName()))
				.where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
				.fetchOne();
			
		} catch (Exception e) {
			Loggers.SQL_LOG.error("[IS0109] {}, {}", e.getMessage(), e);
			throw new PersistenceException(e.getMessage(), e);
		}
		
		if (record == null) {
			// Not found
			Loggers.SQL_LOG.trace("[IS0110] SQL store: Record with key {} not found", key);
			return null;
		}
		
		if (Loggers.SQL_LOG.isTraceEnabled()) {
			Loggers.SQL_LOG.trace("[IS0111] SQL store: Retrieved record: {}", record);
		}

		// Transform SQL record to Infinispan entry
		InfinispanEntry<K,V> infinispanEntry = sqlRecordTransformer.toInfinispanEntry(record);

		return marshalledEntryFactory.newMarshalledEntry(
			infinispanEntry.getKey(),
			infinispanEntry.getValue(),
			infinispanEntry.getMetadata());
	}
291
292
293        @Override
294        public boolean delete(final Object key) {
295
296                // The CacheWriter should remove from the external storage the entry identified by the specified key.
297                // Note that keys will be in the cache's native format, which means that if the cache is being used by a remoting protocol
298                // such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
299
300                Loggers.SQL_LOG.trace("[IS0112] SQL store: Deleting {} cache entry with key {}", getCacheName(), key);
301                
302                int deletedRows;
303                
304                try (Connection connection = dataSource.getConnection()) {
305                        
306                        deletedRows = DSL.using(connection, config.sqlDialect)
307                                .deleteFrom(table(sqlRecordTransformer.getTableName()))
308                                .where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
309                                .execute();
310                        
311                } catch (Exception e) {
312                        Loggers.SQL_LOG.error("[IS0113] {}, {}", e.getMessage(), e);
313                        throw new PersistenceException(e.getMessage(), e);
314                }
315                
316                Loggers.SQL_LOG.trace("[IS0113] SQL store: Deleted {} record with key {}", deletedRows, key);
317                
318                if (deletedRows == 1) {
319                        return true;
320                } else if (deletedRows == 0) {
321                        return false;
322                } else {
323                        Loggers.SQL_LOG.error("[IS0114] Too many deleted rows ({}) for key {}", deletedRows, key);
324                        throw new PersistenceException("Too many deleted rows for key " + key);
325                }
326        }
327
328
	@Override
	public void write(final MarshalledEntry<? extends K, ? extends V> marshalledEntry) {

		// The CacheWriter should write the specified entry to the external storage.
		//
		// The PersistenceManager uses MarshalledEntry as the default format so that CacheWriters can efficiently store data coming
		// from a remote node, thus avoiding any additional transformation steps.
		//
		// Note that keys and values will be in the cache's native format, which means that if the cache is being used by a remoting protocol
		// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].

		Loggers.SQL_LOG.trace("[IS0115] SQL store: Writing {} cache entry {}", getCacheName(), marshalledEntry);
		
		try (Connection connection = dataSource.getConnection()) {
			
			// Map the Infinispan entry to its SQL column / value form
			SQLRecord sqlRecord = sqlRecordTransformer.toSQLRecord(
				new InfinispanEntry<>(
					marshalledEntry.getKey(),
					marshalledEntry.getValue(),
					marshalledEntry.getMetadata()));
			
			// Use H2 style MERGE, JOOQ will adapt it for the particular database
			// http://www.jooq.org/doc/3.8/manual/sql-building/sql-statements/merge-statement/
			
			int rows = DSL.using(connection, config.sqlDialect)
				.mergeInto(table(sqlRecordTransformer.getTableName()), sqlRecord.getFields().keySet())
				.key(sqlRecord.getKeyColumns())
				.values(sqlRecord.getFields().values())
				.execute();
				
			// A MERGE against a single key should report exactly one
			// affected row
			if (rows != 1) {
				
				if (SQLDialect.MYSQL.equals(config.sqlDialect) && rows == 2) {
					// MySQL indicates UPDATE on INSERT by returning 2 num rows
					return;
				}
				
				Loggers.SQL_LOG.error("[IS0116] SQL insert / update for key {} in table {} failed: Rows {}",
					marshalledEntry.getKey(),sqlRecordTransformer.getTableName(),  rows);
				throw new PersistenceException("(Synthetic) SQL MERGE failed: Rows " + rows);
			}
			
		} catch (Exception e) {
			Loggers.SQL_LOG.error("[IS0117] {}: {}", e.getMessage(), e);
			throw new PersistenceException(e.getMessage(), e);
		}
	}
376
377
378        @Override
379        public void process(final KeyFilter<? super K> keyFilter,
380                            final CacheLoaderTask<K, V> cacheLoaderTask,
381                            final Executor executor,
382                            final boolean fetchValue,
383                            final boolean fetchMetadata) {
384
385                Loggers.SQL_LOG.trace("[IS0118] SQL store: Processing key filter for {} cache: fetchValue={} fetchMetadata=",
386                        getCacheName(), fetchValue, fetchMetadata);
387
388                final TaskContext taskContext = new TaskContextImpl();
389
390                // TODO consider multi-threaded SQL retrieval?
391                executor.execute(() -> {
392                        
393                        try (Connection connection = dataSource.getConnection()){
394                                
395                                // Retrieves entire entry, fetchValue / fetchMetadata params are ignored TODO reconsider
396                                
397                                DSL.using(connection, config.sqlDialect)
398                                        .selectFrom(table(sqlRecordTransformer.getTableName()))
399                                        .fetch()
400                                        .forEach(record -> {
401                                        
402                                        if (taskContext.isStopped()) {
403                                                return;
404                                        }
405                                        
406                                        InfinispanEntry<K, V> infinispanEntry = sqlRecordTransformer.toInfinispanEntry(record);
407                                        
408                                        if (keyFilter.accept(infinispanEntry.getKey())) {
409                                                
410                                                MarshalledEntry<K, V> marshalledEntry = marshalledEntryFactory.newMarshalledEntry(
411                                                        infinispanEntry.getKey(),
412                                                        infinispanEntry.getValue(),
413                                                        infinispanEntry.getMetadata());
414                                                
415                                                try {
416                                                        cacheLoaderTask.processEntry(marshalledEntry, taskContext);
417                                                        
418                                                } catch (InterruptedException e) {
419                                                        throw new PersistenceException(e.getMessage(), e);
420                                                }
421                                        }
422                                });
423                                
424                        } catch (Exception e) {
425                                Loggers.SQL_LOG.error("[IS0119] {}: {}", e.getMessage(), e);
426                                throw new PersistenceException(e.getMessage(), e);
427                        }
428                });
429        }
430
431
432        @Override
433        public int size() {
434
435                // Infinispan code analysis on 8.2 shows that this method is never called in practice, and
436                // is not wired to the data / cache container API
437
438                Loggers.SQL_LOG.trace("[IS0120] SQL store: Counting {} records", getCacheName());
439
440                final int count;
441                
442                try (Connection connection = dataSource.getConnection()) {
443                        
444                        count = DSL.using(connection, config.sqlDialect)
445                                .fetchCount(table(sqlRecordTransformer.getTableName()));
446                        
447                } catch (Exception e) {
448                        Loggers.SQL_LOG.error("[IS0121] {}: {}", e.getMessage(), e);
449                        throw new PersistenceException(e.getMessage(), e);
450                }
451
452                Loggers.SQL_LOG.trace("[IS0122] SQL store: Counted {} {} records", count, getCacheName());
453
454                return count;
455        }
456
457
458        @Override
459        public void clear() {
460
461                Loggers.SQL_LOG.trace("[IS0123] SQL store: Clearing {} records", getCacheName());
462
463                int numDeleted;
464                
465                try (Connection connection = dataSource.getConnection()) {
466                        
467                        numDeleted = DSL.using(connection, config.sqlDialect)
468                                .deleteFrom(table(sqlRecordTransformer.getTableName()))
469                                .execute();
470                        
471                } catch (Exception e) {
472                        Loggers.SQL_LOG.error("[IS0124] {}: {}", e.getMessage(), e);
473                        throw new PersistenceException(e.getMessage(), e);
474                }
475
476                Loggers.SQL_LOG.info("[IS0125] SQL store: Cleared {} {} records", numDeleted, sqlRecordTransformer.getTableName());
477        }
478
479
480        @Override
481        public void purge(final Executor executor, final PurgeListener<? super K> purgeListener) {
482
483                Loggers.SQL_LOG.trace("[IS0126] SQL store: Purging {} cache entries", getCacheName());
484
485                final AtomicInteger numPurged = new AtomicInteger();
486                
487                try (Connection connection = dataSource.getConnection()) {
488                
489                        DSLContext dsl = DSL.using(connection, config.sqlDialect);
490                        
491                        executor.execute(() -> numPurged.set(reaper.purge(dsl, purgeListener)));
492                        
493                } catch (Exception e) {
494                        Loggers.SQL_LOG.error("[IS0127] {}: {}", e.getMessage(), e);
495                        throw new PersistenceException("Purge exception: " + e.getMessage(), e);
496                }
497
498                Loggers.SQL_LOG.debug("[IS0128] SQL store: Purged {} expired {} cache entries", numPurged.get(), getCacheName());
499        }
500}