[1/2] activemq git commit: [AMQ-6652] add final modifier to test

Previous Topic Next Topic
 
classic Classic list List threaded Threaded
2 messages Options
Reply | Threaded
Open this post in threaded view
|  
Report Content as Inappropriate

[1/2] activemq git commit: [AMQ-6652] add final modifier to test

cshannon
Repository: activemq
Updated Branches:
  refs/heads/activemq-5.14.x 32a4d7449 -> b8a7aa80e


[AMQ-6652] add final modifier to test

(cherry picked from commit af03ad467987fad7f1ee2d8382ed0acd17432a9b)


Project: http://git-wip-us.apache.org/repos/asf/activemq/repo
Commit: http://git-wip-us.apache.org/repos/asf/activemq/commit/b8a7aa80
Tree: http://git-wip-us.apache.org/repos/asf/activemq/tree/b8a7aa80
Diff: http://git-wip-us.apache.org/repos/asf/activemq/diff/b8a7aa80

Branch: refs/heads/activemq-5.14.x
Commit: b8a7aa80e524ee6d5d566b1236fd8431aa27c0d9
Parents: f3a3476
Author: gtully <[hidden email]>
Authored: Tue Apr 11 11:39:22 2017 +0100
Committer: Christopher L. Shannon (cshannon) <[hidden email]>
Committed: Tue Apr 11 10:27:54 2017 -0400

----------------------------------------------------------------------
 .../activemq/store/kahadb/JournalMetaDataCheckpointTest.java       | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/activemq/blob/b8a7aa80/activemq-kahadb-store/src/test/java/org/apache/activemq/store/kahadb/JournalMetaDataCheckpointTest.java
----------------------------------------------------------------------
diff --git a/activemq-kahadb-store/src/test/java/org/apache/activemq/store/kahadb/JournalMetaDataCheckpointTest.java b/activemq-kahadb-store/src/test/java/org/apache/activemq/store/kahadb/JournalMetaDataCheckpointTest.java
index 9230ca5..bfe2244 100644
--- a/activemq-kahadb-store/src/test/java/org/apache/activemq/store/kahadb/JournalMetaDataCheckpointTest.java
+++ b/activemq-kahadb-store/src/test/java/org/apache/activemq/store/kahadb/JournalMetaDataCheckpointTest.java
@@ -142,7 +142,7 @@ public class JournalMetaDataCheckpointTest {
 
         // verify metadata is correct on disk
         final MessageDatabase.Metadata[] fromDiskMetaData = new MessageDatabase.Metadata[1];
-        KahaDBStore messageStore = ((KahaDBPersistenceAdapter) broker.getPersistenceAdapter()).getStore();
+        final KahaDBStore messageStore = ((KahaDBPersistenceAdapter) broker.getPersistenceAdapter()).getStore();
 
         // need to avoid cache and in-progress writes of existing pageFile
         PageFile fromDiskPageFile = new PageFile(messageStore.getIndexDirectory(), "db");

Reply | Threaded
Open this post in threaded view
|  
Report Content as Inappropriate

[2/2] activemq git commit: [AMQ-6652] ensure checkpoint pagefile flush includes the checkpoint transaction

cshannon
[AMQ-6652] ensure checkpoint pagefile flush includes the checkpoint transaction

(cherry picked from commit ca5e41bb7ab740207795d27ca341634f983f375e)


Project: http://git-wip-us.apache.org/repos/asf/activemq/repo
Commit: http://git-wip-us.apache.org/repos/asf/activemq/commit/f3a34766
Tree: http://git-wip-us.apache.org/repos/asf/activemq/tree/f3a34766
Diff: http://git-wip-us.apache.org/repos/asf/activemq/diff/f3a34766

Branch: refs/heads/activemq-5.14.x
Commit: f3a34766a0bffe4db19988044c79fb6ecdf92fb9
Parents: 32a4d74
Author: gtully <[hidden email]>
Authored: Mon Apr 10 17:15:07 2017 +0100
Committer: Christopher L. Shannon (cshannon) <[hidden email]>
Committed: Tue Apr 11 10:27:54 2017 -0400

----------------------------------------------------------------------
 .../activemq/store/kahadb/MessageDatabase.java  |   2 +-
 .../kahadb/JournalMetaDataCheckpointTest.java   | 219 +++++++++++++++++++
 2 files changed, 220 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/activemq/blob/f3a34766/activemq-kahadb-store/src/main/java/org/apache/activemq/store/kahadb/MessageDatabase.java
----------------------------------------------------------------------
diff --git a/activemq-kahadb-store/src/main/java/org/apache/activemq/store/kahadb/MessageDatabase.java b/activemq-kahadb-store/src/main/java/org/apache/activemq/store/kahadb/MessageDatabase.java
index 34509b1..d10c064 100644
--- a/activemq-kahadb-store/src/main/java/org/apache/activemq/store/kahadb/MessageDatabase.java
+++ b/activemq-kahadb-store/src/main/java/org/apache/activemq/store/kahadb/MessageDatabase.java
@@ -1692,6 +1692,7 @@ public abstract class MessageDatabase extends ServiceSupport implements BrokerSe
                         return checkpointUpdate(tx, cleanup);
                     }
                 });
+                pageFile.flush();
                 // after the index update such that partial removal does not leave dangling references in the index.
                 journal.removeDataFiles(filesToGc);
             } finally {
@@ -1720,7 +1721,6 @@ public abstract class MessageDatabase extends ServiceSupport implements BrokerSe
         Location[] inProgressTxRange = getInProgressTxLocationRange();
         metadata.firstInProgressTransactionLocation = inProgressTxRange[0];
         tx.store(metadata.page, metadataMarshaller, true);
-        pageFile.flush();
 
         final TreeSet<Integer> gcCandidateSet = new TreeSet<>();
         if (cleanup) {

http://git-wip-us.apache.org/repos/asf/activemq/blob/f3a34766/activemq-kahadb-store/src/test/java/org/apache/activemq/store/kahadb/JournalMetaDataCheckpointTest.java
----------------------------------------------------------------------
diff --git a/activemq-kahadb-store/src/test/java/org/apache/activemq/store/kahadb/JournalMetaDataCheckpointTest.java b/activemq-kahadb-store/src/test/java/org/apache/activemq/store/kahadb/JournalMetaDataCheckpointTest.java
new file mode 100644
index 0000000..9230ca5
--- /dev/null
+++ b/activemq-kahadb-store/src/test/java/org/apache/activemq/store/kahadb/JournalMetaDataCheckpointTest.java
@@ -0,0 +1,219 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.activemq.store.kahadb;
+
+import org.apache.activemq.ActiveMQConnectionFactory;
+import org.apache.activemq.broker.BrokerService;
+import org.apache.activemq.broker.region.policy.PolicyEntry;
+import org.apache.activemq.broker.region.policy.PolicyMap;
+import org.apache.activemq.command.ActiveMQQueue;
+import org.apache.activemq.store.kahadb.disk.journal.DataFile;
+import org.apache.activemq.store.kahadb.disk.journal.Location;
+import org.apache.activemq.store.kahadb.disk.page.Page;
+import org.apache.activemq.store.kahadb.disk.page.PageFile;
+import org.apache.activemq.store.kahadb.disk.page.Transaction;
+import org.junit.After;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.jms.Connection;
+import javax.jms.Destination;
+import javax.jms.Message;
+import javax.jms.MessageProducer;
+import javax.jms.Session;
+import java.io.IOException;
+import java.util.Collection;
+
+import static org.apache.activemq.store.kahadb.JournalCorruptionEofIndexRecoveryTest.drain;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class JournalMetaDataCheckpointTest {
+
+    private static final Logger LOG = LoggerFactory.getLogger(JournalMetaDataCheckpointTest.class);
+
+    private final String KAHADB_DIRECTORY = "target/activemq-data/";
+    private final String payload = new String(new byte[1024]);
+
+    private BrokerService broker = null;
+    private final Destination destination = new ActiveMQQueue("Test");
+    private KahaDBPersistenceAdapter adapter;
+
+    protected void startBroker() throws Exception {
+        doStartBroker(true);
+    }
+
+    protected void restartBroker() throws Exception {
+        if (broker != null) {
+            broker.stop();
+            broker.waitUntilStopped();
+        }
+
+        doStartBroker(false);
+    }
+
+    private void doStartBroker(boolean delete) throws Exception {
+        doCreateBroker(delete);
+        LOG.info("Starting broker..");
+        broker.start();
+    }
+
+    private void doCreateBroker(boolean delete) throws Exception {
+
+        broker = new BrokerService();
+        broker.setDeleteAllMessagesOnStartup(delete);
+        broker.setPersistent(true);
+        broker.setUseJmx(true);
+        broker.setDataDirectory(KAHADB_DIRECTORY);
+
+        PolicyMap policyMap = new PolicyMap();
+        PolicyEntry policyEntry = new PolicyEntry();
+        policyEntry.setUseCache(false);
+        policyMap.setDefaultEntry(policyEntry);
+        broker.setDestinationPolicy(policyMap);
+
+        configurePersistence(broker);
+    }
+
+    protected void configurePersistence(BrokerService brokerService) throws Exception {
+        adapter = (KahaDBPersistenceAdapter) brokerService.getPersistenceAdapter();
+
+        // ensure there are a bunch of data files but multiple entries in each
+        adapter.setJournalMaxFileLength(1024 * 20);
+
+        // manual cleanup
+        adapter.setCheckpointInterval(0);
+        adapter.setCleanupInterval(0);
+
+        adapter.setCheckForCorruptJournalFiles(true);
+
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        if (broker != null) {
+            broker.stop();
+            broker.waitUntilStopped();
+        }
+    }
+
+    @Test
+    public void testRecoveryOnDeleteFailureMetaDataOk() throws Exception {
+        startBroker();
+
+        int sent = produceMessagesToConsumeMultipleDataFiles(50);
+
+        int numFilesAfterSend = getNumberOfJournalFiles();
+        LOG.info("Sent {}, Num journal files: {} ", sent, numFilesAfterSend);
+
+        assertTrue("more than x files: " + numFilesAfterSend, numFilesAfterSend > 4);
+
+
+        int received = tryConsume(destination, sent/2);
+        assertEquals("all message received", sent/2, received);
+
+
+        int numFilesAfterRestart = getNumberOfJournalFiles();
+        LOG.info("Num journal files before gc: " + numFilesAfterRestart);
+
+        // force gc
+        ((KahaDBPersistenceAdapter) broker.getPersistenceAdapter()).getStore().checkpoint(true);
+
+        int numFilesAfterGC = getNumberOfJournalFiles();
+        assertEquals("all message received", sent/2, received);
+        LOG.info("Num journal files after restart nd gc: " + numFilesAfterGC);
+        assertTrue("Gc has happened", numFilesAfterGC < numFilesAfterRestart);
+
+        // verify metadata is correct on disk
+        final MessageDatabase.Metadata[] fromDiskMetaData = new MessageDatabase.Metadata[1];
+        KahaDBStore messageStore = ((KahaDBPersistenceAdapter) broker.getPersistenceAdapter()).getStore();
+
+        // need to avoid cache and in-progress writes of existing pageFile
+        PageFile fromDiskPageFile = new PageFile(messageStore.getIndexDirectory(), "db");
+        fromDiskPageFile.setEnablePageCaching(false);
+        fromDiskPageFile.setEnableRecoveryFile(false);
+        fromDiskPageFile.load();
+        fromDiskPageFile.tx().execute(new Transaction.Closure<IOException>() {
+            @Override
+            public void execute(Transaction tx) throws IOException {
+                    Page<MessageDatabase.Metadata> page = tx.load(0, messageStore.metadataMarshaller);
+                fromDiskMetaData[0] = page.get();
+            }
+        });
+
+        assertEquals("location is uptodate", messageStore.getMetadata().ackMessageFileMapLocation, fromDiskMetaData[0].ackMessageFileMapLocation);
+    }
+
+    @Ignore("needs work")
+    public void testAckMessageFileMapSyncOnModOnly() throws Exception {
+        startBroker();
+        // force gc
+        ((KahaDBPersistenceAdapter) broker.getPersistenceAdapter()).getStore().checkpoint(true);
+
+        KahaDBStore messageStore = ((KahaDBPersistenceAdapter) broker.getPersistenceAdapter()).getStore();
+
+        Location ackMessageFileModLoc =  messageStore.getMetadata().ackMessageFileMapLocation;
+        // force gc
+        ((KahaDBPersistenceAdapter) broker.getPersistenceAdapter()).getStore().checkpoint(true);
+
+        assertEquals("location is not changed on no modification", ackMessageFileModLoc, messageStore.getMetadata().ackMessageFileMapLocation);
+    }
+
+    private int getNumberOfJournalFiles() throws IOException {
+        Collection<DataFile> files = ((KahaDBPersistenceAdapter) broker.getPersistenceAdapter()).getStore().getJournal().getFileMap().values();
+        int reality = 0;
+        for (DataFile file : files) {
+            if (file != null) {
+                reality++;
+            }
+        }
+        return reality;
+    }
+
+    private int produceMessages(Destination destination, int numToSend) throws Exception {
+        int sent = 0;
+        Connection connection = new ActiveMQConnectionFactory(broker.getVmConnectorURI()).createConnection();
+        connection.start();
+        try {
+            Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
+            MessageProducer producer = session.createProducer(destination);
+            for (int i = 0; i < numToSend; i++) {
+                producer.send(createMessage(session, i));
+                sent++;
+            }
+        } finally {
+            connection.close();
+        }
+
+        return sent;
+    }
+
+    private int tryConsume(Destination destination, int numToGet) throws Exception {
+        ActiveMQConnectionFactory cf = new ActiveMQConnectionFactory(broker.getVmConnectorURI());
+        return  drain(cf, destination, numToGet);
+    }
+
+    private int produceMessagesToConsumeMultipleDataFiles(int numToSend) throws Exception {
+        return produceMessages(destination, numToSend);
+    }
+
+    private Message createMessage(Session session, int i) throws Exception {
+        return session.createTextMessage(payload + "::" + i);
+    }
+}

Loading...