diff --git a/ChangeLog.md b/ChangeLog.md
index eed4cd10ef..f96febf25d 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -17,6 +17,13 @@ specific language governing permissions and limitations
under the License.
-->
+**2023-12-06 Arturo Bernal (abernal AT apache DOT org)**
+
+* _2.12.2-git-12_
+*
+* [JSPWIKI-1178](https://issues.apache.org/jira/browse/JSPWIKI-1178) - Deadlock with Java Virtual Threads
+
+
**2023-12-04 Juan Pablo Santos (juanpablo AT apache DOT org)**
* _2.12.2-git-11_
diff --git a/jspwiki-api/src/main/java/org/apache/wiki/api/Release.java b/jspwiki-api/src/main/java/org/apache/wiki/api/Release.java
index b3699e53a1..1c6bef6af2 100644
--- a/jspwiki-api/src/main/java/org/apache/wiki/api/Release.java
+++ b/jspwiki-api/src/main/java/org/apache/wiki/api/Release.java
@@ -69,7 +69,7 @@ public final class Release {
*
* If the build identifier is empty, it is not added.
*/
- public static final String BUILD = "11";
+ public static final String BUILD = "12";
/**
* This is the generic version string you should use when printing out the version. It is of
diff --git a/jspwiki-event/pom.xml b/jspwiki-event/pom.xml
index 2066adbfa0..e819a97b2c 100644
--- a/jspwiki-event/pom.xml
+++ b/jspwiki-event/pom.xml
@@ -62,5 +62,11 @@
junit-jupiter-enginetest
+
+
+ ${project.groupId}
+ jspwiki-util
+ ${project.version}
+
diff --git a/jspwiki-event/src/main/java/org/apache/wiki/event/WikiEventManager.java b/jspwiki-event/src/main/java/org/apache/wiki/event/WikiEventManager.java
index 635ec6e444..0bdb543eeb 100644
--- a/jspwiki-event/src/main/java/org/apache/wiki/event/WikiEventManager.java
+++ b/jspwiki-event/src/main/java/org/apache/wiki/event/WikiEventManager.java
@@ -21,18 +21,21 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
+import org.apache.wiki.util.Synchronizer;
import java.lang.ref.WeakReference;
-import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.ConcurrentModificationException;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.Vector;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReentrantLock;
/**
* A singleton class that manages the addition and removal of WikiEvent listeners to a event source, as well as the firing of events
@@ -133,7 +136,7 @@ public final class WikiEventManager {
private static WikiEventListener c_monitor;
/* The Map of client object to WikiEventDelegate. */
- private final Map< Object, WikiEventDelegate > m_delegates = new HashMap<>();
+ private final Map< Object, WikiEventDelegate > m_delegates = new ConcurrentHashMap<>();
/* The Vector containing any preloaded WikiEventDelegates. */
private final Vector< WikiEventDelegate > m_preloadCache = new Vector<>();
@@ -141,6 +144,17 @@ public final class WikiEventManager {
/* Singleton instance of the WikiEventManager. */
private static WikiEventManager c_instance;
+ /*
+ * Locks used to ensure thread safety when accessing shared resources.
+ * These locks provide more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private static final ReentrantLock instanceLock = new ReentrantLock();
+ private static final ReentrantLock removeWikiEventListenerLock = new ReentrantLock();
+
/** Constructor for a WikiEventManager. */
private WikiEventManager() {
c_instance = this;
@@ -154,11 +168,13 @@ private WikiEventManager() {
* @return A shared instance of the WikiEventManager
*/
public static WikiEventManager getInstance() {
- if( c_instance == null ) {
- synchronized( WikiEventManager.class ) {
- return new WikiEventManager();
- // start up any post-instantiation services here
- }
+ if (c_instance == null) {
+ Synchronizer.synchronize(instanceLock, () -> {
+ if (c_instance == null) {
+ c_instance = new WikiEventManager();
+ // start up any post-instantiation services here
+ }
+ });
}
return c_instance;
}
@@ -238,11 +254,11 @@ public static Set getWikiEventListeners( final Object client
* @return true if the listener was found and removed.
*/
public static boolean removeWikiEventListener( final WikiEventListener listener ) {
- boolean removed = false;
+ final AtomicBoolean removed = new AtomicBoolean(false);
// get the Map.entry object for the entire Map, then check match on entry (listener)
final WikiEventManager mgr = getInstance();
final Map< Object, WikiEventDelegate > sources = Collections.synchronizedMap( mgr.getDelegates() );
- synchronized( sources ) {
+ Synchronizer.synchronize(removeWikiEventListenerLock, () -> {
// get an iterator over the Map.Enty objects in the map
for( final Map.Entry< Object, WikiEventDelegate > entry : sources.entrySet() ) {
// the entry value is the delegate
@@ -250,20 +266,16 @@ public static boolean removeWikiEventListener( final WikiEventListener listener
// now see if we can remove the listener from the delegate (delegate may be null because this is a weak reference)
if( delegate != null && delegate.removeWikiEventListener( listener ) ) {
- removed = true; // was removed
+ removed.set(true); // was removed
}
}
- }
- return removed;
+ });
+ return removed.get();
}
private void removeDelegates() {
- synchronized( m_delegates ) {
- m_delegates.clear();
- }
- synchronized( m_preloadCache ) {
- m_preloadCache.clear();
- }
+ m_delegates.clear();
+ m_preloadCache.clear();
}
public static void shutdown() {
@@ -314,35 +326,33 @@ private Map< Object, WikiEventDelegate > getDelegates() {
* @param client the client Object, or alternately a Class reference
* @return the WikiEventDelegate.
*/
- private WikiEventDelegate getDelegateFor( final Object client ) {
- synchronized( m_delegates ) {
- if( client == null || client instanceof Class ) { // then preload the cache
- final WikiEventDelegate delegate = new WikiEventDelegate( client );
- m_preloadCache.add( delegate );
- m_delegates.put( client, delegate );
- return delegate;
- } else if( !m_preloadCache.isEmpty() ) {
- // then see if any of the cached delegates match the class of the incoming client
- for( int i = m_preloadCache.size()-1 ; i >= 0 ; i-- ) { // start with most-recently added
- final WikiEventDelegate delegate = m_preloadCache.elementAt( i );
- if( delegate.getClientClass() == null || delegate.getClientClass().equals( client.getClass() ) ) {
- // we have a hit, so use it, but only on a client we haven't seen before
- if( !m_delegates.containsKey( client ) ) {
- m_preloadCache.remove( delegate );
- m_delegates.put( client, delegate );
- return delegate;
- }
+ private WikiEventDelegate getDelegateFor(final Object client) {
+ if (client == null || client instanceof Class) { // then preload the cache
+ final WikiEventDelegate delegate = new WikiEventDelegate(client);
+ m_preloadCache.add(delegate);
+ m_delegates.put(client, delegate);
+ return delegate;
+ } else if (!m_preloadCache.isEmpty()) {
+ // then see if any of the cached delegates match the class of the incoming client
+ for (int i = m_preloadCache.size() - 1; i >= 0; i--) { // start with most-recently added
+ final WikiEventDelegate delegate = m_preloadCache.elementAt(i);
+ if (delegate.getClientClass() == null || delegate.getClientClass().equals(client.getClass())) {
+ // we have a hit, so use it, but only on a client we haven't seen before
+ if (!m_delegates.containsKey(client)) {
+ m_preloadCache.remove(delegate);
+ m_delegates.put(client, delegate);
+ return delegate;
}
}
}
- // otherwise treat normally...
- WikiEventDelegate delegate = m_delegates.get( client );
- if( delegate == null ) {
- delegate = new WikiEventDelegate( client );
- m_delegates.put( client, delegate );
- }
- return delegate;
}
+ // otherwise treat normally...
+ WikiEventDelegate delegate = m_delegates.get(client);
+ if (delegate == null) {
+ delegate = new WikiEventDelegate(client);
+ m_delegates.put(client, delegate);
+ }
+ return delegate;
}
@@ -358,7 +368,7 @@ private WikiEventDelegate getDelegateFor( final Object client ) {
private static final class WikiEventDelegate {
/* A list of event listeners for this instance. */
- private final ArrayList< WeakReference< WikiEventListener > > m_listenerList = new ArrayList<>();
+ private final CopyOnWriteArrayList< WeakReference< WikiEventListener > > m_listenerList = new CopyOnWriteArrayList<>();
private Class< ? > m_class;
/**
@@ -386,17 +396,15 @@ private static final class WikiEventDelegate {
* @throws java.lang.UnsupportedOperationException if any attempt is made to modify the Set
*/
public Set< WikiEventListener > getWikiEventListeners() {
- synchronized( m_listenerList ) {
- final TreeSet< WikiEventListener > set = new TreeSet<>( new WikiEventListenerComparator() );
- for( final WeakReference< WikiEventListener > wikiEventListenerWeakReference : m_listenerList ) {
- final WikiEventListener l = wikiEventListenerWeakReference.get();
- if( l != null ) {
- set.add( l );
- }
+ final TreeSet< WikiEventListener > set = new TreeSet<>( new WikiEventListenerComparator() );
+ for( final WeakReference< WikiEventListener > wikiEventListenerWeakReference : m_listenerList ) {
+ final WikiEventListener l = wikiEventListenerWeakReference.get();
+ if( l != null ) {
+ set.add( l );
}
-
- return Collections.unmodifiableSet( set );
}
+
+ return Collections.unmodifiableSet( set );
}
/**
@@ -406,13 +414,11 @@ public Set< WikiEventListener > getWikiEventListeners() {
* @return true if the listener was added (i.e., it was not already in the list and was added)
*/
public boolean addWikiEventListener( final WikiEventListener listener ) {
- synchronized( m_listenerList ) {
- final boolean listenerAlreadyContained = m_listenerList.stream()
- .map( WeakReference::get )
- .anyMatch( ref -> ref == listener );
- if( !listenerAlreadyContained ) {
- return m_listenerList.add( new WeakReference<>( listener ) );
- }
+ final boolean listenerAlreadyContained = m_listenerList.stream()
+ .map( WeakReference::get )
+ .anyMatch( ref -> ref == listener );
+ if( !listenerAlreadyContained ) {
+ return m_listenerList.add( new WeakReference<>( listener ) );
}
return false;
}
@@ -424,16 +430,13 @@ public boolean addWikiEventListener( final WikiEventListener listener ) {
* @return true if the listener was removed (i.e., it was actually in the list and was removed)
*/
public boolean removeWikiEventListener( final WikiEventListener listener ) {
- synchronized( m_listenerList ) {
- for( final Iterator< WeakReference< WikiEventListener > > i = m_listenerList.iterator(); i.hasNext(); ) {
- final WikiEventListener l = i.next().get();
- if( l == listener ) {
- i.remove();
- return true;
- }
+ for ( final WeakReference< WikiEventListener > ref : m_listenerList ) {
+ final WikiEventListener l = ref.get();
+ if ( l == listener ) {
+ m_listenerList.remove( ref );
+ return true;
}
}
-
return false;
}
@@ -441,9 +444,7 @@ public boolean removeWikiEventListener( final WikiEventListener listener ) {
* Returns true if there are one or more listeners registered with this instance.
*/
public boolean isListening() {
- synchronized( m_listenerList ) {
- return !m_listenerList.isEmpty();
- }
+ return !m_listenerList.isEmpty();
}
/**
@@ -452,29 +453,26 @@ public boolean isListening() {
public void fireEvent( final WikiEvent event ) {
boolean needsCleanup = false;
try {
- synchronized( m_listenerList ) {
- for( final WeakReference< WikiEventListener > wikiEventListenerWeakReference : m_listenerList ) {
- final WikiEventListener listener = wikiEventListenerWeakReference.get();
- if( listener != null ) {
- listener.actionPerformed( event );
- } else {
- needsCleanup = true;
- }
+ for( final WeakReference< WikiEventListener > wikiEventListenerWeakReference : m_listenerList ) {
+ final WikiEventListener listener = wikiEventListenerWeakReference.get();
+ if( listener != null ) {
+ listener.actionPerformed( event );
+ } else {
+ needsCleanup = true;
}
+ }
- // Remove all such listeners which have expired
- if( needsCleanup ) {
- for( int i = 0; i < m_listenerList.size(); i++ ) {
- final WeakReference< WikiEventListener > w = m_listenerList.get( i );
- if( w.get() == null ) {
- m_listenerList.remove(i--);
- }
+ // Remove all such listeners which have expired
+ if( needsCleanup ) {
+ for( int i = 0; i < m_listenerList.size(); i++ ) {
+ final WeakReference< WikiEventListener > w = m_listenerList.get( i );
+ if( w.get() == null ) {
+ m_listenerList.remove( i-- );
}
}
-
}
- } catch( final ConcurrentModificationException e ) {
- // We don't die, we just don't do notifications in that case.
+ } catch ( final ConcurrentModificationException e ) {
+ // We don't die, we just don't do notifications in that case.
LOG.info( "Concurrent modification of event list; please report this.", e );
}
}
diff --git a/jspwiki-kendra-searchprovider/src/main/java/org/apache/wiki/search/kendra/KendraSearchProvider.java b/jspwiki-kendra-searchprovider/src/main/java/org/apache/wiki/search/kendra/KendraSearchProvider.java
index 9563e59f2f..334988f779 100644
--- a/jspwiki-kendra-searchprovider/src/main/java/org/apache/wiki/search/kendra/KendraSearchProvider.java
+++ b/jspwiki-kendra-searchprovider/src/main/java/org/apache/wiki/search/kendra/KendraSearchProvider.java
@@ -46,6 +46,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.auth.permissions.PagePermission;
import org.apache.wiki.pages.PageManager;
import org.apache.wiki.search.SearchProvider;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import java.io.IOException;
@@ -55,6 +56,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.*;
+import java.util.concurrent.locks.ReentrantLock;
import static java.lang.String.format;
@@ -87,8 +89,15 @@ public class KendraSearchProvider implements SearchProvider {
private static final String PROP_KENDRA_INDEXDELAY = "jspwiki.kendra.indexdelay";
private static final String PROP_KENDRA_INITIALDELAY = "jspwiki.kendra.initialdelay";
- public KendraSearchProvider() {
- }
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
/**
* {@inheritDoc}
@@ -108,7 +117,7 @@ public void initialize( final Engine engine, final Properties properties ) throw
// Start the Kendra update thread, which waits first for a little while
// before starting to go through the "pages that need updating".
- if ( initialDelay >= 0 ) {
+ if( initialDelay >= 0 ) {
final KendraUpdater updater = new KendraUpdater( engine, this, initialDelay, indexDelay );
updater.start();
}
@@ -116,14 +125,14 @@ public void initialize( final Engine engine, final Properties properties ) throw
private Map< String, Object > getContentTypes() {
final Gson gson = new GsonBuilder().create();
- try ( final InputStream in = KendraSearchProvider.class.getResourceAsStream( "content_types.json" ) ) {
+ try( final InputStream in = KendraSearchProvider.class.getResourceAsStream( "content_types.json" ) ) {
if ( in != null ) {
final Type collectionType = new TypeToken< HashMap< String, Object > >() {
}.getType();
return gson.fromJson( new InputStreamReader( in ), collectionType );
}
- } catch ( final IOException e ) {
- LOG.error( format( "Unable to load default propertyfile 'content_types.json': %s", e.getMessage() ), e );
+ } catch( final IOException e ) {
+ LOG.error( "Unable to load default propertyfile 'content_types.json': {}", e.getMessage(), e );
}
return null;
}
@@ -142,13 +151,12 @@ public String getProviderInfo() {
@Override
public void pageRemoved( final Page page ) {
final String pageName = page.getName();
- final BatchDeleteDocumentRequest request = new BatchDeleteDocumentRequest().withIndexId( indexId )
- .withDocumentIdList( pageName );
+ final BatchDeleteDocumentRequest request = new BatchDeleteDocumentRequest().withIndexId( indexId ).withDocumentIdList( pageName );
final BatchDeleteDocumentResult result = getKendra().batchDeleteDocument( request );
- if (result.getFailedDocuments().isEmpty()) {
- LOG.debug( format( "Page '%s' was removed from index", pageName ) );
+ if( result.getFailedDocuments().isEmpty() ) {
+ LOG.debug( "Page '{}' was removed from index", pageName );
} else {
- LOG.error( format( "Failed to remove Page '%s' from index", pageName ) );
+ LOG.error( "Failed to remove Page '{}' from index", pageName );
}
}
@@ -157,9 +165,9 @@ public void pageRemoved( final Page page ) {
*/
@Override
public void reindexPage( final Page page ) {
- if ( page != null ) {
+ if( page != null ) {
updates.add( page );
- LOG.debug( format( "Scheduling page '%s' for indexing ...", page.getName() ) );
+ LOG.debug( "Scheduling page '{}' for indexing ...", page.getName() );
}
}
@@ -193,16 +201,15 @@ public Collection< SearchResult > findPages( final String query, final Context w
new String[]{ documentExcerpt } );
searchResults.add( searchResult );
} else {
- LOG.error( format( "Page '%s' is not accessible", documentId ) );
+ LOG.error( "Page '{}' is not accessible", documentId );
}
} else {
- LOG.error(
- format( "Kendra found a result page '%s' that could not be loaded, removing from index", documentId ) );
+ LOG.error( "Kendra found a result page '{}' that could not be loaded, removing from index", documentId );
pageRemoved( Wiki.contents().page( this.engine, documentId ) );
}
break;
default:
- LOG.error( format( "Unknown query result type: %s", item.getType() ) );
+ LOG.error( "Unknown query result type: {}", item.getType() );
}
}
return searchResults;
@@ -313,7 +320,7 @@ private void doFullReindex() throws IOException {
if ( pages.isEmpty() ) {
return;
}
- LOG.debug( format( "Indexing all %d pages. Please wait ...", pages.size() ) );
+ LOG.debug( "Indexing all {} pages. Please wait ...", pages.size() );
final String executionId = startExecution();
for ( final Page page : pages ) {
// Since I do not want to handle the size limit
@@ -334,20 +341,21 @@ private void doFullReindex() throws IOException {
* index pages that have been modified
*/
private void doPartialReindex() {
- if ( updates.isEmpty() ) {
+ if (updates.isEmpty()) {
return;
}
- LOG.debug( "Indexing updated pages. Please wait ..." );
+ LOG.debug("Indexing updated pages. Please wait ...");
final String executionId = startExecution();
- synchronized ( updates ) {
+
+ Synchronizer.synchronize(lock, () -> {
try {
while (!updates.isEmpty()) {
- indexOnePage( updates.remove( 0 ), executionId );
+ indexOnePage(updates.remove(0), executionId);
}
} finally {
stopExecution();
}
- }
+ });
}
/**
@@ -381,18 +389,17 @@ private void indexOnePage( final Page page, final String executionId ) {
final String pageName = page.getName();
try {
final Document document = newDocument( page, executionId );
- final BatchPutDocumentRequest request = new BatchPutDocumentRequest().withIndexId( indexId )
- .withDocuments( document );
+ final BatchPutDocumentRequest request = new BatchPutDocumentRequest().withIndexId( indexId ).withDocuments( document );
final BatchPutDocumentResult result = getKendra().batchPutDocument( request );
- if (result.getFailedDocuments().isEmpty()) {
- LOG.info( format( "Successfully indexed Page '%s' as %s", page.getName(), document.getContentType() ) );
+ if ( result.getFailedDocuments().isEmpty() ) {
+ LOG.info( "Successfully indexed Page '{}' as {}", page.getName(), document.getContentType() );
} else {
for ( final BatchPutDocumentResponseFailedDocument failedDocument : result.getFailedDocuments() ) {
- LOG.error( format( "Failed to index Page '%s': %s", failedDocument.getId(), failedDocument.getErrorMessage() ) );
+ LOG.error( "Failed to index Page '{}': {}", failedDocument.getId(), failedDocument.getErrorMessage() );
}
}
} catch ( final IOException e ) {
- LOG.error( format( "Failed to index Page '%s': %s", pageName, e.getMessage() ) );
+ LOG.error( "Failed to index Page '{}': {}", pageName, e.getMessage() );
}
}
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/WatchDog.java b/jspwiki-main/src/main/java/org/apache/wiki/WatchDog.java
index 8b83a13228..9c38feba69 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/WatchDog.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/WatchDog.java
@@ -21,6 +21,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.wiki.api.core.Engine;
+import org.apache.wiki.util.Synchronizer;
import java.lang.ref.WeakReference;
import java.util.Iterator;
@@ -28,6 +29,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Set;
import java.util.Stack;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -57,6 +59,17 @@ public final class WatchDog {
private static final Map< Integer, WeakReference< WatchDog > > c_kennel = new ConcurrentHashMap<>();
private static WikiBackgroundThread c_watcherThread;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock watchDogLock = new ReentrantLock();
+ private final ReentrantLock stateStackLock = new ReentrantLock();
+
/**
* Returns the current watchdog for the current thread. This is the preferred method of getting you a Watchdog, since it
* keeps an internal list of Watchdogs for you so that there won't be more than one watchdog per thread.
@@ -92,12 +105,12 @@ public WatchDog( final Engine engine, final Watchable watch ) {
m_engine = engine;
m_watchable = watch;
- synchronized( WatchDog.class ) {
- if( c_watcherThread == null ) {
- c_watcherThread = new WatchDogThread( engine );
+ Synchronizer.synchronize(watchDogLock, () -> {
+ if (c_watcherThread == null) {
+ c_watcherThread = new WatchDogThread(engine);
c_watcherThread.start();
}
- }
+ });
}
/**
@@ -136,26 +149,26 @@ private static void scrub() {
* Can be used to enable the WatchDog. Will cause a new Thread to be created, if none was existing previously.
*/
public void enable() {
- synchronized( WatchDog.class ) {
+ Synchronizer.synchronize( watchDogLock, () -> {
if( !m_enabled ) {
m_enabled = true;
c_watcherThread = new WatchDogThread( m_engine );
c_watcherThread.start();
}
- }
+ } );
}
/**
* Is used to disable a WatchDog. The watchdog thread is shut down and resources released.
*/
public void disable() {
- synchronized( WatchDog.class ) {
+ Synchronizer.synchronize( watchDogLock, () -> {
if( m_enabled ) {
m_enabled = false;
c_watcherThread.shutdown();
c_watcherThread = null;
}
- }
+ } );
}
/**
@@ -171,14 +184,12 @@ public void enterState( final String state ) {
/**
* Enters a watched state which has an expected completion time. This is the main method for using the
* WatchDog. For example:
- *
*
* WatchDog w = m_engine.getCurrentWatchDog();
* w.enterState("Processing Foobar", 60);
* foobar();
* w.exitState();
*
- *
* If the call to foobar() takes more than 60 seconds, you will receive an ERROR in the log stream.
*
* @param state A free-form string description of the state
@@ -186,10 +197,10 @@ public void enterState( final String state ) {
*/
public void enterState( final String state, final int expectedCompletionTime ) {
LOG.debug( "{}: Entering state {}, expected completion in {} s", m_watchable.getName(), state, expectedCompletionTime );
- synchronized( m_stateStack ) {
+ Synchronizer.synchronize( stateStackLock, () -> {
final State st = new State( state, expectedCompletionTime );
m_stateStack.push( st );
- }
+ } );
}
/**
@@ -208,7 +219,7 @@ public void exitState() {
*/
public void exitState( final String state ) {
if( !m_stateStack.empty() ) {
- synchronized( m_stateStack ) {
+ Synchronizer.synchronize( stateStackLock, () -> {
final State st = m_stateStack.peek();
if( state == null || st.getState().equals( state ) ) {
m_stateStack.pop();
@@ -218,7 +229,7 @@ public void exitState( final String state ) {
// FIXME: should actually go and fix things for that
LOG.error( "exitState() called before enterState()" );
}
- }
+ } );
} else {
LOG.warn( "Stack for " + m_watchable.getName() + " is empty!" );
}
@@ -244,15 +255,14 @@ public boolean isWatchableAlive() {
private void check() {
LOG.debug( "Checking watchdog '{}'", m_watchable.getName() );
-
- synchronized( m_stateStack ) {
+ Synchronizer.synchronize( stateStackLock, () -> {
if( !m_stateStack.empty() ) {
final State st = m_stateStack.peek();
final long now = System.currentTimeMillis();
if( now > st.getExpiryTime() ) {
LOG.info( "Watchable '" + m_watchable.getName() + "' exceeded timeout in state '" + st.getState() +
- "' by " + (now - st.getExpiryTime()) / 1000 + " seconds" +
+ "' by " + ( now - st.getExpiryTime() ) / 1_000 + " seconds" +
( LOG.isDebugEnabled() ? "" : "Enable DEBUG-level logging to see stack traces." ) );
dumpStackTraceForWatchable();
@@ -261,7 +271,7 @@ private void check() {
} else {
LOG.warn( "Stack for " + m_watchable.getName() + " is empty!" );
}
- }
+ } );
}
/**
@@ -302,7 +312,7 @@ private void dumpStackTraceForWatchable() {
*/
@Override
public String toString() {
- synchronized( m_stateStack ) {
+ return Synchronizer.synchronize( stateStackLock, () -> {
String state = "Idle";
if( !m_stateStack.empty() ) {
@@ -310,7 +320,7 @@ public String toString() {
state = st.getState();
}
return "WatchDog state=" + state;
- }
+ } );
}
/**
@@ -336,7 +346,6 @@ public void shutdownTask() {
/**
* Checks if the watchable is alive, and if it is, checks if the stack is finished.
- *
* If the watchable has been deleted in the meantime, will simply shut down itself.
*/
@Override
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/WikiEngine.java b/jspwiki-main/src/main/java/org/apache/wiki/WikiEngine.java
index 4ec79d82e0..3d9ef8ab22 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/WikiEngine.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/WikiEngine.java
@@ -57,6 +57,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.url.URLConstructor;
import org.apache.wiki.util.ClassUtil;
import org.apache.wiki.util.PropertyReader;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import org.apache.wiki.variables.VariableManager;
import org.apache.wiki.workflow.WorkflowManager;
@@ -81,6 +82,8 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Properties;
import java.util.TimeZone;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
@@ -138,6 +141,16 @@ public class WikiEngine implements Engine {
/** Stores WikiEngine's associated managers. */
protected final Map< Class< ? >, Object > managers = new ConcurrentHashMap<>();
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private static final ReentrantLock lock = new ReentrantLock();
+
/**
* Gets a WikiEngine related to this servlet. Since this method is only called from JSP pages (and JspInit()) to be specific,
* we throw a RuntimeException if things don't work.
@@ -146,7 +159,7 @@ public class WikiEngine implements Engine {
* @return A WikiEngine instance.
* @throws InternalWikiException in case something fails. This is a RuntimeException, so be prepared for it.
*/
- public static synchronized WikiEngine getInstance( final ServletConfig config ) throws InternalWikiException {
+ public static WikiEngine getInstance( final ServletConfig config ) throws InternalWikiException {
return getInstance( config.getServletContext(), null );
}
@@ -160,7 +173,7 @@ public static synchronized WikiEngine getInstance( final ServletConfig config )
*
* @return One well-behaving WikiEngine instance.
*/
- public static synchronized WikiEngine getInstance( final ServletConfig config, final Properties props ) {
+ public static WikiEngine getInstance( final ServletConfig config, final Properties props ) {
return getInstance( config.getServletContext(), props );
}
@@ -172,35 +185,38 @@ public static synchronized WikiEngine getInstance( final ServletConfig config, f
* @return One fully functional, properly behaving WikiEngine.
* @throws InternalWikiException If the WikiEngine instantiation fails.
*/
- public static synchronized WikiEngine getInstance( final ServletContext context, Properties props ) throws InternalWikiException {
- WikiEngine engine = ( WikiEngine )context.getAttribute( ATTR_WIKIENGINE );
- if( engine == null ) {
- final String appid = Integer.toString( context.hashCode() );
- context.log( " Assigning new engine to " + appid );
- try {
- if( props == null ) {
- props = PropertyReader.loadWebAppProps( context );
- }
-
- engine = new WikiEngine( context, appid );
+ public static WikiEngine getInstance(final ServletContext context, Properties props) throws InternalWikiException {
+ final AtomicReference< Properties > propsRef = new AtomicReference<>( props );
+ return Synchronizer.synchronize( lock, () -> {
+ WikiEngine engine = (WikiEngine) context.getAttribute(ATTR_WIKIENGINE);
+ if (engine == null) {
+ final String appid = Integer.toString(context.hashCode());
+ context.log(" Assigning new engine to " + appid);
try {
- // Note: May be null, if JSPWiki has been deployed in a WAR file.
- engine.start( props );
- LOG.info( "Root path for this Wiki is: '{}'", engine.getRootPath() );
- } catch( final Exception e ) {
- final String msg = Release.APPNAME + ": Unable to load and setup properties from jspwiki.properties. " + e.getMessage();
- context.log( msg );
- LOG.error( msg, e );
- throw new WikiException( msg, e );
+ if (propsRef.get() == null) {
+ propsRef.set(PropertyReader.loadWebAppProps(context));
+ }
+
+ engine = new WikiEngine(context, appid);
+ try {
+ // Note: May be null, if JSPWiki has been deployed in a WAR file.
+ engine.start(propsRef.get());
+ LOG.info("Root path for this Wiki is: '{}'", engine.getRootPath());
+ } catch (final Exception e) {
+ final String msg = Release.APPNAME + ": Unable to load and setup properties from jspwiki.properties. " + e.getMessage();
+ context.log(msg);
+ LOG.error(msg, e);
+ throw new WikiException(msg, e);
+ }
+ context.setAttribute(ATTR_WIKIENGINE, engine);
+ } catch (final Exception e) {
+ context.log("ERROR: Failed to create a Wiki engine: " + e.getMessage());
+ LOG.error("ERROR: Failed to create a Wiki engine, stacktrace follows ", e);
+ throw new InternalWikiException("No wiki engine, check logs.", e);
}
- context.setAttribute( ATTR_WIKIENGINE, engine );
- } catch( final Exception e ) {
- context.log( "ERROR: Failed to create a Wiki engine: " + e.getMessage() );
- LOG.error( "ERROR: Failed to create a Wiki engine, stacktrace follows ", e );
- throw new InternalWikiException( "No wiki engine, check logs.", e );
}
- }
- return engine;
+ return engine;
+ } );
}
/**
@@ -943,14 +959,20 @@ public InternationalizationManager getInternationalizationManager() {
/** {@inheritDoc} */
@Override
- public final synchronized void addWikiEventListener( final WikiEventListener listener ) {
- WikiEventManager.addWikiEventListener( this, listener );
+ public final void addWikiEventListener( final WikiEventListener listener ) {
+ Synchronizer.synchronize(lock, () -> {
+ WikiEventManager.addWikiEventListener( this, listener );
+ });
}
- /** {@inheritDoc} */
+ /**
+ * {@inheritDoc}
+ */
@Override
- public final synchronized void removeWikiEventListener( final WikiEventListener listener ) {
- WikiEventManager.removeWikiEventListener( this, listener );
+ public final void removeWikiEventListener(final WikiEventListener listener) {
+ Synchronizer.synchronize(lock, () -> {
+ WikiEventManager.removeWikiEventListener(this, listener);
+ });
}
/**
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/auth/DefaultAuthenticationManager.java b/jspwiki-main/src/main/java/org/apache/wiki/auth/DefaultAuthenticationManager.java
index 6f86584c14..f393e8b35a 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/auth/DefaultAuthenticationManager.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/auth/DefaultAuthenticationManager.java
@@ -37,6 +37,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.event.WikiEventManager;
import org.apache.wiki.event.WikiSecurityEvent;
import org.apache.wiki.util.ClassUtil;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import org.apache.wiki.util.TimedCounterList;
@@ -53,6 +54,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Map;
import java.util.Properties;
import java.util.Set;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -103,6 +105,16 @@ public class DefaultAuthenticationManager implements AuthenticationManager {
/** Keeps a list of the usernames who have attempted a login recently. */
private final TimedCounterList< String > m_lastLoginAttempts = new TimedCounterList<>();
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* {@inheritDoc}
*/
@@ -352,16 +364,20 @@ public Set< Principal > doJAASLogin( final Class< ? extends LoginModule > clazz,
* {@inheritDoc}
*/
@Override
- public synchronized void addWikiEventListener( final WikiEventListener listener ) {
- WikiEventManager.addWikiEventListener( this, listener );
+ public void addWikiEventListener( final WikiEventListener listener ) {
+ Synchronizer.synchronize(lock, () -> {
+ WikiEventManager.addWikiEventListener( this, listener );
+ });
}
/**
* {@inheritDoc}
*/
@Override
- public synchronized void removeWikiEventListener( final WikiEventListener listener ) {
- WikiEventManager.removeWikiEventListener( this, listener );
+ public void removeWikiEventListener( final WikiEventListener listener ) {
+ Synchronizer.synchronize(lock, () -> {
+ WikiEventManager.removeWikiEventListener( this, listener );
+ });
}
/**
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/auth/DefaultAuthorizationManager.java b/jspwiki-main/src/main/java/org/apache/wiki/auth/DefaultAuthorizationManager.java
index f39f65cdcd..ca19d678a8 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/auth/DefaultAuthorizationManager.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/auth/DefaultAuthorizationManager.java
@@ -44,6 +44,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.pages.PageManager;
import org.apache.wiki.preferences.Preferences;
import org.apache.wiki.util.ClassUtil;
+import org.apache.wiki.util.Synchronizer;
import org.freshcookies.security.policy.LocalPolicy;
import javax.servlet.http.HttpServletResponse;
@@ -64,6 +65,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Properties;
import java.util.ResourceBundle;
import java.util.WeakHashMap;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -89,10 +91,14 @@ public class DefaultAuthorizationManager implements AuthorizationManager {
private LocalPolicy m_localPolicy;
/**
- * Constructs a new DefaultAuthorizationManager instance.
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
*/
- public DefaultAuthorizationManager() {
- }
+ private final ReentrantLock lock = new ReentrantLock();
/** {@inheritDoc} */
@Override
@@ -373,14 +379,18 @@ public Principal resolvePrincipal( final String name ) {
/** {@inheritDoc} */
@Override
- public synchronized void addWikiEventListener( final WikiEventListener listener ) {
- WikiEventManager.addWikiEventListener( this, listener );
+ public void addWikiEventListener( final WikiEventListener listener ) {
+ Synchronizer.synchronize(lock, () -> {
+ WikiEventManager.addWikiEventListener( this, listener );
+ });
}
/** {@inheritDoc} */
@Override
- public synchronized void removeWikiEventListener( final WikiEventListener listener ) {
- WikiEventManager.removeWikiEventListener( this, listener );
+ public void removeWikiEventListener( final WikiEventListener listener ) {
+ Synchronizer.synchronize(lock, () -> {
+ WikiEventManager.removeWikiEventListener( this, listener );
+ });
}
}
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/auth/DefaultUserManager.java b/jspwiki-main/src/main/java/org/apache/wiki/auth/DefaultUserManager.java
index 048fc76315..4c1808563f 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/auth/DefaultUserManager.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/auth/DefaultUserManager.java
@@ -47,6 +47,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.tasks.TasksManager;
import org.apache.wiki.ui.InputValidator;
import org.apache.wiki.util.ClassUtil;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import org.apache.wiki.workflow.Decision;
import org.apache.wiki.workflow.DecisionRequiredException;
@@ -69,6 +70,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Properties;
import java.util.ResourceBundle;
import java.util.WeakHashMap;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -96,6 +98,16 @@ public class DefaultUserManager implements UserManager {
/** The user database loads, manages and persists user identities */
private UserDatabase m_database;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/** {@inheritDoc} */
@Override
public void initialize( final Engine engine, final Properties props ) {
@@ -406,8 +418,11 @@ public Principal[] listWikiNames() throws WikiSecurityException {
* This is a convenience method.
* @param listener the event listener
*/
- @Override public synchronized void addWikiEventListener( final WikiEventListener listener ) {
- WikiEventManager.addWikiEventListener( this, listener );
+ @Override
+ public void addWikiEventListener( final WikiEventListener listener ) {
+ Synchronizer.synchronize(lock, () -> {
+ WikiEventManager.addWikiEventListener( this, listener );
+ });
}
/**
@@ -415,8 +430,11 @@ public Principal[] listWikiNames() throws WikiSecurityException {
* This is a convenience method.
* @param listener the event listener
*/
- @Override public synchronized void removeWikiEventListener( final WikiEventListener listener ) {
- WikiEventManager.removeWikiEventListener( this, listener );
+ @Override
+ public void removeWikiEventListener( final WikiEventListener listener ) {
+ Synchronizer.synchronize(lock, () -> {
+ WikiEventManager.removeWikiEventListener( this, listener );
+ });
}
/**
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/auth/SessionMonitor.java b/jspwiki-main/src/main/java/org/apache/wiki/auth/SessionMonitor.java
index dc7e082e1c..33aef281bd 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/auth/SessionMonitor.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/auth/SessionMonitor.java
@@ -22,22 +22,26 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.logging.log4j.Logger;
import org.apache.wiki.api.core.Engine;
import org.apache.wiki.api.core.Session;
+import org.apache.wiki.api.spi.SessionSPI;
import org.apache.wiki.api.spi.Wiki;
import org.apache.wiki.event.WikiEventListener;
import org.apache.wiki.event.WikiEventManager;
import org.apache.wiki.event.WikiSecurityEvent;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.comparators.PrincipalComparator;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpSession;
import javax.servlet.http.HttpSessionEvent;
import javax.servlet.http.HttpSessionListener;
+import java.lang.ref.WeakReference;
import java.security.Principal;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import java.util.WeakHashMap;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
/**
@@ -59,6 +63,18 @@ public class SessionMonitor implements HttpSessionListener {
private final PrincipalComparator m_comparator = new PrincipalComparator();
+    /**
+     * Locks used to ensure thread safety when accessing shared resources:
+     * {@code mSessionsLock} guards access to the session cache ({@code m_sessions}),
+     * while {@code listenerLock} guards event-listener registration. ReentrantLock offers
+     * more flexibility than intrinsic locking, e.g. timed or interruptible acquisition.
+     *
+     * @see ReentrantLock
+     */
+ private final ReentrantLock mSessionsLock = new ReentrantLock();
+ private final ReentrantLock listenerLock = new ReentrantLock();
+
+
/**
* Returns the instance of the SessionMonitor for this wiki. Only one SessionMonitor exists per Engine.
*
@@ -79,9 +95,6 @@ public static SessionMonitor getInstance( final Engine engine ) {
}
/** Construct the SessionListener */
- public SessionMonitor() {
- }
-
private SessionMonitor( final Engine engine ) {
m_engine = engine;
}
@@ -123,10 +136,10 @@ private Session findSession( final String sessionId ) {
/**
*
Looks up the wiki session associated with a user's Http session and adds it to the session cache. This method will return the
- * "guest session" as constructed by {@link org.apache.wiki.api.spi.SessionSPI#guest(Engine)} if the HttpSession is not currently
+ * "guest session" as constructed by {@link SessionSPI#guest(Engine)} if the HttpSession is not currently
* associated with a WikiSession. This method is guaranteed to return a non-null WikiSession.
*
Internally, the session is stored in a HashMap; keys are the HttpSession objects, while the values are
- * {@link java.lang.ref.WeakReference}-wrapped WikiSessions.
+ * {@link WeakReference}-wrapped WikiSessions.
*
* @param session the HTTP session
* @return the wiki session
@@ -143,10 +156,10 @@ public final Session find( final HttpSession session ) {
/**
*
Looks up the wiki session associated with a user's Http session and adds it to the session cache. This method will return the
- * "guest session" as constructed by {@link org.apache.wiki.api.spi.SessionSPI#guest(Engine)} if the HttpSession is not currently
+ * "guest session" as constructed by {@link SessionSPI#guest(Engine)} if the HttpSession is not currently
* associated with a WikiSession. This method is guaranteed to return a non-null WikiSession.
*
Internally, the session is stored in a HashMap; keys are the HttpSession objects, while the values are
- * {@link java.lang.ref.WeakReference}-wrapped WikiSessions.
+ * {@link WeakReference}-wrapped WikiSessions.
*
* @param sessionId the HTTP session
* @return the wiki session
@@ -169,9 +182,9 @@ public final Session find( final String sessionId ) {
private Session createGuestSessionFor( final String sessionId ) {
LOG.debug( "Session for session ID={}... not found. Creating guestSession()", sessionId );
final Session wikiSession = Wiki.session().guest( m_engine );
- synchronized( m_sessions ) {
- m_sessions.put( sessionId, wikiSession );
- }
+ Synchronizer.synchronize( mSessionsLock, () -> {
+ m_sessions.put(sessionId, wikiSession);
+ } );
return wikiSession;
}
@@ -196,9 +209,9 @@ public final void remove( final HttpSession session ) {
if( session == null ) {
throw new IllegalArgumentException( "Session cannot be null." );
}
- synchronized( m_sessions ) {
+ Synchronizer.synchronize( mSessionsLock, () -> {
m_sessions.remove( session.getId() );
- }
+ } );
}
/**
@@ -214,17 +227,16 @@ public final int sessions()
*
Returns the current wiki users as a sorted array of Principal objects. The principals are those returned by
* each WikiSession's {@link Session#getUserPrincipal()}'s method.
*
To obtain the list of current WikiSessions, we iterate through our session Map and obtain the list of values,
- * which are WikiSessions wrapped in {@link java.lang.ref.WeakReference} objects. Those WeakReferences
+ * which are WikiSessions wrapped in {@link WeakReference} objects. Those WeakReferences
* whose get() method returns non-null values are valid sessions.
*
* @return the array of user principals
*/
public final Principal[] userPrincipals() {
- final Collection principals;
- synchronized ( m_sessions ) {
- principals = m_sessions.values().stream().map(Session::getUserPrincipal).collect(Collectors.toList());
- }
- final Principal[] p = principals.toArray( new Principal[0] );
+ final Collection< Principal > principals = Synchronizer.synchronize( mSessionsLock, () ->
+ m_sessions.values().stream().map( Session::getUserPrincipal ).collect( Collectors.toList() ) );
+
+ final Principal[] p = principals.toArray( new Principal[ 0 ] );
Arrays.sort( p, m_comparator );
return p;
}
@@ -235,8 +247,10 @@ public final Principal[] userPrincipals() {
* @param listener the event listener
* @since 2.4.75
*/
- public final synchronized void addWikiEventListener( final WikiEventListener listener ) {
- WikiEventManager.addWikiEventListener( this, listener );
+ public final void addWikiEventListener( final WikiEventListener listener ) {
+ Synchronizer.synchronize( listenerLock, () -> {
+ WikiEventManager.addWikiEventListener( this, listener );
+ });
}
/**
@@ -245,8 +259,10 @@ public final synchronized void addWikiEventListener( final WikiEventListener lis
* @param listener the event listener
* @since 2.4.75
*/
- public final synchronized void removeWikiEventListener( final WikiEventListener listener ) {
- WikiEventManager.removeWikiEventListener( this, listener );
+ public final void removeWikiEventListener(final WikiEventListener listener) {
+ Synchronizer.synchronize( listenerLock, () -> {
+ WikiEventManager.removeWikiEventListener(this, listener);
+ });
}
/**
@@ -265,7 +281,7 @@ protected final void fireEvent( final int type, final Principal principal, final
/**
* Fires when the web container creates a new HTTP session.
- *
+ *
* @param se the HTTP session event
*/
@Override
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/auth/acl/AclEntryImpl.java b/jspwiki-main/src/main/java/org/apache/wiki/auth/acl/AclEntryImpl.java
index cc7464a45e..2c570267da 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/auth/acl/AclEntryImpl.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/auth/acl/AclEntryImpl.java
@@ -19,12 +19,14 @@ Licensed to the Apache Software Foundation (ASF) under one
package org.apache.wiki.auth.acl;
import org.apache.wiki.auth.permissions.PagePermission;
+import org.apache.wiki.util.Synchronizer;
import java.io.Serializable;
import java.security.Permission;
import java.security.Principal;
import java.util.Enumeration;
import java.util.Vector;
+import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
@@ -40,10 +42,14 @@ public class AclEntryImpl implements AclEntry, Serializable {
private Principal m_principal;
/**
- * Constructs a new AclEntryImpl instance.
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
*/
- public AclEntryImpl() {
- }
+ private final ReentrantLock lock = new ReentrantLock();
/**
* Adds the specified permission to this ACL entry. The permission must be of type
@@ -54,13 +60,14 @@ public AclEntryImpl() {
* already part of this entry's permission set, and false if the permission is not of type PagePermission
*/
@Override
- public synchronized boolean addPermission(final Permission permission ) {
- if( permission instanceof PagePermission && findPermission( permission ) == null ) {
- m_permissions.add( permission );
- return true;
- }
-
- return false;
+ public boolean addPermission(final Permission permission) {
+ return Synchronizer.synchronize(lock, () -> {
+ if (permission instanceof PagePermission && findPermission(permission) == null) {
+ m_permissions.add(permission);
+ return true;
+ }
+ return false;
+ });
}
/**
@@ -81,8 +88,8 @@ public boolean checkPermission(final Permission permission ) {
* @return the principal associated with this entry.
*/
@Override
- public synchronized Principal getPrincipal() {
- return m_principal;
+ public Principal getPrincipal() {
+ return Synchronizer.synchronize(lock, () -> m_principal);
}
/**
@@ -102,14 +109,15 @@ public Enumeration< Permission > permissions() {
* @return true if the permission is removed, false if the permission was not part of this entry's permission set.
*/
@Override
- public synchronized boolean removePermission(final Permission permission ) {
- final Permission p = findPermission( permission );
- if( p != null ) {
- m_permissions.remove( p );
- return true;
- }
-
- return false;
+ public boolean removePermission(final Permission permission ) {
+ return Synchronizer.synchronize(lock, () -> {
+ final Permission p = findPermission(permission);
+ if (p != null) {
+ m_permissions.remove(p);
+ return true;
+ }
+ return false;
+ });
}
/**
@@ -121,12 +129,14 @@ public synchronized boolean removePermission(final Permission permission ) {
* principal set for this entry
*/
@Override
- public synchronized boolean setPrincipal(final Principal user ) {
- if( m_principal != null || user == null ) {
- return false;
- }
- m_principal = user;
- return true;
+ public boolean setPrincipal(final Principal user) {
+ return Synchronizer.synchronize(lock, () -> {
+ if (m_principal != null || user == null) {
+ return false;
+ }
+ m_principal = user;
+ return true;
+ });
}
/**
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/auth/acl/AclImpl.java b/jspwiki-main/src/main/java/org/apache/wiki/auth/acl/AclImpl.java
index 11af72c27f..e974535b2a 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/auth/acl/AclImpl.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/auth/acl/AclImpl.java
@@ -19,6 +19,7 @@ Licensed to the Apache Software Foundation (ASF) under one
package org.apache.wiki.auth.acl;
import org.apache.wiki.api.core.AclEntry;
+import org.apache.wiki.util.Synchronizer;
import java.io.Serializable;
import java.security.Permission;
@@ -27,6 +28,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -36,14 +38,18 @@ Licensed to the Apache Software Foundation (ASF) under one
*/
public class AclImpl implements Acl, Serializable {
- private static final long serialVersionUID = 1L;
- private final Vector< AclEntry > m_entries = new Vector<>();
-
/**
- * Constructs a new AclImpl instance.
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
*/
- public AclImpl() {
- }
+ private final ReentrantLock lock = new ReentrantLock();
+
+ private static final long serialVersionUID = 1L;
+ private final Vector< AclEntry > m_entries = new Vector<>();
/** {@inheritDoc} */
@Override
@@ -86,24 +92,26 @@ private boolean hasEntry( final AclEntry entry ) {
/** {@inheritDoc} */
@Override
- public synchronized boolean addEntry( final AclEntry entry ) {
- if( entry.getPrincipal() == null ) {
- throw new IllegalArgumentException( "Entry principal cannot be null" );
- }
+ public boolean addEntry( final AclEntry entry ) {
+ return Synchronizer.synchronize(lock, () -> {
+ if( entry.getPrincipal() == null ) {
+ throw new IllegalArgumentException( "Entry principal cannot be null" );
+ }
- if( hasEntry( entry ) ) {
- return false;
- }
+ if( hasEntry( entry ) ) {
+ return false;
+ }
- m_entries.add( entry );
+ m_entries.add( entry );
- return true;
+ return true;
+ });
}
/** {@inheritDoc} */
@Override
- public synchronized boolean removeEntry( final AclEntry entry ) {
- return m_entries.remove( entry );
+ public boolean removeEntry( final AclEntry entry ) {
+ return Synchronizer.synchronize(lock, () -> m_entries.remove( entry ));
}
/** {@inheritDoc} */
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/auth/authorize/DefaultGroupManager.java b/jspwiki-main/src/main/java/org/apache/wiki/auth/authorize/DefaultGroupManager.java
index fccf6c1cb0..c635c1c2fc 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/auth/authorize/DefaultGroupManager.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/auth/authorize/DefaultGroupManager.java
@@ -40,15 +40,17 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.event.WikiSecurityEvent;
import org.apache.wiki.ui.InputValidator;
import org.apache.wiki.util.ClassUtil;
+import org.apache.wiki.util.Synchronizer;
import java.security.Principal;
import java.util.Arrays;
-import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.StringTokenizer;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -73,7 +75,17 @@ public class DefaultGroupManager implements GroupManager, Authorizer, WikiEventL
private GroupDatabase m_groupDatabase;
/** Map with GroupPrincipals as keys, and Groups as values */
- private final Map< Principal, Group > m_groups = new HashMap<>();
+ private final Map< Principal, Group > m_groups = new ConcurrentHashMap<>();
+
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock wikiEventListenerLock = new ReentrantLock();
/** {@inheritDoc} */
@Override
@@ -152,12 +164,10 @@ public void initialize( final Engine engine, final Properties props ) throws Wik
// Load all groups from the database into the cache
final Group[] groups = m_groupDatabase.groups();
- synchronized( m_groups ) {
- for( final Group group : groups ) {
- // Add new group to cache; fire GROUP_ADD event
- m_groups.put( group.getPrincipal(), group );
- fireEvent( WikiSecurityEvent.GROUP_ADD, group );
- }
+ for( final Group group : groups ) {
+ // Add new group to cache; fire GROUP_ADD event
+ m_groups.put( group.getPrincipal(), group );
+ fireEvent( WikiSecurityEvent.GROUP_ADD, group );
}
// Make the GroupManager listen for WikiEvents (WikiSecurityEvents for changed user profiles)
@@ -253,9 +263,7 @@ public void removeGroup( final String index ) throws WikiSecurityException {
// Delete the group
// TODO: need rollback procedure
- synchronized( m_groups ) {
- m_groups.remove( group.getPrincipal() );
- }
+ m_groups.remove( group.getPrincipal() );
m_groupDatabase.delete( group );
fireEvent( WikiSecurityEvent.GROUP_REMOVE, group );
}
@@ -269,9 +277,7 @@ public void setGroup( final Session session, final Group group ) throws WikiSecu
final Group oldGroup = m_groups.get( group.getPrincipal() );
if( oldGroup != null ) {
fireEvent( WikiSecurityEvent.GROUP_REMOVE, oldGroup );
- synchronized( m_groups ) {
- m_groups.remove( oldGroup.getPrincipal() );
- }
+ m_groups.remove( oldGroup.getPrincipal() );
}
// Copy existing modifier info & timestamps
@@ -283,13 +289,11 @@ public void setGroup( final Session session, final Group group ) throws WikiSecu
}
// Add new group to cache; announce GROUP_ADD event
- synchronized( m_groups ) {
- m_groups.put( group.getPrincipal(), group );
- }
+ m_groups.put( group.getPrincipal(), group );
fireEvent( WikiSecurityEvent.GROUP_ADD, group );
// Save the group to back-end database; if it fails, roll back to previous state. Note that the back-end
- // MUST timestammp the create/modify fields in the Group.
+ // MUST timestamp the create/modify fields in the Group.
try {
m_groupDatabase.save( group, session.getUserPrincipal() );
}
@@ -300,9 +304,7 @@ public void setGroup( final Session session, final Group group ) throws WikiSecu
// Restore previous version, re-throw...
fireEvent( WikiSecurityEvent.GROUP_REMOVE, group );
fireEvent( WikiSecurityEvent.GROUP_ADD, oldGroup );
- synchronized( m_groups ) {
- m_groups.put( oldGroup.getPrincipal(), oldGroup );
- }
+ m_groups.put( oldGroup.getPrincipal(), oldGroup );
throw new WikiSecurityException( e.getMessage() + " (rolled back to previous version).", e );
}
// Re-throw security exception
@@ -367,14 +369,18 @@ protected String[] extractMembers( final String memberLine ) {
/** {@inheritDoc} */
@Override
- public synchronized void addWikiEventListener( final WikiEventListener listener ) {
- WikiEventManager.addWikiEventListener( this, listener );
+ public void addWikiEventListener( final WikiEventListener listener ) {
+ Synchronizer.synchronize( wikiEventListenerLock, () -> {
+ WikiEventManager.addWikiEventListener( this, listener );
+ } );
}
/** {@inheritDoc} */
@Override
- public synchronized void removeWikiEventListener( final WikiEventListener listener ) {
- WikiEventManager.removeWikiEventListener( this, listener );
+ public void removeWikiEventListener( final WikiEventListener listener ) {
+ Synchronizer.synchronize( wikiEventListenerLock, () -> {
+ WikiEventManager.removeWikiEventListener( this, listener );
+ } );
}
/** {@inheritDoc} */
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/auth/authorize/Group.java b/jspwiki-main/src/main/java/org/apache/wiki/auth/authorize/Group.java
index fc0b733abb..05c172be49 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/auth/authorize/Group.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/auth/authorize/Group.java
@@ -19,10 +19,13 @@ Licensed to the Apache Software Foundation (ASF) under one
package org.apache.wiki.auth.authorize;
import org.apache.wiki.auth.GroupPrincipal;
+import org.apache.wiki.util.Synchronizer;
import java.security.Principal;
import java.util.Date;
import java.util.Vector;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReentrantLock;
/**
*
@@ -77,6 +80,16 @@ public class Group {
private final String m_wiki;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* Protected constructor to prevent direct instantiation except by other
* package members. Callers should use
@@ -98,20 +111,22 @@ protected Group( final String name, final String wiki ) {
* @param user the principal to add
* @return true if the operation was successful
*/
- public synchronized boolean add( final Principal user ) {
- if( isMember( user ) ) {
- return false;
- }
-
- m_members.add( user );
- return true;
+ public boolean add( final Principal user ) {
+ return Synchronizer.synchronize(lock, () -> {
+ if( isMember( user ) ) {
+ return false;
+ }
+
+ m_members.add( user );
+ return true;
+ });
}
/**
* Clears all Principals from the group list.
*/
- public synchronized void clear() {
- m_members.clear();
+ public void clear() {
+ Synchronizer.synchronize(lock, m_members::clear);
}
/**
@@ -156,8 +171,8 @@ public int hashCode() {
*
* @return the creation date
*/
- public synchronized Date getCreated() {
- return m_created;
+ public Date getCreated() {
+ return Synchronizer.synchronize(lock, () -> m_created);
}
/**
@@ -165,8 +180,8 @@ public synchronized Date getCreated() {
*
* @return the creator
*/
- public final synchronized String getCreator() {
- return m_creator;
+ public final String getCreator() {
+ return Synchronizer.synchronize(lock, () -> m_creator);
}
/**
@@ -174,8 +189,8 @@ public final synchronized String getCreator() {
*
* @return the date and time of last modification
*/
- public synchronized Date getLastModified() {
- return m_modified;
+ public Date getLastModified() {
+ return Synchronizer.synchronize(lock, () -> m_modified);
}
/**
@@ -183,8 +198,8 @@ public synchronized Date getLastModified() {
*
* @return the modifier
*/
- public final synchronized String getModifier() {
- return m_modifier;
+ public final String getModifier() {
+ return Synchronizer.synchronize(lock, () -> m_modifier);
}
/**
@@ -240,14 +255,17 @@ public Principal[] members() {
* @param user the principal to remove
* @return true if the operation was successful
*/
- public synchronized boolean remove( Principal user ) {
- user = findMember( user.getName() );
- if( user == null )
- return false;
-
- m_members.remove( user );
-
- return true;
+ public boolean remove(Principal user) {
+ final AtomicReference userRef = new AtomicReference<>(user);
+ return Synchronizer.synchronize(lock, () -> {
+ Principal updatedUser = findMember(userRef.get().getName());
+ userRef.set(updatedUser);
+ if (updatedUser == null) {
+ return false;
+ }
+ m_members.remove(updatedUser);
+ return true;
+ });
}
/**
@@ -255,16 +273,20 @@ public synchronized boolean remove( Principal user ) {
*
* @param date the creation date
*/
- public synchronized void setCreated( final Date date ) {
- m_created = date;
+ public void setCreated( final Date date ) {
+ Synchronizer.synchronize(lock, () -> {
+ m_created = date;
+ });
}
/**
* Sets the creator of this Group.
* @param creator the creator
*/
- public final synchronized void setCreator( final String creator ) {
- this.m_creator = creator;
+ public final void setCreator( final String creator ) {
+ Synchronizer.synchronize(lock, () -> {
+ this.m_creator = creator;
+ });
}
/**
@@ -272,8 +294,10 @@ public final synchronized void setCreator( final String creator ) {
*
* @param date the last-modified date
*/
- public synchronized void setLastModified( final Date date ) {
- m_modified = date;
+ public void setLastModified( final Date date ) {
+ Synchronizer.synchronize(lock, () -> {
+ m_modified = date;
+ });
}
/**
@@ -281,8 +305,10 @@ public synchronized void setLastModified( final Date date ) {
*
* @param modifier the modifier
*/
- public final synchronized void setModifier( final String modifier ) {
- this.m_modifier = modifier;
+ public final void setModifier( final String modifier ) {
+ Synchronizer.synchronize(lock, () -> {
+ this.m_modifier = modifier;
+ });
}
/**
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/auth/login/CookieAuthenticationLoginModule.java b/jspwiki-main/src/main/java/org/apache/wiki/auth/login/CookieAuthenticationLoginModule.java
index 1d98e66cbd..28a9634d3b 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/auth/login/CookieAuthenticationLoginModule.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/auth/login/CookieAuthenticationLoginModule.java
@@ -24,6 +24,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.auth.WikiPrincipal;
import org.apache.wiki.util.FileUtil;
import org.apache.wiki.util.HttpUtil;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import javax.security.auth.callback.Callback;
@@ -44,6 +45,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.UUID;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -99,6 +101,16 @@ public class CookieAuthenticationLoginModule extends AbstractLoginModule {
*/
private static final long SCRUB_PERIOD = 60 * 60 * 1_000L; // In milliseconds
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private static final ReentrantLock lock = new ReentrantLock();
+
/**
* {@inheritDoc}
*
@@ -268,25 +280,28 @@ private static Cookie getLoginCookie( final String value ) {
* @param days number of days that the cookie will survive
* @param cookieDir cookie directory
*/
- private static synchronized void scrub( final int days, final File cookieDir ) {
- LOG.debug( "Scrubbing cookieDir..." );
- final File[] files = cookieDir.listFiles();
- final long obsoleteDateLimit = System.currentTimeMillis() - ( ( long )days + 1 ) * 24 * 60 * 60 * 1000L;
- int deleteCount = 0;
-
- for( int i = 0; i < files.length; i++ ) {
- final File f = files[ i ];
- final long lastModified = f.lastModified();
- if( lastModified < obsoleteDateLimit ) {
- if( f.delete() ) {
- deleteCount++;
- } else {
- LOG.debug( "Error deleting cookie login with index {}", i );
+ private static void scrub( final int days, final File cookieDir ) {
+ Synchronizer.synchronize(lock, () -> {
+ LOG.debug( "Scrubbing cookieDir..." );
+ final File[] files = cookieDir.listFiles();
+ final long obsoleteDateLimit = System.currentTimeMillis() - ( ( long )days + 1 ) * 24 * 60 * 60 * 1000L;
+ int deleteCount = 0;
+
+ for( int i = 0; i < files.length; i++ ) {
+ final File f = files[ i ];
+ final long lastModified = f.lastModified();
+ if( lastModified < obsoleteDateLimit ) {
+ if( f.delete() ) {
+ deleteCount++;
+ } else {
+ LOG.debug( "Error deleting cookie login with index {}", i );
+ }
}
}
- }
- LOG.debug( "Removed {} obsolete cookie logins", deleteCount );
+ LOG.debug( "Removed {} obsolete cookie logins", deleteCount );
+ });
+
}
}
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/auth/permissions/PermissionFactory.java b/jspwiki-main/src/main/java/org/apache/wiki/auth/permissions/PermissionFactory.java
index 760c879f4b..37c6ec726f 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/auth/permissions/PermissionFactory.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/auth/permissions/PermissionFactory.java
@@ -19,8 +19,11 @@ Licensed to the Apache Software Foundation (ASF) under one
package org.apache.wiki.auth.permissions;
import org.apache.wiki.api.core.Page;
+import org.apache.wiki.util.Synchronizer;
import java.util.WeakHashMap;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -42,6 +45,16 @@ private PermissionFactory() {}
* cached page permissions.
*/
 private static final WeakHashMap<Integer, PagePermission> c_cache = new WeakHashMap<>();
+
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private static final ReentrantLock lock = new ReentrantLock();
/**
* Get a permission object for a WikiPage and a set of actions.
@@ -75,41 +88,28 @@ public static PagePermission getPagePermission( final String page, final String
* @param actions A list of actions.
* @return A PagePermission object.
*/
- private static PagePermission getPagePermission( final String wiki, String page, final String actions )
- {
- PagePermission perm;
- //
- // Since this is pretty speed-critical, we try to avoid the StringBuffer creation
- // overhead by XORring the hashcodes. However, if page name length > 32 characters,
- // this might result in two same hashCodes.
- // FIXME: Make this work for page-name lengths > 32 characters (use the alt implementation
- // if page.length() > 32?)
- // Alternative implementation below, but it does create an extra StringBuffer.
- //String key = wiki+":"+page+":"+actions;
-
+ private static PagePermission getPagePermission(final String wiki, String page, final String actions) {
final Integer key = wiki.hashCode() ^ page.hashCode() ^ actions.hashCode();
-
- //
- // It's fine if two threads update the cache, since the objects mean the same
- // thing anyway. And this avoids nasty blocking effects.
- //
- synchronized( c_cache )
- {
- perm = c_cache.get( key );
- }
-
- if( perm == null )
- {
- if( !wiki.isEmpty() ) page = wiki+":"+page;
- perm = new PagePermission( page, actions );
-
- synchronized( c_cache )
- {
- c_cache.put( key, perm );
- }
+        AtomicReference<PagePermission> permRef = new AtomicReference<>();
+
+ Synchronizer.synchronize(lock, () -> {
+ PagePermission perm = c_cache.get(key);
+ permRef.set(perm);
+ });
+
+ if (permRef.get() == null) {
+ if (!wiki.isEmpty()) page = wiki + ":" + page;
+ PagePermission newPerm = new PagePermission(page, actions);
+
+ Synchronizer.synchronize(lock, () -> {
+ c_cache.put(key, newPerm);
+ });
+
+ return newPerm;
}
-
- return perm;
+
+ return permRef.get();
}
+
}
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/auth/user/XMLUserDatabase.java b/jspwiki-main/src/main/java/org/apache/wiki/auth/user/XMLUserDatabase.java
index dd7c6ee582..0eebbbeeb3 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/auth/user/XMLUserDatabase.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/auth/user/XMLUserDatabase.java
@@ -25,6 +25,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.auth.WikiPrincipal;
import org.apache.wiki.auth.WikiSecurityException;
import org.apache.wiki.util.Serializer;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
@@ -52,6 +53,9 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Properties;
import java.util.SortedSet;
import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
@@ -91,27 +95,51 @@ public class XMLUserDatabase extends AbstractUserDatabase {
private Document c_dom;
private File c_file;
- /** {@inheritDoc} */
- @Override
- public synchronized void deleteByLoginName( final String loginName ) throws WikiSecurityException {
- if( c_dom == null ) {
- throw new WikiSecurityException( "FATAL: database does not exist" );
- }
-
- final NodeList users = c_dom.getDocumentElement().getElementsByTagName( USER_TAG );
- for( int i = 0; i < users.getLength(); i++ ) {
- final Element user = ( Element )users.item( i );
- if( user.getAttribute( LOGIN_NAME ).equals( loginName ) ) {
- c_dom.getDocumentElement().removeChild( user );
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
- // Commit to disk
- saveDOM();
- return;
+ /** {@inheritDoc} */
+ public void deleteByLoginName( final String loginName ) throws WikiSecurityException {
+ final AtomicBoolean userDeleted = new AtomicBoolean( false );
+ final AtomicReference< WikiSecurityException > exceptionRef = new AtomicReference<>();
+ Synchronizer.synchronize( lock, () -> {
+ try{
+ if( c_dom == null ) {
+ throw new WikiSecurityException( "FATAL: database does not exist" );
+ }
+ final NodeList users = c_dom.getDocumentElement().getElementsByTagName( USER_TAG );
+ for( int i = 0; i < users.getLength(); i++ ) {
+ final Element user = ( Element ) users.item( i );
+ if( user.getAttribute( LOGIN_NAME ).equals( loginName ) ) {
+ c_dom.getDocumentElement().removeChild( user );
+
+ // Commit to disk
+ saveDOM();
+ userDeleted.set( true );
+ return;
+ }
+ }
+ } catch( WikiSecurityException e ) {
+ exceptionRef.set( e );
}
+ } );
+ if( exceptionRef.get() != null ) {
+ throw exceptionRef.get();
+ }
+ if( !userDeleted.get() ) {
+ throw new NoSuchPrincipalException( "Not in database: " + loginName );
}
- throw new NoSuchPrincipalException( "Not in database: " + loginName );
}
+
+
/** {@inheritDoc} */
@Override
public UserProfile findByEmail( final String index ) throws NoSuchPrincipalException {
@@ -322,118 +350,123 @@ private void checkForRefresh() {
* @see org.apache.wiki.auth.user.UserDatabase#rename(String, String)
*/
@Override
- public synchronized void rename( final String loginName, final String newName) throws DuplicateUserException, WikiSecurityException {
- if( c_dom == null ) {
- LOG.fatal( "Could not rename profile '" + loginName + "'; database does not exist" );
- throw new IllegalStateException( "FATAL: database does not exist" );
- }
- checkForRefresh();
+ public void rename( final String loginName, final String newName) throws DuplicateUserException, WikiSecurityException {
+ Synchronizer.synchronize(lock, () -> {
+ if (c_dom == null) {
+ LOG.fatal("Could not rename profile '" + loginName + "'; database does not exist");
+ throw new IllegalStateException("FATAL: database does not exist");
+ }
+ checkForRefresh();
- // Get the existing user; if not found, throws NoSuchPrincipalException
- final UserProfile profile = findByLoginName( loginName );
+ // Get the existing user; if not found, throws NoSuchPrincipalException
+ final UserProfile profile = findByLoginName(loginName);
- // Get user with the proposed name; if found, it's a collision
- try {
- final UserProfile otherProfile = findByLoginName( newName );
- if( otherProfile != null ) {
- throw new DuplicateUserException( "security.error.cannot.rename", newName );
+ // Get user with the proposed name; if found, it's a collision
+ try {
+ final UserProfile otherProfile = findByLoginName(newName);
+ if (otherProfile != null) {
+ throw new DuplicateUserException("security.error.cannot.rename", newName);
+ }
+ } catch (final NoSuchPrincipalException e) {
+ // Good! That means it's safe to save using the new name
}
- } catch( final NoSuchPrincipalException e ) {
- // Good! That means it's safe to save using the new name
- }
- // Find the user with the old login id attribute, and change it
- final NodeList users = c_dom.getElementsByTagName( USER_TAG );
- for( int i = 0; i < users.getLength(); i++ ) {
- final Element user = ( Element )users.item( i );
- if( user.getAttribute( LOGIN_NAME ).equals( loginName ) ) {
- final DateFormat c_format = new SimpleDateFormat( DATE_FORMAT );
- final Date modDate = new Date( System.currentTimeMillis() );
- setAttribute( user, LOGIN_NAME, newName );
- setAttribute( user, LAST_MODIFIED, c_format.format( modDate ) );
- profile.setLoginName( newName );
- profile.setLastModified( modDate );
- break;
+ // Find the user with the old login id attribute, and change it
+ final NodeList users = c_dom.getElementsByTagName(USER_TAG);
+ for (int i = 0; i < users.getLength(); i++) {
+ final Element user = (Element) users.item(i);
+ if (user.getAttribute(LOGIN_NAME).equals(loginName)) {
+ final DateFormat c_format = new SimpleDateFormat(DATE_FORMAT);
+ final Date modDate = new Date(System.currentTimeMillis());
+ setAttribute(user, LOGIN_NAME, newName);
+ setAttribute(user, LAST_MODIFIED, c_format.format(modDate));
+ profile.setLoginName(newName);
+ profile.setLastModified(modDate);
+ break;
+ }
}
- }
- // Commit to disk
- saveDOM();
+ // Commit to disk
+ saveDOM();
+ });
+
}
/** {@inheritDoc} */
@Override
- public synchronized void save( final UserProfile profile ) throws WikiSecurityException {
- if ( c_dom == null ) {
- LOG.fatal( "Could not save profile " + profile + " database does not exist" );
- throw new IllegalStateException( "FATAL: database does not exist" );
- }
+ public void save( final UserProfile profile ) throws WikiSecurityException {
+ Synchronizer.synchronize(lock, () -> {
+ if ( c_dom == null ) {
+ LOG.fatal( "Could not save profile " + profile + " database does not exist" );
+ throw new IllegalStateException( "FATAL: database does not exist" );
+ }
- checkForRefresh();
+ checkForRefresh();
- final DateFormat c_format = new SimpleDateFormat( DATE_FORMAT );
- final String index = profile.getLoginName();
- final NodeList users = c_dom.getElementsByTagName( USER_TAG );
- Element user = IntStream.range(0, users.getLength()).mapToObj(i -> (Element) users.item(i)).filter(currentUser -> currentUser.getAttribute(LOGIN_NAME).equals(index)).findFirst().orElse(null);
-
- boolean isNew = false;
-
- final Date modDate = new Date( System.currentTimeMillis() );
- if( user == null ) {
- // Create new user node
- profile.setCreated( modDate );
- LOG.info( "Creating new user " + index );
- user = c_dom.createElement( USER_TAG );
- c_dom.getDocumentElement().appendChild( user );
- setAttribute( user, CREATED, c_format.format( profile.getCreated() ) );
- isNew = true;
- } else {
- // To update existing user node, delete old attributes first...
- final NodeList attributes = user.getElementsByTagName( ATTRIBUTES_TAG );
- for( int i = 0; i < attributes.getLength(); i++ ) {
- user.removeChild( attributes.item( i ) );
+ final DateFormat c_format = new SimpleDateFormat( DATE_FORMAT );
+ final String index = profile.getLoginName();
+ final NodeList users = c_dom.getElementsByTagName( USER_TAG );
+ Element user = IntStream.range(0, users.getLength()).mapToObj(i -> (Element) users.item(i)).filter(currentUser -> currentUser.getAttribute(LOGIN_NAME).equals(index)).findFirst().orElse(null);
+
+ boolean isNew = false;
+
+ final Date modDate = new Date( System.currentTimeMillis() );
+ if( user == null ) {
+ // Create new user node
+ profile.setCreated( modDate );
+ LOG.info( "Creating new user " + index );
+ user = c_dom.createElement( USER_TAG );
+ c_dom.getDocumentElement().appendChild( user );
+ setAttribute( user, CREATED, c_format.format( profile.getCreated() ) );
+ isNew = true;
+ } else {
+ // To update existing user node, delete old attributes first...
+ final NodeList attributes = user.getElementsByTagName( ATTRIBUTES_TAG );
+ for( int i = 0; i < attributes.getLength(); i++ ) {
+ user.removeChild( attributes.item( i ) );
+ }
}
- }
- setAttribute( user, UID, profile.getUid() );
- setAttribute( user, LAST_MODIFIED, c_format.format( modDate ) );
- setAttribute( user, LOGIN_NAME, profile.getLoginName() );
- setAttribute( user, FULL_NAME, profile.getFullname() );
- setAttribute( user, WIKI_NAME, profile.getWikiName() );
- setAttribute( user, EMAIL, profile.getEmail() );
- final Date lockExpiry = profile.getLockExpiry();
- setAttribute( user, LOCK_EXPIRY, lockExpiry == null ? "" : c_format.format( lockExpiry ) );
-
- // Hash and save the new password if it's different from old one
- final String newPassword = profile.getPassword();
- if( newPassword != null && !newPassword.equals( "" ) ) {
- final String oldPassword = user.getAttribute( PASSWORD );
- if( !oldPassword.equals( newPassword ) ) {
- setAttribute( user, PASSWORD, getHash( newPassword ) );
+ setAttribute( user, UID, profile.getUid() );
+ setAttribute( user, LAST_MODIFIED, c_format.format( modDate ) );
+ setAttribute( user, LOGIN_NAME, profile.getLoginName() );
+ setAttribute( user, FULL_NAME, profile.getFullname() );
+ setAttribute( user, WIKI_NAME, profile.getWikiName() );
+ setAttribute( user, EMAIL, profile.getEmail() );
+ final Date lockExpiry = profile.getLockExpiry();
+ setAttribute( user, LOCK_EXPIRY, lockExpiry == null ? "" : c_format.format( lockExpiry ) );
+
+ // Hash and save the new password if it's different from old one
+ final String newPassword = profile.getPassword();
+ if( newPassword != null && !newPassword.isEmpty() ) {
+ final String oldPassword = user.getAttribute( PASSWORD );
+ if( !oldPassword.equals( newPassword ) ) {
+ setAttribute( user, PASSWORD, getHash( newPassword ) );
+ }
}
- }
- // Save the attributes as Base64 string
- if(!profile.getAttributes().isEmpty()) {
- try {
- final String encodedAttributes = Serializer.serializeToBase64( profile.getAttributes() );
- final Element attributes = c_dom.createElement( ATTRIBUTES_TAG );
- user.appendChild( attributes );
- final Text value = c_dom.createTextNode( encodedAttributes );
- attributes.appendChild( value );
- } catch( final IOException e ) {
- throw new WikiSecurityException( "Could not save user profile attribute. Reason: " + e.getMessage(), e );
+ // Save the attributes as Base64 string
+            if( !profile.getAttributes().isEmpty()) {
+ try {
+ final String encodedAttributes = Serializer.serializeToBase64( profile.getAttributes() );
+ final Element attributes = c_dom.createElement( ATTRIBUTES_TAG );
+ user.appendChild( attributes );
+ final Text value = c_dom.createTextNode( encodedAttributes );
+ attributes.appendChild( value );
+ } catch( final IOException e ) {
+ throw new WikiSecurityException( "Could not save user profile attribute. Reason: " + e.getMessage(), e );
+ }
}
- }
- // Set the profile timestamps
- if( isNew ) {
- profile.setCreated( modDate );
- }
- profile.setLastModified( modDate );
+ // Set the profile timestamps
+ if( isNew ) {
+ profile.setCreated( modDate );
+ }
+ profile.setLastModified( modDate );
- // Commit to disk
- saveDOM();
+ // Commit to disk
+ saveDOM();
+ });
}
/**
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/diff/ContextualDiffProvider.java b/jspwiki-main/src/main/java/org/apache/wiki/diff/ContextualDiffProvider.java
index 32d96bce18..c79c94e6bd 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/diff/ContextualDiffProvider.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/diff/ContextualDiffProvider.java
@@ -24,6 +24,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.api.core.Context;
import org.apache.wiki.api.core.Engine;
import org.apache.wiki.api.exceptions.NoRequiredPropertyException;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import org.suigeneris.jrcs.diff.Diff;
import org.suigeneris.jrcs.diff.DifferentiationFailedException;
@@ -42,13 +43,12 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.List;
import java.util.Properties;
import java.util.StringTokenizer;
+import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
/**
- * A seriously better diff provider, which highlights changes word-by-word using CSS.
- *
- * Suggested by John Volkar.
+ * A seriously better diff provider, which highlights changes word-by-word using CSS. Suggested by John Volkar.
*/
public class ContextualDiffProvider implements DiffProvider {
@@ -92,12 +92,15 @@ public class ContextualDiffProvider implements DiffProvider {
private static final int LIMIT_MAX_VALUE = (Integer.MAX_VALUE /2) - 1;
private int m_unchangedContextLimit = LIMIT_MAX_VALUE;
-
/**
- * Constructs this provider.
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
*/
- public ContextualDiffProvider()
- {}
+ private final ReentrantLock lock = new ReentrantLock();
/**
* @see org.apache.wiki.api.providers.WikiProvider#getProviderInfo()
@@ -137,44 +140,45 @@ public void initialize( final Engine engine, final Properties properties) throws
* {@inheritDoc}
*/
@Override
- public synchronized String makeDiffHtml( final Context ctx, final String wikiOld, final String wikiNew ) {
- //
-        // Sequencing handles lineterminator to <br /> and every-other consequtive space to a &nbsp;
- //
- final String[] alpha = sequence( TextUtil.replaceEntities( wikiOld ) );
- final String[] beta = sequence( TextUtil.replaceEntities( wikiNew ) );
-
- final Revision rev;
- try {
- rev = Diff.diff( alpha, beta, new MyersDiff() );
- } catch( final DifferentiationFailedException dfe ) {
- LOG.error( "Diff generation failed", dfe );
- return "Error while creating version diff.";
- }
-
- final int revSize = rev.size();
- final StringBuffer sb = new StringBuffer();
-
- sb.append( DIFF_START );
+ public String makeDiffHtml( final Context ctx, final String wikiOld, final String wikiNew ) {
+ return Synchronizer.synchronize(lock, () -> {
+ //
+            // Sequencing handles lineterminator to <br /> and every-other consecutive space to a &nbsp;
+ //
+ final String[] alpha = sequence( TextUtil.replaceEntities( wikiOld ) );
+ final String[] beta = sequence( TextUtil.replaceEntities( wikiNew ) );
+
+ final Revision rev;
+ try {
+ rev = Diff.diff( alpha, beta, new MyersDiff() );
+ } catch( final DifferentiationFailedException dfe ) {
+ LOG.error( "Diff generation failed", dfe );
+ return "Error while creating version diff.";
+ }
- //
- // The MyersDiff is a bit dumb by converting a single line multi-word diff into a series
- // of Changes. The ChangeMerger pulls them together again...
- //
- final ChangeMerger cm = new ChangeMerger( sb, alpha, revSize );
- rev.accept( cm );
- cm.shutdown();
- sb.append( DIFF_END );
- return sb.toString();
+ final int revSize = rev.size();
+ final StringBuffer sb = new StringBuffer();
+
+ sb.append( DIFF_START );
+
+ //
+ // The MyersDiff is a bit dumb by converting a single line multi-word diff into a series
+ // of Changes. The ChangeMerger pulls them together again...
+ //
+ final ChangeMerger cm = new ChangeMerger( sb, alpha, revSize );
+ rev.accept( cm );
+ cm.shutdown();
+ sb.append( DIFF_END );
+ return sb.toString();
+ });
}
/**
* Take the string and create an array from it, split it first on newlines, making
* sure to preserve the newlines in the elements, split each resulting element on
* spaces, preserving the spaces.
- *
- * All this preseving of newlines and spaces is so the wikitext when diffed will have fidelity
- * to it's original form. As a side affect we see edits of purely whilespace.
+ * All this preserving of newlines and spaces is so the wikitext when diffed will have fidelity
+ * to its original form. As a side effect we see edits of purely whitespace.
*/
private String[] sequence( final String wikiText ) {
final String[] linesArray = Diff.stringToArray( wikiText );
@@ -183,7 +187,7 @@ private String[] sequence( final String wikiText ) {
String lastToken = null;
String token;
- // StringTokenizer might be discouraged but it still is perfect here...
+ // StringTokenizer might be discouraged, but it is still perfect here...
for( final StringTokenizer st = new StringTokenizer( line, " ", true ); st.hasMoreTokens(); ) {
token = st.nextToken();
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/filters/PageEventFilter.java b/jspwiki-main/src/main/java/org/apache/wiki/filters/PageEventFilter.java
index 299e25b62f..5ffe768bcd 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/filters/PageEventFilter.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/filters/PageEventFilter.java
@@ -26,8 +26,10 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.event.WikiEventListener;
import org.apache.wiki.event.WikiEventManager;
import org.apache.wiki.event.WikiPageEvent;
+import org.apache.wiki.util.Synchronizer;
import java.util.Properties;
+import java.util.concurrent.locks.ReentrantLock;
/**
* Fires WikiPageEvents for page events.
@@ -50,6 +52,16 @@ Licensed to the Apache Software Foundation (ASF) under one
*/
public class PageEventFilter extends BasePageFilter {
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* Called whenever a new PageFilter is instantiated and reset.
*/
@@ -112,8 +124,10 @@ public void postSave( final Context wikiContext, final String content ) {
*
* @param listener the event listener
*/
- public final synchronized void addWikiEventListener( final WikiEventListener listener ) {
- WikiEventManager.addWikiEventListener( this, listener );
+ public final void addWikiEventListener( final WikiEventListener listener ) {
+ Synchronizer.synchronize(lock, () -> {
+ WikiEventManager.addWikiEventListener( this, listener );
+ });
}
/**
@@ -121,8 +135,10 @@ public final synchronized void addWikiEventListener( final WikiEventListener lis
*
* @param listener the event listener
*/
- public final synchronized void removeWikiEventListener( final WikiEventListener listener ) {
- WikiEventManager.removeWikiEventListener( this, listener );
+ public final void removeWikiEventListener( final WikiEventListener listener ) {
+ Synchronizer.synchronize(lock, () -> {
+ WikiEventManager.removeWikiEventListener( this, listener );
+ });
}
/**
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/filters/SpamFilter.java b/jspwiki-main/src/main/java/org/apache/wiki/filters/SpamFilter.java
index 1160d8b744..434834ca52 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/filters/SpamFilter.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/filters/SpamFilter.java
@@ -46,6 +46,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.ui.EditorManager;
import org.apache.wiki.util.FileUtil;
import org.apache.wiki.util.HttpUtil;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import org.suigeneris.jrcs.diff.Diff;
import org.suigeneris.jrcs.diff.DifferentiationFailedException;
@@ -77,6 +78,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.StringTokenizer;
import java.util.Vector;
import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -235,13 +237,22 @@ public class SpamFilter extends BasePageFilter {
private static String c_hashName;
private static long c_lastUpdate;
- /** The HASH_DELAY value is a maximum amount of time that an user can keep
+ /** The HASH_DELAY value is a maximum amount of time that a user can keep
* a session open, because after the value has expired, we will invent a new
- * hash field name. By default this is {@value} hours, which should be ample
+ * hash field name. By default, this is {@value} hours, which should be ample
* time for someone.
*/
private static final long HASH_DELAY = 24;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
/**
* {@inheritDoc}
@@ -435,88 +446,90 @@ private Collection< Pattern > parseBlacklist( final String list ) {
* @param change page change
* @throws RedirectException spam filter rejects the page change.
*/
- private synchronized void checkSinglePageChange(final Context context, final Change change )
+ private void checkSinglePageChange(final Context context, final Change change )
throws RedirectException {
- final HttpServletRequest req = context.getHttpRequest();
+ Synchronizer.synchronize(lock, () -> {
+ final HttpServletRequest req = context.getHttpRequest();
- if( req != null ) {
- final String addr = HttpUtil.getRemoteAddress( req );
- int hostCounter = 0;
- int changeCounter = 0;
+ if( req != null ) {
+ final String addr = HttpUtil.getRemoteAddress( req );
+ int hostCounter = 0;
+ int changeCounter = 0;
- LOG.debug( "Change is " + change.m_change );
+ LOG.debug( "Change is " + change.m_change );
- final long time = System.currentTimeMillis() - 60*1000L; // 1 minute
+ final long time = System.currentTimeMillis() - 60*1000L; // 1 minute
- for( final Iterator< Host > i = m_lastModifications.iterator(); i.hasNext(); ) {
- final Host host = i.next();
+ for( final Iterator< Host > i = m_lastModifications.iterator(); i.hasNext(); ) {
+ final Host host = i.next();
- // Check if this item is invalid
- if( host.getAddedTime() < time ) {
- LOG.debug( "Removed host " + host.getAddress() + " from modification queue (expired)" );
- i.remove();
- continue;
- }
+ // Check if this item is invalid
+ if( host.getAddedTime() < time ) {
+ LOG.debug( "Removed host " + host.getAddress() + " from modification queue (expired)" );
+ i.remove();
+ continue;
+ }
- // Check if this IP address has been seen before
- if( host.getAddress().equals( addr ) ) {
- hostCounter++;
- }
+ // Check if this IP address has been seen before
+ if( host.getAddress().equals( addr ) ) {
+ hostCounter++;
+ }
- // Check, if this change has been seen before
- if( host.getChange() != null && host.getChange().equals( change ) ) {
- changeCounter++;
+ // Check, if this change has been seen before
+ if( host.getChange() != null && host.getChange().equals( change ) ) {
+ changeCounter++;
+ }
}
- }
- // Now, let's check against the limits.
- if( hostCounter >= m_limitSinglePageChanges ) {
- final Host host = new Host( addr, null );
- m_temporaryBanList.add( host );
+ // Now, let's check against the limits.
+ if( hostCounter >= m_limitSinglePageChanges ) {
+ final Host host = new Host( addr, null );
+ m_temporaryBanList.add( host );
- final String uid = log( context, REJECT, REASON_TOO_MANY_MODIFICATIONS, change.m_change );
- LOG.info( "SPAM:TooManyModifications (" + uid + "). Added host " + addr + " to temporary ban list for doing too many modifications/minute" );
- checkStrategy( context, "Herb says you look like a spammer, and I trust Herb! (Incident code " + uid + ")" );
- }
+ final String uid = log( context, REJECT, REASON_TOO_MANY_MODIFICATIONS, change.m_change );
+ LOG.info( "SPAM:TooManyModifications (" + uid + "). Added host " + addr + " to temporary ban list for doing too many modifications/minute" );
+ checkStrategy( context, "Herb says you look like a spammer, and I trust Herb! (Incident code " + uid + ")" );
+ }
- if( changeCounter >= m_limitSimilarChanges ) {
- final Host host = new Host( addr, null );
- m_temporaryBanList.add( host );
+ if( changeCounter >= m_limitSimilarChanges ) {
+ final Host host = new Host( addr, null );
+ m_temporaryBanList.add( host );
- final String uid = log( context, REJECT, REASON_SIMILAR_MODIFICATIONS, change.m_change );
- LOG.info( "SPAM:SimilarModifications (" + uid + "). Added host " + addr + " to temporary ban list for doing too many similar modifications" );
- checkStrategy( context, "Herb says you look like a spammer, and I trust Herb! (Incident code "+uid+")");
- }
+ final String uid = log( context, REJECT, REASON_SIMILAR_MODIFICATIONS, change.m_change );
+ LOG.info( "SPAM:SimilarModifications (" + uid + "). Added host " + addr + " to temporary ban list for doing too many similar modifications" );
+ checkStrategy( context, "Herb says you look like a spammer, and I trust Herb! (Incident code "+uid+")");
+ }
- // Calculate the number of links in the addition.
- String tstChange = change.toString();
- int urlCounter = 0;
- while( m_matcher.contains( tstChange,m_urlPattern ) ) {
- final MatchResult m = m_matcher.getMatch();
- tstChange = tstChange.substring( m.endOffset(0) );
- urlCounter++;
- }
+ // Calculate the number of links in the addition.
+ String tstChange = change.toString();
+ int urlCounter = 0;
+ while( m_matcher.contains( tstChange,m_urlPattern ) ) {
+ final MatchResult m = m_matcher.getMatch();
+ tstChange = tstChange.substring( m.endOffset(0) );
+ urlCounter++;
+ }
- if( urlCounter > m_maxUrls ) {
- final Host host = new Host( addr, null );
- m_temporaryBanList.add( host );
+ if( urlCounter > m_maxUrls ) {
+ final Host host = new Host( addr, null );
+ m_temporaryBanList.add( host );
- final String uid = log( context, REJECT, REASON_TOO_MANY_URLS, change.toString() );
- LOG.info( "SPAM:TooManyUrls (" + uid + "). Added host " + addr + " to temporary ban list for adding too many URLs" );
- checkStrategy( context, "Herb says you look like a spammer, and I trust Herb! (Incident code " + uid + ")" );
- }
+ final String uid = log( context, REJECT, REASON_TOO_MANY_URLS, change.toString() );
+ LOG.info( "SPAM:TooManyUrls (" + uid + "). Added host " + addr + " to temporary ban list for adding too many URLs" );
+ checkStrategy( context, "Herb says you look like a spammer, and I trust Herb! (Incident code " + uid + ")" );
+ }
- // Check bot trap
- checkBotTrap( context, change );
+ // Check bot trap
+ checkBotTrap( context, change );
- // Check UTF-8 mangling
- checkUTF8( context, change );
+ // Check UTF-8 mangling
+ checkUTF8( context, change );
- // Do Akismet check. This is good to be the last, because this is the most expensive operation.
- checkAkismet( context, change );
+ // Do Akismet check. This is good to be the last, because this is the most expensive operation.
+ checkAkismet( context, change );
- m_lastModifications.add( new Host( addr, change ) );
- }
+ m_lastModifications.add( new Host( addr, change ) );
+ }
+ });
}
@@ -630,16 +643,18 @@ private void checkUTF8( final Context context, final Change change ) throws Redi
}
/** Goes through the ban list and cleans away any host which has expired from it. */
- private synchronized void cleanBanList() {
- final long now = System.currentTimeMillis();
- for( final Iterator< Host > i = m_temporaryBanList.iterator(); i.hasNext(); ) {
- final Host host = i.next();
-
- if( host.getReleaseTime() < now ) {
- LOG.debug( "Removed host " + host.getAddress() + " from temporary ban list (expired)" );
- i.remove();
+ private void cleanBanList() {
+ Synchronizer.synchronize(lock, () -> {
+ final long now = System.currentTimeMillis();
+ for( final Iterator< Host > i = m_temporaryBanList.iterator(); i.hasNext(); ) {
+ final Host host = i.next();
+
+ if( host.getReleaseTime() < now ) {
+ LOG.debug( "Removed host " + host.getAddress() + " from temporary ban list (expired)" );
+ i.remove();
+ }
}
- }
+ });
}
/**
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/plugin/BugReportHandler.java b/jspwiki-main/src/main/java/org/apache/wiki/plugin/BugReportHandler.java
index f3d7eff0a0..03f9c8b59b 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/plugin/BugReportHandler.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/plugin/BugReportHandler.java
@@ -31,6 +31,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.pages.PageManager;
import org.apache.wiki.parser.MarkupParser;
import org.apache.wiki.preferences.Preferences;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import java.io.PrintWriter;
@@ -43,6 +44,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Properties;
import java.util.ResourceBundle;
import java.util.StringTokenizer;
+import java.util.concurrent.locks.ReentrantLock;
/**
* Provides a handler for bug reports. Still under construction.
@@ -73,6 +75,16 @@ public class BugReportHandler implements Plugin {
/** Parameter name for setting the page. Value is {@value}. */
public static final String PARAM_PAGE = "page";
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* {@inheritDoc}
*/
@@ -166,17 +178,18 @@ public String execute( final Context context, final Map< String, String > params
* Finds a free page name for adding the bug report. Tries to construct a page, and if it's found, adds a number to it
* and tries again.
*/
- private synchronized String findNextPage( final Context context, final String title, final String baseName ) {
- final String basicPageName = ( ( baseName != null ) ? baseName : "Bug" ) + MarkupParser.cleanLink( title );
- final Engine engine = context.getEngine();
-
- String pageName = basicPageName;
- long lastbug = 2;
- while( engine.getManager( PageManager.class ).wikiPageExists( pageName ) ) {
- pageName = basicPageName + lastbug++;
- }
-
- return pageName;
+ private String findNextPage( final Context context, final String title, final String baseName ) {
+ return Synchronizer.synchronize(lock, () -> {
+ final String basicPageName = ( ( baseName != null ) ? baseName : "Bug" ) + MarkupParser.cleanLink( title );
+ final Engine engine = context.getEngine();
+
+ String pageName = basicPageName;
+ long lastbug = 2;
+ while( engine.getManager( PageManager.class ).wikiPageExists( pageName ) ) {
+ pageName = basicPageName + lastbug++;
+ }
+ return pageName;
+ });
}
/**
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/plugin/PageViewPlugin.java b/jspwiki-main/src/main/java/org/apache/wiki/plugin/PageViewPlugin.java
index 850d7c210a..dad216e510 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/plugin/PageViewPlugin.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/plugin/PageViewPlugin.java
@@ -43,6 +43,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.event.WikiPageRenameEvent;
import org.apache.wiki.references.ReferenceManager;
import org.apache.wiki.render.RenderingManager;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import java.io.File;
@@ -59,6 +60,10 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Map.Entry;
import java.util.Properties;
import java.util.TreeMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -129,6 +134,16 @@ public class PageViewPlugin extends AbstractReferralPlugin implements Plugin, In
/** Constant for storage interval in seconds. */
private static final int STORAGE_INTERVAL = 60;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* Initialize the PageViewPlugin and its singleton.
*
@@ -137,12 +152,12 @@ public class PageViewPlugin extends AbstractReferralPlugin implements Plugin, In
@Override
public void initialize( final Engine engine ) {
LOG.info( "initializing PageViewPlugin" );
- synchronized( this ) {
+ Synchronizer.synchronize( lock, () -> {
if( c_singleton == null ) {
c_singleton = new PageViewManager();
}
c_singleton.initialize( engine );
- }
+ } );
}
/**
@@ -202,50 +217,54 @@ public final class PageViewManager implements WikiEventListener {
*
* @param engine The wiki engine.
*/
- public synchronized void initialize( final Engine engine ) {
- LOG.info( "initializing PageView Manager" );
- m_workDir = engine.getWorkDir();
- engine.addWikiEventListener( this );
- if( m_counters == null ) {
- // Load the counters into a collection
- m_storage = new Properties();
- m_counters = new TreeMap<>();
-
- loadCounters();
- }
+ public void initialize( final Engine engine ) {
+ Synchronizer.synchronize( lock, () -> {
+ LOG.info( "initializing PageView Manager" );
+ m_workDir = engine.getWorkDir();
+ engine.addWikiEventListener( this );
+ if( m_counters == null ) {
+ // Load the counters into a collection
+ m_storage = new Properties();
+ m_counters = new TreeMap<>();
+
+ loadCounters();
+ }
- // backup counters every 5 minutes
- if( m_pageCountSaveThread == null ) {
- m_pageCountSaveThread = new CounterSaveThread( engine, 5 * STORAGE_INTERVAL, this );
- m_pageCountSaveThread.start();
- }
+ // backup counters every 5 minutes
+ if( m_pageCountSaveThread == null ) {
+ m_pageCountSaveThread = new CounterSaveThread( engine, 5 * STORAGE_INTERVAL, this );
+ m_pageCountSaveThread.start();
+ }
- m_initialized = true;
+ m_initialized = true;
+ } );
}
/**
* Handle the shutdown event via the page counter thread.
*/
- private synchronized void handleShutdown() {
- LOG.info( "handleShutdown: The counter store thread was shut down." );
+ private void handleShutdown() {
+ Synchronizer.synchronize(lock, () -> {
+ LOG.info( "handleShutdown: The counter store thread was shut down." );
- cleanup();
+ cleanup();
- if( m_counters != null ) {
+ if( m_counters != null ) {
- m_dirty = true;
- storeCounters();
+ m_dirty = true;
+ storeCounters();
- m_counters.clear();
- m_counters = null;
+ m_counters.clear();
+ m_counters = null;
- m_storage.clear();
- m_storage = null;
- }
+ m_storage.clear();
+ m_storage = null;
+ }
- m_initialized = false;
+ m_initialized = false;
- m_pageCountSaveThread = null;
+ m_pageCountSaveThread = null;
+ });
}
/**
@@ -289,14 +308,15 @@ public void actionPerformed( final WikiEvent event ) {
public String execute( final Context context, final Map< String, String > params ) throws PluginException {
final Engine engine = context.getEngine();
final Page page = context.getPage();
- String result = STR_EMPTY;
+ final AtomicReference atomicResult = new AtomicReference<>(STR_EMPTY);
+ final AtomicReference pluginExceptionRef = new AtomicReference<>();
if( page != null ) {
// get parameters
final String pagename = page.getName();
String count = params.get( PARAM_COUNT );
final String show = params.get( PARAM_SHOW );
- int entries = TextUtil.parseIntParameter( params.get( PARAM_MAX_ENTRIES ), Integer.MAX_VALUE );
+ final AtomicInteger atomicEntries = new AtomicInteger(TextUtil.parseIntParameter(params.get(PARAM_MAX_ENTRIES), Integer.MAX_VALUE));
final int max = TextUtil.parseIntParameter( params.get( PARAM_MAX_COUNT ), Integer.MAX_VALUE );
final int min = TextUtil.parseIntParameter( params.get( PARAM_MIN_COUNT ), Integer.MIN_VALUE );
final String sort = params.get( PARAM_SORT );
@@ -305,22 +325,22 @@ public String execute( final Context context, final Map< String, String > params
final Pattern[] include = compileGlobs( PARAM_INCLUDE, params.get( PARAM_INCLUDE ) );
final Pattern[] refer = compileGlobs( PARAM_REFER, params.get( PARAM_REFER ) );
final PatternMatcher matcher = (null != exclude || null != include || null != refer) ? new Perl5Matcher() : null;
- boolean increment = false;
+ final AtomicBoolean atomicIncrement = new AtomicBoolean(false);
// increment counter?
if( STR_YES.equals( count ) ) {
- increment = true;
+ atomicIncrement.set(true);
} else {
count = null;
}
// default increment counter?
if( ( show == null || STR_NONE.equals( show ) ) && count == null ) {
- increment = true;
+ atomicIncrement.set(true);
}
// filter on referring pages?
- Collection< String > referrers = null;
+ final AtomicReference> atomicReferrers = new AtomicReference<>(null);
if( refer != null ) {
final ReferenceManager refManager = engine.getManager( ReferenceManager.class );
@@ -333,122 +353,134 @@ public String execute( final Context context, final Map< String, String > params
if( use ) {
final Collection< String > refs = engine.getManager( ReferenceManager.class ).findReferrers( name );
if( refs != null && !refs.isEmpty() ) {
- if( referrers == null ) {
- referrers = new HashSet<>();
+ if (atomicReferrers.get() == null) {
+ atomicReferrers.set(new HashSet<>());
}
- referrers.addAll( refs );
+ atomicReferrers.get().addAll(refs);
}
}
}
}
- synchronized( this ) {
- Counter counter = m_counters.get( pagename );
+ Synchronizer.synchronize( lock, () -> {
+ try {
- // only count in view mode, keep storage values in sync
- if( increment && ContextEnum.PAGE_VIEW.getRequestContext().equalsIgnoreCase( context.getRequestContext() ) ) {
- if( counter == null ) {
- counter = new Counter();
- m_counters.put( pagename, counter );
- }
- counter.increment();
- m_storage.setProperty( pagename, counter.toString() );
- m_dirty = true;
- }
+ Counter counter = m_counters.get( pagename );
- if( show == null || STR_NONE.equals( show ) ) {
- // nothing to show
-
- } else if( PARAM_COUNT.equals( show ) ) {
- // show page count
- if( counter == null ) {
- counter = new Counter();
- m_counters.put( pagename, counter );
+ // only count in view mode, keep storage values in sync
+ if( atomicIncrement.get() && ContextEnum.PAGE_VIEW.getRequestContext().equalsIgnoreCase( context.getRequestContext() ) ) {
+ if( counter == null ) {
+ counter = new Counter();
+ m_counters.put( pagename, counter );
+ }
+ counter.increment();
m_storage.setProperty( pagename, counter.toString() );
m_dirty = true;
}
- result = counter.toString();
-
- } else if( body != null && !body.isEmpty() && STR_LIST.equals( show ) ) {
- // show list of counts
- String header = STR_EMPTY;
- String line = body;
- String footer = STR_EMPTY;
- int start = body.indexOf( STR_SEPARATOR );
-
- // split body into header, line, footer on ---- separator
- if( 0 < start ) {
- header = body.substring( 0, start );
- start = skipWhitespace( start + STR_SEPARATOR.length(), body );
- int end = body.indexOf( STR_SEPARATOR, start );
- if( start >= end ) {
- line = body.substring( start );
- } else {
- line = body.substring( start, end );
- end = skipWhitespace( end + STR_SEPARATOR.length(), body );
- footer = body.substring( end );
+
+ if( show == null || STR_NONE.equals( show ) ) {
+ // nothing to show
+
+ } else if( PARAM_COUNT.equals( show ) ) {
+ // show page count
+ if( counter == null ) {
+ counter = new Counter();
+ m_counters.put( pagename, counter );
+ m_storage.setProperty( pagename, counter.toString() );
+ m_dirty = true;
+ }
+ atomicResult.set(counter.toString());
+
+ } else if( body != null && 0 < body.length() && STR_LIST.equals( show ) ) {
+ // show list of counts
+ String header = STR_EMPTY;
+ String line = body;
+ String footer = STR_EMPTY;
+ int start = body.indexOf( STR_SEPARATOR );
+
+ // split body into header, line, footer on ---- separator
+ if( 0 < start ) {
+ header = body.substring( 0, start );
+ start = skipWhitespace( start + STR_SEPARATOR.length(), body );
+ int end = body.indexOf( STR_SEPARATOR, start );
+ if( start >= end ) {
+ line = body.substring( start );
+ } else {
+ line = body.substring( start, end );
+ end = skipWhitespace( end + STR_SEPARATOR.length(), body );
+ footer = body.substring( end );
+ }
}
- }
- // sort on name or count?
- Map< String, Counter > sorted = m_counters;
- if( PARAM_COUNT.equals( sort ) ) {
- sorted = new TreeMap<>( m_compareCountDescending );
- sorted.putAll( m_counters );
- }
+ // sort on name or count?
+ Map< String, Counter > sorted = m_counters;
+ if( PARAM_COUNT.equals( sort ) ) {
+ sorted = new TreeMap<>( m_compareCountDescending );
+ sorted.putAll( m_counters );
+ }
- // build a messagebuffer with the list in wiki markup
- final StringBuffer buf = new StringBuffer( header );
- final MessageFormat fmt = new MessageFormat( line );
- final Object[] args = new Object[] { pagename, STR_EMPTY, STR_EMPTY };
- final Iterator< Entry< String, Counter > > iter = sorted.entrySet().iterator();
+ // build a messagebuffer with the list in wiki markup
+ final StringBuffer buf = new StringBuffer( header );
+ final MessageFormat fmt = new MessageFormat( line );
+ final Object[] args = new Object[] { pagename, STR_EMPTY, STR_EMPTY };
+ final Iterator< Entry< String, Counter > > iter = sorted.entrySet().iterator();
- while( 0 < entries && iter.hasNext() ) {
- final Entry< String, Counter > entry = iter.next();
- final String name = entry.getKey();
+ while( 0 < atomicEntries.get() && iter.hasNext() ) {
+ final Entry< String, Counter > entry = iter.next();
+ final String name = entry.getKey();
- // check minimum/maximum count
- final int value = entry.getValue().getValue();
- boolean use = min <= value && value <= max;
+ // check minimum/maximum count
+ final int value = entry.getValue().getValue();
+ boolean use = min <= value && value <= max;
- // did we specify a refer-to page?
- if( use && referrers != null ) {
- use = referrers.contains( name );
- }
+ // did we specify a refer-to page?
+ if( use && atomicReferrers.get() != null ) {
+ use = atomicReferrers.get().contains( name );
+ }
- // did we specify what pages to include?
- if( use && include != null ) {
- use = false;
+ // did we specify what pages to include?
+ if( use && include != null ) {
+ use = false;
- for( int n = 0; !use && n < include.length; n++ ) {
- use = matcher.matches( name, include[ n ] );
+ for( int n = 0; !use && n < include.length; n++ ) {
+ use = matcher.matches( name, include[ n ] );
+ }
}
- }
- // did we specify what pages to exclude?
- if( use && null != exclude ) {
- for( int n = 0; use && n < exclude.length; n++ ) {
- use = !matcher.matches( name, exclude[ n ] );
+ // did we specify what pages to exclude?
+ if( use && null != exclude ) {
+ for( int n = 0; use && n < exclude.length; n++ ) {
+ use = !matcher.matches( name, exclude[ n ] );
+ }
}
- }
- if( use ) {
- args[ 1 ] = engine.getManager( RenderingManager.class ).beautifyTitle( name );
- args[ 2 ] = entry.getValue();
+ if( use ) {
+ args[ 1 ] = engine.getManager( RenderingManager.class ).beautifyTitle( name );
+ args[ 2 ] = entry.getValue();
- fmt.format( args, buf, null );
+ fmt.format( args, buf, null );
- entries--;
+ atomicEntries.decrementAndGet();
+ }
}
- }
- buf.append( footer );
+ buf.append( footer );
- // let the engine render the list
- result = engine.getManager( RenderingManager.class ).textToHTML( context, buf.toString() );
+ // let the engine render the list
+ atomicResult.set(engine.getManager(RenderingManager.class).textToHTML(context, buf.toString()));
+ }
+ } catch (Exception e) {
+ if (e instanceof PluginException) {
+ pluginExceptionRef.set(new PluginException(e.getMessage()));
+ }
}
- }
+ } );
}
- return result;
+
+ if (pluginExceptionRef.get() != null) {
+ throw pluginExceptionRef.get();
+ }
+
+ return atomicResult.get();
}
/**
@@ -461,7 +493,7 @@ public String execute( final Context context, final Map< String, String > params
*/
private Pattern[] compileGlobs( final String name, final String value ) throws PluginException {
Pattern[] result = null;
- if( value != null && !value.isEmpty() && !STR_GLOBSTAR.equals( value ) ) {
+ if( value != null && 0 < value.length() && !STR_GLOBSTAR.equals( value ) ) {
try {
final PatternCompiler pc = new GlobCompiler();
final String[] ptrns = StringUtils.split( value, STR_COMMA );
@@ -509,7 +541,7 @@ int getCount( final Object key )
private void loadCounters() {
if( m_counters != null && m_storage != null ) {
LOG.info( "Loading counters." );
- synchronized( this ) {
+ Synchronizer.synchronize( lock, () -> {
try( final InputStream fis = Files.newInputStream( new File( m_workDir, COUNTER_PAGE ).toPath() ) ) {
m_storage.load( fis );
} catch( final IOException ioe ) {
@@ -520,9 +552,9 @@ private void loadCounters() {
for( final Entry< ?, ? > entry : m_storage.entrySet() ) {
m_counters.put( ( String )entry.getKey(), new Counter( ( String )entry.getValue() ) );
}
-
+
LOG.info( "Loaded " + m_counters.size() + " counter values." );
- }
+ } );
}
}
@@ -532,7 +564,7 @@ private void loadCounters() {
void storeCounters() {
if( m_counters != null && m_storage != null && m_dirty ) {
LOG.info( "Storing " + m_counters.size() + " counter values." );
- synchronized( this ) {
+ Synchronizer.synchronize( lock, () -> {
// Write out the collection of counters
try( final OutputStream fos = Files.newOutputStream( new File( m_workDir, COUNTER_PAGE ).toPath() ) ) {
m_storage.store( fos, "\n# The number of times each page has been viewed.\n# Do not modify.\n" );
@@ -542,7 +574,7 @@ void storeCounters() {
} catch( final IOException ioe ) {
LOG.error( "Couldn't store counters values: " + ioe.getMessage() );
}
- }
+ } );
}
}
@@ -552,9 +584,9 @@ void storeCounters() {
* @param thrd thread that can be the current background thread.
* @return boolean true if the thread is still the current background thread.
*/
- private synchronized boolean isRunning( final Thread thrd )
+ private boolean isRunning( final Thread thrd )
{
- return m_initialized && thrd == m_pageCountSaveThread;
+ return Synchronizer.synchronize( lock, () -> m_initialized && thrd == m_pageCountSaveThread );
}
}
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/providers/CachingAttachmentProvider.java b/jspwiki-main/src/main/java/org/apache/wiki/providers/CachingAttachmentProvider.java
index e8523d0717..cd45469dc8 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/providers/CachingAttachmentProvider.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/providers/CachingAttachmentProvider.java
@@ -33,6 +33,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.cache.CacheInfo;
import org.apache.wiki.cache.CachingManager;
import org.apache.wiki.util.ClassUtil;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import java.io.IOException;
@@ -45,6 +46,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.NoSuchElementException;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -62,6 +64,16 @@ public class CachingAttachmentProvider implements AttachmentProvider {
private boolean allRequested;
private final AtomicLong attachments = new AtomicLong( 0L );
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* {@inheritDoc}
*/
@@ -141,7 +153,7 @@ public List< Attachment > listAllChanged( final Date timestamp ) throws Provider
all = provider.listAllChanged( timestamp );
// Make sure that all attachments are in the cache.
- synchronized( this ) {
+ Synchronizer.synchronize(lock, () -> {
for( final Attachment att : all ) {
cachingManager.put( CachingManager.CACHE_ATTACHMENTS, att.getName(), att );
}
@@ -149,7 +161,7 @@ public List< Attachment > listAllChanged( final Date timestamp ) throws Provider
allRequested = true;
attachments.set( all.size() );
}
- }
+ });
} else {
final List< String > keys = cachingManager.keys( CachingManager.CACHE_ATTACHMENTS );
all = new ArrayList<>();
@@ -240,14 +252,17 @@ public void deleteAttachment( final Attachment att ) throws ProviderException {
* @return A plain string with all the above-mentioned values.
*/
@Override
- public synchronized String getProviderInfo() {
- final CacheInfo attCacheInfo = cachingManager.info( CachingManager.CACHE_ATTACHMENTS );
- final CacheInfo attColCacheInfo = cachingManager.info( CachingManager.CACHE_ATTACHMENTS_COLLECTION );
- return "Real provider: " + provider.getClass().getName() +
- ". Attachment cache hits: " + attCacheInfo.getHits() +
- ". Attachment cache misses: " + attCacheInfo.getMisses() +
- ". Attachment collection cache hits: " + attColCacheInfo.getHits() +
- ". Attachment collection cache misses: " + attColCacheInfo.getMisses();
+ public String getProviderInfo() {
+ return Synchronizer.synchronize(lock, () -> {
+ final CacheInfo attCacheInfo = cachingManager.info( CachingManager.CACHE_ATTACHMENTS );
+ final CacheInfo attColCacheInfo = cachingManager.info( CachingManager.CACHE_ATTACHMENTS_COLLECTION );
+ return "Real provider: " + provider.getClass().getName() +
+ ". Attachment cache hits: " + attCacheInfo.getHits() +
+ ". Attachment cache misses: " + attCacheInfo.getMisses() +
+ ". Attachment collection cache hits: " + attColCacheInfo.getHits() +
+ ". Attachment collection cache misses: " + attColCacheInfo.getMisses();
+ });
+
}
/**
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/providers/CachingProvider.java b/jspwiki-main/src/main/java/org/apache/wiki/providers/CachingProvider.java
index 5b1260f468..3aef37e446 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/providers/CachingProvider.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/providers/CachingProvider.java
@@ -35,6 +35,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.parser.MarkupParser;
import org.apache.wiki.render.RenderingManager;
import org.apache.wiki.util.ClassUtil;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import java.io.IOException;
@@ -45,6 +46,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Properties;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -70,12 +72,22 @@ public class CachingProvider implements PageProvider {
private boolean allRequested;
private final AtomicLong pages = new AtomicLong( 0L );
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* {@inheritDoc}
*/
@Override
public void initialize( final Engine engine, final Properties properties ) throws NoRequiredPropertyException, IOException {
- LOG.debug( "Initing CachingProvider" );
+ LOG.debug( "Initializing CachingProvider" );
// engine is used for getting the search engine
this.engine = engine;
@@ -220,7 +232,7 @@ private String getTextFromCache( final String pageName ) throws ProviderExceptio
*/
@Override
public void putPageText( final Page page, final String text ) throws ProviderException {
- synchronized( this ) {
+ Synchronizer.synchronize(lock, () -> {
provider.putPageText( page, text );
page.setLastModified( new Date() );
@@ -230,7 +242,7 @@ public void putPageText( final Page page, final String text ) throws ProviderExc
cachingManager.remove( CachingManager.CACHE_PAGES_HISTORY, page.getName() );
getPageInfoFromCache( page.getName() );
- }
+ });
pages.incrementAndGet();
}
@@ -243,12 +255,12 @@ public Collection< Page > getAllPages() throws ProviderException {
if ( !allRequested ) {
all = provider.getAllPages();
// Make sure that all pages are in the cache.
- synchronized( this ) {
+ Synchronizer.synchronize(lock, () -> {
for( final Page p : all ) {
cachingManager.put( CachingManager.CACHE_PAGES, p.getName(), p );
}
allRequested = true;
- }
+ });
pages.set( all.size() );
} else {
final List< String > keys = cachingManager.keys( CachingManager.CACHE_PAGES );
@@ -349,14 +361,16 @@ public List< Page > getVersionHistory( final String pageName) throws ProviderExc
* @return A plain string with all the above-mentioned values.
*/
@Override
- public synchronized String getProviderInfo() {
- final CacheInfo pageCacheInfo = cachingManager.info( CachingManager.CACHE_PAGES );
- final CacheInfo pageHistoryCacheInfo = cachingManager.info( CachingManager.CACHE_PAGES_HISTORY );
- return "Real provider: " + provider.getClass().getName()+
- ". Page cache hits: " + pageCacheInfo.getHits() +
- ". Page cache misses: " + pageCacheInfo.getMisses() +
- ". History cache hits: " + pageHistoryCacheInfo.getHits() +
- ". History cache misses: " + pageHistoryCacheInfo.getMisses();
+ public String getProviderInfo() {
+ return Synchronizer.synchronize(lock, () -> {
+ final CacheInfo pageCacheInfo = cachingManager.info( CachingManager.CACHE_PAGES );
+ final CacheInfo pageHistoryCacheInfo = cachingManager.info( CachingManager.CACHE_PAGES_HISTORY );
+ return "Real provider: " + provider.getClass().getName()+
+ ". Page cache hits: " + pageCacheInfo.getHits() +
+ ". Page cache misses: " + pageCacheInfo.getMisses() +
+ ". History cache hits: " + pageHistoryCacheInfo.getHits() +
+ ". History cache misses: " + pageHistoryCacheInfo.getMisses();
+ });
}
/**
@@ -365,7 +379,7 @@ public synchronized String getProviderInfo() {
@Override
public void deleteVersion( final String pageName, final int version ) throws ProviderException {
// Luckily, this is such a rare operation it is okay to synchronize against the whole thing.
- synchronized( this ) {
+ Synchronizer.synchronize(lock, (Synchronizer.ThrowingRunnable) () -> {
final Page cached = getPageInfoFromCache( pageName );
final int latestcached = ( cached != null ) ? cached.getVersion() : Integer.MIN_VALUE;
@@ -377,7 +391,7 @@ public void deleteVersion( final String pageName, final int version ) throws Pro
provider.deleteVersion( pageName, version );
cachingManager.remove( CachingManager.CACHE_PAGES_HISTORY, pageName );
- }
+ });
if( version == PageProvider.LATEST_VERSION ) {
pages.decrementAndGet();
}
@@ -389,12 +403,12 @@ public void deleteVersion( final String pageName, final int version ) throws Pro
@Override
public void deletePage( final String pageName ) throws ProviderException {
// See note in deleteVersion().
- synchronized( this ) {
+ Synchronizer.synchronize(lock, () -> {
cachingManager.put( CachingManager.CACHE_PAGES, pageName, null );
cachingManager.put( CachingManager.CACHE_PAGES_TEXT, pageName, null );
cachingManager.put( CachingManager.CACHE_PAGES_HISTORY, pageName, null );
provider.deletePage( pageName );
- }
+ });
pages.decrementAndGet();
}
@@ -404,7 +418,7 @@ public void deletePage( final String pageName ) throws ProviderException {
@Override
public void movePage( final String from, final String to ) throws ProviderException {
provider.movePage( from, to );
- synchronized( this ) {
+ Synchronizer.synchronize(lock, () -> {
// Clear any cached version of the old page and new page
cachingManager.remove( CachingManager.CACHE_PAGES, from );
cachingManager.remove( CachingManager.CACHE_PAGES_TEXT, from );
@@ -413,7 +427,7 @@ public void movePage( final String from, final String to ) throws ProviderExcept
cachingManager.remove( CachingManager.CACHE_PAGES, to );
cachingManager.remove( CachingManager.CACHE_PAGES_TEXT, to );
cachingManager.remove( CachingManager.CACHE_PAGES_HISTORY, to );
- }
+ });
}
/**
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/providers/VersioningFileProvider.java b/jspwiki-main/src/main/java/org/apache/wiki/providers/VersioningFileProvider.java
index 35e70e2d6d..e925e0c5f2 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/providers/VersioningFileProvider.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/providers/VersioningFileProvider.java
@@ -29,6 +29,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.api.providers.WikiProvider;
import org.apache.wiki.api.spi.Wiki;
import org.apache.wiki.util.FileUtil;
+import org.apache.wiki.util.Synchronizer;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
@@ -42,6 +43,8 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Date;
import java.util.List;
import java.util.Properties;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReentrantLock;
/**
* Provides a simple directory based repository for Wiki pages.
@@ -60,7 +63,7 @@ Licensed to the Apache Software Foundation (ASF) under one
*
* In this case, "Main" has three versions, and "Foobar" just one version.
*
- * The properties file contains the necessary metainformation (such as author)
+ * The properties file contains the necessary meta information (such as author)
* information of the page. DO NOT MESS WITH IT!
*
*
@@ -81,13 +84,23 @@ public class VersioningFileProvider extends AbstractFileProvider {
private CachedProperties m_cachedProperties;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* {@inheritDoc}
*/
@Override
public void initialize( final Engine engine, final Properties properties ) throws NoRequiredPropertyException, IOException {
super.initialize( engine, properties );
- // some additional sanity checks :
+ // some additional sanity checks:
final File oldpages = new File( getPageDirectory(), PAGEDIR );
if( !oldpages.exists() ) {
if( !oldpages.mkdirs() ) {
@@ -268,21 +281,29 @@ private int realVersion( final String page, final int requestedVersion ) throws
* {@inheritDoc}
*/
@Override
- public synchronized String getPageText( final String page, int version ) throws ProviderException {
- final File dir = findOldPageDir( page );
+ public String getPageText(final String page, int version) throws ProviderException {
+ final AtomicReference result = new AtomicReference<>();
+ final int finalVersion = version; // Make it effectively final for use in lambda
+
+ Synchronizer.synchronize(lock, () -> {
+ final File dir = findOldPageDir(page);
+ int realVersion = realVersion(page, finalVersion);
+
+ if (realVersion == -1) {
+ // We can let the FileSystemProvider take care of these requests.
+ result.set(super.getPageText(page, PageProvider.LATEST_VERSION));
+ return;
+ }
- version = realVersion( page, version );
- if( version == -1 ) {
- // We can let the FileSystemProvider take care of these requests.
- return super.getPageText( page, PageProvider.LATEST_VERSION );
- }
+ final File pageFile = new File(dir, "" + realVersion + FILE_EXT);
+ if (!pageFile.exists()) {
+ throw new NoSuchVersionException("Version " + realVersion + " does not exist.");
+ }
- final File pageFile = new File( dir, ""+version+FILE_EXT );
- if( !pageFile.exists() ) {
- throw new NoSuchVersionException("Version "+version+"does not exist.");
- }
+ result.set(readFile(pageFile));
+ });
- return readFile( pageFile );
+ return result.get();
}
@@ -325,76 +346,79 @@ private String readFile( final File pagedata ) throws ProviderException {
* {@inheritDoc}
*/
@Override
- public synchronized void putPageText( final Page page, final String text ) throws ProviderException {
- // This is a bit complicated. We'll first need to copy the old file to be the newest file.
- final int latest = findLatestVersion( page.getName() );
- final File pageDir = findOldPageDir( page.getName() );
- if( !pageDir.exists() ) {
- pageDir.mkdirs();
- }
-
- try {
- // Copy old data to safety, if one exists.
- final File oldFile = findPage( page.getName() );
-
- // Figure out which version should the old page be? Numbers should always start at 1.
- // "most recent" = -1 ==> 1
- // "first" = 1 ==> 2
- int versionNumber = (latest > 0) ? latest : 1;
- final boolean firstUpdate = (versionNumber == 1);
-
- if( oldFile != null && oldFile.exists() ) {
- final File pageFile = new File( pageDir, versionNumber + FILE_EXT );
- try( final InputStream in = new BufferedInputStream( Files.newInputStream( oldFile.toPath() ) );
- final OutputStream out = new BufferedOutputStream( Files.newOutputStream( pageFile.toPath() ) ) ) {
- FileUtil.copyContents( in, out );
-
- // We need also to set the date, since we rely on this.
- pageFile.setLastModified( oldFile.lastModified() );
+ public void putPageText( final Page page, final String text ) throws ProviderException {
+ Synchronizer.synchronize(lock, () -> {
+ // This is a bit complicated. We'll first need to copy the old file to be the newest file.
+ final int latest = findLatestVersion( page.getName() );
+ final File pageDir = findOldPageDir( page.getName() );
+ if( !pageDir.exists() ) {
+ pageDir.mkdirs();
+ }
- // Kludge to make the property code to work properly.
- versionNumber++;
+ try {
+ // Copy old data to safety, if one exists.
+ final File oldFile = findPage( page.getName() );
+
+            // Figure out which version the old page should become. Numbers always start at 1.
+            // "most recent" = -1 ==> 1
+            // "first" = 1 ==> 2
+ int versionNumber = (latest > 0) ? latest : 1;
+ final boolean firstUpdate = (versionNumber == 1);
+
+ if( oldFile != null && oldFile.exists() ) {
+ final File pageFile = new File( pageDir, versionNumber + FILE_EXT );
+ try( final InputStream in = new BufferedInputStream( Files.newInputStream( oldFile.toPath() ) );
+ final OutputStream out = new BufferedOutputStream( Files.newOutputStream( pageFile.toPath() ) ) ) {
+ FileUtil.copyContents( in, out );
+
+ // We need also to set the date, since we rely on this.
+ pageFile.setLastModified( oldFile.lastModified() );
+
+                        // Kludge to make the property code work properly.
+ versionNumber++;
+ }
}
- }
- // Let superclass handler writing data to a new version.
- super.putPageText( page, text );
+            // Let the superclass handle writing data to a new version.
+ super.putPageText( page, text );
- // Finally, write page version data.
- // FIXME: No rollback available.
- final Properties props = getPageProperties( page.getName() );
+ // Finally, write page version data.
+ // FIXME: No rollback available.
+ final Properties props = getPageProperties( page.getName() );
- String authorFirst = null;
- // if the following file exists, we are NOT migrating from FileSystemProvider
- final File pagePropFile = new File(getPageDirectory() + File.separator + PAGEDIR + File.separator + mangleName(page.getName()) + File.separator + "page" + FileSystemProvider.PROP_EXT);
- if( firstUpdate && ! pagePropFile.exists() ) {
- // we might not yet have a versioned author because the old page was last maintained by FileSystemProvider
- final Properties props2 = getHeritagePageProperties( page.getName() );
+ String authorFirst = null;
+ // if the following file exists, we are NOT migrating from FileSystemProvider
+ final File pagePropFile = new File(getPageDirectory() + File.separator + PAGEDIR + File.separator + mangleName(page.getName()) + File.separator + "page" + FileSystemProvider.PROP_EXT);
+ if( firstUpdate && ! pagePropFile.exists() ) {
+ // we might not yet have a versioned author because the old page was last maintained by FileSystemProvider
+ final Properties props2 = getHeritagePageProperties( page.getName() );
- // remember the simulated original author (or something) in the new properties
- authorFirst = props2.getProperty( "1.author", "unknown" );
- props.setProperty( "1.author", authorFirst );
- }
+ // remember the simulated original author (or something) in the new properties
+ authorFirst = props2.getProperty( "1.author", "unknown" );
+ props.setProperty( "1.author", authorFirst );
+ }
- String newAuthor = page.getAuthor();
- if ( newAuthor == null ) {
- newAuthor = ( authorFirst != null ) ? authorFirst : "unknown";
- }
- page.setAuthor(newAuthor);
- props.setProperty( versionNumber + ".author", newAuthor );
+ String newAuthor = page.getAuthor();
+ if ( newAuthor == null ) {
+ newAuthor = ( authorFirst != null ) ? authorFirst : "unknown";
+ }
+ page.setAuthor(newAuthor);
+ props.setProperty( versionNumber + ".author", newAuthor );
+
+ final String changeNote = page.getAttribute( Page.CHANGENOTE );
+ if( changeNote != null ) {
+ props.setProperty( versionNumber + ".changenote", changeNote );
+ }
- final String changeNote = page.getAttribute( Page.CHANGENOTE );
- if( changeNote != null ) {
- props.setProperty( versionNumber + ".changenote", changeNote );
+ // Get additional custom properties from page and add to props
+ getCustomProperties( page, props );
+ putPageProperties( page.getName(), props );
+ } catch( final IOException e ) {
+ LOG.error( "Saving failed", e );
+ throw new ProviderException("Could not save page text: "+e.getMessage());
}
+ });
- // Get additional custom properties from page and add to props
- getCustomProperties( page, props );
- putPageProperties( page.getName(), props );
- } catch( final IOException e ) {
- LOG.error( "Saving failed", e );
- throw new ProviderException("Could not save page text: "+e.getMessage());
- }
}
/**
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/references/DefaultReferenceManager.java b/jspwiki-main/src/main/java/org/apache/wiki/references/DefaultReferenceManager.java
index 329672d744..84c582dd35 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/references/DefaultReferenceManager.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/references/DefaultReferenceManager.java
@@ -38,6 +38,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.event.WikiPageEvent;
import org.apache.wiki.pages.PageManager;
import org.apache.wiki.render.RenderingManager;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import java.io.*;
@@ -47,6 +48,9 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.security.NoSuchAlgorithmException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
/*
@@ -132,6 +136,16 @@ public class DefaultReferenceManager extends BasePageFilter implements Reference
/** We use this also a generic serialization id */
private static final long serialVersionUID = 4L;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* Builds a new ReferenceManager.
*
@@ -143,9 +157,7 @@ public DefaultReferenceManager( final Engine engine ) {
m_engine = engine;
m_matchEnglishPlurals = TextUtil.getBooleanProperty( engine.getWikiProperties(), Engine.PROP_MATCHPLURALS, false );
- //
// Create two maps that contain unmutable versions of the two basic maps.
- //
m_unmutableReferredBy = Collections.unmodifiableMap( m_referredBy );
m_unmutableRefersTo = Collections.unmodifiableMap( m_refersTo );
}
@@ -233,54 +245,62 @@ public void initialize( final Collection< Page > pages ) throws ProviderExceptio
* Reads the serialized data from the disk back to memory. Returns the date when the data was last written on disk
*/
@SuppressWarnings("unchecked")
- private synchronized long unserializeFromDisk() throws IOException, ClassNotFoundException {
- final long saved;
-
- final File f = new File( m_engine.getWorkDir(), SERIALIZATION_FILE );
- try( final ObjectInputStream in = new ObjectInputStream( new BufferedInputStream( Files.newInputStream( f.toPath() ) ) ) ) {
- final StopWatch sw = new StopWatch();
- sw.start();
+ private long unserializeFromDisk() throws IOException, ClassNotFoundException {
+ final AtomicLong saved = new AtomicLong(0);
+ Synchronizer.synchronize(lock, () -> {
+ try {
+ final File f = new File(m_engine.getWorkDir(), SERIALIZATION_FILE);
+ try (final ObjectInputStream in = new ObjectInputStream(new BufferedInputStream(Files.newInputStream(f.toPath())))) {
+ final StopWatch sw = new StopWatch();
+ sw.start();
- final long ver = in.readLong();
+ final long ver = in.readLong();
- if( ver != serialVersionUID ) {
- throw new IOException("File format has changed; I need to recalculate references.");
- }
+ if (ver != serialVersionUID) {
+ throw new IOException("File format has changed; I need to recalculate references.");
+ }
- saved = in.readLong();
- m_refersTo = ( Map< String, Collection< String > > ) in.readObject();
- m_referredBy = ( Map< String, Set< String > > ) in.readObject();
+ saved.set(in.readLong());
+ m_refersTo = (Map>) in.readObject();
+ m_referredBy = (Map>) in.readObject();
- m_unmutableReferredBy = Collections.unmodifiableMap( m_referredBy );
- m_unmutableRefersTo = Collections.unmodifiableMap( m_refersTo );
+ m_unmutableReferredBy = Collections.unmodifiableMap(m_referredBy);
+ m_unmutableRefersTo = Collections.unmodifiableMap(m_refersTo);
- sw.stop();
- LOG.debug( "Read serialized data successfully in {}", sw );
- }
+ sw.stop();
+ LOG.debug("Read serialized data successfully in {}", sw);
+ }
+ } catch (IOException | ClassNotFoundException e) {
+ throw new RuntimeException(e);
+ }
+ });
- return saved;
+ return saved.get();
}
+
/**
* Serializes hashmaps to disk. The format is private, don't touch it.
*/
- private synchronized void serializeToDisk() {
- final File f = new File( m_engine.getWorkDir(), SERIALIZATION_FILE );
- try( final ObjectOutputStream out = new ObjectOutputStream( new BufferedOutputStream( Files.newOutputStream( f.toPath() ) ) ) ) {
- final StopWatch sw = new StopWatch();
- sw.start();
+ private void serializeToDisk() {
+ Synchronizer.synchronize(lock, () -> {
+ final File f = new File(m_engine.getWorkDir(), SERIALIZATION_FILE);
+ try (final ObjectOutputStream out = new ObjectOutputStream(new BufferedOutputStream(Files.newOutputStream(f.toPath())))) {
+ final StopWatch sw = new StopWatch();
+ sw.start();
- out.writeLong( serialVersionUID );
- out.writeLong( System.currentTimeMillis() ); // Timestamp
- out.writeObject( m_refersTo );
- out.writeObject( m_referredBy );
+ out.writeLong(serialVersionUID);
+ out.writeLong(System.currentTimeMillis()); // Timestamp
+ out.writeObject(m_refersTo);
+ out.writeObject(m_referredBy);
- sw.stop();
+ sw.stop();
- LOG.debug( "serialization done - took {}", sw );
- } catch( final IOException ioe ) {
- LOG.error( "Unable to serialize!", ioe );
- }
+ LOG.debug("serialization done - took {}", sw);
+ } catch (final IOException ioe) {
+ LOG.error("Unable to serialize!", ioe);
+ }
+ });
}
private String getHashFileName( final String pageName ) {
@@ -301,101 +321,117 @@ private String getHashFileName( final String pageName ) {
/**
* Reads the serialized data from the disk back to memory. Returns the date when the data was last written on disk
*/
- private synchronized long unserializeAttrsFromDisk( final Page p ) throws IOException, ClassNotFoundException {
- long saved = 0L;
+ private long unserializeAttrsFromDisk(final Page p) throws IOException, ClassNotFoundException {
+ final AtomicLong saved = new AtomicLong(0L);
+ final AtomicReference ioExceptionRef = new AtomicReference<>();
+ final AtomicReference classNotFoundExceptionRef = new AtomicReference<>();
- // Find attribute cache, and check if it exists
- final String hashName = getHashFileName( p.getName() );
- if( hashName != null ) {
- File f = new File( m_engine.getWorkDir(), SERIALIZATION_DIR );
- f = new File( f, hashName );
- if( !f.exists() ) {
- return 0L;
- }
-
- try( final ObjectInputStream in = new ObjectInputStream( new BufferedInputStream( Files.newInputStream( f.toPath() ) ) ) ) {
- final StopWatch sw = new StopWatch();
- sw.start();
- LOG.debug( "Deserializing attributes for {}", p.getName() );
+ Synchronizer.synchronize(lock, () -> {
+ try {
+ long localSaved = 0L;
+
+ // Find attribute cache, and check if it exists
+ final String hashName = getHashFileName(p.getName());
+ if (hashName != null) {
+ File f = new File(m_engine.getWorkDir(), SERIALIZATION_DIR);
+ f = new File(f, hashName);
+ if (!f.exists()) {
+ return;
+ }
- final long ver = in.readLong();
- if( ver != serialVersionUID ) {
- LOG.debug( "File format has changed; cannot deserialize." );
- return 0L;
+ try (final ObjectInputStream in = new ObjectInputStream(new BufferedInputStream(Files.newInputStream(f.toPath())))) {
+                        // Read the header: validate the serialization format version and page name before restoring attributes.
+ final long ver = in.readLong();
+ if (ver != serialVersionUID) {
+                            // File format has changed; cannot deserialize — leave "saved" at 0L (never saved).
+ return;
+ }
+
+ localSaved = in.readLong();
+ final String name = in.readUTF();
+ if (!name.equals(p.getName())) {
+                            // Stored page name does not match this page; skip (treat as never saved).
+ return;
+ }
+
+ final long entries = in.readLong();
+ for (int i = 0; i < entries; i++) {
+ final String key = in.readUTF();
+ final Object value = in.readObject();
+ p.setAttribute(key, value);
+                            // NOTE(review): the original per-attribute debug logging and the
+                            // p.setHasMetadata() call after this loop were dropped in this rewrite — confirm intentional.
+ }
+ }
}
- saved = in.readLong();
- final String name = in.readUTF();
- if( !name.equals( p.getName() ) ) {
- LOG.debug( "File name does not match ({}), skipping...", name );
- return 0L; // Not here
- }
+ saved.set(localSaved);
+ } catch (IOException e) {
+ ioExceptionRef.set(e);
+ } catch (ClassNotFoundException e) {
+ classNotFoundExceptionRef.set(e);
+ }
+ });
- final long entries = in.readLong();
- for( int i = 0; i < entries; i++ ) {
- final String key = in.readUTF();
- final Object value = in.readObject();
- p.setAttribute( key, value );
- LOG.debug( " attr: {}={}", key, value );
- }
+ if (ioExceptionRef.get() != null) {
+ throw ioExceptionRef.get();
+ }
- sw.stop();
- LOG.debug( "Read serialized data for {} successfully in {}", name, sw );
- p.setHasMetadata();
- }
+ if (classNotFoundExceptionRef.get() != null) {
+ throw classNotFoundExceptionRef.get();
}
- return saved;
+ return saved.get();
}
/**
* Serializes hashmaps to disk. The format is private, don't touch it.
*/
- private synchronized void serializeAttrsToDisk( final Page p ) {
- final StopWatch sw = new StopWatch();
- sw.start();
+ private void serializeAttrsToDisk( final Page p ) {
+ Synchronizer.synchronize(lock, () -> {
+ final StopWatch sw = new StopWatch();
+ sw.start();
- final String hashName = getHashFileName( p.getName() );
- if( hashName != null ) {
- File f = new File( m_engine.getWorkDir(), SERIALIZATION_DIR );
- if( !f.exists() ) {
- f.mkdirs();
- }
+ final String hashName = getHashFileName( p.getName() );
+ if( hashName != null ) {
+ File f = new File( m_engine.getWorkDir(), SERIALIZATION_DIR );
+ if( !f.exists() ) {
+ f.mkdirs();
+ }
- // Create a digest for the name
- f = new File( f, hashName );
+ // Create a digest for the name
+ f = new File( f, hashName );
- try( final ObjectOutputStream out = new ObjectOutputStream( new BufferedOutputStream( Files.newOutputStream( f.toPath() ) ) ) ) {
- // new Set to avoid concurrency issues
- final Set< Map.Entry < String, Object > > entries = new HashSet<>( p.getAttributes().entrySet() );
+ try( final ObjectOutputStream out = new ObjectOutputStream( new BufferedOutputStream( Files.newOutputStream( f.toPath() ) ) ) ) {
+ // new Set to avoid concurrency issues
+ final Set< Map.Entry < String, Object > > entries = new HashSet<>( p.getAttributes().entrySet() );
- if(entries.isEmpty()) {
- // Nothing to serialize, therefore we will just simply remove the serialization file so that the
- // next time we boot, we don't deserialize old data.
- f.delete();
- return;
- }
+ if(entries.isEmpty()) {
+                        // Nothing to serialize, so we simply remove the serialization file so that the
+                        // next time we boot, we don't deserialize old data.
+ f.delete();
+ return;
+ }
- out.writeLong( serialVersionUID );
- out.writeLong( System.currentTimeMillis() ); // Timestamp
- out.writeUTF( p.getName() );
- out.writeLong( entries.size() );
+ out.writeLong( serialVersionUID );
+ out.writeLong( System.currentTimeMillis() ); // Timestamp
+ out.writeUTF( p.getName() );
+ out.writeLong( entries.size() );
- for( final Map.Entry< String, Object > e : entries ) {
- if( e.getValue() instanceof Serializable ) {
- out.writeUTF( e.getKey() );
- out.writeObject( e.getValue() );
+ for( final Map.Entry< String, Object > e : entries ) {
+ if( e.getValue() instanceof Serializable ) {
+ out.writeUTF( e.getKey() );
+ out.writeObject( e.getValue() );
+ }
}
- }
- } catch( final IOException e ) {
- LOG.error( "Unable to serialize!", e );
- } finally {
- sw.stop();
- LOG.debug( "serialization for {} done - took {}", p.getName(), sw );
+ } catch( final IOException e ) {
+ LOG.error( "Unable to serialize!", e );
+ } finally {
+ sw.stop();
+ LOG.debug( "serialization for {} done - took {}", p.getName(), sw );
+ }
}
- }
-
+ });
}
/**
@@ -571,13 +607,10 @@ protected Map< String, Set< String > > getReferredBy() {
/**
* Cleans the 'referred by' list, removing references by 'referrer' to any other page. Called after 'referrer' is removed.
- *
* Two ways to go about this. One is to look up all pages previously referred by referrer and remove referrer
* from their lists, and let the update put them back in (except possibly removed ones).
- *
* The other is to get the old referred-to list, compare to the new, and tell the ones missing in the latter to remove referrer from
* their list.
- *
* We'll just try the first for now. Need to come back and optimize this a bit.
*/
private void cleanReferredBy( final String referrer,
@@ -810,10 +843,9 @@ public Collection< String > findRefersTo( final String pageName ) {
* This 'deepHashCode' can be used to determine if there were any modifications made to the underlying to and by maps of the
* ReferenceManager. The maps of the ReferenceManager are not synchronized, so someone could add/remove entries in them while the
* hashCode is being computed.
- *
* This method traps and retries if a concurrent modification occurs.
*
- * @return Sum of the hashCodes for the to and by maps of the ReferenceManager
+ * @return Sum of the hashCodes for the 'to' and 'by' maps of the ReferenceManager
* @since 2.3.24
*/
//
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/rss/DefaultRSSGenerator.java b/jspwiki-main/src/main/java/org/apache/wiki/rss/DefaultRSSGenerator.java
index 20cf0d366b..2f1d65c7ec 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/rss/DefaultRSSGenerator.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/rss/DefaultRSSGenerator.java
@@ -34,6 +34,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.pages.PageManager;
import org.apache.wiki.pages.PageTimeComparator;
import org.apache.wiki.render.RenderingManager;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import org.apache.wiki.variables.VariableManager;
@@ -43,11 +44,11 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
+import java.util.concurrent.locks.ReentrantLock;
/**
* Default implementation for {@link RSSGenerator}.
- *
* {@inheritDoc}
*/
// FIXME: Limit diff and page content size.
@@ -64,6 +65,16 @@ public class DefaultRSSGenerator implements RSSGenerator {
private static final int MAX_CHARACTERS = Integer.MAX_VALUE-1;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* Builds the RSS generator for a given Engine.
*
@@ -200,14 +211,16 @@ public String generateFeed( final Context wikiContext, final List< Page > change
/** {@inheritDoc} */
@Override
- public synchronized boolean isEnabled() {
- return m_enabled;
+ public boolean isEnabled() {
+ return Synchronizer.synchronize(lock, () -> m_enabled);
}
/** {@inheritDoc} */
@Override
- public synchronized void setEnabled( final boolean enabled ) {
- m_enabled = enabled;
+ public void setEnabled( final boolean enabled ) {
+ Synchronizer.synchronize(lock, () -> {
+ m_enabled = enabled;
+ });
}
/** {@inheritDoc} */
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/search/LuceneSearchProvider.java b/jspwiki-main/src/main/java/org/apache/wiki/search/LuceneSearchProvider.java
index 9593712018..322d64132d 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/search/LuceneSearchProvider.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/search/LuceneSearchProvider.java
@@ -67,6 +67,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.pages.PageManager;
import org.apache.wiki.util.ClassUtil;
import org.apache.wiki.util.FileUtil;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import java.io.File;
@@ -84,6 +85,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Properties;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
+import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
@@ -133,6 +135,16 @@ public class LuceneSearchProvider implements SearchProvider {
private static final String PUNCTUATION_TO_SPACES = StringUtils.repeat( " ", TextUtil.PUNCTUATION_CHARS_ALLOWED.length() );
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/** {@inheritDoc} */
@Override
public void initialize( final Engine engine, final Properties props ) throws NoRequiredPropertyException, IOException {
@@ -303,23 +315,25 @@ protected String getAttachmentContent( final Attachment att ) {
* @param page The WikiPage to check
* @param text The page text to index.
*/
- protected synchronized void updateLuceneIndex( final Page page, final String text ) {
- LOG.debug( "Updating Lucene index for page '{}'...", page.getName() );
- pageRemoved( page );
-
- // Now add back the new version.
- try( final Directory luceneDir = new NIOFSDirectory( new File( m_luceneDirectory ).toPath() );
- final IndexWriter writer = getIndexWriter( luceneDir ) ) {
- luceneIndexPage( page, text, writer );
- } catch( final IOException e ) {
- LOG.error( "Unable to update page '{}' from Lucene index", page.getName(), e );
- // reindexPage( page );
- } catch( final Exception e ) {
- LOG.error( "Unexpected Lucene exception - please check configuration!", e );
- // reindexPage( page );
- }
+ protected void updateLuceneIndex( final Page page, final String text ) {
+ Synchronizer.synchronize(lock, () -> {
+ LOG.debug( "Updating Lucene index for page '{}'...", page.getName() );
+ pageRemoved( page );
+
+ // Now add back the new version.
+ try( final Directory luceneDir = new NIOFSDirectory( new File( m_luceneDirectory ).toPath() );
+ final IndexWriter writer = getIndexWriter( luceneDir ) ) {
+ luceneIndexPage( page, text, writer );
+ } catch( final IOException e ) {
+ LOG.error( "Unable to update page '{}' from Lucene index", page.getName(), e );
+ // reindexPage( page );
+ } catch( final Exception e ) {
+ LOG.error( "Unexpected Lucene exception - please check configuration!", e );
+ // reindexPage( page );
+ }
- LOG.debug( "Done updating Lucene index for page '{}'.", page.getName() );
+ LOG.debug( "Done updating Lucene index for page '{}'.", page.getName() );
+ });
}
private Analyzer getLuceneAnalyzer() throws ProviderException {
@@ -389,9 +403,7 @@ protected Document luceneIndexPage( final Page page, final String text, final In
field = new Field( LUCENE_PAGE_KEYWORDS, page.getAttribute( "keywords" ).toString(), TextField.TYPE_STORED );
doc.add( field );
}
- synchronized( writer ) {
- writer.addDocument( doc );
- }
+ writer.addDocument( doc );
return doc;
}
@@ -400,7 +412,7 @@ protected Document luceneIndexPage( final Page page, final String text, final In
* {@inheritDoc}
*/
@Override
- public synchronized void pageRemoved( final Page page ) {
+ public void pageRemoved( final Page page ) {
try( final Directory luceneDir = new NIOFSDirectory( new File( m_luceneDirectory ).toPath() );
final IndexWriter writer = getIndexWriter( luceneDir ) ) {
final Query query = new TermQuery( new Term( LUCENE_ID, page.getName() ) );
@@ -540,6 +552,16 @@ private static final class LuceneUpdater extends WikiBackgroundThread {
private WatchDog m_watchdog;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private static final ReentrantLock lock = new ReentrantLock();
+
private LuceneUpdater( final Engine engine, final LuceneSearchProvider provider, final int initialDelay, final int indexDelay ) {
super( engine, indexDelay );
m_provider = provider;
@@ -567,15 +589,15 @@ public void startupTask() throws Exception {
@Override
public void backgroundTask() {
m_watchdog.enterState( "Emptying index queue", 60 );
-
- synchronized( m_provider.m_updates ) {
- while(!m_provider.m_updates.isEmpty()) {
+ Synchronizer.synchronize(lock, () -> {
+ while( !m_provider.m_updates.isEmpty() ) {
final Object[] pair = m_provider.m_updates.remove( 0 );
final Page page = ( Page )pair[ 0 ];
final String text = ( String )pair[ 1 ];
m_provider.updateLuceneIndex( page, text );
}
- }
+ });
+
m_watchdog.exitState();
}
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/ui/admin/beans/SearchManagerBean.java b/jspwiki-main/src/main/java/org/apache/wiki/ui/admin/beans/SearchManagerBean.java
index e85c86af46..59f1186d8b 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/ui/admin/beans/SearchManagerBean.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/ui/admin/beans/SearchManagerBean.java
@@ -27,9 +27,11 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.ui.admin.SimpleAdminBean;
import org.apache.wiki.ui.progress.ProgressItem;
import org.apache.wiki.ui.progress.ProgressManager;
+import org.apache.wiki.util.Synchronizer;
import javax.management.NotCompliantMBeanException;
import java.util.Collection;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -46,6 +48,16 @@ public class SearchManagerBean extends SimpleAdminBean {
private WikiBackgroundThread m_updater;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
public SearchManagerBean( final Engine engine ) throws NotCompliantMBeanException {
super();
initialize( engine );
@@ -74,50 +86,52 @@ public String getTitle()
*
* This method prevents itself from being called twice.
*/
- public synchronized void reload() {
- if( m_updater == null ) {
- m_updater = new WikiBackgroundThread( m_engine, 0 ) {
+ public void reload() {
+ Synchronizer.synchronize(lock, () -> {
+ if( m_updater == null ) {
+ m_updater = new WikiBackgroundThread( m_engine, 0 ) {
- int m_count;
- int m_max;
+ int m_count;
+ int m_max;
- @Override
- public void startupTask() throws Exception {
- super.startupTask();
+ @Override
+ public void startupTask() throws Exception {
+ super.startupTask();
- setName( "Reindexer started" );
- }
+ setName( "Reindexer started" );
+ }
- @Override
- public void backgroundTask() throws Exception {
- final Collection< Page > allPages = m_engine.getManager( PageManager.class ).getAllPages();
+ @Override
+ public void backgroundTask() throws Exception {
+ final Collection< Page > allPages = m_engine.getManager( PageManager.class ).getAllPages();
- final SearchManager mgr = m_engine.getManager( SearchManager.class );
- m_max = allPages.size();
+ final SearchManager mgr = m_engine.getManager( SearchManager.class );
+ m_max = allPages.size();
- final ProgressItem pi = new ProgressItem() {
+ final ProgressItem pi = new ProgressItem() {
- @Override
- public int getProgress() {
- return 100 * m_count / m_max;
+ @Override
+ public int getProgress() {
+ return 100 * m_count / m_max;
+ }
+ };
+ m_engine.getManager( ProgressManager.class ).startProgress( pi, PROGRESS_ID );
+
+ for( final Page page : allPages ) {
+ mgr.reindexPage( page );
+ m_count++;
}
- };
- m_engine.getManager( ProgressManager.class ).startProgress( pi, PROGRESS_ID );
- for( final Page page : allPages ) {
- mgr.reindexPage( page );
- m_count++;
+ m_engine.getManager( ProgressManager.class ).stopProgress( PROGRESS_ID );
+ shutdown();
+ m_updater = null;
}
- m_engine.getManager( ProgressManager.class ).stopProgress( PROGRESS_ID );
- shutdown();
- m_updater = null;
- }
-
- };
+ };
- m_updater.start();
- }
+ m_updater.start();
+ }
+ });
}
@Override
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/workflow/AbstractStep.java b/jspwiki-main/src/main/java/org/apache/wiki/workflow/AbstractStep.java
index 3f42bebf6f..a617760035 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/workflow/AbstractStep.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/workflow/AbstractStep.java
@@ -20,10 +20,12 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.api.core.Context;
import org.apache.wiki.api.exceptions.WikiException;
+import org.apache.wiki.util.Synchronizer;
import java.io.Serializable;
import java.security.Principal;
import java.util.*;
+import java.util.concurrent.locks.ReentrantLock;
/**
* Abstract superclass that provides a complete implementation of most Step methods; subclasses need only implement {@link #execute(Context)} and
@@ -58,6 +60,16 @@ public abstract class AbstractStep implements Step {
private boolean m_started;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* Protected constructor that creates a new Step with a specified message key. After construction, the method
* {@link #setWorkflow(int, Map)} should be called.
@@ -146,8 +158,8 @@ public final String getMessageKey() {
* {@inheritDoc}
*/
@Override
- public final synchronized Outcome getOutcome() {
- return m_outcome;
+ public final Outcome getOutcome() {
+ return Synchronizer.synchronize(lock, () -> m_outcome);
}
/**
@@ -178,35 +190,40 @@ public final boolean isStarted() {
* {@inheritDoc}
*/
@Override
- public final synchronized void setOutcome(final Outcome outcome ) {
- // Is this an allowed Outcome?
- if( !m_successors.containsKey( outcome ) ) {
- if( !Outcome.STEP_CONTINUE.equals( outcome ) && !Outcome.STEP_ABORT.equals( outcome ) ) {
- throw new IllegalArgumentException( "Outcome " + outcome.getMessageKey() + " is not supported for this Step." );
+ public final void setOutcome(final Outcome outcome ) {
+ Synchronizer.synchronize(lock, () -> {
+ // Is this an allowed Outcome?
+ if( !m_successors.containsKey( outcome ) ) {
+ if( !Outcome.STEP_CONTINUE.equals( outcome ) && !Outcome.STEP_ABORT.equals( outcome ) ) {
+ throw new IllegalArgumentException( "Outcome " + outcome.getMessageKey() + " is not supported for this Step." );
+ }
}
- }
- // Is this a "completion" outcome?
- if( outcome.isCompletion() ) {
- if( m_completed ) {
- throw new IllegalStateException( "Step has already been marked complete; cannot set again." );
+ // Is this a "completion" outcome?
+ if( outcome.isCompletion() ) {
+ if( m_completed ) {
+ throw new IllegalStateException( "Step has already been marked complete; cannot set again." );
+ }
+ m_completed = true;
+ m_end = new Date( System.currentTimeMillis() );
}
- m_completed = true;
- m_end = new Date( System.currentTimeMillis() );
- }
- m_outcome = outcome;
+ m_outcome = outcome;
+ });
+
}
/**
* {@inheritDoc}
*/
@Override
- public final synchronized void start() throws WikiException {
- if( m_started ) {
- throw new IllegalStateException( "Step already started." );
- }
- m_started = true;
- m_start = new Date( System.currentTimeMillis() );
+ public final void start() throws WikiException {
+ Synchronizer.synchronize(lock, () -> {
+ if( m_started ) {
+ throw new IllegalStateException( "Step already started." );
+ }
+ m_started = true;
+ m_start = new Date( System.currentTimeMillis() );
+ });
}
/**
@@ -226,9 +243,11 @@ public final Step getSuccessor(final Outcome outcome ) {
* @param workflowContext the parent workflow context to set
*/
@Override
- public final synchronized void setWorkflow(final int workflowId, final Map< String, Serializable > workflowContext ) {
- this.workflowId = workflowId;
- this.workflowContext = workflowContext;
+ public final void setWorkflow(final int workflowId, final Map< String, Serializable > workflowContext ) {
+ Synchronizer.synchronize(lock, () -> {
+ this.workflowId = workflowId;
+ this.workflowContext = workflowContext;
+ });
}
public int getWorkflowId() {
@@ -244,8 +263,10 @@ public Map< String, Serializable > getWorkflowContext() {
*
* @param message the error message
*/
- protected final synchronized void addError( final String message ) {
- m_errors.add( message );
+ protected final void addError( final String message ) {
+ Synchronizer.synchronize(lock, () -> {
+ m_errors.add( message );
+ });
}
}
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/workflow/Decision.java b/jspwiki-main/src/main/java/org/apache/wiki/workflow/Decision.java
index 7f833ecc59..4d346e1ce8 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/workflow/Decision.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/workflow/Decision.java
@@ -22,6 +22,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.api.exceptions.WikiException;
import org.apache.wiki.event.WikiEventEmitter;
import org.apache.wiki.event.WorkflowEvent;
+import org.apache.wiki.util.Synchronizer;
import java.io.Serializable;
import java.security.Principal;
@@ -29,6 +30,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Collections;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -62,6 +64,16 @@ public abstract class Decision extends AbstractStep {
private final List<Fact> m_facts;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* Constructs a new Decision for a required "actor" Principal, having a default Outcome.
*
@@ -186,12 +198,14 @@ public boolean isReassignable() {
*
* @param actor the actor to reassign the Decision to
*/
- public final synchronized void reassign( final Principal actor ) {
- if( isReassignable() ) {
- m_actor = actor;
- } else {
- throw new IllegalArgumentException( "Decision cannot be reassigned." );
- }
+ public final void reassign( final Principal actor ) {
+ Synchronizer.synchronize(lock, () -> {
+ if( isReassignable() ) {
+ m_actor = actor;
+ } else {
+ throw new IllegalArgumentException( "Decision cannot be reassigned." );
+ }
+ });
}
/**
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/workflow/DecisionQueue.java b/jspwiki-main/src/main/java/org/apache/wiki/workflow/DecisionQueue.java
index 83d18a3896..2847797b91 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/workflow/DecisionQueue.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/workflow/DecisionQueue.java
@@ -23,6 +23,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.api.exceptions.WikiException;
import org.apache.wiki.event.WikiEventEmitter;
import org.apache.wiki.event.WorkflowEvent;
+import org.apache.wiki.util.Synchronizer;
import java.io.Serializable;
import java.security.Principal;
@@ -30,6 +31,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Collection;
import java.util.LinkedList;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -45,6 +47,16 @@ public class DecisionQueue implements Serializable {
private final AtomicInteger next = new AtomicInteger( 1_000 );
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/** Constructs a new DecisionQueue. */
public DecisionQueue() {
}
@@ -54,9 +66,11 @@ public DecisionQueue() {
*
* @param decision the Decision to add
*/
- protected synchronized void add( final Decision decision ) {
- m_queue.addLast( decision );
- decision.setId( next.getAndIncrement() );
+ protected void add( final Decision decision ) {
+ Synchronizer.synchronize(lock, () -> {
+ m_queue.addLast( decision );
+ decision.setId( next.getAndIncrement() );
+ });
}
/**
@@ -74,8 +88,10 @@ protected Decision[] decisions() {
*
* @param decision the decision to remove
*/
- protected synchronized void remove( final Decision decision ) {
- m_queue.remove( decision );
+ protected void remove( final Decision decision ) {
+ Synchronizer.synchronize(lock, () -> {
+ m_queue.remove( decision );
+ });
}
/**
@@ -133,14 +149,16 @@ public void decide( final Decision decision, final Outcome outcome, final Contex
* @param owner the new owner
* @throws WikiException never
*/
- public synchronized void reassign( final Decision decision, final Principal owner ) throws WikiException {
- if( decision.isReassignable() ) {
- decision.reassign( owner );
+ public void reassign( final Decision decision, final Principal owner ) throws WikiException {
+ Synchronizer.synchronize(lock, () -> {
+ if( decision.isReassignable() ) {
+ decision.reassign( owner );
- WikiEventEmitter.fireWorkflowEvent( decision, WorkflowEvent.DQ_REASSIGN );
- return;
- }
- throw new IllegalStateException( "Reassignments not allowed for this decision." );
+ WikiEventEmitter.fireWorkflowEvent( decision, WorkflowEvent.DQ_REASSIGN );
+ return;
+ }
+ throw new IllegalStateException( "Reassignments not allowed for this decision." );
+ });
}
}
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/workflow/DefaultWorkflowManager.java b/jspwiki-main/src/main/java/org/apache/wiki/workflow/DefaultWorkflowManager.java
index 93307699be..49b666feba 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/workflow/DefaultWorkflowManager.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/workflow/DefaultWorkflowManager.java
@@ -31,6 +31,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.event.WikiEvent;
import org.apache.wiki.event.WikiEventEmitter;
import org.apache.wiki.event.WorkflowEvent;
+import org.apache.wiki.util.Synchronizer;
import org.apache.wiki.util.TextUtil;
import java.io.*;
@@ -39,6 +40,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -62,6 +64,16 @@ public class DefaultWorkflowManager implements WorkflowManager, Serializable {
private Engine m_engine;
private int retainCompleted;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* Constructs a new WorkflowManager, with an empty workflow cache.
*/
@@ -140,50 +152,54 @@ public void initialize( final Engine engine, final Properties props ) {
* @return the date when the data was last written on disk or {@code 0} if there has been problems reading from disk.
*/
@SuppressWarnings( "unchecked" )
- synchronized long unserializeFromDisk( final File f ) {
- long saved = 0L;
- final StopWatch sw = new StopWatch();
- sw.start();
- try( final ObjectInputStream in = new ObjectInputStream( new BufferedInputStream( Files.newInputStream( f.toPath() ) ) ) ) {
- final long ver = in.readLong();
- if( ver != serialVersionUID ) {
- LOG.warn( "File format has changed; Unable to recover workflows and decision queue from disk." );
- } else {
- saved = in.readLong();
- m_workflows = ( Set< Workflow > )in.readObject();
- m_queue = ( DecisionQueue )in.readObject();
- m_completed = new CircularFifoQueue<>( retainCompleted );
- m_completed.addAll( ( Collection< Workflow > )in.readObject() );
- LOG.debug( "Read serialized data successfully in " + sw );
+ long unserializeFromDisk( final File f ) {
+ return Synchronizer.synchronize(lock, () -> {
+ long saved = 0L;
+ final StopWatch sw = new StopWatch();
+ sw.start();
+ try( final ObjectInputStream in = new ObjectInputStream( new BufferedInputStream( Files.newInputStream( f.toPath() ) ) ) ) {
+ final long ver = in.readLong();
+ if( ver != serialVersionUID ) {
+ LOG.warn( "File format has changed; Unable to recover workflows and decision queue from disk." );
+ } else {
+ saved = in.readLong();
+ m_workflows = ( Set< Workflow > )in.readObject();
+ m_queue = ( DecisionQueue )in.readObject();
+ m_completed = new CircularFifoQueue<>( retainCompleted );
+ m_completed.addAll( ( Collection< Workflow > )in.readObject() );
+ LOG.debug( "Read serialized data successfully in " + sw );
+ }
+ } catch( final IOException | ClassNotFoundException e ) {
+ LOG.warn( "unable to recover from disk workflows and decision queue: " + e.getMessage() );
}
- } catch( final IOException | ClassNotFoundException e ) {
- LOG.warn( "unable to recover from disk workflows and decision queue: " + e.getMessage() );
- }
- sw.stop();
+ sw.stop();
- return saved;
+ return saved;
+ });
}
/**
* Serializes workflows and decisionqueue to disk. The format is private, don't touch it.
*/
- synchronized void serializeToDisk( final File f ) {
- try( final ObjectOutputStream out = new ObjectOutputStream( new BufferedOutputStream( Files.newOutputStream( f.toPath() ) ) ) ) {
- final StopWatch sw = new StopWatch();
- sw.start();
-
- out.writeLong( serialVersionUID );
- out.writeLong( System.currentTimeMillis() ); // Timestamp
- out.writeObject( m_workflows );
- out.writeObject( m_queue );
- out.writeObject( m_completed );
-
- sw.stop();
-
- LOG.debug( "serialization done - took " + sw );
- } catch( final IOException ioe ) {
- LOG.error( "Unable to serialize!", ioe );
- }
+ void serializeToDisk( final File f ) {
+ Synchronizer.synchronize(lock, () -> {
+ try( final ObjectOutputStream out = new ObjectOutputStream( new BufferedOutputStream( Files.newOutputStream( f.toPath() ) ) ) ) {
+ final StopWatch sw = new StopWatch();
+ sw.start();
+
+ out.writeLong( serialVersionUID );
+ out.writeLong( System.currentTimeMillis() ); // Timestamp
+ out.writeObject( m_workflows );
+ out.writeObject( m_queue );
+ out.writeObject( m_completed );
+
+ sw.stop();
+
+ LOG.debug( "serialization done - took " + sw );
+ } catch( final IOException ioe ) {
+ LOG.error( "Unable to serialize!", ioe );
+ }
+ });
}
/**
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/workflow/Task.java b/jspwiki-main/src/main/java/org/apache/wiki/workflow/Task.java
index 87a68400be..9552fac4f0 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/workflow/Task.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/workflow/Task.java
@@ -18,9 +18,12 @@ Licensed to the Apache Software Foundation (ASF) under one
*/
package org.apache.wiki.workflow;
+import org.apache.wiki.util.Synchronizer;
+
import java.io.Serializable;
import java.security.Principal;
import java.util.Map;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -38,6 +41,15 @@ public abstract class Task extends AbstractStep {
private static final long serialVersionUID = 4630293957752430807L;
private Step m_successor;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
/**
* Public constructor that creates a new Task with a specified message key. After construction, the protected method
@@ -81,8 +93,10 @@ public final Principal getActor() {
*
* @param step the successor
*/
- public final synchronized void setSuccessor( final Step step ) {
- m_successor = step;
+ public final void setSuccessor( final Step step ) {
+ Synchronizer.synchronize(lock, () -> {
+ m_successor = step;
+ });
}
/**
@@ -91,8 +105,8 @@ public final synchronized void setSuccessor( final Step step ) {
*
* @return the next step
*/
- public final synchronized Step getSuccessor() {
- return m_successor;
+ public final Step getSuccessor() {
+ return Synchronizer.synchronize(lock, () -> m_successor);
}
}
diff --git a/jspwiki-main/src/main/java/org/apache/wiki/workflow/Workflow.java b/jspwiki-main/src/main/java/org/apache/wiki/workflow/Workflow.java
index 4a607e020b..0d5739b283 100644
--- a/jspwiki-main/src/main/java/org/apache/wiki/workflow/Workflow.java
+++ b/jspwiki-main/src/main/java/org/apache/wiki/workflow/Workflow.java
@@ -22,12 +22,19 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.wiki.api.exceptions.WikiException;
import org.apache.wiki.event.WikiEventEmitter;
import org.apache.wiki.event.WorkflowEvent;
+import org.apache.wiki.util.Synchronizer;
import java.io.Serializable;
import java.security.Principal;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReentrantLock;
/**
@@ -205,6 +212,16 @@ public class Workflow implements Serializable {
private Step m_currentStep;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* Constructs a new Workflow object with a supplied message key, owner Principal, and undefined unique identifier {@link #ID_NOT_SET}.
* Once instantiated the Workflow is considered to be in the {@link #CREATED} state; a caller must explicitly invoke the
@@ -233,25 +250,27 @@ public Workflow( final String messageKey, final Principal owner ) {
* to completion, but it cannot be called twice. It finishes by calling the {@link #cleanup()} method to flush retained objects.
* If the Workflow had been previously aborted, this method throws an IllegalStateException.
*/
- public final synchronized void abort( final Context context ) {
- // Check corner cases: previous abort or completion
- if( m_state == ABORTED ) {
- throw new IllegalStateException( "The workflow has already been aborted." );
- }
- if( m_state == COMPLETED ) {
- throw new IllegalStateException( "The workflow has already completed." );
- }
+ public final void abort( final Context context ) {
+ Synchronizer.synchronize(lock, () -> {
+ // Check corner cases: previous abort or completion
+ if( m_state == ABORTED ) {
+ throw new IllegalStateException( "The workflow has already been aborted." );
+ }
+ if( m_state == COMPLETED ) {
+ throw new IllegalStateException( "The workflow has already completed." );
+ }
- if( m_currentStep != null ) {
- if( m_currentStep instanceof Decision ) {
- WikiEventEmitter.fireWorkflowEvent( m_currentStep, WorkflowEvent.DQ_REMOVAL, context );
+ if( m_currentStep != null ) {
+ if( m_currentStep instanceof Decision ) {
+ WikiEventEmitter.fireWorkflowEvent( m_currentStep, WorkflowEvent.DQ_REMOVAL, context );
+ }
+ m_currentStep.setOutcome( Outcome.STEP_ABORT );
+ m_history.addLast( m_currentStep );
}
- m_currentStep.setOutcome( Outcome.STEP_ABORT );
- m_history.addLast( m_currentStep );
- }
- m_state = ABORTED;
- WikiEventEmitter.fireWorkflowEvent( this, WorkflowEvent.ABORTED );
- cleanup();
+ m_state = ABORTED;
+ WikiEventEmitter.fireWorkflowEvent( this, WorkflowEvent.ABORTED );
+ cleanup();
+ });
}
/**
@@ -275,11 +294,13 @@ public final void addMessageArgument( final Serializable obj ) {
*
* @return the current actor
*/
- public final synchronized Principal getCurrentActor() {
- if( m_currentStep == null ) {
- return null;
- }
- return m_currentStep.getActor();
+ public final Principal getCurrentActor() {
+ return Synchronizer.synchronize(lock, () -> {
+ if( m_currentStep == null ) {
+ return null;
+ }
+ return m_currentStep.getActor();
+ });
}
/**
@@ -343,9 +364,9 @@ public final Date getEndTime() {
*
* @return the unique identifier
*/
- public final synchronized int getId()
+ public final int getId()
{
- return m_id;
+ return Synchronizer.synchronize(lock, () -> m_id);
}
/**
@@ -411,7 +432,7 @@ public final Date getStartTime()
* Returns a Step history for this Workflow as a List, chronologically, from the first Step to the currently executing one. The first
* step is the first item in the array. If the Workflow has not started, this method returns a zero-length array.
*
- * @return an array of Steps representing those that have executed, or are currently executing
+ * @return a List of Steps representing those that have executed, or are currently executing.
*/
public final List< Step > getHistory()
{
@@ -434,9 +455,11 @@ public final boolean isAborted()
*
* @return true if the workflow has been started but has no more steps to perform; false if not.
*/
- public final synchronized boolean isCompleted() {
- // If current step is null, then we're done
- return m_started && m_state == COMPLETED;
+ public final boolean isCompleted() {
+ return Synchronizer.synchronize(lock, () -> {
+ // If current step is null, then we're done
+ return m_started && m_state == COMPLETED;
+ });
}
/**
@@ -468,21 +491,23 @@ public final Step getPreviousStep()
* @param context current wiki context
* @throws WikiException if the current task's {@link Task#execute( Context )} method throws an exception
*/
- public final synchronized void restart( final Context context ) throws WikiException {
- if( m_state != WAITING ) {
- throw new IllegalStateException( "Workflow is not paused; cannot restart." );
- }
- WikiEventEmitter.fireWorkflowEvent( this, WorkflowEvent.STARTED );
- m_state = RUNNING;
- WikiEventEmitter.fireWorkflowEvent( this, WorkflowEvent.RUNNING );
-
- // Process current step
- try {
- processCurrentStep( context );
- } catch( final WikiException e ) {
- abort( context );
- throw e;
- }
+ public final void restart( final Context context ) throws WikiException {
+ Synchronizer.synchronize(lock, () -> {
+ if( m_state != WAITING ) {
+ throw new IllegalStateException( "Workflow is not paused; cannot restart." );
+ }
+ WikiEventEmitter.fireWorkflowEvent( this, WorkflowEvent.STARTED );
+ m_state = RUNNING;
+ WikiEventEmitter.fireWorkflowEvent( this, WorkflowEvent.RUNNING );
+
+ // Process current step
+ try {
+ processCurrentStep( context );
+ } catch( final WikiException e ) {
+ abort( context );
+ throw e;
+ }
+ });
}
/**
@@ -505,9 +530,11 @@ public final void setAttribute( final String attr, final Serializable obj ) {
*
* @param step the first step for the workflow
*/
- public final synchronized void setFirstStep( final Step step )
+ public final void setFirstStep( final Step step )
{
- m_firstStep = step;
+ Synchronizer.synchronize(lock, () -> {
+ m_firstStep = step;
+ });
}
/**
@@ -515,9 +542,11 @@ public final synchronized void setFirstStep( final Step step )
*
* @param id the unique identifier
*/
- public final synchronized void setId( final int id )
+ public final void setId( final int id )
{
- this.m_id = id;
+ Synchronizer.synchronize(lock, () -> {
+ this.m_id = id;
+ });
}
/**
@@ -528,41 +557,45 @@ public final synchronized void setId( final int id )
* @param context current wiki context.
* @throws WikiException if the current Step's {@link Step#start()} method throws an exception of any kind
*/
- public final synchronized void start( final Context context ) throws WikiException {
- if( m_state == ABORTED ) {
- throw new IllegalStateException( "Workflow cannot be started; it has already been aborted." );
- }
- if( m_started ) {
- throw new IllegalStateException( "Workflow has already started." );
- }
- WikiEventEmitter.fireWorkflowEvent( this, WorkflowEvent.STARTED );
- m_started = true;
- m_state = RUNNING;
-
- WikiEventEmitter.fireWorkflowEvent( this, WorkflowEvent.RUNNING );
- // Mark the first step as the current one & add to history
- m_currentStep = m_firstStep;
- m_history.add( m_currentStep );
-
- // Process current step
- try {
- processCurrentStep( context );
- } catch( final WikiException e ) {
- abort( context );
- throw e;
- }
+ public final void start( final Context context ) throws WikiException {
+ Synchronizer.synchronize(lock, () -> {
+ if( m_state == ABORTED ) {
+ throw new IllegalStateException( "Workflow cannot be started; it has already been aborted." );
+ }
+ if( m_started ) {
+ throw new IllegalStateException( "Workflow has already started." );
+ }
+ WikiEventEmitter.fireWorkflowEvent( this, WorkflowEvent.STARTED );
+ m_started = true;
+ m_state = RUNNING;
+
+ WikiEventEmitter.fireWorkflowEvent( this, WorkflowEvent.RUNNING );
+ // Mark the first step as the current one & add to history
+ m_currentStep = m_firstStep;
+ m_history.add( m_currentStep );
+
+ // Process current step
+ try {
+ processCurrentStep( context );
+ } catch( final WikiException e ) {
+ abort( context );
+ throw e;
+ }
+ });
}
/**
* Sets the Workflow in the {@link #WAITING} state. If the Workflow is not running or has already been paused, this method throws an
* IllegalStateException. Once paused, the Workflow can be un-paused by executing the {@link #restart(Context)} method.
*/
- public final synchronized void waitstate() {
- if ( m_state != RUNNING ) {
- throw new IllegalStateException( "Workflow is not running; cannot pause." );
- }
- m_state = WAITING;
- WikiEventEmitter.fireWorkflowEvent( this, WorkflowEvent.WAITING );
+ public final void waitstate() {
+ Synchronizer.synchronize(lock, () -> {
+ if ( m_state != RUNNING ) {
+ throw new IllegalStateException( "Workflow is not running; cannot pause." );
+ }
+ m_state = WAITING;
+ WikiEventEmitter.fireWorkflowEvent( this, WorkflowEvent.WAITING );
+ });
}
/**
@@ -577,12 +610,14 @@ protected void cleanup() {
* Protected helper method that changes the Workflow's state to {@link #COMPLETED} and sets the current Step to null. It
* calls the {@link #cleanup()} method to flush retained objects. This method will no-op if it has previously been called.
*/
- protected final synchronized void complete() {
- if( !isCompleted() ) {
- m_state = COMPLETED;
- WikiEventEmitter.fireWorkflowEvent( this, WorkflowEvent.COMPLETED );
- cleanup();
- }
+ protected final void complete() {
+ Synchronizer.synchronize(lock, () -> {
+ if( !isCompleted() ) {
+ m_state = COMPLETED;
+ WikiEventEmitter.fireWorkflowEvent( this, WorkflowEvent.COMPLETED );
+ cleanup();
+ }
+ });
}
/**
@@ -638,5 +673,4 @@ protected final void processCurrentStep( final Context context ) throws WikiExce
}
}
-
}
diff --git a/jspwiki-main/src/test/resources/wkflmgr.ser b/jspwiki-main/src/test/resources/wkflmgr.ser
index 4a69d74d0c..82947dccae 100644
Binary files a/jspwiki-main/src/test/resources/wkflmgr.ser and b/jspwiki-main/src/test/resources/wkflmgr.ser differ
diff --git a/jspwiki-util/src/main/java/org/apache/wiki/util/CommentedProperties.java b/jspwiki-util/src/main/java/org/apache/wiki/util/CommentedProperties.java
index a04486fd3b..4d01387977 100644
--- a/jspwiki-util/src/main/java/org/apache/wiki/util/CommentedProperties.java
+++ b/jspwiki-util/src/main/java/org/apache/wiki/util/CommentedProperties.java
@@ -27,6 +27,7 @@ Licensed to the Apache Software Foundation (ASF) under one
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
+import java.util.concurrent.locks.ReentrantLock;
/**
* Extends {@link java.util.Properties} by providing support for comment
@@ -34,12 +35,22 @@ Licensed to the Apache Software Foundation (ASF) under one
* comments present in the file are preserved.
* @since 2.4.22
*/
-public class CommentedProperties extends Properties
-{
+public class CommentedProperties extends Properties {
+
private static final long serialVersionUID = 8057284636436329669L;
private String m_propertyString;
+ /**
+ * A lock used to ensure thread safety when accessing shared resources.
+ * This lock provides more flexibility and capabilities than the intrinsic locking mechanism,
+ * such as the ability to attempt to acquire a lock with a timeout, or to interrupt a thread
+ * waiting to acquire a lock.
+ *
+ * @see java.util.concurrent.locks.ReentrantLock
+ */
+ private final ReentrantLock lock = new ReentrantLock();
+
/**
* @see java.util.Properties#Properties()
*/
@@ -51,10 +62,9 @@ public CommentedProperties()
/**
* Creates new properties.
*
- * @param defaultValues A list of default values, which are used if in subsequent gets
- * a key is not found.
+ * @param defaultValues A list of default values, which are used if in subsequent gets a key is not found.
*/
- public CommentedProperties(final Properties defaultValues )
+ public CommentedProperties( final Properties defaultValues )
{
super( defaultValues );
}
@@ -63,13 +73,15 @@ public CommentedProperties(final Properties defaultValues )
* {@inheritDoc}
*/
@Override
- public synchronized void load(final InputStream inStream ) throws IOException
+ public void load(final InputStream inStream ) throws IOException
{
- // Load the file itself into a string
- m_propertyString = FileUtil.readContents( inStream, StandardCharsets.ISO_8859_1.name() );
+ Synchronizer.synchronize(lock, () -> {
+ // Load the file itself into a string
+ m_propertyString = FileUtil.readContents( inStream, StandardCharsets.ISO_8859_1.name() );
- // Now load it into the properties object as normal
- super.load( new ByteArrayInputStream( m_propertyString.getBytes(StandardCharsets.ISO_8859_1) ) );
+ // Now load it into the properties object as normal
+ super.load( new ByteArrayInputStream( m_propertyString.getBytes(StandardCharsets.ISO_8859_1) ) );
+ });
}
/**
@@ -79,83 +91,94 @@ public synchronized void load(final InputStream inStream ) throws IOException
* @throws IOException in case something goes wrong.
*/
@Override
- public synchronized void load(final Reader in ) throws IOException
+ public void load(final Reader in ) throws IOException
{
- m_propertyString = FileUtil.readContents( in );
+ Synchronizer.synchronize(lock, () -> {
+ m_propertyString = FileUtil.readContents( in );
- // Now load it into the properties object as normal
- super.load( new ByteArrayInputStream( m_propertyString.getBytes(StandardCharsets.ISO_8859_1) ) );
+ // Now load it into the properties object as normal
+ super.load( new ByteArrayInputStream( m_propertyString.getBytes(StandardCharsets.ISO_8859_1) ) );
+ });
}
/**
* {@inheritDoc}
*/
@Override
- public synchronized Object setProperty(final String key, final String value )
+ public Object setProperty(final String key, final String value )
{
- return put(key, value);
+ return Synchronizer.synchronize(lock, () -> put(key, value));
}
/**
* {@inheritDoc}
*/
@Override
- public synchronized void store(final OutputStream out, final String comments ) throws IOException
+ public void store(final OutputStream out, final String comments ) throws IOException
{
- final byte[] bytes = m_propertyString.getBytes( StandardCharsets.ISO_8859_1 );
- FileUtil.copyContents( new ByteArrayInputStream( bytes ), out );
- out.flush();
+ Synchronizer.synchronize(lock, () -> {
+ final byte[] bytes = m_propertyString.getBytes( StandardCharsets.ISO_8859_1 );
+ FileUtil.copyContents( new ByteArrayInputStream( bytes ), out );
+ out.flush();
+ });
}
/**
* {@inheritDoc}
*/
@Override
- public synchronized Object put(final Object arg0, final Object arg1 )
+ public Object put(final Object arg0, final Object arg1 )
{
- // Write the property to the stored string
- writeProperty( arg0, arg1 );
+ return Synchronizer.synchronize(lock, () -> {
+ // Write the property to the stored string
+ writeProperty( arg0, arg1 );
- // Return the result of from the superclass properties object
- return super.put(arg0, arg1);
+ // Return the result from the superclass properties object
+ return super.put(arg0, arg1);
+ });
}
/**
* {@inheritDoc}
*/
@Override
- public synchronized void putAll(final Map< ? , ? > arg0 )
+ public void putAll(final Map< ? , ? > arg0 )
{
- // Shove all of the entries into the property string
- for (final Entry<?, ?> value : arg0.entrySet()) {
- @SuppressWarnings("unchecked") final Entry