DS-3552 read-only Context and Hibernate improvements (#1694)

* Refactor READ ONLY mode in Context and adjust hibernate settings accordingly

* Set Context in READ-ONLY mode when retrieving community lists

* Fix Hibernate EHCache configuration + fix some Hibernate warnings

* Cache authorized actions and group membership when Context is in READ-ONLY mode

* Set default Context mode

* Let ConfigurableBrowse use a READ-ONLY context

* Add 2nd level cache support for Site and EPerson DSpaceObjects

* Added 2nd level caching for Community and Collection

* Fix tests and license checks

* Cache collection and community queries

* Small refactorings + backwards compatibility

* Set Context to READ-ONLY for JSPUI submissions and 'select collection' step

* OAI improvements part 1

* OAI indexing improvements part 1

* OAI indexing improvements part 2

* DS-3552: Only uncache resource policies in AuthorizeService when in read-only

* DS-3552: Additional comment on caching handles

* DS-3552: Fix cache leakage in SolrServiceResourceRestrictionPlugin

* DS-3552: Clear the read-only cache when switching Context modes

* DS-3552: Correct Group 2nd level cache size

* DS-3552: Always clear the cache, except when going from READ_ONLY to READ_ONLY
Tom Desair (Atmire)
2017-05-04 20:12:06 +02:00
committed by Tim Donohue
parent 9f46a1b812
commit 1ccd6d1e13
34 changed files with 1554 additions and 118 deletions

View File

@@ -244,6 +244,12 @@ public class AuthorizeServiceImpl implements AuthorizeService
             return true;
         }
 
+        // If authorization was given before and cached
+        Boolean cachedResult = c.getCachedAuthorizationResult(o, action, e);
+        if (cachedResult != null) {
+            return cachedResult.booleanValue();
+        }
+
         // is eperson set? if not, userToCheck = null (anonymous)
         EPerson userToCheck = null;
         if (e != null)
@@ -256,6 +262,7 @@ public class AuthorizeServiceImpl implements AuthorizeService
             if (isAdmin(c, adminObject))
             {
+                c.cacheAuthorizedAction(o, action, e, true, null);
                 return true;
             }
         }
@@ -297,6 +304,11 @@ public class AuthorizeServiceImpl implements AuthorizeService
             if (ignoreCustomPolicies
                     && ResourcePolicy.TYPE_CUSTOM.equals(rp.getRpType()))
             {
+                if(c.isReadOnly()) {
+                    //When we are in read-only mode, we will cache authorized actions in a different way
+                    //So we remove this resource policy from the cache.
+                    c.uncacheEntity(rp);
+                }
                 continue;
             }
@@ -305,6 +317,7 @@ public class AuthorizeServiceImpl implements AuthorizeService
             {
                 if (rp.getEPerson() != null && rp.getEPerson().equals(userToCheck))
                 {
+                    c.cacheAuthorizedAction(o, action, e, true, rp);
                     return true; // match
                 }
@@ -313,12 +326,20 @@ public class AuthorizeServiceImpl implements AuthorizeService
                 {
                     // group was set, and eperson is a member
                    // of that group
+                    c.cacheAuthorizedAction(o, action, e, true, rp);
                     return true;
                 }
             }
+
+            if(c.isReadOnly()) {
+                //When we are in read-only mode, we will cache authorized actions in a different way
+                //So we remove this resource policy from the cache.
+                c.uncacheEntity(rp);
+            }
         }
 
         // default authorization is denial
+        c.cacheAuthorizedAction(o, action, e, false, null);
         return false;
     }
@@ -361,6 +382,11 @@ public class AuthorizeServiceImpl implements AuthorizeService
             return false;
         }
 
+        Boolean cachedResult = c.getCachedAuthorizationResult(o, Constants.ADMIN, c.getCurrentUser());
+        if (cachedResult != null) {
+            return cachedResult.booleanValue();
+        }
+
         //
         // First, check all Resource Policies directly on this object
         //
@@ -373,6 +399,7 @@ public class AuthorizeServiceImpl implements AuthorizeService
             {
                 if (rp.getEPerson() != null && rp.getEPerson().equals(c.getCurrentUser()))
                 {
+                    c.cacheAuthorizedAction(o, Constants.ADMIN, c.getCurrentUser(), true, rp);
                     return true; // match
                 }
@@ -381,9 +408,16 @@ public class AuthorizeServiceImpl implements AuthorizeService
                 {
                     // group was set, and eperson is a member
                     // of that group
+                    c.cacheAuthorizedAction(o, Constants.ADMIN, c.getCurrentUser(), true, rp);
                     return true;
                 }
             }
+
+            if(c.isReadOnly()) {
+                //When we are in read-only mode, we will cache authorized actions in a different way
+                //So we remove this resource policy from the cache.
+                c.uncacheEntity(rp);
+            }
         }
 
         // If user doesn't have specific Admin permissions on this object,
@@ -393,9 +427,12 @@ public class AuthorizeServiceImpl implements AuthorizeService
         DSpaceObject parent = serviceFactory.getDSpaceObjectService(o).getParentObject(c, o);
         if (parent != null)
         {
-            return isAdmin(c, parent);
+            boolean admin = isAdmin(c, parent);
+            c.cacheAuthorizedAction(o, Constants.ADMIN, c.getCurrentUser(), admin, null);
+            return admin;
        }
 
+        c.cacheAuthorizedAction(o, Constants.ADMIN, c.getCurrentUser(), false, null);
         return false;
     }
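The hunks above bolt a read-only result cache onto both authorize() and isAdmin(). A minimal sketch of the intended effect from a caller's point of view (illustrative only, not part of this commit; `item` stands for any DSpaceObject already loaded):

    Context context = new Context(Context.Mode.READ_ONLY);
    AuthorizeService authorizeService = AuthorizeServiceFactory.getInstance().getAuthorizeService();

    // First call walks the resource policies and caches the (object, action, eperson) result...
    boolean first = authorizeService.authorizeActionBoolean(context, item, Constants.READ);
    // ...repeat calls for the same triple are answered from the Context's read-only cache.
    boolean second = authorizeService.authorizeActionBoolean(context, item, Constants.READ);

In READ_WRITE or BATCH_EDIT mode getCachedAuthorizationResult() always returns null, so behaviour is unchanged for writable contexts.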

View File

@@ -55,7 +55,7 @@ public class ChecksumHistory implements ReloadableEntity<Long>
     private String checksumCalculated;
 
     @ManyToOne
-    @JoinColumn(name = "result")
+    @JoinColumn(name = "result", referencedColumnName = "result_code")
     private ChecksumResult checksumResult;

View File

@@ -7,6 +7,8 @@
  */
 package org.dspace.checker;
 
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.dspace.content.Bitstream;
 
 import javax.persistence.*;
@@ -57,7 +59,7 @@ public class MostRecentChecksum implements Serializable
     private boolean bitstreamFound;
 
     @OneToOne
-    @JoinColumn(name= "result")
+    @JoinColumn(name= "result", referencedColumnName = "result_code")
     private ChecksumResult checksumResult;
 
     /**
@@ -155,4 +157,44 @@ public class MostRecentChecksum implements Serializable
     public void setBitstreamFound(boolean bitstreamFound) {
         this.bitstreamFound = bitstreamFound;
     }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        MostRecentChecksum that = (MostRecentChecksum) o;
+
+        return new EqualsBuilder()
+                .append(toBeProcessed, that.toBeProcessed)
+                .append(matchedPrevChecksum, that.matchedPrevChecksum)
+                .append(infoFound, that.infoFound)
+                .append(bitstreamFound, that.bitstreamFound)
+                .append(bitstream, that.bitstream)
+                .append(expectedChecksum, that.expectedChecksum)
+                .append(currentChecksum, that.currentChecksum)
+                .append(processStartDate, that.processStartDate)
+                .append(processEndDate, that.processEndDate)
+                .append(checksumAlgorithm, that.checksumAlgorithm)
+                .append(checksumResult, that.checksumResult)
+                .isEquals();
+    }
+
+    @Override
+    public int hashCode() {
+        return new HashCodeBuilder(17, 37)
+                .append(bitstream)
+                .append(toBeProcessed)
+                .append(expectedChecksum)
+                .append(currentChecksum)
+                .append(processStartDate)
+                .append(processEndDate)
+                .append(checksumAlgorithm)
+                .append(matchedPrevChecksum)
+                .append(infoFound)
+                .append(bitstreamFound)
+                .append(checksumResult)
+                .toHashCode();
+    }
 }

View File

@@ -37,6 +37,8 @@ import java.util.*;
  */
 @Entity
 @Table(name="collection")
+@Cacheable
+@org.hibernate.annotations.Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, include = "non-lazy")
 public class Collection extends DSpaceObject implements DSpaceObjectLegacySupport
 {

View File

@@ -34,6 +34,8 @@ import java.util.*;
  */
 @Entity
 @Table(name="community")
+@Cacheable
+@org.hibernate.annotations.Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, include = "non-lazy")
 public class Community extends DSpaceObject implements DSpaceObjectLegacySupport
 {
     /** log4j category */

View File

@@ -12,7 +12,9 @@ import org.dspace.content.service.SiteService;
 import org.dspace.core.ConfigurationManager;
 import org.dspace.core.Constants;
 import org.dspace.core.Context;
+import org.hibernate.annotations.CacheConcurrencyStrategy;
 
+import javax.persistence.Cacheable;
 import javax.persistence.Entity;
 import javax.persistence.Table;
 import javax.persistence.Transient;
@@ -22,6 +24,8 @@ import javax.persistence.Transient;
  * By default, the handle suffix "0" represents the Site, e.g. "1721.1/0"
  */
 @Entity
+@Cacheable
+@org.hibernate.annotations.Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE)
 @Table(name = "site")
 public class Site extends DSpaceObject
 {
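The @Cacheable / @Cache annotations added to Site, Community, Collection, EPerson and Group enable Hibernate's shared second-level cache (backed by EHCache in DSpace) for these entities. A rough illustration of what that buys, assuming the corresponding cache regions are configured (sketch only, not from the diff):

    // An entity loaded in one Context...
    Context first = new Context(Context.Mode.READ_ONLY);
    Site site = ContentServiceFactory.getInstance().getSiteService().findSite(first);  // SQL query
    first.complete();

    // ...can later be materialized from the second-level cache in a brand new
    // Context, without touching the database again.
    Context second = new Context(Context.Mode.READ_ONLY);
    site = ContentServiceFactory.getInstance().getSiteService().findSite(second);
    second.complete();

NONSTRICT_READ_WRITE fits these objects because they change rarely and a brief window of staleness after an update is acceptable.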

View File

@@ -121,6 +121,8 @@ public class CollectionDAOImpl extends AbstractHibernateDSODAO<Collection> imple
                 Restrictions.eq("resourcePolicy.eperson", ePerson),
                 actionQuery
         ));
+        criteria.setCacheable(true);
+
         return list(criteria);
     }
 
@@ -160,6 +162,8 @@ public class CollectionDAOImpl extends AbstractHibernateDSODAO<Collection> imple
         query.append(" AND rp.epersonGroup.id IN (select g.id from Group g where (from EPerson e where e.id = :eperson_id) in elements(epeople))");
         Query hibernateQuery = createQuery(context, query.toString());
         hibernateQuery.setParameter("eperson_id", ePerson.getID());
+        hibernateQuery.setCacheable(true);
+
         return list(hibernateQuery);

View File

@@ -91,6 +91,7 @@ public class CommunityDAOImpl extends AbstractHibernateDSODAO<Community> impleme
         Query query = createQuery(context, queryBuilder.toString());
         query.setParameter(sortField.toString(), sortField.getID());
+        query.setCacheable(true);
 
         return findMany(context, query);
     }
 
@@ -129,6 +130,8 @@ public class CommunityDAOImpl extends AbstractHibernateDSODAO<Community> impleme
                 Restrictions.eq("resourcePolicy.eperson", ePerson),
                 actionQuery
         ));
+        criteria.setCacheable(true);
+
         return list(criteria);
     }
 
@@ -164,6 +167,8 @@ public class CommunityDAOImpl extends AbstractHibernateDSODAO<Community> impleme
         query.append(" AND rp.epersonGroup.id IN (select g.id from Group g where (from EPerson e where e.id = :eperson_id) in elements(epeople))");
         Query hibernateQuery = createQuery(context, query.toString());
         hibernateQuery.setParameter("eperson_id", ePerson.getID());
+        hibernateQuery.setCacheable(true);
+
         return list(hibernateQuery);
     }

View File

@@ -32,6 +32,7 @@ public class SiteDAOImpl extends AbstractHibernateDAO<Site> implements SiteDAO
     @Override
     public Site findSite(Context context) throws SQLException {
         Criteria criteria = createCriteria(context, Site.class);
+        criteria.setCacheable(true);
         return uniqueResult(criteria);
     }
 }
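All of the setCacheable(true) calls in these DAOs mark query results for Hibernate's query cache, which is separate from the entity cache and must be enabled explicitly. In Hibernate that dependency is controlled by two standard properties (where they are set is deployment-specific; shown here only as a reminder):

    hibernate.cache.use_second_level_cache = true
    hibernate.cache.use_query_cache = true

A cached query stores only entity identifiers; the entities themselves are resolved through the second-level cache, which is why the @Cacheable annotations above and these DAO changes go hand in hand.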

View File

@@ -8,6 +8,8 @@
 package org.dspace.core;
 
 import org.apache.log4j.Logger;
+import org.dspace.authorize.ResourcePolicy;
+import org.dspace.content.DSpaceObject;
 import org.dspace.eperson.EPerson;
 import org.dspace.eperson.Group;
 import org.dspace.eperson.factory.EPersonServiceFactory;
@@ -44,9 +46,6 @@ public class Context
 {
     private static final Logger log = Logger.getLogger(Context.class);
 
-    /** option flags */
-    public static final short READ_ONLY = 0x01;
-
     /** Current user - null means anonymous access */
     private EPerson currentUser;
@@ -77,13 +76,22 @@ public class Context
     /** Event dispatcher name */
     private String dispName = null;
 
-    /** options */
-    private short options = 0;
+    /** Context mode */
+    private Mode mode = Mode.READ_WRITE;
+
+    /** Cache that is only used when the context is in READ_ONLY mode */
+    private ContextReadOnlyCache readOnlyCache = new ContextReadOnlyCache();
 
     protected EventService eventService;
     private DBConnection dbConnection;
 
+    public enum Mode {
+        READ_ONLY,
+        READ_WRITE,
+        BATCH_EDIT
+    }
+
     static
     {
         // Before initializing a Context object, we need to ensure the database
@@ -100,6 +108,7 @@ public class Context
     }
 
     protected Context(EventService eventService, DBConnection dbConnection) {
+        this.mode = Mode.READ_WRITE;
         this.eventService = eventService;
         this.dbConnection = dbConnection;
         init();
@@ -112,18 +121,19 @@ public class Context
      */
     public Context()
     {
+        this.mode = Mode.READ_WRITE;
         init();
     }
 
     /**
-     * Construct a new context object with passed options. A database connection is opened.
+     * Construct a new context object with the given mode enabled. A database connection is opened.
      * No user is authenticated.
      *
-     * @param options context operation flags
+     * @param mode The mode to use when opening the context.
      */
-    public Context(short options)
+    public Context(Mode mode)
     {
-        this.options = options;
+        this.mode = mode;
         init();
     }
@@ -160,6 +170,7 @@ public class Context
         authStateChangeHistory = new Stack<Boolean>();
         authStateClassCallHistory = new Stack<String>();
+        setMode(this.mode);
     }
 
     /**
@@ -382,12 +393,16 @@ public class Context
             log.info("commit() was called on a closed Context object. No changes to commit.");
         }
 
+        if(isReadOnly()) {
+            throw new UnsupportedOperationException("You cannot commit a read-only context");
+        }
+
         // Our DB Connection (Hibernate) will decide if an actual commit is required or not
         try
         {
             // As long as we have a valid, writeable database connection,
             // commit any changes made as part of the transaction
-            if (isValid() && !isReadOnly())
+            if (isValid())
             {
                 dispatchEvents();
             }
@@ -518,7 +533,7 @@ public class Context
         try
         {
             // Rollback if we have a database connection, and it is NOT Read Only
-            if (isValid() && !isReadOnly())
+            if (isValid())
             {
                 dbConnection.rollback();
             }
@@ -566,7 +581,7 @@ public class Context
      */
     public boolean isReadOnly()
     {
-        return (options & READ_ONLY) > 0;
+        return mode != null && mode == Mode.READ_ONLY;
     }
 
     public void setSpecialGroup(UUID groupID)
@@ -632,15 +647,69 @@ public class Context
     /**
-     * Returns the size of the cache of all objects that have been read from the database so far.
+     * Returns the size of the cache of all objects that have been read from the database so far. A larger number
+     * means that more memory is consumed by the cache. This also has a negative impact on the query performance. In
+     * that case you should consider uncaching entities when they are no longer needed (see
+     * {@link Context#uncacheEntity(ReloadableEntity) uncacheEntity}).
      *
+     * @return connection cache size
      * @throws SQLException When connecting to the active cache fails.
      */
     public long getCacheSize() throws SQLException {
         return this.getDBConnection().getCacheSize();
     }
 
+    /**
+     * Change the mode of this current context.
+     *
+     * BATCH_EDIT: Enabling batch edit mode means that the database connection is configured so that it is optimized to
+     * process a large number of records.
+     *
+     * READ_ONLY: READ ONLY mode will tell the database we are not going to do any updates. This means it can disable
+     * optimisations for delaying or grouping updates.
+     *
+     * READ_WRITE: This is the default mode and enables the normal database behaviour. This behaviour is optimal for
+     * querying and updating a small number of records.
+     *
+     * @param newMode The mode to put this context in
+     */
+    public void setMode(Mode newMode) {
+        try {
+            //update the database settings
+            switch (newMode) {
+                case BATCH_EDIT:
+                    dbConnection.setConnectionMode(true, false);
+                    break;
+                case READ_ONLY:
+                    dbConnection.setConnectionMode(false, true);
+                    break;
+                case READ_WRITE:
+                    dbConnection.setConnectionMode(false, false);
+                    break;
+                default:
+                    log.warn("New context mode detected that has not been configured.");
+                    break;
+            }
+        } catch(SQLException ex) {
+            log.warn("Unable to set database connection mode", ex);
+        }
+
+        //Always clear the cache, except when going from READ_ONLY to READ_ONLY
+        if(mode != Mode.READ_ONLY || newMode != Mode.READ_ONLY) {
+            //clear our read-only cache to prevent any inconsistencies
+            readOnlyCache.clear();
+        }
+
+        //save the new mode
+        mode = newMode;
+    }
+
+    /**
+     * The current database mode of this context.
+     * @return The current mode
+     */
+    public Mode getCurrentMode() {
+        return mode;
+    }
+
     /**
      * Enable or disable "batch processing mode" for this context.
     *
@@ -653,16 +722,22 @@ public class Context
      * @param batchModeEnabled When true, batch processing mode will be enabled. If false, it will be disabled.
      * @throws SQLException When configuring the database connection fails.
      */
+    @Deprecated
     public void enableBatchMode(boolean batchModeEnabled) throws SQLException {
-        dbConnection.setOptimizedForBatchProcessing(batchModeEnabled);
+        if(batchModeEnabled) {
+            setMode(Mode.BATCH_EDIT);
+        } else {
+            setMode(Mode.READ_WRITE);
+        }
     }
 
     /**
      * Check if "batch processing mode" is enabled for this context.
     * @return True if batch processing mode is enabled, false otherwise.
     */
+    @Deprecated
    public boolean isBatchModeEnabled() {
-        return dbConnection.isOptimizedForBatchProcessing();
+        return mode != null && mode == Mode.BATCH_EDIT;
    }
 
     /**
@@ -691,6 +766,52 @@ public class Context
         dbConnection.uncacheEntity(entity);
     }
 
+    public Boolean getCachedAuthorizationResult(DSpaceObject dspaceObject, int action, EPerson eperson) {
+        if(isReadOnly()) {
+            return readOnlyCache.getCachedAuthorizationResult(dspaceObject, action, eperson);
+        } else {
+            return null;
+        }
+    }
+
+    public void cacheAuthorizedAction(DSpaceObject dspaceObject, int action, EPerson eperson, Boolean result, ResourcePolicy rp) {
+        if(isReadOnly()) {
+            readOnlyCache.cacheAuthorizedAction(dspaceObject, action, eperson, result);
+            try {
+                uncacheEntity(rp);
+            } catch (SQLException e) {
+                log.warn("Unable to uncache a resource policy when in read-only mode", e);
+            }
+        }
+    }
+
+    public Boolean getCachedGroupMembership(Group group, EPerson eperson) {
+        if(isReadOnly()) {
+            return readOnlyCache.getCachedGroupMembership(group, eperson);
+        } else {
+            return null;
+        }
+    }
+
+    public void cacheGroupMembership(Group group, EPerson eperson, Boolean isMember) {
+        if(isReadOnly()) {
+            readOnlyCache.cacheGroupMembership(group, eperson, isMember);
+        }
+    }
+
+    public void cacheAllMemberGroupsSet(EPerson ePerson, Set<Group> groups) {
+        if(isReadOnly()) {
+            readOnlyCache.cacheAllMemberGroupsSet(ePerson, groups);
+        }
+    }
+
+    public Set<Group> getCachedAllMemberGroupsSet(EPerson ePerson) {
+        if(isReadOnly()) {
+            return readOnlyCache.getCachedAllMemberGroupsSet(ePerson);
+        } else {
+            return null;
+        }
+    }
+
     /**
      * Reload all entities related to this context.
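Taken together, the Mode enum and the read-only cache give callers a simple lifecycle. A usage sketch built only from the API in the diff above (illustrative, not part of the commit):

    // Query-heavy work: authorization and group-membership results get cached,
    // Hibernate flushing is disabled, and commit() is forbidden.
    Context context = new Context(Context.Mode.READ_ONLY);
    // ... lookups ...

    // Switching modes clears the read-only cache (unless READ_ONLY -> READ_ONLY)
    // and reconfigures the connection for bulk writes.
    context.setMode(Context.Mode.BATCH_EDIT);
    // ... bulk updates ...

    context.setMode(Context.Mode.READ_WRITE);  // back to the default behaviour
    context.complete();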

View File

@@ -0,0 +1,103 @@
+/**
+ * The contents of this file are subject to the license and copyright
+ * detailed in the LICENSE and NOTICE files at the root of the source
+ * tree and available online at
+ *
+ * http://www.dspace.org/license/
+ */
+package org.dspace.core;
+
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.commons.lang3.tuple.ImmutableTriple;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.commons.lang3.tuple.Triple;
+import org.dspace.content.DSpaceObject;
+import org.dspace.eperson.EPerson;
+import org.dspace.eperson.Group;
+import org.springframework.util.CollectionUtils;
+
+import java.util.HashMap;
+import java.util.Set;
+
+/**
+ * Object that manages the read-only caches for the Context class
+ */
+public class ContextReadOnlyCache {
+
+    /**
+     * Authorized actions cache that is used when the context is in READ_ONLY mode.
+     * The key of the cache is: DSpace Object ID, action ID, Eperson ID.
+     */
+    private final HashMap<Triple<String, Integer, String>, Boolean> authorizedActionsCache = new HashMap<>();
+
+    /**
+     * Group membership cache that is used when the context is in READ_ONLY mode.
+     * The key of the cache is: Group Name, Eperson ID.
+     */
+    private final HashMap<Pair<String, String>, Boolean> groupMembershipCache = new HashMap<>();
+
+    /**
+     * Cache for all the groups the current ePerson is a member of when the context is in READ_ONLY mode.
+     */
+    private final HashMap<String, Set<Group>> allMemberGroupsCache = new HashMap<>();
+
+    public Boolean getCachedAuthorizationResult(DSpaceObject dspaceObject, int action, EPerson eperson) {
+        return authorizedActionsCache.get(buildAuthorizedActionKey(dspaceObject, action, eperson));
+    }
+
+    public void cacheAuthorizedAction(DSpaceObject dspaceObject, int action, EPerson eperson, Boolean result) {
+        authorizedActionsCache.put(buildAuthorizedActionKey(dspaceObject, action, eperson), result);
+    }
+
+    public Boolean getCachedGroupMembership(Group group, EPerson eperson) {
+        String allMembersGroupKey = buildAllMembersGroupKey(eperson);
+
+        if (CollectionUtils.isEmpty(allMemberGroupsCache.get(allMembersGroupKey))) {
+            return groupMembershipCache.get(buildGroupMembershipKey(group, eperson));
+        } else {
+            return allMemberGroupsCache.get(allMembersGroupKey).contains(group);
+        }
+    }
+
+    public void cacheGroupMembership(Group group, EPerson eperson, Boolean isMember) {
+        if (CollectionUtils.isEmpty(allMemberGroupsCache.get(buildAllMembersGroupKey(eperson)))) {
+            groupMembershipCache.put(buildGroupMembershipKey(group, eperson), isMember);
+        }
+    }
+
+    public void cacheAllMemberGroupsSet(EPerson ePerson, Set<Group> groups) {
+        allMemberGroupsCache.put(buildAllMembersGroupKey(ePerson), groups);
+
+        //clear the individual groupMembershipCache as we have all memberships now.
+        groupMembershipCache.clear();
+    }
+
+    public Set<Group> getCachedAllMemberGroupsSet(EPerson ePerson) {
+        return allMemberGroupsCache.get(buildAllMembersGroupKey(ePerson));
+    }
+
+    public void clear() {
+        authorizedActionsCache.clear();
+        groupMembershipCache.clear();
+        allMemberGroupsCache.clear();
+    }
+
+    private String buildAllMembersGroupKey(EPerson ePerson) {
+        return ePerson == null ? "" : ePerson.getID().toString();
+    }
+
+    private ImmutableTriple<String, Integer, String> buildAuthorizedActionKey(DSpaceObject dspaceObject, int action, EPerson eperson) {
+        return new ImmutableTriple<>(dspaceObject == null ? "" : dspaceObject.getID().toString(),
+                Integer.valueOf(action),
+                eperson == null ? "" : eperson.getID().toString());
+    }
+
+    private Pair<String, String> buildGroupMembershipKey(Group group, EPerson eperson) {
+        return new ImmutablePair<>(group == null ? "" : group.getName(),
+                eperson == null ? "" : eperson.getID().toString());
+    }
+}
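Worth noting: the cache keys are plain ID/name strings rather than entity references, so the read-only cache never pins Hibernate entities in memory, and a null DSpaceObject, Group or EPerson (anonymous) simply keys as the empty string. Illustrative only (`item` stands for any entity or mock with an ID):

    ContextReadOnlyCache cache = new ContextReadOnlyCache();
    cache.cacheAuthorizedAction(item, Constants.READ, null, true);                  // anonymous user keyed as ""
    Boolean hit = cache.getCachedAuthorizationResult(item, Constants.READ, null);   // Boolean.TRUE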

View File

@@ -40,7 +40,7 @@ public interface DBConnection<T> {
 
     public DatabaseConfigVO getDatabaseConfig() throws SQLException;
 
-    public void setOptimizedForBatchProcessing(boolean batchOptimized) throws SQLException;
+    public void setConnectionMode(boolean batchOptimized, boolean readOnlyOptimized) throws SQLException;
 
     public boolean isOptimizedForBatchProcessing();

View File

@@ -14,11 +14,11 @@ import java.sql.SQLException;
 import javax.persistence.EntityManagerFactory;
 import javax.sql.DataSource;
 
+import org.dspace.authorize.ResourcePolicy;
+import org.dspace.content.*;
+import org.dspace.handle.Handle;
 import org.dspace.storage.rdbms.DatabaseConfigVO;
-import org.hibernate.FlushMode;
-import org.hibernate.Session;
-import org.hibernate.SessionFactory;
-import org.hibernate.Transaction;
+import org.hibernate.*;
 import org.hibernate.engine.spi.SessionFactoryImplementor;
 import org.hibernate.proxy.HibernateProxyHelper;
 import org.hibernate.resource.transaction.spi.TransactionStatus;
@@ -38,12 +38,13 @@ public class HibernateDBConnection implements DBConnection<Session> {
     private SessionFactory sessionFactory;
 
     private boolean batchModeEnabled = false;
+    private boolean readOnlyEnabled = false;
 
     @Override
     public Session getSession() throws SQLException {
         if(!isTransActionAlive()){
             sessionFactory.getCurrentSession().beginTransaction();
-            configureBatchMode();
+            configureDatabaseMode();
         }
         return sessionFactory.getCurrentSession();
     }
@@ -136,9 +137,10 @@ public class HibernateDBConnection implements DBConnection<Session> {
     }
 
     @Override
-    public void setOptimizedForBatchProcessing(final boolean batchOptimized) throws SQLException {
+    public void setConnectionMode(final boolean batchOptimized, final boolean readOnlyOptimized) throws SQLException {
         this.batchModeEnabled = batchOptimized;
-        configureBatchMode();
+        this.readOnlyEnabled = readOnlyOptimized;
+        configureDatabaseMode();
     }
 
     @Override
@@ -146,9 +148,11 @@ public class HibernateDBConnection implements DBConnection<Session> {
         return batchModeEnabled;
     }
 
-    private void configureBatchMode() throws SQLException {
+    private void configureDatabaseMode() throws SQLException {
         if(batchModeEnabled) {
             getSession().setFlushMode(FlushMode.ALWAYS);
+        } else if(readOnlyEnabled) {
+            getSession().setFlushMode(FlushMode.MANUAL);
         } else {
             getSession().setFlushMode(FlushMode.AUTO);
         }
@@ -163,6 +167,83 @@ public class HibernateDBConnection implements DBConnection<Session> {
      */
     @Override
     public <E extends ReloadableEntity> void uncacheEntity(E entity) throws SQLException {
+        if(entity != null) {
+
+            if (entity instanceof DSpaceObject) {
+                DSpaceObject dso = (DSpaceObject) entity;
+
+                // The metadatavalue relation has CascadeType.ALL, so they are evicted automatically
+                // and we don't need to uncache the values explicitly.
+
+                if(Hibernate.isInitialized(dso.getHandles())) {
+                    for (Handle handle : Utils.emptyIfNull(dso.getHandles())) {
+                        uncacheEntity(handle);
+                    }
+                }
+                if(Hibernate.isInitialized(dso.getResourcePolicies())) {
+                    for (ResourcePolicy policy : Utils.emptyIfNull(dso.getResourcePolicies())) {
+                        uncacheEntity(policy);
+                    }
+                }
+            }
+
+            if (entity instanceof Item) {
+                Item item = (Item) entity;
+                if(Hibernate.isInitialized(item.getSubmitter())) {
+                    uncacheEntity(item.getSubmitter());
+                }
+                if(Hibernate.isInitialized(item.getBundles())) {
+                    for (Bundle bundle : Utils.emptyIfNull(item.getBundles())) {
+                        uncacheEntity(bundle);
+                    }
+                }
+            } else if (entity instanceof Bundle) {
+                Bundle bundle = (Bundle) entity;
+                if(Hibernate.isInitialized(bundle.getBitstreams())) {
+                    for (Bitstream bitstream : Utils.emptyIfNull(bundle.getBitstreams())) {
+                        uncacheEntity(bitstream);
+                    }
+                }
+            //} else if(entity instanceof Bitstream) {
+            //    Bitstream bitstream = (Bitstream) entity;
+            //    No specific child entities to decache
+            } else if (entity instanceof Community) {
+                Community community = (Community) entity;
+                if(Hibernate.isInitialized(community.getAdministrators())) {
+                    uncacheEntity(community.getAdministrators());
+                }
+                if(Hibernate.isInitialized(community.getLogo())) {
+                    uncacheEntity(community.getLogo());
+                }
+            } else if (entity instanceof Collection) {
+                Collection collection = (Collection) entity;
+                if(Hibernate.isInitialized(collection.getLogo())) {
+                    uncacheEntity(collection.getLogo());
+                }
+                if(Hibernate.isInitialized(collection.getAdministrators())) {
+                    uncacheEntity(collection.getAdministrators());
+                }
+                if(Hibernate.isInitialized(collection.getSubmitters())) {
+                    uncacheEntity(collection.getSubmitters());
+                }
+                if(Hibernate.isInitialized(collection.getTemplateItem())) {
+                    uncacheEntity(collection.getTemplateItem());
+                }
+                if(Hibernate.isInitialized(collection.getWorkflowStep1())) {
+                    uncacheEntity(collection.getWorkflowStep1());
+                }
+                if(Hibernate.isInitialized(collection.getWorkflowStep2())) {
+                    uncacheEntity(collection.getWorkflowStep2());
+                }
+                if(Hibernate.isInitialized(collection.getWorkflowStep3())) {
+                    uncacheEntity(collection.getWorkflowStep3());
+                }
+            }
+
             getSession().evict(entity);
         }
+    }
 }
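The FlushMode choice in configureDatabaseMode() is the heart of the read-only optimisation. A summary of Hibernate's own semantics, mapped onto what the code above configures:

    // FlushMode.ALWAYS – flush the session before every query; used for BATCH_EDIT so
    //                    queries see the large volume of pending inserts/updates.
    // FlushMode.MANUAL – never flush unless explicitly asked; used for READ_ONLY,
    //                    letting Hibernate skip dirty-checking of loaded entities.
    // FlushMode.AUTO   – flush only when a query would otherwise return stale state;
    //                    the default, used for READ_WRITE.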

View File

@@ -47,10 +47,9 @@ public class IndexClient {
      * A general class of exceptions produced by failed or interrupted I/O operations.
      * @throws SearchServiceException if something went wrong with querying the solr server
      */
-    public static void main(String[] args)
-            throws SQLException, IOException, SearchServiceException
-    {
-        Context context = new Context();
+    public static void main(String[] args) throws SQLException, IOException, SearchServiceException {
+
+        Context context = new Context(Context.Mode.READ_ONLY);
         context.turnOffAuthorisationSystem();
 
         String usage = "org.dspace.discovery.IndexClient [-cbhf] | [-r <handle>] | [-i <handle>] or nothing to update/clean an existing index.";
@@ -151,8 +150,6 @@ public class IndexClient {
                 throw new IllegalArgumentException("Cannot resolve " + handle + " to a DSpace object");
             }
             log.info("Forcibly Indexing " + handle);
-            // Enable batch mode; we may be indexing a large number of items
-            context.enableBatchMode(true);
             final long startTimeMillis = System.currentTimeMillis();
             final long count = indexAll(indexer, ContentServiceFactory.getInstance().getItemService(), context, dso);
             final long seconds = (System.currentTimeMillis() - startTimeMillis ) / 1000;

View File

@@ -8,32 +8,27 @@
 package org.dspace.discovery;
 
-import org.apache.commons.collections.CollectionUtils;
 import org.apache.log4j.Logger;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.common.SolrInputDocument;
 import org.dspace.authorize.ResourcePolicy;
 import org.dspace.authorize.service.AuthorizeService;
+import org.dspace.authorize.service.ResourcePolicyService;
+import org.dspace.content.DSpaceObject;
 import org.dspace.content.service.CollectionService;
 import org.dspace.content.service.CommunityService;
-import org.dspace.authorize.service.ResourcePolicyService;
-import org.dspace.content.Collection;
-import org.dspace.content.Community;
-import org.dspace.content.DSpaceObject;
 import org.dspace.core.Constants;
 import org.dspace.core.Context;
 import org.dspace.core.LogManager;
 import org.dspace.eperson.EPerson;
 import org.dspace.eperson.Group;
 import org.dspace.eperson.service.GroupService;
+import org.dspace.services.factory.DSpaceServicesFactory;
 import org.springframework.beans.factory.annotation.Autowired;
 
 import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Iterator;
 import java.util.List;
-import java.util.UUID;
+import java.util.Set;
-import org.dspace.services.factory.DSpaceServicesFactory;
 
 /**
  * Restriction plugin that ensures that indexes all the resource policies.
@@ -74,6 +69,9 @@ public class SolrServiceResourceRestrictionPlugin implements SolrServiceIndexPlu
                 }
                 document.addField("read", fieldValue);
+
+                //remove the policy from the cache to save memory
+                context.uncacheEntity(resourcePolicy);
             }
         } catch (SQLException e) {
             log.error(LogManager.getHeader(context, "Error while indexing resource policies", "DSpace object: (id " + dso.getID() + " type " + dso.getType() + ")"));
@@ -98,7 +96,7 @@ public class SolrServiceResourceRestrictionPlugin implements SolrServiceIndexPlu
         }
 
         //Retrieve all the groups the current user is a member of !
-        List<Group> groups = groupService.allMemberGroups(context, currentUser);
+        Set<Group> groups = groupService.allMemberGroupsSet(context, currentUser);
         for (Group group : groups) {
             resourceQuery.append(" OR g").append(group.getID());
         }

View File

@@ -16,6 +16,7 @@ import org.dspace.core.Constants;
 import org.dspace.core.Context;
 import org.dspace.eperson.factory.EPersonServiceFactory;
 import org.dspace.eperson.service.EPersonService;
+import org.hibernate.annotations.CacheConcurrencyStrategy;
 import org.hibernate.proxy.HibernateProxyHelper;
 
 import javax.persistence.*;
@@ -31,6 +32,8 @@ import java.util.List;
  * @version $Revision$
 */
 @Entity
+@Cacheable
+@org.hibernate.annotations.Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, include = "non-lazy")
 @Table(name = "eperson")
 public class EPerson extends DSpaceObject implements DSpaceObjectLegacySupport
 {

View File

@@ -16,6 +16,7 @@ import org.dspace.core.Constants;
 import org.dspace.core.Context;
 import org.dspace.eperson.factory.EPersonServiceFactory;
 import org.dspace.eperson.service.GroupService;
+import org.hibernate.annotations.CacheConcurrencyStrategy;
 import org.hibernate.proxy.HibernateProxyHelper;
 
 import javax.persistence.*;
@@ -29,6 +30,8 @@ import java.util.List;
  * @author David Stuve
 */
 @Entity
+@Cacheable
+@org.hibernate.annotations.Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, include = "non-lazy")
 @Table(name = "epersongroup" )
 public class Group extends DSpaceObject implements DSpaceObjectLegacySupport
 {

View File

@@ -7,6 +7,7 @@
  */
 package org.dspace.eperson;
 
+import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.hibernate.proxy.HibernateProxyHelper;
 
 import javax.persistence.*;
@@ -79,4 +80,12 @@ public class Group2GroupCache implements Serializable {
         }
         return true;
     }
+
+    @Override
+    public int hashCode() {
+        return new org.apache.commons.lang.builder.HashCodeBuilder()
+                .append(parent == null ? "" : parent.getID())
+                .append(child == null ? "" : child.getID())
+                .toHashCode();
+    }
 }

View File

@@ -158,38 +158,38 @@ public class GroupServiceImpl extends DSpaceObjectServiceImpl<Group> implements
     @Override
     public boolean isMember(Context context, Group group) throws SQLException {
-        return isMember(context, group.getName());
-    }
-
-    @Override
-    public boolean isMember(final Context context, final String groupName) throws SQLException {
-        // special, everyone is member of group 0 (anonymous)
-        if (StringUtils.equals(groupName, Group.ANONYMOUS))
-        {
-            return true;
-        } else if (context.getCurrentUser() != null) {
-            EPerson currentUser = context.getCurrentUser();
-            //First check the special groups
-            List<Group> specialGroups = context.getSpecialGroups();
-            if (CollectionUtils.isNotEmpty(specialGroups)) {
-                for (Group specialGroup : specialGroups)
-                {
-                    //Check if the current special group is the one we are looking for OR retrieve all groups & make a check here.
-                    if (StringUtils.equals(specialGroup.getName(), groupName) || allMemberGroups(context, currentUser).contains(findByName(context, groupName)))
-                    {
-                        return true;
-                    }
-                }
-            }
-            //lookup eperson in normal groups and subgroups
-            return epersonInGroup(context, groupName, currentUser);
+        EPerson currentUser = context.getCurrentUser();
+
+        if(group == null) {
+            return false;
+
+        // special, everyone is member of group 0 (anonymous)
+        } else if (StringUtils.equals(group.getName(), Group.ANONYMOUS)) {
+            return true;
+
+        } else if(currentUser != null) {
+            Boolean cachedGroupMembership = context.getCachedGroupMembership(group, currentUser);
+
+            if(cachedGroupMembership != null) {
+                return cachedGroupMembership.booleanValue();
+            } else if(CollectionUtils.isNotEmpty(context.getSpecialGroups())) {
+                Set<Group> allMemberGroups = allMemberGroupsSet(context, currentUser);
+                boolean result = allMemberGroups.contains(group);
+                context.cacheGroupMembership(group, currentUser, result);
+                return result;
+            } else {
+                //lookup eperson in normal groups and subgroups
+                boolean result = epersonInGroup(context, group.getName(), currentUser);
+                context.cacheGroupMembership(group, currentUser, result);
+                return result;
+            }
         } else {
             // Check also for anonymous users if IP authentication used
             List<Group> specialGroups = context.getSpecialGroups();
             if(CollectionUtils.isNotEmpty(specialGroups)) {
                 for(Group specialGroup : specialGroups){
-                    if (StringUtils.equals(specialGroup.getName(), groupName)) {
+                    if (StringUtils.equals(specialGroup.getName(), group.getName())) {
                         return true;
                     }
                 }
@@ -198,8 +198,23 @@ public class GroupServiceImpl extends DSpaceObjectServiceImpl<Group> implements
             }
         }
     }
 
+    @Override
+    public boolean isMember(final Context context, final String groupName) throws SQLException {
+        return isMember(context, findByName(context, groupName));
+    }
+
     @Override
     public List<Group> allMemberGroups(Context context, EPerson ePerson) throws SQLException {
+        return new ArrayList<>(allMemberGroupsSet(context, ePerson));
+    }
+
+    @Override
+    public Set<Group> allMemberGroupsSet(Context context, EPerson ePerson) throws SQLException {
+        Set<Group> cachedGroupMembership = context.getCachedAllMemberGroupsSet(ePerson);
+        if(cachedGroupMembership != null) {
+            return cachedGroupMembership;
+        }
+
         Set<Group> groups = new HashSet<>();
 
         if (ePerson != null)
@@ -225,7 +240,6 @@ public class GroupServiceImpl extends DSpaceObjectServiceImpl<Group> implements
         // all the users are members of the anonymous group
         groups.add(findByName(context, Group.ANONYMOUS));
 
-
         List<Group2GroupCache> groupCache = group2GroupCacheDAO.findByChildren(context, groups);
 
         // now we have all owning groups, also grab all parents of owning groups
         // yes, I know this could have been done as one big query and a union,
@@ -234,7 +248,8 @@ public class GroupServiceImpl extends DSpaceObjectServiceImpl<Group> implements
             groups.add(group2GroupCache.getParent());
         }
 
-        return new ArrayList<>(groups);
+        context.cacheAllMemberGroupsSet(ePerson, groups);
+        return groups;
     }
 
     @Override
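With the rewritten isMember() above, a read-only Context computes the full membership set once and answers every later check with a Set lookup. Illustrative sketch using only APIs from this diff (`someGroup` is an assumed Group reference):

    Context context = new Context(Context.Mode.READ_ONLY);
    GroupService groupService = EPersonServiceFactory.getInstance().getGroupService();

    // One walk of the group2group cache table, result cached on the Context...
    Set<Group> all = groupService.allMemberGroupsSet(context, context.getCurrentUser());
    // ...so this is answered from the cached set, no further queries.
    boolean member = groupService.isMember(context, someGroup);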

View File

@@ -43,6 +43,8 @@ public class EPersonDAOImpl extends AbstractHibernateDSODAO<EPerson> implements
         // All email addresses are stored as lowercase, so ensure that the email address is lowercased for the lookup
         Criteria criteria = createCriteria(context, EPerson.class);
         criteria.add(Restrictions.eq("email", email.toLowerCase()));
+        criteria.setCacheable(true);
+
         return uniqueResult(criteria);
     }
 
@@ -52,6 +54,8 @@ public class EPersonDAOImpl extends AbstractHibernateDSODAO<EPerson> implements
     {
         Criteria criteria = createCriteria(context, EPerson.class);
         criteria.add(Restrictions.eq("netid", netid));
+        criteria.setCacheable(true);
+
         return uniqueResult(criteria);
     }

View File

@@ -9,6 +9,7 @@ package org.dspace.eperson.service;
 
 import java.sql.SQLException;
 import java.util.List;
+import java.util.Set;
 
 import org.dspace.authorize.AuthorizeException;
 import org.dspace.content.MetadataField;
@@ -154,6 +155,8 @@ public interface GroupService extends DSpaceObjectService<Group>, DSpaceObjectLe
      */
     public List<Group> allMemberGroups(Context context, EPerson ePerson) throws SQLException;
 
+    Set<Group> allMemberGroupsSet(Context context, EPerson ePerson) throws SQLException;
+
     /**
      * Get all of the epeople who are a member of the
      * specified group, or a member of a sub-group of the

View File

@@ -220,12 +220,13 @@ public class OAIHarvester {
      */
     public void runHarvest() throws SQLException, IOException, AuthorizeException
     {
-        boolean originalMode = ourContext.isBatchModeEnabled();
-        ourContext.enableBatchMode(true);
+        Context.Mode originalMode = ourContext.getCurrentMode();
+        ourContext.setMode(Context.Mode.BATCH_EDIT);
 
         // figure out the relevant parameters
         String oaiSource = harvestRow.getOaiSource();
         String oaiSetId = harvestRow.getOaiSetId();
+
         //If we have all selected then make sure that we do not include a set filter
         if ("all".equals(oaiSetId))
         {
@@ -437,7 +438,7 @@
         log.info("Harvest from " + oaiSource + " successful. The process took " + timeTaken + " milliseconds. Harvested " + currentRecord + " items.");
         harvestedCollection.update(ourContext, harvestRow);
 
-        ourContext.enableBatchMode(originalMode);
+        ourContext.setMode(originalMode);
     }
 
     private void intermediateCommit() throws SQLException {

View File

@@ -320,7 +320,7 @@ public class RDFConsumer implements Consumer
         // create a new context, to be sure to work as anonymous user
         // we don't want to store private data in a triplestore with public
         // SPARQL endpoint.
-        ctx = new Context(Context.READ_ONLY);
+        ctx = new Context(Context.Mode.READ_ONLY);
         if (toDelete == null)
         {
             log.debug("Deletion queue does not exists, creating empty queue.");

View File

@@ -84,7 +84,7 @@ public class RDFizer {
         this.dryrun = false;
         this.lang = "TURTLE";
         this.processed = new CopyOnWriteArraySet<UUID>();
-        this.context = new Context(Context.READ_ONLY);
+        this.context = new Context(Context.Mode.READ_ONLY);
 
         this.configurationService = DSpaceServicesFactory.getInstance().getConfigurationService();
         this.contentServiceFactory = ContentServiceFactory.getInstance();
@@ -818,7 +818,7 @@
         // data into a triple store that provides a public sparql endpoint.
         // all exported rdf data can be read by anonymous users.
         // We won't change the database => read_only context will assure this.
-        Context context = new Context(Context.READ_ONLY);
+        Context context = new Context(Context.Mode.READ_ONLY);
 
         RDFizer myself = null;
         myself = new RDFizer();

View File

@@ -20,10 +20,7 @@ import org.springframework.beans.factory.annotation.Autowired;
 
 import java.io.IOException;
 import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
+import java.util.*;
 
 /**
  * Service implementation for the PoolTask object.
@@ -92,7 +89,7 @@ public class PoolTaskServiceImpl implements PoolTaskService {
         else{
             //If the user does not have a claimedtask yet, see whether one of the groups of the user has pooltasks
             //for this workflow item
-            List<Group> groups = groupService.allMemberGroups(context, ePerson);
+            Set<Group> groups = groupService.allMemberGroupsSet(context, ePerson);
             for (Group group : groups) {
                 poolTask = poolTaskDAO.findByWorkflowItemAndGroup(context, group, workflowItem);
                 if(poolTask != null)

View File

@@ -0,0 +1,121 @@
+/**
+ * The contents of this file are subject to the license and copyright
+ * detailed in the LICENSE and NOTICE files at the root of the source
+ * tree and available online at
+ *
+ * http://www.dspace.org/license/
+ */
+package org.dspace.core;
+
+import org.dspace.content.Item;
+import org.dspace.eperson.EPerson;
+import org.dspace.eperson.Group;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.UUID;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.when;
+
+/**
+ * Class to test the read-only Context cache
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class ContextReadOnlyCacheTest {
+
+    private ContextReadOnlyCache readOnlyCache;
+
+    @Mock
+    private EPerson ePerson;
+
+    @Before
+    public void init() {
+        readOnlyCache = new ContextReadOnlyCache();
+        when(ePerson.getID()).thenReturn(UUID.randomUUID());
+    }
+
+    @Test
+    public void cacheAuthorizedAction() throws Exception {
+        Item item = Mockito.mock(Item.class);
+        when(item.getID()).thenReturn(UUID.randomUUID());
+
+        readOnlyCache.cacheAuthorizedAction(item, Constants.READ, ePerson, true);
+        readOnlyCache.cacheAuthorizedAction(item, Constants.WRITE, ePerson, false);
+
+        assertTrue(readOnlyCache.getCachedAuthorizationResult(item, Constants.READ, ePerson));
+        assertFalse(readOnlyCache.getCachedAuthorizationResult(item, Constants.WRITE, ePerson));
+        assertNull(readOnlyCache.getCachedAuthorizationResult(item, Constants.ADMIN, ePerson));
+        assertNull(readOnlyCache.getCachedAuthorizationResult(item, Constants.READ, null));
+        assertNull(readOnlyCache.getCachedAuthorizationResult(null, Constants.READ, ePerson));
+    }
+
+    @Test
+    public void cacheGroupMembership() throws Exception {
+        Group group1 = buildGroupMock("Test Group 1");
+        Group group2 = buildGroupMock("Test Group 2");
+        Group group3 = buildGroupMock("Test Group 3");
+
+        readOnlyCache.cacheGroupMembership(group1, ePerson, true);
+        readOnlyCache.cacheGroupMembership(group2, ePerson, false);
+
+        assertTrue(readOnlyCache.getCachedGroupMembership(group1, ePerson));
+        assertFalse(readOnlyCache.getCachedGroupMembership(group2, ePerson));
+        assertNull(readOnlyCache.getCachedGroupMembership(group3, ePerson));
+        assertNull(readOnlyCache.getCachedGroupMembership(null, ePerson));
+        assertNull(readOnlyCache.getCachedGroupMembership(group2, null));
+    }
+
+    @Test
+    public void cacheAllMemberGroupsSet() throws Exception {
+        Group group1 = buildGroupMock("Test Group 1");
+        Group group2 = buildGroupMock("Test Group 2");
+        Group group3 = buildGroupMock("Test Group 3");
+
+        readOnlyCache.cacheAllMemberGroupsSet(ePerson, new HashSet<>(Arrays.asList(group1, group2)));
+
+        assertTrue(readOnlyCache.getCachedGroupMembership(group1, ePerson));
+        assertTrue(readOnlyCache.getCachedGroupMembership(group2, ePerson));
+        assertFalse(readOnlyCache.getCachedGroupMembership(group3, ePerson));
+        assertFalse(readOnlyCache.getCachedGroupMembership(null, ePerson));
+        assertNull(readOnlyCache.getCachedGroupMembership(group2, null));
+    }
+
+    @Test
+    public void clear() throws Exception {
+        Item item = Mockito.mock(Item.class);
+        when(item.getID()).thenReturn(UUID.randomUUID());
+        Group group1 = buildGroupMock("Test Group 1");
+
+        //load data into the cache
+        readOnlyCache.cacheAuthorizedAction(item, Constants.READ, ePerson, true);
+        readOnlyCache.cacheGroupMembership(group1, ePerson, true);
+
+        //double check the data is there
+        assertTrue(readOnlyCache.getCachedAuthorizationResult(item, Constants.READ, ePerson));
+        assertTrue(readOnlyCache.getCachedGroupMembership(group1, ePerson));
+
+        //clear the cache
+        readOnlyCache.clear();
+
+        //check that the data is not present anymore
+        assertNull(readOnlyCache.getCachedAuthorizationResult(item, Constants.READ, ePerson));
+        assertNull(readOnlyCache.getCachedGroupMembership(group1, ePerson));
+    }
+
+    private Group buildGroupMock(final String name) {
+        Group group = Mockito.mock(Group.class);
+        when(group.getName()).thenReturn(name);
+        return group;
+    }
+}

View File

@@ -307,9 +307,36 @@ public class ContextTest extends AbstractUnitTest
         assertThat("testIsReadOnly 0", context.isReadOnly(), equalTo(false));
 
         // Create a new read-only context
-        Context instance = new Context(Context.READ_ONLY);
+        Context instance = new Context(Context.Mode.READ_ONLY);
         assertThat("testIsReadOnly 1", instance.isReadOnly(), equalTo(true));
 
+        //When in read-only, we only support abort().
+        instance.abort();
+
+        // Cleanup our context
+        cleanupContext(instance);
+    }
+
+    /**
+     * Test that commit cannot be called when the context is in read-only mode
+     */
+    @Test
+    public void testIsReadOnlyCommit() throws SQLException
+    {
+        // Create a new read-only context
+        Context instance = new Context(Context.Mode.READ_ONLY);
+        assertThat("testIsReadOnly 1", instance.isReadOnly(), equalTo(true));
+
+        try {
+            //When in read-only, calling commit() should result in an error
+            instance.commit();
+            fail();
+        } catch (Exception ex) {
+            assertTrue(ex instanceof UnsupportedOperationException);
+        }
+
+        instance.abort();
+
         // Cleanup our context
         cleanupContext(instance);
     }

View File

@@ -11,19 +11,6 @@ import com.lyncode.xoai.dataprovider.exceptions.ConfigurationException;
 import com.lyncode.xoai.dataprovider.exceptions.MetadataBindException;
 import com.lyncode.xoai.dataprovider.exceptions.WritingXmlException;
 import com.lyncode.xoai.dataprovider.xml.XmlOutputContext;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.net.ConnectException;
-import java.sql.SQLException;
-import java.text.ParseException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Date;
-import java.util.Iterator;
-import java.util.List;
-import javax.xml.stream.XMLStreamException;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.Options;
@@ -36,35 +23,38 @@ import org.apache.solr.client.solrj.SolrServer;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrInputDocument;
 import org.dspace.authorize.factory.AuthorizeServiceFactory;
 import org.dspace.authorize.service.AuthorizeService;
-import org.dspace.content.Bitstream;
-import org.dspace.content.Bundle;
+import org.dspace.content.*;
 import org.dspace.content.Collection;
-import org.dspace.content.Community;
-import org.dspace.content.Item;
-import org.dspace.content.MetadataValue;
-import org.dspace.content.MetadataField;
 import org.dspace.content.factory.ContentServiceFactory;
 import org.dspace.content.service.ItemService;
 import org.dspace.core.ConfigurationManager;
 import org.dspace.core.Constants;
 import org.dspace.core.Context;
+import org.dspace.core.Utils;
+import org.dspace.handle.Handle;
 import org.dspace.xoai.exceptions.CompilingException;
+import org.dspace.xoai.services.api.CollectionsService;
 import org.dspace.xoai.services.api.cache.XOAICacheService;
 import org.dspace.xoai.services.api.cache.XOAIItemCacheService;
 import org.dspace.xoai.services.api.cache.XOAILastCompilationCacheService;
 import org.dspace.xoai.services.api.config.ConfigurationService;
-import org.dspace.xoai.services.api.CollectionsService;
 import org.dspace.xoai.services.api.solr.SolrServerResolver;
 import org.dspace.xoai.solr.DSpaceSolrSearch;
 import org.dspace.xoai.solr.exceptions.DSpaceSolrException;
 import org.dspace.xoai.solr.exceptions.DSpaceSolrIndexerException;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.context.annotation.AnnotationConfigApplicationContext;
 
+import javax.xml.stream.XMLStreamException;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.net.ConnectException;
+import java.sql.SQLException;
+import java.text.ParseException;
+import java.util.*;
+
 import static com.lyncode.xoai.dataprovider.core.Granularity.Second;
import static org.dspace.xoai.util.ItemUtils.retrieveMetadata; import static org.dspace.xoai.util.ItemUtils.retrieveMetadata;
@@ -206,7 +196,12 @@ public class XOAI {
SolrServer server = solrServerResolver.getServer(); SolrServer server = solrServerResolver.getServer();
while (iterator.hasNext()) { while (iterator.hasNext()) {
try { try {
server.add(this.index(iterator.next())); Item item = iterator.next();
server.add(this.index(item));
//Uncache the item to keep memory consumption low
context.uncacheEntity(item);
} catch (SQLException | MetadataBindException | ParseException } catch (SQLException | MetadataBindException | ParseException
| XMLStreamException | WritingXmlException ex) { | XMLStreamException | WritingXmlException ex) {
log.error(ex.getMessage(), ex); log.error(ex.getMessage(), ex);
@@ -273,7 +268,6 @@ public class XOAI {
println("Item with handle " + handle + " indexed"); println("Item with handle " + handle + " indexed");
} }
return doc; return doc;
} }
@@ -382,7 +376,7 @@ public class XOAI {
String command = line.getArgs()[0]; String command = line.getArgs()[0];
if (COMMAND_IMPORT.equals(command)) { if (COMMAND_IMPORT.equals(command)) {
ctx = new Context(); ctx = new Context(Context.Mode.READ_ONLY);
XOAI indexer = new XOAI(ctx, XOAI indexer = new XOAI(ctx,
line.hasOption('o'), line.hasOption('o'),
line.hasOption('c'), line.hasOption('c'),
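Taken together, the hunks above establish the batch-read pattern this commit applies to OAI imports: open the Context in READ_ONLY mode so authorization and membership lookups can be cached, and evict each Item after it is indexed so a full-repository run keeps a flat memory profile. A condensed sketch of the pattern (process() and the service wiring are placeholders; the Context and ItemService calls are the real APIs touched by this diff):

import java.sql.SQLException;
import java.util.Iterator;
import org.dspace.content.Item;
import org.dspace.content.service.ItemService;
import org.dspace.core.Context;

public class BatchReadSketch {
    void indexAll(ItemService itemService) throws SQLException {
        Context context = new Context(Context.Mode.READ_ONLY);
        try {
            Iterator<Item> items = itemService.findAll(context);
            while (items.hasNext()) {
                Item item = items.next();
                process(item);               // e.g. build and submit the Solr document
                context.uncacheEntity(item); // keep the Hibernate session cache small
            }
        } finally {
            context.abort();                 // read-only contexts only support abort()
        }
    }

    private void process(Item item) {
        // placeholder for the actual indexing work
    }
}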

View File

@@ -87,7 +87,7 @@ public class DataProviderServlet extends HttpServlet {
DSpaceObject dso = null; DSpaceObject dso = null;
try try
{ {
context = new Context(Context.READ_ONLY); context = new Context(Context.Mode.READ_ONLY);
dso = handleService.resolveToObject(context, handle); dso = handleService.resolveToObject(context, handle);
} }
catch (SQLException ex) catch (SQLException ex)

View File

@@ -69,7 +69,7 @@ public class LocalURIRedirectionServlet extends HttpServlet
DSpaceObject dso = null; DSpaceObject dso = null;
try try
{ {
context = new Context(Context.READ_ONLY); context = new Context(Context.Mode.READ_ONLY);
dso = handleService.resolveToObject(context, handle); dso = handleService.resolveToObject(context, handle);
} }
catch (SQLException ex) catch (SQLException ex)

dspace/config/ehcache.xsd Normal file
View File

@@ -0,0 +1,265 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!--
The contents of this file are subject to the license and copyright
detailed in the LICENSE and NOTICE files at the root of the source
tree and available online at
http://www.dspace.org/license/
-->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" version="1.7">
<xs:element name="ehcache">
<xs:complexType>
<xs:sequence>
<xs:element maxOccurs="1" minOccurs="0" ref="diskStore"/>
<xs:element maxOccurs="1" minOccurs="0" ref="transactionManagerLookup"/>
<xs:element maxOccurs="1" minOccurs="0" ref="cacheManagerEventListenerFactory"/>
<xs:element maxOccurs="unbounded" minOccurs="0" ref="cacheManagerPeerProviderFactory"/>
<xs:element maxOccurs="unbounded" minOccurs="0" ref="cacheManagerPeerListenerFactory"/>
<xs:element maxOccurs="1" minOccurs="0" ref="terracottaConfig"/>
<xs:element ref="defaultCache"/>
<xs:element maxOccurs="unbounded" minOccurs="0" ref="cache"/>
</xs:sequence>
<xs:attribute name="name" use="optional"/>
<xs:attribute default="true" name="updateCheck" type="xs:boolean" use="optional"/>
<xs:attribute default="autodetect" name="monitoring" type="monitoringType" use="optional"/>
<xs:attribute default="true" name="dynamicConfig" type="xs:boolean" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="diskStore">
<xs:complexType>
<xs:attribute name="path" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="transactionManagerLookup">
<xs:complexType>
<xs:attribute name="class" use="required"/>
<xs:attribute name="properties" use="optional"/>
<xs:attribute name="propertySeparator" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="cacheManagerEventListenerFactory">
<xs:complexType>
<xs:attribute name="class" use="required"/>
<xs:attribute name="properties" use="optional"/>
<xs:attribute name="propertySeparator" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="cacheManagerPeerProviderFactory">
<xs:complexType>
<xs:attribute name="class" use="required"/>
<xs:attribute name="properties" use="optional"/>
<xs:attribute name="propertySeparator" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="cacheManagerPeerListenerFactory">
<xs:complexType>
<xs:attribute name="class" use="required"/>
<xs:attribute name="properties" use="optional"/>
<xs:attribute name="propertySeparator" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="terracottaConfig">
<xs:complexType>
<xs:sequence>
<xs:element maxOccurs="1" minOccurs="0" name="tc-config">
<xs:complexType>
<xs:sequence>
<xs:any maxOccurs="unbounded" minOccurs="0" processContents="skip"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
<xs:attribute default="localhost:9510" name="url" use="optional"/>
<xs:attribute name="registerStatsMBean" type="xs:boolean" use="optional"/>
</xs:complexType>
</xs:element>
<!-- add clone support for addition of cacheExceptionHandler. Important! -->
<xs:element name="defaultCache">
<xs:complexType>
<xs:sequence>
<xs:element minOccurs="0" maxOccurs="unbounded" ref="cacheEventListenerFactory"/>
<xs:element minOccurs="0" maxOccurs="unbounded" ref="cacheExtensionFactory"/>
<xs:element minOccurs="0" maxOccurs="unbounded" ref="cacheLoaderFactory"/>
<xs:element minOccurs="0" maxOccurs="1" ref="bootstrapCacheLoaderFactory"/>
<xs:element minOccurs="0" maxOccurs="1" ref="cacheExceptionHandlerFactory"/>
<xs:element minOccurs="0" maxOccurs="1" ref="terracotta"/>
<xs:element minOccurs="0" maxOccurs="1" ref="cacheWriter"/>
<xs:element minOccurs="0" maxOccurs="1" ref="copyStrategy"/>
</xs:sequence>
<xs:attribute name="diskExpiryThreadIntervalSeconds" type="xs:integer" use="optional"/>
<xs:attribute name="diskSpoolBufferSizeMB" type="xs:integer" use="optional"/>
<xs:attribute name="diskPersistent" type="xs:boolean" use="optional"/>
<xs:attribute name="diskAccessStripes" type="xs:integer" use="optional" default="1"/>
<xs:attribute name="eternal" type="xs:boolean" use="required"/>
<xs:attribute name="maxElementsInMemory" type="xs:integer" use="required"/>
<xs:attribute name="clearOnFlush" type="xs:boolean" use="optional"/>
<xs:attribute name="memoryStoreEvictionPolicy" type="xs:string" use="optional"/>
<xs:attribute name="overflowToDisk" type="xs:boolean" use="required"/>
<xs:attribute name="timeToIdleSeconds" type="xs:integer" use="optional"/>
<xs:attribute name="timeToLiveSeconds" type="xs:integer" use="optional"/>
<xs:attribute name="maxElementsOnDisk" type="xs:integer" use="optional"/>
<xs:attribute name="transactionalMode" type="transactionalMode" use="optional" default="off"/>
<xs:attribute name="statistics" type="xs:boolean" use="optional" default="false"/>
<xs:attribute name="copyOnRead" type="xs:boolean" use="optional" default="false"/>
<xs:attribute name="copyOnWrite" type="xs:boolean" use="optional" default="false"/>
</xs:complexType>
</xs:element>
<xs:element name="cache">
<xs:complexType>
<xs:sequence>
<xs:element minOccurs="0" maxOccurs="unbounded" ref="cacheEventListenerFactory"/>
<xs:element minOccurs="0" maxOccurs="unbounded" ref="cacheExtensionFactory"/>
<xs:element minOccurs="0" maxOccurs="unbounded" ref="cacheLoaderFactory"/>
<xs:element minOccurs="0" maxOccurs="unbounded" ref="cacheDecoratorFactory"/>
<xs:element minOccurs="0" maxOccurs="1" ref="bootstrapCacheLoaderFactory"/>
<xs:element minOccurs="0" maxOccurs="1" ref="cacheExceptionHandlerFactory"/>
<xs:element minOccurs="0" maxOccurs="1" ref="terracotta"/>
<xs:element minOccurs="0" maxOccurs="1" ref="cacheWriter"/>
<xs:element minOccurs="0" maxOccurs="1" ref="copyStrategy"/>
</xs:sequence>
<xs:attribute name="diskExpiryThreadIntervalSeconds" type="xs:integer" use="optional"/>
<xs:attribute name="diskSpoolBufferSizeMB" type="xs:integer" use="optional"/>
<xs:attribute name="diskPersistent" type="xs:boolean" use="optional"/>
<xs:attribute name="diskAccessStripes" type="xs:integer" use="optional" default="1"/>
<xs:attribute name="eternal" type="xs:boolean" use="required"/>
<xs:attribute name="maxElementsInMemory" type="xs:integer" use="required"/>
<xs:attribute name="memoryStoreEvictionPolicy" type="xs:string" use="optional"/>
<xs:attribute name="clearOnFlush" type="xs:boolean" use="optional"/>
<xs:attribute name="name" type="xs:string" use="required"/>
<xs:attribute name="overflowToDisk" type="xs:boolean" use="required"/>
<xs:attribute name="timeToIdleSeconds" type="xs:integer" use="optional"/>
<xs:attribute name="timeToLiveSeconds" type="xs:integer" use="optional"/>
<xs:attribute name="maxElementsOnDisk" type="xs:integer" use="optional"/>
<xs:attribute name="transactionalMode" type="transactionalMode" use="optional" default="off" />
<xs:attribute name="statistics" type="xs:boolean" use="optional" default="false"/>
<xs:attribute name="copyOnRead" type="xs:boolean" use="optional" default="false"/>
<xs:attribute name="copyOnWrite" type="xs:boolean" use="optional" default="false"/>
<xs:attribute name="logging" type="xs:boolean" use="optional" default="false"/>
</xs:complexType>
</xs:element>
<xs:element name="cacheEventListenerFactory">
<xs:complexType>
<xs:attribute name="class" use="required"/>
<xs:attribute name="properties" use="optional"/>
<xs:attribute name="propertySeparator" use="optional"/>
<xs:attribute name="listenFor" use="optional" type="notificationScope" default="all"/>
</xs:complexType>
</xs:element>
<xs:element name="bootstrapCacheLoaderFactory">
<xs:complexType>
<xs:attribute name="class" use="required"/>
<xs:attribute name="properties" use="optional"/>
<xs:attribute name="propertySeparator" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="cacheExtensionFactory">
<xs:complexType>
<xs:attribute name="class" use="required"/>
<xs:attribute name="properties" use="optional"/>
<xs:attribute name="propertySeparator" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="cacheExceptionHandlerFactory">
<xs:complexType>
<xs:attribute name="class" use="required"/>
<xs:attribute name="properties" use="optional"/>
<xs:attribute name="propertySeparator" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="cacheLoaderFactory">
<xs:complexType>
<xs:attribute name="class" use="required"/>
<xs:attribute name="properties" use="optional"/>
<xs:attribute name="propertySeparator" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="cacheDecoratorFactory">
<xs:complexType>
<xs:attribute name="class" use="required"/>
<xs:attribute name="properties" use="optional"/>
<xs:attribute name="propertySeparator" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="terracotta">
<xs:complexType>
<xs:attribute name="clustered" use="optional" type="xs:boolean" default="true"/>
<xs:attribute name="valueMode" use="optional" type="terracottaCacheValueType" default="serialization"/>
<xs:attribute name="coherentReads" use="optional" type="xs:boolean" default="true"/>
<xs:attribute name="localKeyCache" use="optional" type="xs:boolean" default="false"/>
<xs:attribute name="localKeyCacheSize" use="optional" type="xs:positiveInteger" default="300000"/>
<xs:attribute name="orphanEviction" use="optional" type="xs:boolean" default="true"/>
<xs:attribute name="orphanEvictionPeriod" use="optional" type="xs:positiveInteger" default="4"/>
<xs:attribute name="copyOnRead" use="optional" type="xs:boolean" default="false"/>
<xs:attribute name="coherent" use="optional" type="xs:boolean" default="true"/>
<xs:attribute name="synchronousWrites" use="optional" type="xs:boolean" default="false"/>
</xs:complexType>
</xs:element>
<xs:simpleType name="monitoringType">
<xs:restriction base="xs:string">
<xs:enumeration value="autodetect"/>
<xs:enumeration value="on"/>
<xs:enumeration value="off"/>
</xs:restriction>
</xs:simpleType>
<xs:simpleType name="terracottaCacheValueType">
<xs:restriction base="xs:string">
<xs:enumeration value="serialization" />
<xs:enumeration value="identity" />
</xs:restriction>
</xs:simpleType>
<xs:simpleType name="transactionalMode">
<xs:restriction base="xs:string">
<xs:enumeration value="off"/>
<xs:enumeration value="xa"/>
</xs:restriction>
</xs:simpleType>
<xs:element name="cacheWriter">
<xs:complexType>
<xs:sequence >
<xs:element minOccurs="0" maxOccurs="1" ref="cacheWriterFactory"/>
</xs:sequence>
<xs:attribute name="writeMode" use="optional" type="writeModeType" default="write-through"/>
<xs:attribute name="notifyListenersOnException" use="optional" type="xs:boolean" default="false"/>
<xs:attribute name="minWriteDelay" use="optional" type="xs:nonNegativeInteger" default="1"/>
<xs:attribute name="maxWriteDelay" use="optional" type="xs:nonNegativeInteger" default="1"/>
<xs:attribute name="rateLimitPerSecond" use="optional" type="xs:nonNegativeInteger" default="0"/>
<xs:attribute name="writeCoalescing" use="optional" type="xs:boolean" default="false"/>
<xs:attribute name="writeBatching" use="optional" type="xs:boolean" default="false"/>
<xs:attribute name="writeBatchSize" use="optional" type="xs:positiveInteger" default="1"/>
<xs:attribute name="retryAttempts" use="optional" type="xs:nonNegativeInteger" default="0"/>
<xs:attribute name="retryAttemptDelaySeconds" use="optional" type="xs:nonNegativeInteger" default="1"/>
</xs:complexType>
</xs:element>
<xs:simpleType name="writeModeType">
<xs:restriction base="xs:string">
<xs:enumeration value="write-through" />
<xs:enumeration value="write-behind" />
</xs:restriction>
</xs:simpleType>
<xs:element name="cacheWriterFactory">
<xs:complexType>
<xs:attribute name="class" use="required"/>
<xs:attribute name="properties" use="optional"/>
<xs:attribute name="propertySeparator" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="copyStrategy">
<xs:complexType>
<xs:attribute name="class" use="required" type="xs:string" />
</xs:complexType>
</xs:element>
<xs:simpleType name="notificationScope">
<xs:restriction base="xs:string">
<xs:enumeration value="local"/>
<xs:enumeration value="remote"/>
<xs:enumeration value="all"/>
</xs:restriction>
</xs:simpleType>
</xs:schema>
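Shipping this schema lets the cache configuration (which points at it via xsi:noNamespaceSchemaLocation) be validated offline. A quick sketch using the stock JDK validator (paths are illustrative):

import java.io.File;
import javax.xml.XMLConstants;
import javax.xml.transform.stream.StreamSource;
import javax.xml.validation.Schema;
import javax.xml.validation.SchemaFactory;
import javax.xml.validation.Validator;

public class ValidateEhcacheConfig {
    public static void main(String[] args) throws Exception {
        SchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
        Schema schema = factory.newSchema(new File("dspace/config/ehcache.xsd"));
        Validator validator = schema.newValidator();
        // Fails with a SAXException if the cache configuration drifts from the schema
        validator.validate(new StreamSource(new File("dspace/config/hibernate-ehcache-config.xml")));
        System.out.println("hibernate-ehcache-config.xml is valid");
    }
}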

View File

@@ -0,0 +1,586 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
The contents of this file are subject to the license and copyright
detailed in the LICENSE and NOTICE files at the root of the source
tree and available online at
http://www.dspace.org/license/
-->
<ehcache xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="ehcache.xsd">
<!-- Sets the path to the directory where cache .data files are created.
If the path is a Java System Property it is replaced by
its value in the running VM.
The following properties are translated:
user.home - User's home directory
user.dir - User's current working directory
java.io.tmpdir - Default temp file path -->
<!-- WARNING: If you are running multiple DSpace instances on the same server, make sure to start
each DSpace instance with a different value for java.io.tmpdir !!! -->
<diskStore path="java.io.tmpdir/DSpaceHibernateCache"/>
<!--
Cache configuration
===================
The following attributes are required.
name:
Sets the name of the cache. This is used to identify the cache. It must be unique.
maxElementsInMemory:
Sets the maximum number of objects that will be created in memory
maxElementsOnDisk:
Sets the maximum number of objects that will be maintained in the DiskStore
The default value is zero, meaning unlimited.
eternal:
Sets whether elements are eternal. If eternal, timeouts are ignored and the
element is never expired.
overflowToDisk:
Sets whether elements can overflow to disk when the memory store
has reached the maxInMemory limit.
The following attributes and elements are optional.
timeToIdleSeconds:
Sets the time to idle for an element before it expires.
i.e. The maximum amount of time between accesses before an element expires
Is only used if the element is not eternal.
Optional attribute. A value of 0 means that an Element can idle for infinity.
The default value is 0.
timeToLiveSeconds:
Sets the time to live for an element before it expires.
i.e. The maximum time between creation time and when an element expires.
Is only used if the element is not eternal.
Optional attribute. A value of 0 means that an Element can live for infinity.
The default value is 0.
diskPersistent:
Whether the disk store persists between restarts of the Virtual Machine.
The default value is false.
diskExpiryThreadIntervalSeconds:
The number of seconds between runs of the disk expiry thread. The default value
is 120 seconds.
diskSpoolBufferSizeMB:
This is the size to allocate the DiskStore for a spool buffer. Writes are made
to this area and then asynchronously written to disk. The default size is 30MB.
Each spool buffer is used only by its cache. If you get OutOfMemory errors consider
lowering this value. To improve DiskStore performance consider increasing it. Trace-level
logging in the DiskStore will show if put back-ups are occurring.
memoryStoreEvictionPolicy:
The policy enforced upon reaching the maxElementsInMemory limit. The default
policy is Least Recently Used (specified as LRU). Other available policies are
First In First Out (specified as FIFO) and Least Frequently Used
(specified as LFU)
*NOTE: LFU seems to have some possible issues -AZ
Cache elements can also contain sub-elements, each specified in the same format: a factory class
and properties. Defined sub-elements are:
* cacheEventListenerFactory - Enables registration of listeners for cache events, such as
put, remove, update, and expire.
* bootstrapCacheLoaderFactory - Specifies a BootstrapCacheLoader, which is called by a
cache on initialisation to prepopulate itself.
* cacheExtensionFactory - Specifies a CacheExtension, a generic mechanism to tie a class
which holds a reference to a cache to the cache lifecycle.
RMI Cache Replication
Each cache that will be distributed needs to set a cache event listener which replicates
messages to the other CacheManager peers. For the built-in RMI implementation this is done
by adding a cacheEventListenerFactory element of type RMICacheReplicatorFactory to each
distributed cache's configuration as per the following example:
<cacheEventListenerFactory class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
properties="replicateAsynchronously=true,
replicatePuts=true,
replicateUpdates=true,
replicateUpdatesViaCopy=true,
replicateRemovals=true
asynchronousReplicationIntervalMillis=<number of milliseconds>"
propertySeparator="," />
The RMICacheReplicatorFactory recognises the following properties:
* replicatePuts=true|false - whether new elements placed in a cache are
replicated to others. Defaults to true.
* replicateUpdates=true|false - whether new elements which override an
element already existing with the same key are replicated. Defaults to true.
* replicateRemovals=true - whether element removals are replicated. Defaults to true.
* replicateAsynchronously=true | false - whether replications are
asynchronous (true) or synchronous (false). Defaults to true.
* replicateUpdatesViaCopy=true | false - whether the new elements are
copied to other caches (true), or whether a remove message is sent. Defaults to true.
* asynchronousReplicationIntervalMillis=<number of milliseconds> - The asynchronous
replicator runs at a set interval of milliseconds. The default is 1000. The minimum
is 10. This property is only applicable if replicateAsynchronously=true
Cluster Bootstrapping
The RMIBootstrapCacheLoader bootstraps caches in clusters where RMICacheReplicators are
used. It is configured as per the following example:
<bootstrapCacheLoaderFactory
class="net.sf.ehcache.distribution.RMIBootstrapCacheLoaderFactory"
properties="bootstrapAsynchronously=true, maximumChunkSizeBytes=5000000"
propertySeparator="," />
The RMIBootstrapCacheLoaderFactory recognises the following optional properties:
* bootstrapAsynchronously=true|false - whether the bootstrap happens in the background
after the cache has started. If false, bootstrapping must complete before the cache is
made available. The default value is true.
* maximumChunkSizeBytes=<integer> - Caches can potentially be very large, larger than the
memory limits of the VM. This property allows the bootstrapper to fetch elements in
chunks. The default chunk size is 5000000 (5MB).
Cache Exception Handling
By default, most cache operations will propagate a runtime CacheException on failure. An interceptor,
using a dynamic proxy, may be configured so that a CacheExceptionHandler is used to
intercept Exceptions. Errors are not intercepted. It is configured as per the following example:
<cacheExceptionHandlerFactory class="net.sf.ehcache.exceptionhandler.CountingExceptionHandlerFactory"
properties="logLevel=FINE"/>
Caches with ExceptionHandling configured are not of type Cache, but are of type Ehcache only, and are not available
using CacheManager.getCache(), but using CacheManager.getEhcache().
CacheLoader
A CacheLoader may be configured against a cache.
<cacheLoaderFactory class="net.sf.ehcache.loader.CountingCacheLoaderFactory"
properties="type=int,startCounter=10"/>
-->
<!--
Mandatory Default Cache configuration. These settings will be applied to caches
created programmatically using CacheManager.add(String cacheName).
The defaultCache has an implicit name "default" which is a reserved cache name.
-->
<defaultCache
maxElementsInMemory="3000"
eternal="false"
timeToIdleSeconds="1"
timeToLiveSeconds="1200"
overflowToDisk="true"
diskSpoolBufferSizeMB="30"
maxElementsOnDisk="10000"
diskPersistent="false"
diskExpiryThreadIntervalSeconds="120"
memoryStoreEvictionPolicy="LRU">
</defaultCache>
<!--Predefined caches. Add your cache configuration settings here.
If you do not have a configuration for your cache, a WARNING will be issued when the
CacheManager starts.
The following attributes are required for defaultCache:
name - Sets the name of the cache. This is used to identify the cache. It must be unique.
maxInMemory - Sets the maximum number of objects that will be created in memory
eternal - Sets whether elements are eternal. If eternal, timeouts are ignored and the element
is never expired.
timeToIdleSeconds - Sets the time to idle for an element before it expires.
i.e. The maximum amount of time between accesses before an element expires
Is only used if the element is not eternal.
Optional attribute. A value of 0 means that an Element can idle for infinity
timeToLiveSeconds - Sets the time to live for an element before it expires.
i.e. The maximum time between creation time and when an element expires.
Is only used if the element is not eternal.
Optional attribute. A value of 0 means that an Element can live for infinity
overflowToDisk - Sets whether elements can overflow to disk when the in-memory cache
has reached the maxInMemory limit.
-->
<!-- CACHES FOR TESTING -->
<!--
<cache name="org.dspace.caching.MemOnly"
maxElementsInMemory="10000"
eternal="false"
timeToIdleSeconds="600"
timeToLiveSeconds="1200"
overflowToDisk="false"
diskSpoolBufferSizeMB="0"
maxElementsOnDisk="0"
diskPersistent="false"
diskExpiryThreadIntervalSeconds="120"
memoryStoreEvictionPolicy="LRU">
</cache>
<cache name="org.dspace.caching.DiskOnly"
maxElementsInMemory="1"
eternal="false"
timeToIdleSeconds="600"
timeToLiveSeconds="1200"
overflowToDisk="true"
diskSpoolBufferSizeMB="30"
maxElementsOnDisk="10000"
diskPersistent="false"
diskExpiryThreadIntervalSeconds="120"
memoryStoreEvictionPolicy="LRU">
</cache>
<cache name="org.dspace.caching.Distributed"
maxElementsInMemory="10"
eternal="false"
timeToIdleSeconds="10"
timeToLiveSeconds="30"
overflowToDisk="false"
memoryStoreEvictionPolicy="LRU">
<cacheEventListenerFactory class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
properties="replicateAsynchronously=true,
replicatePuts=true,
replicateUpdates=true,
replicateUpdatesViaCopy=false,
replicateRemovals=true "/>
</cache>
<cache name="org.dspace.caching.DistributedDisk"
maxElementsInMemory="5"
eternal="false"
timeToIdleSeconds="10"
timeToLiveSeconds="30"
overflowToDisk="true"
diskSpoolBufferSizeMB="30"
maxElementsOnDisk="10"
diskPersistent="false"
diskExpiryThreadIntervalSeconds="120"
memoryStoreEvictionPolicy="LRU">
<cacheEventListenerFactory class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
properties="replicateAsynchronously=true,
replicatePuts=true,
replicateUpdates=true,
replicateUpdatesViaCopy=false,
replicateRemovals=true "/>
</cache>
-->
<!-- Place configuration for your caches following -->
<!-- this cache tracks the timestamps of the most recent updates to particular tables.
It is important that the cache timeout of the underlying cache implementation be set to a
higher value than the timeouts of any of the query caches. In fact, it is recommended that
the underlying cache not be configured for expiry at all. -->
<cache name="org.hibernate.cache.spi.UpdateTimestampsCache"
maxElementsInMemory="6000" eternal="true" overflowToDisk="false" />
<!-- this cache stores the result sets of queries that Hibernate executes with caching enabled -->
<cache name="org.hibernate.cache.internal.StandardQueryCache"
maxElementsInMemory="2000" eternal="false" timeToIdleSeconds="1800"
timeToLiveSeconds="600" overflowToDisk="false" diskExpiryThreadIntervalSeconds="60"
memoryStoreEvictionPolicy="LRU"/>
<!-- DSpace classes in the second level cache -->
<!-- We only have 1 site object, so it is best to cache it -->
<cache name="org.dspace.content.Site"
maxElementsInMemory="1" eternal="false" timeToIdleSeconds="86400"
timeToLiveSeconds="86400" overflowToDisk="false"
memoryStoreEvictionPolicy="LRU"/>
<!-- The number of metadata schemas is limited and not updated frequently, so if we cache them
the likelihood of a cache hit is very high -->
<cache name="org.dspace.content.MetadataSchema"
maxElementsInMemory="100" eternal="false" timeToIdleSeconds="3600"
timeToLiveSeconds="3600" overflowToDisk="true" diskExpiryThreadIntervalSeconds="60"
memoryStoreEvictionPolicy="LRU"/>
<!-- The number of metadata fields is limited and not updated frequently, so if we cache them
the likelihood of a cache hit is very high -->
<cache name="org.dspace.content.MetadataField"
maxElementsInMemory="2000" eternal="false" timeToIdleSeconds="3600"
timeToLiveSeconds="3600" overflowToDisk="true" diskExpiryThreadIntervalSeconds="60"
memoryStoreEvictionPolicy="LRU"/>
<!-- It is not a good idea to cache Item records. Most repositories have a large number of items
so the cache would have to be updated frequently. In addition there are many processes that
touch a lot of different items (discovery search, filter media, curation tasks...) which also makes
the cache less efficient. The probability of a cache hit is thus very low, which is why Items
should not be cached. The same reasoning applies to Metadata values, Bundles, Bitstreams and Handles. -->
<!-- The number of groups in a repository can be very big, but only a small percentage of them is used
very frequently. So it makes sense to cache Group records because the cache hit rate is likely to be high -->
<cache name="org.dspace.eperson.Group"
maxElementsInMemory="5000" eternal="false" timeToIdleSeconds="1800"
timeToLiveSeconds="3600" overflowToDisk="false"
memoryStoreEvictionPolicy="LRU"/>
<!-- Like items, there are too many different Resource policy records for the cache to work efficiently.
In addition, resource policies are the core security mechanism in DSpace, so we need to be 100% certain that we
do not receive a stale policy when querying them. -->
<!-- The total number of epersons in DSpace can be very large, but the number of concurrent authenticated users is mostly
limited. Therefore, having the authenticated users' data cached will increase performance, as the cache hit rate will
be high. -->
<cache name="org.dspace.eperson.EPerson"
maxElementsInMemory="1000" eternal="false" timeToIdleSeconds="1800"
timeToLiveSeconds="1800" overflowToDisk="false"
memoryStoreEvictionPolicy="LRU"/>
<!-- The set of collections in a repository is mostly fixed and not updated frequently. This means that
most queries for a collection will be able to use the cached version. So adding caching here makes sense. -->
<cache name="org.dspace.content.Collection"
maxElementsInMemory="4000" eternal="false" timeToIdleSeconds="1800"
timeToLiveSeconds="1800" overflowToDisk="true" diskExpiryThreadIntervalSeconds="60"
memoryStoreEvictionPolicy="LRU"/>
<!-- Like collections, the same applies to communities. So we also set up a cache for communities. -->
<cache name="org.dspace.content.Community"
maxElementsInMemory="2000" eternal="false" timeToIdleSeconds="1800"
timeToLiveSeconds="1800" overflowToDisk="true" diskExpiryThreadIntervalSeconds="60"
memoryStoreEvictionPolicy="LRU"/>
<!-- DISTRIBUTED CACHING
Add cacheEventListenerFactory to the cache
==========================================
<cacheEventListenerFactory class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
properties="replicateAsynchronously=true,
replicatePuts=true,
replicateUpdates=true,
replicateUpdatesViaCopy=false,
replicateRemovals=true "/>
For Example:
============
<cache name="org.dspace.authz.api.SecurityService.cache"
maxElementsInMemory="20000"
eternal="false"
timeToIdleSeconds="1800"
timeToLiveSeconds="2400"
overflowToDisk="true"
diskSpoolBufferSizeMB="30"
maxElementsOnDisk="100000"
diskPersistent="true"
diskExpiryThreadIntervalSeconds="120">
<cacheEventListenerFactory class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
properties="replicateAsynchronously=true,
replicatePuts=true,
replicateUpdates=true,
replicateUpdatesViaCopy=false,
replicateRemovals=true "/>
</cache>
CacheManagerPeerProvider
========================
(Enable for distributed operation)
Specifies a CacheManagerPeerProviderFactory which will be used to create a
CacheManagerPeerProvider, which discovers other CacheManagers in the cluster.
The attributes of cacheManagerPeerProviderFactory are:
* class - a fully qualified factory class name
* properties - comma separated properties having meaning only to the factory.
Ehcache comes with a built-in RMI-based distribution system with two means of discovery of
CacheManager peers participating in the cluster:
* automatic, using a multicast group. This one automatically discovers peers and detects
changes such as peers entering and leaving the group
* manual, using manual rmiURL configuration. A hardcoded list of peers is provided at
configuration time.
Configuring Automatic Discovery:
Automatic discovery is configured as per the following example:
<cacheManagerPeerProviderFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
properties="peerDiscovery=automatic, multicastGroupAddress=230.0.0.1,
multicastGroupPort=4446, timeToLive=32"/>
Valid properties are:
* peerDiscovery (mandatory) - specify "automatic"
* multicastGroupAddress (mandatory) - specify a valid multicast group address
* multicastGroupPort (mandatory) - specify a dedicated port for the multicast heartbeat
traffic
* timeToLive - specify a value between 0 and 255 which determines how far the packets will propagate.
By convention, the restrictions are:
0 - the same host
1 - the same subnet
32 - the same site
64 - the same region
128 - the same continent
255 - unrestricted
Configuring Manual Discovery:
Manual discovery is configured as per the following example:
<cacheManagerPeerProviderFactory class=
"net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
properties="peerDiscovery=manual,
rmiUrls=//server1:40000/sampleCache1|//server2:40000/sampleCache1
| //server1:40000/sampleCache2|//server2:40000/sampleCache2"
propertySeparator="," />
Valid properties are:
* peerDiscovery (mandatory) - specify "manual"
* rmiUrls (mandatory) - specify a pipe separated list of rmiUrls, in the form
//hostname:port
The hostname is the hostname of the remote CacheManager peer. The port is the listening
port of the RMICacheManagerPeerListener of the remote CacheManager peer.
<cacheManagerPeerProviderFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
properties="peerDiscovery=automatic,
multicastGroupAddress=230.0.0.1,
multicastGroupPort=4446, timeToLive=1"
propertySeparator=","
/>
-->
<!--
CacheManagerPeerListener
========================
(Enable for distributed operation)
Specifies a CacheManagerPeerListenerFactory which will be used to create a
CacheManagerPeerListener, which
listens for messages from cache replicators participating in the cluster.
The attributes of cacheManagerPeerListenerFactory are:
class - a fully qualified factory class name
properties - comma separated properties having meaning only to the factory.
Ehcache comes with a built-in RMI-based distribution system. The listener component is
RMICacheManagerPeerListener which is configured using
RMICacheManagerPeerListenerFactory. It is configured as per the following example:
<cacheManagerPeerListenerFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerListenerFactory"
properties="hostName=fully_qualified_hostname_or_ip,
port=40001,
socketTimeoutMillis=120000"
propertySeparator="," />
All properties are optional. They are:
* hostName - the hostName of the host the listener is running on. Specify
where the host is multihomed and you want to control the interface over which cluster
messages are received. Defaults to the host name of the default interface if not
specified.
* port - the port the listener listens on. This defaults to a free port if not specified.
* socketTimeoutMillis - the number of ms client sockets will stay open when sending
messages to the listener. This should be long enough for the slowest message.
If not specified, it defaults to 120000ms.
* timeToLive - You can control how far the multicast packets propagate by setting the
badly misnamed time to live. Using the multicast IP protocol, the timeToLive value
indicates the scope or range in which a packet may be forwarded. By convention:
0 is restricted to the same host
1 is restricted to the same subnet
32 is restricted to the same site
64 is restricted to the same region
128 is restricted to the same continent
255 is unrestricted
<cacheManagerPeerListenerFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerListenerFactory"/>
-->
<!-- Sample Distributed cache settings -->
<!-- Option A: automatic discovery using tcp multicast
For the multicast version, the IP address is no longer that of the server.
Rather, you need to choose the address and port in a designated range that has been reserved for multicasting.
It is as if the server and all potential clients are agreeing that messages will be left in a particular location.
The available addresses for multicasting are in the range 224.0.0.0 through 239.255.255.255.
You can check for the assigned addresses in this range (http://www.iana.org/assignments/multicast-addresses).
Recommended default is to use the IP address 230.0.0.1 and the port 4446.
<cacheManagerPeerProviderFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
properties="peerDiscovery=automatic,
multicastGroupAddress=230.0.0.1,
multicastGroupPort=4446,
timeToLive=1" />
-->
<!-- Option B: manual discovery (you must customize this to include every node in the cluster) -->
<!--
<cacheManagerPeerProviderFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
properties="peerDiscovery=manual,
rmiUrls=//server2:40001/sampleCache11|//server2:40001/sampleCache12" />
-->
<!--
A CacheManagerPeerListener listens for messages from peers to the current CacheManager.
You configure the CacheManagerPeerListener by specifying a CacheManagerPeerListenerFactory which is used to create
the CacheManagerPeerListener using the plugin mechanism.
The attributes of cacheManagerPeerListenerFactory are:
* class - a fully qualified factory class name
* properties - comma separated properties having meaning only to the factory.
Ehcache comes with a built-in RMI-based distribution system. The listener component is RMICacheManagerPeerListener
which is configured using RMICacheManagerPeerListenerFactory. It is configured as per the following example:
<cacheManagerPeerListenerFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerListenerFactory"
properties="hostName=localhost, port=40001, socketTimeoutMillis=2000" />
Valid properties are:
* hostName (optional) - the hostName of the host the listener is running on. Specify where the host is
multihomed and you want to control the interface over which cluster messages are received.
The hostname is checked for reachability during CacheManager initialisation.
If the hostName is unreachable, the CacheManager will refuse to start and a CacheException will be thrown
indicating connection was refused.
If unspecified, the hostname will use InetAddress.getLocalHost().getHostAddress(), which corresponds to the
default host network interface.
Warning: Explicitly setting this to localhost refers to the local loopback of 127.0.0.1, which is not network
visible and will cause no replications to be received from remote hosts. You should only use this setting when
multiple CacheManagers are on the same machine.
NOTE: this is not obvious, but if you run a netstat -an | grep LISTEN you will see that the configuration shown below:
properties="hostName=127.0.0.1, port=40001"
will actually cause a bind to happen to *:40001 which is what we want and will allow the servers to start
even if there is no network connection. On the other hand, if you use localhost there will be a resolution
failure on startup. This is probably what you should use regardless of the setup of the peer provider factory. -AZ
* port (mandatory) - the port the listener listens on.
* socketTimeoutMillis (optional) - the number of milliseconds client sockets will wait when sending messages to this
listener until they give up. By default this is 2000ms.
-->
<!-- For Distributed Caching in a cluster, this will listen for the messages from peers
<cacheManagerPeerListenerFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerListenerFactory"
properties="hostName=127.0.0.1, port=40001" />
-->
</ehcache>
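Note that the UpdateTimestampsCache/StandardQueryCache pair configured above only pays off for queries explicitly flagged as cacheable; everything else bypasses the query cache. A sketch of the opt-in using the plain Hibernate API of this era (the HQL and DAO shape are illustrative):

import java.util.List;
import org.hibernate.Query;
import org.hibernate.Session;

public class CachedQuerySketch {
    @SuppressWarnings("unchecked")
    public List<Object> findAllCommunities(Session session) {
        Query query = session.createQuery("FROM Community");
        // Only cacheable queries populate StandardQueryCache; UpdateTimestampsCache
        // invalidates the cached result sets when an underlying table changes.
        query.setCacheable(true);
        return query.list();
    }
}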

View File

@@ -18,14 +18,14 @@
<!--Debug property that can be used to display the sql--> <!--Debug property that can be used to display the sql-->
<property name="show_sql">false</property> <property name="show_sql">false</property>
<!--Second level cache configuration--> <!--Second level cache configuration-->
<property name="hibernate.cache.use_second_level_cache">true</property>
<property name="hibernate.cache.use_query_cache">true</property> <property name="hibernate.cache.use_query_cache">true</property>
<property name="hibernate.cache.region.factory_class"> <property name="hibernate.cache.use_second_level_cache">true</property>
org.hibernate.cache.ehcache.SingletonEhCacheRegionFactory <property name="hibernate.cache.region.factory_class">org.hibernate.cache.ehcache.SingletonEhCacheRegionFactory</property>
</property> <property name="hibernate.cache.use_structured_entries">true</property>
<property name="javax.persistence.sharedCache.mode">ENABLE_SELECTIVE</property> <property name="javax.persistence.sharedCache.mode">ENABLE_SELECTIVE</property>
<!-- Set in config/spring/api/core-hibernate.xml -->
<!--<property name="net.sf.ehcache.configurationResourceName">file:${dspace.dir}/config/hibernate-ehcache-config.xml</property>-->
<!-- Entities to be loaded by hibernate --> <!-- Entities to be loaded by hibernate -->
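With javax.persistence.sharedCache.mode set to ENABLE_SELECTIVE, only entities that explicitly opt in participate in the second-level cache. A sketch of what that opt-in looks like on an entity class (the annotations are standard JPA/Hibernate; the concurrency strategy and mapping shown are assumptions, not necessarily what the DSpace entities use):

import java.util.UUID;
import javax.persistence.Cacheable;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Table;
import org.hibernate.annotations.Cache;
import org.hibernate.annotations.CacheConcurrencyStrategy;

// The cache region defaults to the fully qualified class name, which is why
// hibernate-ehcache-config.xml declares caches named e.g. "org.dspace.content.Site".
@Entity
@Table(name = "site")
@Cacheable
@Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE)
public class Site {
    @Id
    private UUID id;
}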

View File

@@ -21,6 +21,17 @@
</property> </property>
</bean> </bean>
<bean class="org.springframework.beans.factory.config.MethodInvokingFactoryBean">
<property name="targetClass" value="java.lang.System" />
<property name="targetMethod" value="setProperty" />
<property name="arguments">
<list>
<value>net.sf.ehcache.configurationResourceName</value>
<value>file:${dspace.dir}/config/hibernate-ehcache-config.xml</value>
</list>
</property>
</bean>
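The bean above is effectively a Spring-flavoured way to run one line of code before the SessionFactory starts, since SingletonEhCacheRegionFactory resolves its CacheManager configuration from this system property. The plain-Java equivalent (dspaceDir is a placeholder for ${dspace.dir}):

String dspaceDir = "/dspace"; // placeholder for ${dspace.dir}
System.setProperty("net.sf.ehcache.configurationResourceName",
        "file:" + dspaceDir + "/config/hibernate-ehcache-config.xml");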
<bean id='dataSource' <bean id='dataSource'
class='org.springframework.jndi.JndiObjectFactoryBean'> class='org.springframework.jndi.JndiObjectFactoryBean'>
<description> <description>