Merge branch 'DSpace:main' into synced

This commit is contained in:
Ma-Tador
2023-05-03 08:34:27 +02:00
committed by GitHub
65 changed files with 1842 additions and 331 deletions

View File

@@ -6,6 +6,5 @@ dspace/modules/*/target/
Dockerfile.*
dspace/src/main/docker/dspace-postgres-pgcrypto
dspace/src/main/docker/dspace-postgres-pgcrypto-curl
-dspace/src/main/docker/solr
dspace/src/main/docker/README.md
dspace/src/main/docker-compose/

View File

@@ -79,6 +79,39 @@ jobs:
name: ${{ matrix.type }} results
path: ${{ matrix.resultsdir }}
-# https://github.com/codecov/codecov-action
+# Upload code coverage report to artifact, so that it can be shared with the 'codecov' job (see below)
- name: Upload code coverage report to Artifact
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.type }} coverage report
path: 'dspace/target/site/jacoco-aggregate/jacoco.xml'
retention-days: 14
# Codecov upload is a separate job in order to allow us to restart this separate from the entire build/test
# job above. This is necessary because Codecov uploads seem to randomly fail at times.
# See https://community.codecov.com/t/upload-issues-unable-to-locate-build-via-github-actions-api/3954
codecov:
# Must run after 'tests' job above
needs: tests
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
# Download artifacts from previous 'tests' job
- name: Download coverage artifacts
uses: actions/download-artifact@v3
# Now attempt upload to Codecov using its action.
# NOTE: We use a retry action to retry the Codecov upload if it fails the first time.
#
# Retry action: https://github.com/marketplace/actions/retry-action
# Codecov action: https://github.com/codecov/codecov-action
- name: Upload coverage to Codecov.io
-uses: codecov/codecov-action@v3
+uses: Wandalen/wretry.action@v1.0.36
with:
action: codecov/codecov-action@v3
# Try upload 5 times max
attempt_limit: 5
# Run again in 30 seconds
attempt_delay: 30000

View File

@@ -170,3 +170,86 @@ jobs:
# Use tags / labels provided by 'docker/metadata-action' above
tags: ${{ steps.meta_build_cli.outputs.tags }}
labels: ${{ steps.meta_build_cli.outputs.labels }}
###########################################
# Build/Push the 'dspace/dspace-solr' image
###########################################
# Get Metadata for docker_build_solr step below
- name: Sync metadata (tags, labels) from GitHub to Docker for 'dspace-solr' image
id: meta_build_solr
uses: docker/metadata-action@v4
with:
images: dspace/dspace-solr
tags: ${{ env.IMAGE_TAGS }}
flavor: ${{ env.TAGS_FLAVOR }}
- name: Build and push 'dspace-solr' image
id: docker_build_solr
uses: docker/build-push-action@v3
with:
context: .
file: ./dspace/src/main/docker/dspace-solr/Dockerfile
platforms: ${{ env.PLATFORMS }}
# For pull requests, we run the Docker build (to ensure no PR changes break the build),
# but we ONLY do an image push to DockerHub if it's NOT a PR
push: ${{ github.event_name != 'pull_request' }}
# Use tags / labels provided by 'docker/metadata-action' above
tags: ${{ steps.meta_build_solr.outputs.tags }}
labels: ${{ steps.meta_build_solr.outputs.labels }}
###########################################################
# Build/Push the 'dspace/dspace-postgres-pgcrypto' image
###########################################################
# Get Metadata for docker_build_postgres step below
- name: Sync metadata (tags, labels) from GitHub to Docker for 'dspace-postgres-pgcrypto' image
id: meta_build_postgres
uses: docker/metadata-action@v4
with:
images: dspace/dspace-postgres-pgcrypto
tags: ${{ env.IMAGE_TAGS }}
flavor: ${{ env.TAGS_FLAVOR }}
- name: Build and push 'dspace-postgres-pgcrypto' image
id: docker_build_postgres
uses: docker/build-push-action@v3
with:
# Must build out of subdirectory to have access to install script for pgcrypto
context: ./dspace/src/main/docker/dspace-postgres-pgcrypto/
dockerfile: Dockerfile
platforms: ${{ env.PLATFORMS }}
# For pull requests, we run the Docker build (to ensure no PR changes break the build),
# but we ONLY do an image push to DockerHub if it's NOT a PR
push: ${{ github.event_name != 'pull_request' }}
# Use tags / labels provided by 'docker/metadata-action' above
tags: ${{ steps.meta_build_postgres.outputs.tags }}
labels: ${{ steps.meta_build_postgres.outputs.labels }}
###########################################################
# Build/Push the 'dspace/dspace-postgres-pgcrypto' image ('-loadsql' tag)
###########################################################
# Get Metadata for docker_build_postgres_loadsql step below
- name: Sync metadata (tags, labels) from GitHub to Docker for 'dspace-postgres-pgcrypto-loadsql' image
id: meta_build_postgres_loadsql
uses: docker/metadata-action@v4
with:
images: dspace/dspace-postgres-pgcrypto
tags: ${{ env.IMAGE_TAGS }}
# Suffix all tags with "-loadsql". Otherwise, it uses the same
# tagging logic as the primary 'dspace/dspace-postgres-pgcrypto' image above.
flavor: ${{ env.TAGS_FLAVOR }}
suffix=-loadsql
- name: Build and push 'dspace-postgres-pgcrypto-loadsql' image
id: docker_build_postgres_loadsql
uses: docker/build-push-action@v3
with:
# Must build out of subdirectory to have access to install script for pgcrypto
context: ./dspace/src/main/docker/dspace-postgres-pgcrypto-curl/
dockerfile: Dockerfile
platforms: ${{ env.PLATFORMS }}
# For pull requests, we run the Docker build (to ensure no PR changes break the build),
# but we ONLY do an image push to DockerHub if it's NOT a PR
push: ${{ github.event_name != 'pull_request' }}
# Use tags / labels provided by 'docker/metadata-action' above
tags: ${{ steps.meta_build_postgres_loadsql.outputs.tags }}
labels: ${{ steps.meta_build_postgres_loadsql.outputs.labels }}

View File

@@ -16,7 +16,7 @@ jobs:
# Only add to project board if issue is flagged as "needs triage" or has no labels
# NOTE: By default we flag new issues as "needs triage" in our issue template
if: (contains(github.event.issue.labels.*.name, 'needs triage') || join(github.event.issue.labels.*.name) == '')
-uses: actions/add-to-project@v0.3.0
+uses: actions/add-to-project@v0.5.0
# Note, the authentication token below is an ORG level Secret.
# It must be created/recreated manually via a personal access token with admin:org, project, public_repo permissions
# See: https://docs.github.com/en/actions/configuring-and-managing-workflows/authenticating-with-the-github_token#permissions-for-the-github_token

View File

@@ -23,7 +23,7 @@ jobs:
steps:
# See: https://github.com/prince-chrismc/label-merge-conflicts-action
- name: Auto-label PRs with merge conflicts
-uses: prince-chrismc/label-merge-conflicts-action@v2
+uses: prince-chrismc/label-merge-conflicts-action@v3
# Add "merge conflict" label if a merge conflict is detected. Remove it when resolved.
# Note, the authentication token is created automatically
# See: https://docs.github.com/en/actions/configuring-and-managing-workflows/authenticating-with-the-github_token

View File

@@ -31,7 +31,7 @@ ARG TARGET_DIR=dspace-installer
COPY --from=build /install /dspace-src
WORKDIR /dspace-src
# Create the initial install deployment using ANT
-ENV ANT_VERSION 1.10.12
+ENV ANT_VERSION 1.10.13
ENV ANT_HOME /tmp/ant-$ANT_VERSION
ENV PATH $ANT_HOME/bin:$PATH
# Need wget to install ant

View File

@@ -30,7 +30,7 @@ ARG TARGET_DIR=dspace-installer
COPY --from=build /install /dspace-src
WORKDIR /dspace-src
# Create the initial install deployment using ANT
-ENV ANT_VERSION 1.10.12
+ENV ANT_VERSION 1.10.13
ENV ANT_HOME /tmp/ant-$ANT_VERSION
ENV PATH $ANT_HOME/bin:$PATH
# Need wget to install ant

View File

@@ -62,13 +62,17 @@ services:
while (!</dev/tcp/dspacedb/5432) > /dev/null 2>&1; do sleep 1; done;
/dspace/bin/dspace database migrate
catalina.sh run
-# DSpace database container
+# DSpace PostgreSQL database container
dspacedb:
container_name: dspacedb
+# Uses a custom Postgres image with pgcrypto installed
+image: "${DOCKER_OWNER:-dspace}/dspace-postgres-pgcrypto:${DSPACE_VER:-dspace-7_x}"
+build:
+# Must build out of subdirectory to have access to install script for pgcrypto
+context: ./dspace/src/main/docker/dspace-postgres-pgcrypto/
environment:
PGDATA: /pgdata
-# Uses a custom Postgres image with pgcrypto installed
-image: dspace/dspace-postgres-pgcrypto
+POSTGRES_PASSWORD: dspace
networks:
dspacenet:
ports:
@@ -77,12 +81,17 @@ services:
stdin_open: true
tty: true
volumes:
+# Keep Postgres data directory between reboots
- pgdata:/pgdata
# DSpace Solr container
dspacesolr:
container_name: dspacesolr
-# Uses official Solr image at https://hub.docker.com/_/solr/
-image: solr:8.11-slim
+image: "${DOCKER_OWNER:-dspace}/dspace-solr:${DSPACE_VER:-dspace-7_x}"
+build:
+context: .
+dockerfile: ./dspace/src/main/docker/dspace-solr/Dockerfile
+args:
+SOLR_VERSION: "${SOLR_VER:-8.11}"
networks:
dspacenet:
ports:
@@ -92,30 +101,25 @@ services:
tty: true
working_dir: /var/solr/data
volumes:
-# Mount our local Solr core configs so that they are available as Solr configsets on container
-- ./dspace/solr/authority:/opt/solr/server/solr/configsets/authority
-- ./dspace/solr/oai:/opt/solr/server/solr/configsets/oai
-- ./dspace/solr/search:/opt/solr/server/solr/configsets/search
-- ./dspace/solr/statistics:/opt/solr/server/solr/configsets/statistics
# Keep Solr data directory between reboots
- solr_data:/var/solr/data
-# Initialize all DSpace Solr cores using the mounted local configsets (see above), then start Solr
+# Initialize all DSpace Solr cores then start Solr:
# * First, run precreate-core to create the core (if it doesn't yet exist). If exists already, this is a no-op
-# * Second, copy updated configs from mounted configsets to this core. If it already existed, this updates core
-# to the latest configs. If it's a newly created core, this is a no-op.
+# * Second, copy configsets to this core:
+# Updates to Solr configs require the container to be rebuilt/restarted: `docker compose -p d7 up -d --build dspacesolr`
entrypoint:
- /bin/bash
- '-c'
- |
init-var-solr
precreate-core authority /opt/solr/server/solr/configsets/authority
-cp -r -u /opt/solr/server/solr/configsets/authority/* authority
+cp -r /opt/solr/server/solr/configsets/authority/* authority
precreate-core oai /opt/solr/server/solr/configsets/oai
-cp -r -u /opt/solr/server/solr/configsets/oai/* oai
+cp -r /opt/solr/server/solr/configsets/oai/* oai
precreate-core search /opt/solr/server/solr/configsets/search
-cp -r -u /opt/solr/server/solr/configsets/search/* search
+cp -r /opt/solr/server/solr/configsets/search/* search
precreate-core statistics /opt/solr/server/solr/configsets/statistics
-cp -r -u /opt/solr/server/solr/configsets/statistics/* statistics
+cp -r /opt/solr/server/solr/configsets/statistics/* statistics
exec solr -f
volumes:
assetstore:

View File

@@ -776,7 +776,7 @@
<dependency>
<groupId>org.json</groupId>
<artifactId>json</artifactId>
-<version>20180130</version>
+<version>20230227</version>
</dependency>
<!-- Useful for testing command-line tools -->

View File

@@ -51,6 +51,7 @@ import org.dspace.content.service.CollectionService;
import org.dspace.content.service.CommunityService;
import org.dspace.content.service.ItemService;
import org.dspace.core.Context;
+import org.dspace.core.I18nUtil;
import org.dspace.discovery.IndexableObject;
import org.dspace.discovery.indexobject.IndexableCollection;
import org.dspace.discovery.indexobject.IndexableCommunity;
@@ -91,6 +92,7 @@ public class SyndicationFeed {
// default DC fields for entry
protected String defaultTitleField = "dc.title";
+protected String defaultDescriptionField = "dc.description";
protected String defaultAuthorField = "dc.contributor.author";
protected String defaultDateField = "dc.date.issued";
private static final String[] defaultDescriptionFields =
@@ -196,15 +198,15 @@ public class SyndicationFeed {
// dso is null for the whole site, or a search without scope
if (dso == null) {
defaultTitle = configurationService.getProperty("dspace.name");
-feed.setDescription(localize(labels, MSG_FEED_DESCRIPTION));
+defaultDescriptionField = localize(labels, MSG_FEED_DESCRIPTION);
objectURL = resolveURL(request, null);
} else {
Bitstream logo = null;
if (dso instanceof IndexableCollection) {
Collection col = ((IndexableCollection) dso).getIndexedObject();
defaultTitle = col.getName();
-feed.setDescription(collectionService.getMetadataFirstValue(col,
-CollectionService.MD_SHORT_DESCRIPTION, Item.ANY));
+defaultDescriptionField = collectionService.getMetadataFirstValue(col,
+CollectionService.MD_SHORT_DESCRIPTION, Item.ANY);
logo = col.getLogo();
String cols = configurationService.getProperty("webui.feed.podcast.collections");
if (cols != null && cols.length() > 1 && cols.contains(col.getHandle())) {
@@ -214,8 +216,8 @@ public class SyndicationFeed {
} else if (dso instanceof IndexableCommunity) {
Community comm = ((IndexableCommunity) dso).getIndexedObject();
defaultTitle = comm.getName();
-feed.setDescription(communityService.getMetadataFirstValue(comm,
-CommunityService.MD_SHORT_DESCRIPTION, Item.ANY));
+defaultDescriptionField = communityService.getMetadataFirstValue(comm,
+CommunityService.MD_SHORT_DESCRIPTION, Item.ANY);
logo = comm.getLogo();
String comms = configurationService.getProperty("webui.feed.podcast.communities");
if (comms != null && comms.length() > 1 && comms.contains(comm.getHandle())) {
@@ -230,6 +232,12 @@ public class SyndicationFeed {
}
feed.setTitle(labels.containsKey(MSG_FEED_TITLE) ?
localize(labels, MSG_FEED_TITLE) : defaultTitle);
+if (defaultDescriptionField == null || defaultDescriptionField == "") {
+defaultDescriptionField = I18nUtil.getMessage("org.dspace.app.util.SyndicationFeed.no-description");
+}
+feed.setDescription(defaultDescriptionField);
feed.setLink(objectURL);
feed.setPublishedDate(new Date());
feed.setUri(objectURL);

View File

@@ -52,11 +52,6 @@ public class IPAuthentication implements AuthenticationMethod {
*/
private static Logger log = org.apache.logging.log4j.LogManager.getLogger(IPAuthentication.class);
-/**
-* Whether to look for x-forwarded headers for logging IP addresses
-*/
-protected static Boolean useProxies;
/**
* All the IP matchers
*/
@@ -250,7 +245,7 @@ public class IPAuthentication implements AuthenticationMethod {
log.debug(LogHelper.getHeader(context, "authenticated",
"special_groups=" + gsb.toString()
-+ " (by IP=" + addr + ", useProxies=" + useProxies.toString() + ")"
++ " (by IP=" + addr + ")"
));
}

View File

@@ -332,8 +332,8 @@ public class BitstreamServiceImpl extends DSpaceObjectServiceImpl<Bitstream> imp
}
@Override
-public List<Bitstream> findDeletedBitstreams(Context context) throws SQLException {
-return bitstreamDAO.findDeletedBitstreams(context);
+public List<Bitstream> findDeletedBitstreams(Context context, int limit, int offset) throws SQLException {
+return bitstreamDAO.findDeletedBitstreams(context, limit, offset);
}
@Override

View File

@@ -48,6 +48,12 @@ public abstract class DSpaceObject implements Serializable, ReloadableEntity<jav
@Transient
private StringBuffer eventDetails = null;
+/**
+* The same order should be applied inside this comparator
+* {@link MetadataValueComparators#defaultComparator} to preserve
+* ordering while the list has been modified and not yet persisted
+* and reloaded.
+*/
@OneToMany(fetch = FetchType.LAZY, mappedBy = "dSpaceObject", cascade = CascadeType.ALL, orphanRemoval = true)
@OrderBy("metadataField, place")
private List<MetadataValue> metadata = new ArrayList<>();
@@ -116,7 +122,7 @@ public abstract class DSpaceObject implements Serializable, ReloadableEntity<jav
* @return summary of event details, or null if there are none.
*/
public String getDetails() {
-return (eventDetails == null ? null : eventDetails.toString());
+return eventDetails == null ? null : eventDetails.toString();
}
/**
@@ -145,7 +151,7 @@ public abstract class DSpaceObject implements Serializable, ReloadableEntity<jav
* one
*/
public String getHandle() {
-return (CollectionUtils.isNotEmpty(handles) ? handles.get(0).getHandle() : null);
+return CollectionUtils.isNotEmpty(handles) ? handles.get(0).getHandle() : null;
}
void setHandle(List<Handle> handle) {

View File

@@ -126,6 +126,11 @@ public abstract class DSpaceObjectServiceImpl<T extends DSpaceObject> implements
}
}
+// Sort the metadataValues if they have been modified,
+// is used to preserve the default order.
+if (dso.isMetadataModified()) {
+values.sort(MetadataValueComparators.defaultComparator);
+}
// Create an array of matching values
return values;
}
@@ -542,7 +547,7 @@ public abstract class DSpaceObjectServiceImpl<T extends DSpaceObject> implements
int add = 4 - tokens.length;
if (add > 0) {
-tokens = (String[]) ArrayUtils.addAll(tokens, new String[add]);
+tokens = ArrayUtils.addAll(tokens, new String[add]);
}
return tokens;
@@ -603,21 +608,18 @@ public abstract class DSpaceObjectServiceImpl<T extends DSpaceObject> implements
//If two places are the same then the MetadataValue instance will be placed before the
//RelationshipMetadataValue instance.
//This is done to ensure that the order is correct.
-metadataValues.sort(new Comparator<MetadataValue>() {
-@Override
-public int compare(MetadataValue o1, MetadataValue o2) {
-int compare = o1.getPlace() - o2.getPlace();
-if (compare == 0) {
-if (o1 instanceof RelationshipMetadataValue && o2 instanceof RelationshipMetadataValue) {
-return compare;
-} else if (o1 instanceof RelationshipMetadataValue) {
-return 1;
-} else if (o2 instanceof RelationshipMetadataValue) {
-return -1;
-}
-}
-return compare;
-}
-});
+metadataValues.sort((o1, o2) -> {
+int compare = o1.getPlace() - o2.getPlace();
+if (compare == 0) {
+if (o1 instanceof RelationshipMetadataValue && o2 instanceof RelationshipMetadataValue) {
+return compare;
+} else if (o1 instanceof RelationshipMetadataValue) {
+return 1;
+} else if (o2 instanceof RelationshipMetadataValue) {
+return -1;
+}
+}
+return compare;
+});
for (MetadataValue metadataValue : metadataValues) {
//Retrieve & store the place for each metadata value
@@ -634,7 +636,7 @@ public abstract class DSpaceObjectServiceImpl<T extends DSpaceObject> implements
String authority = metadataValue.getAuthority();
String relationshipId = StringUtils.split(authority, "::")[1];
Relationship relationship = relationshipService.find(context, Integer.parseInt(relationshipId));
-if (relationship.getLeftItem().equals((Item) dso)) {
+if (relationship.getLeftItem().equals(dso)) {
relationship.setLeftPlace(mvPlace);
} else {
relationship.setRightPlace(mvPlace);

View File

@@ -12,7 +12,6 @@ import java.io.InputStream;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Comparator;
import java.util.Date;
import java.util.Iterator;
import java.util.LinkedList;
@@ -288,9 +287,10 @@ public class ItemServiceImpl extends DSpaceObjectServiceImpl<Item> implements It
return itemDAO.findAll(context, true, true);
}
+@Override
public Iterator<Item> findAllRegularItems(Context context) throws SQLException {
return itemDAO.findAllRegularItems(context);
-};
+}
@Override
public Iterator<Item> findBySubmitter(Context context, EPerson eperson) throws SQLException {
@@ -1054,7 +1054,7 @@ public class ItemServiceImpl extends DSpaceObjectServiceImpl<Item> implements It
List<Collection> linkedCollections = item.getCollections();
List<Collection> notLinkedCollections = new ArrayList<>(allCollections.size() - linkedCollections.size());
-if ((allCollections.size() - linkedCollections.size()) == 0) {
+if (allCollections.size() - linkedCollections.size() == 0) {
return notLinkedCollections;
}
for (Collection collection : allCollections) {
@@ -1149,6 +1149,7 @@ public class ItemServiceImpl extends DSpaceObjectServiceImpl<Item> implements It
* @return <code>true</code> if the item is an inprogress submission, i.e. a WorkspaceItem or WorkflowItem
* @throws SQLException An exception that provides information on a database access error or other errors.
*/
+@Override
public boolean isInProgressSubmission(Context context, Item item) throws SQLException {
return workspaceItemService.findByItem(context, item) != null
|| workflowItemService.findByItem(context, item) != null;
@@ -1179,8 +1180,8 @@ prevent the generation of resource policy entry values with null dspace_object a
if (!authorizeService
.isAnIdenticalPolicyAlreadyInPlace(context, dso, defaultPolicy.getGroup(), Constants.READ,
defaultPolicy.getID()) &&
-((!appendMode && this.isNotAlreadyACustomRPOfThisTypeOnDSO(context, dso)) ||
-(appendMode && this.shouldBeAppended(context, dso, defaultPolicy)))) {
+(!appendMode && this.isNotAlreadyACustomRPOfThisTypeOnDSO(context, dso) ||
+appendMode && this.shouldBeAppended(context, dso, defaultPolicy))) {
ResourcePolicy newPolicy = resourcePolicyService.clone(context, defaultPolicy);
newPolicy.setdSpaceObject(dso);
newPolicy.setAction(Constants.READ);
@@ -1611,7 +1612,7 @@ prevent the generation of resource policy entry values with null dspace_object a
fullMetadataValueList.addAll(relationshipMetadataService.getRelationshipMetadata(item, true));
fullMetadataValueList.addAll(dbMetadataValues);
-item.setCachedMetadata(sortMetadataValueList(fullMetadataValueList));
+item.setCachedMetadata(MetadataValueComparators.sort(fullMetadataValueList));
}
log.debug("Called getMetadata for " + item.getID() + " based on cache");
@@ -1653,28 +1654,6 @@ prevent the generation of resource policy entry values with null dspace_object a
}
}
-/**
-* This method will sort the List of MetadataValue objects based on the MetadataSchema, MetadataField Element,
-* MetadataField Qualifier and MetadataField Place in that order.
-* @param listToReturn The list to be sorted
-* @return The list sorted on those criteria
-*/
-private List<MetadataValue> sortMetadataValueList(List<MetadataValue> listToReturn) {
-Comparator<MetadataValue> comparator = Comparator.comparing(
-metadataValue -> metadataValue.getMetadataField().getMetadataSchema().getName(),
-Comparator.nullsFirst(Comparator.naturalOrder()));
-comparator = comparator.thenComparing(metadataValue -> metadataValue.getMetadataField().getElement(),
-Comparator.nullsFirst(Comparator.naturalOrder()));
-comparator = comparator.thenComparing(metadataValue -> metadataValue.getMetadataField().getQualifier(),
-Comparator.nullsFirst(Comparator.naturalOrder()));
-comparator = comparator.thenComparing(metadataValue -> metadataValue.getPlace(),
-Comparator.nullsFirst(Comparator.naturalOrder()));
-Stream<MetadataValue> metadataValueStream = listToReturn.stream().sorted(comparator);
-listToReturn = metadataValueStream.collect(Collectors.toList());
-return listToReturn;
-}
@Override
public MetadataValue addMetadata(Context context, Item dso, String schema, String element, String qualifier,
String lang, String value, String authority, int confidence, int place) throws SQLException {

View File

@@ -19,6 +19,7 @@ import javax.persistence.Lob;
import javax.persistence.ManyToOne;
import javax.persistence.SequenceGenerator;
import javax.persistence.Table;
+import javax.persistence.Transient;
import org.dspace.core.Context;
import org.dspace.core.ReloadableEntity;
@@ -171,6 +172,14 @@ public class MetadataValue implements ReloadableEntity<Integer> {
this.metadataField = metadataField;
}
/**
* @return {@code MetadataField#getID()}
*/
@Transient
protected Integer getMetadataFieldId() {
return getMetadataField().getID();
}
/**
* Get the metadata value.
*

View File

@@ -0,0 +1,51 @@
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
/**
* This class contains only static members that can be used
* to sort list of {@link MetadataValue}
*
* @author Vincenzo Mecca (vins01-4science - vincenzo.mecca at 4science.com)
*
*/
public final class MetadataValueComparators {
private MetadataValueComparators() {}
/**
* This is the default comparator that mimics the ordering
* applied by the standard {@code @OrderBy} annotation inside
* {@link DSpaceObject#getMetadata()}
*/
public static final Comparator<MetadataValue> defaultComparator =
Comparator.comparing(MetadataValue::getMetadataFieldId)
.thenComparing(
MetadataValue::getPlace,
Comparator.nullsFirst(Comparator.naturalOrder())
);
/**
* This method creates a new {@code List<MetadataValue>} ordered by the
* {@code MetadataComparators#defaultComparator}.
*
* @param metadataValues
* @return {@code List<MetadataValue>} ordered copy list using stream.
*/
public static final List<MetadataValue> sort(List<MetadataValue> metadataValues) {
return metadataValues
.stream()
.sorted(MetadataValueComparators.defaultComparator)
.collect(Collectors.toList());
}
}
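For context, a minimal sketch of how the new comparator might be used by calling code (the surrounding class and method names here are illustrative only; MetadataValueComparators and MetadataValue are the classes shown above):

```java
import java.util.List;

import org.dspace.content.MetadataValue;
import org.dspace.content.MetadataValueComparators;

public class MetadataOrderingSketch {
    /**
     * Returns a copy of the given values ordered by metadata field ID and place,
     * mirroring the @OrderBy("metadataField, place") annotation on DSpaceObject.
     */
    static List<MetadataValue> inDefaultOrder(List<MetadataValue> values) {
        return MetadataValueComparators.sort(values);
    }

    /** In-place alternative using the comparator directly. */
    static void sortInPlace(List<MetadataValue> values) {
        values.sort(MetadataValueComparators.defaultComparator);
    }
}
```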

View File

@@ -29,7 +29,7 @@ public interface BitstreamDAO extends DSpaceObjectLegacySupportDAO<Bitstream> {
public Iterator<Bitstream> findAll(Context context, int limit, int offset) throws SQLException;
-public List<Bitstream> findDeletedBitstreams(Context context) throws SQLException;
+public List<Bitstream> findDeletedBitstreams(Context context, int limit, int offset) throws SQLException;
public List<Bitstream> findDuplicateInternalIdentifier(Context context, Bitstream bitstream) throws SQLException;

View File

@@ -41,13 +41,14 @@ public class BitstreamDAOImpl extends AbstractHibernateDSODAO<Bitstream> impleme
}
@Override
-public List<Bitstream> findDeletedBitstreams(Context context) throws SQLException {
+public List<Bitstream> findDeletedBitstreams(Context context, int limit, int offset) throws SQLException {
CriteriaBuilder criteriaBuilder = getCriteriaBuilder(context);
CriteriaQuery criteriaQuery = getCriteriaQuery(criteriaBuilder, Bitstream.class);
Root<Bitstream> bitstreamRoot = criteriaQuery.from(Bitstream.class);
criteriaQuery.select(bitstreamRoot);
+criteriaQuery.orderBy(criteriaBuilder.desc(bitstreamRoot.get(Bitstream_.ID)));
criteriaQuery.where(criteriaBuilder.equal(bitstreamRoot.get(Bitstream_.deleted), true));
-return list(context, criteriaQuery, false, Bitstream.class, -1, -1);
+return list(context, criteriaQuery, false, Bitstream.class, limit, offset);
}

View File

@@ -183,7 +183,7 @@ public interface BitstreamService extends DSpaceObjectService<Bitstream>, DSpace
* @return a list of all bitstreams that have been "deleted"
* @throws SQLException if database error
*/
-public List<Bitstream> findDeletedBitstreams(Context context) throws SQLException;
+public List<Bitstream> findDeletedBitstreams(Context context, int limit, int offset) throws SQLException;
/**

View File

@@ -10,18 +10,16 @@ package org.dspace.event;
import org.dspace.core.Context;
/**
-* Interface for content event consumers. Note that the consumer cannot tell if
-* it is invoked synchronously or asynchronously; the consumer interface and
-* sequence of calls is the same for both. Asynchronous consumers may see more
-* consume() calls between the start and end of the event stream, if they are
-* invoked asynchronously, once in a long time period, rather than synchronously
-* after every Context.commit().
-*
-* @version $Revision$
+* Interface for content event consumers. Note that the consumer cannot tell
+* if it is invoked synchronously or asynchronously; the consumer interface
+* and sequence of calls is the same for both. Asynchronous consumers may see
+* more consume() calls between the start and end of the event stream, if they
+* are invoked asynchronously, once in a long time period, rather than
+* synchronously after every Context.commit().
*/
public interface Consumer {
/**
-* Initialize - allocate any resources required to operate. This may include
+* Allocate any resources required to operate. This may include
* initializing any pooled JMS resources. Called ONCE when created by the
* dispatcher pool. This should be used to set up expensive resources that
* will remain for the lifetime of the consumer.
@@ -31,12 +29,17 @@ public interface Consumer {
public void initialize() throws Exception;
/**
-* Consume an event; events may get filtered at the dispatcher level, hiding
-* it from the consumer. This behavior is based on the dispatcher/consumer
-* configuration. Should include logic to initialize any resources required
-* for a batch of events.
+* Consume an event. Events may be filtered by a dispatcher, hiding them
+* from the consumer. This behavior is based on the dispatcher/consumer
+* configuration. Should include logic to initialize any resources
+* required for a batch of events.
*
-* @param ctx the execution context object
+* <p>This method <em>must not</em> commit the context. Committing causes
+* re-dispatch of the event queue, which can result in infinite recursion
+* leading to memory exhaustion as seen in
+* {@link https://github.com/DSpace/DSpace/pull/8756}.
+*
+* @param ctx the current DSpace session
* @param event the content event
* @throws Exception if error
*/
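To make the revised contract concrete, here is a minimal hypothetical consumer that never commits the context, per the warning added to the consume() javadoc (the class, its body, and the Event accessors used are illustrative assumptions based on the wider DSpace Consumer/Event API, not part of this commit):

```java
package org.dspace.event;

import org.dspace.core.Context;

/**
 * Illustrative consumer: it inspects events but never calls ctx.commit(),
 * since committing during dispatch re-dispatches the event queue.
 */
public class LoggingConsumer implements Consumer {

    @Override
    public void initialize() throws Exception {
        // Allocate long-lived resources here; nothing needed for this sketch.
    }

    @Override
    public void consume(Context ctx, Event event) throws Exception {
        // Read from the event only; do NOT commit the context here.
        System.out.println("Consumed event " + event.getEventTypeAsString());
    }

    @Override
    public void end(Context ctx) throws Exception {
        // Per-batch cleanup; still no commit.
    }

    @Override
    public void finish(Context ctx) throws Exception {
        // Release resources allocated in initialize().
    }
}
```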

View File

@@ -0,0 +1,20 @@
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
/**
* Actions which alter DSpace model objects can queue {@link Event}s, which
* are presented to {@link Consumer}s by a {@link Dispatcher}. A pool of
* {@code Dispatcher}s is managed by an {@link service.EventService}, guided
* by configuration properties {@code event.dispatcher.*}.
*
* <p>One must be careful not to commit the current DSpace {@code Context}
* during event dispatch. {@code commit()} triggers event dispatching, and
* doing this during event dispatch can lead to infinite recursion and
* memory exhaustion.
*/
package org.dspace.event;

View File

@@ -306,6 +306,7 @@ public class VersionedHandleIdentifierProviderWithCanonicalHandles extends Ident
public DSpaceObject resolve(Context context, String identifier, String... attributes) {
// We can do nothing with this, return null
try {
+identifier = handleService.parseHandle(identifier);
return handleService.resolveToObject(context, identifier);
} catch (IllegalStateException | SQLException e) {
log.error(LogHelper.getHeader(context, "Error while resolving handle to item", "handle: " + identifier),
@@ -426,6 +427,19 @@ public class VersionedHandleIdentifierProviderWithCanonicalHandles extends Ident
}
}
DSpaceObject itemWithCanonicalHandle = handleService.resolveToObject(context, canonical);
if (itemWithCanonicalHandle != null) {
if (itemWithCanonicalHandle.getID() != previous.getItem().getID()) {
log.warn("The previous version's item (" + previous.getItem().getID() +
") does not match with the item containing handle " + canonical +
" (" + itemWithCanonicalHandle.getID() + ")");
}
// Move the original handle from whatever item it's on to the newest version
handleService.modifyHandleDSpaceObject(context, canonical, dso);
} else {
handleService.createHandle(context, dso, canonical);
}
// add a new Identifier for this item: 12345/100.x
String idNew = canonical + DOT + version.getVersionNumber();
//Make sure we don't have an old handle hanging around (if our previous version was deleted in the workspace)

View File

@@ -141,7 +141,6 @@ public class DOIConsumer implements Consumer {
+ item.getID() + " and DOI " + doi + ".", ex);
}
}
-ctx.commit();
}
}

View File

@@ -60,7 +60,8 @@ public class LiveImportClientImpl implements LiveImportClient {
requestConfigBuilder.setConnectionRequestTimeout(timeout);
RequestConfig defaultRequestConfig = requestConfigBuilder.build();
-method = new HttpGet(buildUrl(URL, params.get(URI_PARAMETERS)));
+String uri = buildUrl(URL, params.get(URI_PARAMETERS));
+method = new HttpGet(uri);
method.setConfig(defaultRequestConfig);
Map<String, String> headerParams = params.get(HEADER_PARAMETERS);
@@ -71,7 +72,9 @@ public class LiveImportClientImpl implements LiveImportClient {
}
configureProxy(method, defaultRequestConfig);
+if (log.isDebugEnabled()) {
+log.debug("Performing GET request to \"" + uri + "\"...");
+}
HttpResponse httpResponse = httpClient.execute(method);
if (isNotSuccessfull(httpResponse)) {
throw new RuntimeException("The request failed with: " + getStatusCode(httpResponse) + " code, reason= "
@@ -98,7 +101,8 @@ public class LiveImportClientImpl implements LiveImportClient {
Builder requestConfigBuilder = RequestConfig.custom();
RequestConfig defaultRequestConfig = requestConfigBuilder.build();
-method = new HttpPost(buildUrl(URL, params.get(URI_PARAMETERS)));
+String uri = buildUrl(URL, params.get(URI_PARAMETERS));
+method = new HttpPost(uri);
method.setConfig(defaultRequestConfig);
if (StringUtils.isNotBlank(entry)) {
method.setEntity(new StringEntity(entry));
@@ -106,7 +110,9 @@ public class LiveImportClientImpl implements LiveImportClient {
setHeaderParams(method, params);
configureProxy(method, defaultRequestConfig);
+if (log.isDebugEnabled()) {
+log.debug("Performing POST request to \"" + uri + "\"..." );
+}
HttpResponse httpResponse = httpClient.execute(method);
if (isNotSuccessfull(httpResponse)) {
throw new RuntimeException();

View File

@@ -121,12 +121,14 @@ public class PubmedDateMetadatumContributor<T> implements MetadataContributor<T>
int j = 0;
// Use the first dcDate that has been formatted (Config should go from most specific to most lenient)
-while (j < dateFormatsToAttempt.size() && dcDate == null) {
+while (j < dateFormatsToAttempt.size()) {
String dateFormat = dateFormatsToAttempt.get(j);
try {
SimpleDateFormat formatter = new SimpleDateFormat(dateFormat);
Date date = formatter.parse(dateString);
dcDate = new DCDate(date);
+values.add(metadataFieldMapping.toDCValue(field, formatter.format(date)));
+break;
} catch (ParseException e) {
// Multiple dateformats can be configured, we don't want to print the entire stacktrace every
// time one of those formats fails.
@@ -136,9 +138,7 @@ public class PubmedDateMetadatumContributor<T> implements MetadataContributor<T>
}
j++;
}
-if (dcDate != null) {
-values.add(metadataFieldMapping.toDCValue(field, dcDate.toString()));
-} else {
+if (dcDate == null) {
log.info(
"Failed parsing " + dateString + ", check " +
"the configured dataformats in config/spring/api/pubmed-integration.xml");

View File

@@ -292,7 +292,14 @@ public class PubmedImportMetadataSourceServiceImpl extends AbstractImportMetadat
int countAttempt = 0;
while (StringUtils.isBlank(response) && countAttempt <= attempt) {
countAttempt++;
+long time = System.currentTimeMillis() - lastRequest;
+if ((time) < interRequestTime) {
+Thread.sleep(interRequestTime - time);
+}
response = liveImportClient.executeHttpGetRequest(1000, uriBuilder.toString(), params);
+lastRequest = System.currentTimeMillis();
}
if (StringUtils.isBlank(response)) {
@@ -316,7 +323,13 @@ public class PubmedImportMetadataSourceServiceImpl extends AbstractImportMetadat
countAttempt = 0;
while (StringUtils.isBlank(response2) && countAttempt <= attempt) {
countAttempt++;
+long time = System.currentTimeMillis() - lastRequest;
+if ((time) < interRequestTime) {
+Thread.sleep(interRequestTime - time);
+}
response2 = liveImportClient.executeHttpGetRequest(1000, uriBuilder2.toString(), params2);
+lastRequest = System.currentTimeMillis();
}
if (StringUtils.isBlank(response2)) {
@@ -418,7 +431,13 @@ public class PubmedImportMetadataSourceServiceImpl extends AbstractImportMetadat
int countAttempt = 0;
while (StringUtils.isBlank(response) && countAttempt <= attempt) {
countAttempt++;
+long time = System.currentTimeMillis() - lastRequest;
+if ((time) < interRequestTime) {
+Thread.sleep(interRequestTime - time);
+}
response = liveImportClient.executeHttpGetRequest(1000, uriBuilder.toString(), params);
+lastRequest = System.currentTimeMillis();
}
if (StringUtils.isBlank(response)) {
@@ -441,7 +460,12 @@ public class PubmedImportMetadataSourceServiceImpl extends AbstractImportMetadat
countAttempt = 0;
while (StringUtils.isBlank(response2) && countAttempt <= attempt) {
countAttempt++;
+long time = System.currentTimeMillis() - lastRequest;
+if ((time) < interRequestTime) {
+Thread.sleep(interRequestTime - time);
+}
response2 = liveImportClient.executeHttpGetRequest(1000, uriBuilder2.toString(), params2);
+lastRequest = System.currentTimeMillis();
}
if (StringUtils.isBlank(response2)) {

View File

@@ -183,6 +183,7 @@ public abstract class AbstractRemoteMetadataSource {
log.warn("Error in trying operation " + operationId + " " + retry + " " + warning + ", retrying !", e); log.warn("Error in trying operation " + operationId + " " + retry + " " + warning + ", retrying !", e);
} finally { } finally {
this.lastRequest = System.currentTimeMillis();
lock.unlock(); lock.unlock();
} }
@@ -262,5 +263,7 @@ public abstract class AbstractRemoteMetadataSource {
*/
public abstract void init() throws Exception;
+public void setInterRequestTime(final long interRequestTime) {
+this.interRequestTime = interRequestTime;
+}
}
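Taken together with the Pubmed hunks above, the throttling works by remembering when the last outgoing request was made and sleeping for whatever remains of interRequestTime before issuing the next one. A minimal self-contained sketch of just that step (the class is hypothetical; the default values are illustrative, the real interRequestTime comes from Spring configuration such as the 500 ms property set below):

```java
public class ThrottleSketch {
    private long lastRequest = 0L;          // epoch millis of the previous request
    private long interRequestTime = 500L;   // minimum spacing between requests, in millis

    /** Sleep just long enough so consecutive requests are at least interRequestTime apart. */
    synchronized void awaitTurn() throws InterruptedException {
        long elapsed = System.currentTimeMillis() - lastRequest;
        if (elapsed < interRequestTime) {
            Thread.sleep(interRequestTime - elapsed);
        }
        lastRequest = System.currentTimeMillis();
    }
}
```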

View File

@@ -79,6 +79,8 @@ public class OrcidHistory implements ReloadableEntity<Integer> {
/**
* A description of the synchronized resource.
*/
+@Lob
+@Type(type = "org.dspace.storage.rdbms.hibernate.DatabaseAwareLobType")
@Column(name = "description")
private String description;

View File

@@ -64,6 +64,8 @@ public class OrcidQueue implements ReloadableEntity<Integer> {
/**
* A description of the resource to be synchronized.
*/
+@Lob
+@Type(type = "org.dspace.storage.rdbms.hibernate.DatabaseAwareLobType")
@Column(name = "description")
private String description;

View File

@@ -17,6 +17,7 @@ import java.util.Map;
import java.util.UUID;
import javax.annotation.Nullable;
+import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections4.MapUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -224,25 +225,62 @@ public class BitstreamStorageServiceImpl implements BitstreamStorageService, Ini
@Override
public void cleanup(boolean deleteDbRecords, boolean verbose) throws SQLException, IOException, AuthorizeException {
Context context = new Context(Context.Mode.BATCH_EDIT);
-int commitCounter = 0;
+int offset = 0;
+int limit = 100;
+int cleanedBitstreamCount = 0;
+int deletedBitstreamCount = bitstreamService.countDeletedBitstreams(context);
+System.out.println("Found " + deletedBitstreamCount + " deleted bistream to cleanup");
try {
context.turnOffAuthorisationSystem();
-List<Bitstream> storage = bitstreamService.findDeletedBitstreams(context);
-for (Bitstream bitstream : storage) {
-UUID bid = bitstream.getID();
-Map wantedMetadata = new HashMap();
-wantedMetadata.put("size_bytes", null);
-wantedMetadata.put("modified", null);
-Map receivedMetadata = this.getStore(bitstream.getStoreNumber()).about(bitstream, wantedMetadata);
+while (cleanedBitstreamCount < deletedBitstreamCount) {
+List<Bitstream> storage = bitstreamService.findDeletedBitstreams(context, limit, offset);
+if (CollectionUtils.isEmpty(storage)) {
+break;
+}
+for (Bitstream bitstream : storage) {
+UUID bid = bitstream.getID();
+Map wantedMetadata = new HashMap();
+wantedMetadata.put("size_bytes", null);
+wantedMetadata.put("modified", null);
+Map receivedMetadata = this.getStore(bitstream.getStoreNumber()).about(bitstream, wantedMetadata);
// Make sure entries which do not exist are removed
if (MapUtils.isEmpty(receivedMetadata)) {
log.debug("bitstore.about is empty, so file is not present");
+if (deleteDbRecords) {
+log.debug("deleting record");
+if (verbose) {
+System.out.println(" - Deleting bitstream information (ID: " + bid + ")");
+}
+checksumHistoryService.deleteByBitstream(context, bitstream);
+if (verbose) {
+System.out.println(" - Deleting bitstream record from database (ID: " + bid + ")");
+}
+bitstreamService.expunge(context, bitstream);
+}
+context.uncacheEntity(bitstream);
+continue;
+}
+// This is a small chance that this is a file which is
+// being stored -- get it next time.
+if (isRecent(Long.valueOf(receivedMetadata.get("modified").toString()))) {
+log.debug("file is recent");
+context.uncacheEntity(bitstream);
+continue;
+}
if (deleteDbRecords) {
-log.debug("deleting record");
+log.debug("deleting db record");
if (verbose) {
System.out.println(" - Deleting bitstream information (ID: " + bid + ")");
}
@@ -252,64 +290,42 @@ public class BitstreamStorageServiceImpl implements BitstreamStorageService, Ini
}
bitstreamService.expunge(context, bitstream);
}
+if (isRegisteredBitstream(bitstream.getInternalId())) {
+context.uncacheEntity(bitstream);
+continue; // do not delete registered bitstreams
+}
+// Since versioning allows for multiple bitstreams, check if the internal
+// identifier isn't used on
+// another place
+if (bitstreamService.findDuplicateInternalIdentifier(context, bitstream).isEmpty()) {
+this.getStore(bitstream.getStoreNumber()).remove(bitstream);
+String message = ("Deleted bitstreamID " + bid + ", internalID " + bitstream.getInternalId());
+if (log.isDebugEnabled()) {
+log.debug(message);
+}
+if (verbose) {
+System.out.println(message);
+}
+}
context.uncacheEntity(bitstream);
-continue;
-}
-// This is a small chance that this is a file which is
-// being stored -- get it next time.
-if (isRecent(Long.valueOf(receivedMetadata.get("modified").toString()))) {
-log.debug("file is recent");
-context.uncacheEntity(bitstream);
-continue;
-}
-if (deleteDbRecords) {
-log.debug("deleting db record");
-if (verbose) {
-System.out.println(" - Deleting bitstream information (ID: " + bid + ")");
-}
-checksumHistoryService.deleteByBitstream(context, bitstream);
-if (verbose) {
-System.out.println(" - Deleting bitstream record from database (ID: " + bid + ")");
-}
-bitstreamService.expunge(context, bitstream);
-}
-if (isRegisteredBitstream(bitstream.getInternalId())) {
-context.uncacheEntity(bitstream);
-continue; // do not delete registered bitstreams
-}
-// Since versioning allows for multiple bitstreams, check if the internal identifier isn't used on
-// another place
-if (bitstreamService.findDuplicateInternalIdentifier(context, bitstream).isEmpty()) {
-this.getStore(bitstream.getStoreNumber()).remove(bitstream);
-String message = ("Deleted bitstreamID " + bid + ", internalID " + bitstream.getInternalId());
-if (log.isDebugEnabled()) {
-log.debug(message);
-}
-if (verbose) {
-System.out.println(message);
-}
-}
-// Make sure to commit our outstanding work every 100
-// iterations. Otherwise you risk losing the entire transaction
-// if we hit an exception, which isn't useful at all for large
-// amounts of bitstreams.
-commitCounter++;
-if (commitCounter % 100 == 0) {
-context.dispatchEvents();
-// Commit actual changes to DB after dispatch events
-System.out.print("Performing incremental commit to the database...");
-context.commit();
-System.out.println(" Incremental commit done!");
-}
-context.uncacheEntity(bitstream);
+}
+// Commit actual changes to DB after dispatch events
+System.out.print("Performing incremental commit to the database...");
+context.commit();
+System.out.println(" Incremental commit done!");
+cleanedBitstreamCount = cleanedBitstreamCount + storage.size();
+if (!deleteDbRecords) {
+offset = offset + limit;
+}
}
System.out.print("Committing changes to the database...");

View File

@@ -51,6 +51,7 @@ metadata.bitstream.iiif-virtual.bytes = File size
metadata.bitstream.iiif-virtual.checksum = Checksum
org.dspace.app.itemexport.no-result = The DSpaceObject that you specified has no items.
+org.dspace.app.util.SyndicationFeed.no-description = No Description
org.dspace.checker.ResultsLogger.bitstream-format = Bitstream format
org.dspace.checker.ResultsLogger.bitstream-found = Bitstream found
org.dspace.checker.ResultsLogger.bitstream-id = Bitstream ID

View File

@@ -0,0 +1,10 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
ALTER TABLE orcid_history ALTER COLUMN description SET DATA TYPE CLOB;
ALTER TABLE orcid_queue ALTER COLUMN description SET DATA TYPE CLOB;

View File

@@ -0,0 +1,10 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
ALTER TABLE orcid_history MODIFY (description CLOB);
ALTER TABLE orcid_queue MODIFY (description CLOB);

View File

@@ -0,0 +1,10 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
ALTER TABLE orcid_history ALTER COLUMN description TYPE TEXT;
ALTER TABLE orcid_queue ALTER COLUMN description TYPE TEXT;

View File

@@ -57,6 +57,7 @@
<property name="urlFetch" value="${pubmed.url.fetch}"/>
<property name="urlSearch" value="${pubmed.url.search}"/>
<property name="generateQueryForItem" ref="pubmedService"></property>
<property name="interRequestTime" value="500"/>
<property name="supportedExtensions">
<list>
<value>xml</value>

View File

@@ -13,15 +13,6 @@
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-2.5.xsd">
<!-- Identifier Service Application Interface. Will be autowired with
any Identifier Providers present in Spring context.
-->
<bean id="org.dspace.identifier.service.IdentifierService"
class="org.dspace.identifier.IdentifierServiceImpl"
autowire="byType"
scope="singleton"/>
<bean id="org.dspace.services.ConfigurationService"
class="org.dspace.servicemanager.config.DSpaceConfigurationService" scope="singleton"/>
@@ -31,12 +22,6 @@
<property name="configurationService" ref="org.dspace.services.ConfigurationService"/>
</bean-->
<bean id="org.dspace.identifier.VersionedHandleIdentifierProvider"
class="org.dspace.identifier.VersionedHandleIdentifierProvider"
scope="singleton">
<property name="configurationService" ref="org.dspace.services.ConfigurationService"/>
</bean>
<bean name="org.dspace.core.DBConnection" class="org.dspace.core.HibernateDBConnection" lazy-init="true" scope="prototype"/>
<!-- Register all our Flyway callback classes (which run before/after database migrations) -->

View File

@@ -19,7 +19,18 @@
<bean id="org.dspace.identifier.service.IdentifierService"
class="org.dspace.identifier.IdentifierServiceImpl"
autowire="byType"
scope="singleton"/>
scope="singleton">
<property name="providers">
<list>
<ref bean="org.dspace.identifier.HandleIdentifierProvider"/>
<ref bean="org.dspace.identifier.DOIIdentifierProvider"/>
</list>
</property>
</bean>
<bean id="org.dspace.identifier.HandleIdentifierProvider" class="org.dspace.identifier.VersionedHandleIdentifierProvider" scope="singleton">
<property name="configurationService" ref="org.dspace.services.ConfigurationService"/>
</bean>
<!-- provider to mint and register DOIs with DSpace.
To mint DOIs you need a registration agency. The DOIIdentifierProvider

View File

@@ -9,6 +9,7 @@ package org.dspace.content.service;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
@@ -112,6 +113,177 @@ public class ItemServiceTest extends AbstractIntegrationTestWithDatabase {
}
}
@Test
public void preserveMetadataOrder() throws Exception {
context.turnOffAuthorisationSystem();
itemService
.addMetadata(
context, item, dcSchema, contributorElement, authorQualifier, null, "test, one", null, 0, 2
);
MetadataValue placeZero =
itemService
.addMetadata(
context, item, dcSchema, contributorElement, authorQualifier, null, "test, two", null, 0, 0
);
itemService
.addMetadata(
context, item, dcSchema, contributorElement, authorQualifier, null, "test, three", null, 0, 1
);
context.commit();
context.restoreAuthSystemState();
// check the correct order using default method `getMetadata`
List<MetadataValue> defaultMetadata =
this.itemService.getMetadata(item, dcSchema, contributorElement, authorQualifier, Item.ANY);
assertThat(defaultMetadata,hasSize(3));
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, two", null, 0, defaultMetadata.get(0)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, three", null, 1, defaultMetadata.get(1)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, one", null, 2, defaultMetadata.get(2)
);
// check the correct order using the method `getMetadata` without virtual fields
List<MetadataValue> nonVirtualMetadatas =
this.itemService.getMetadata(item, dcSchema, contributorElement, authorQualifier, Item.ANY, false);
// if we don't reload the item the place order is not applied correctly
// item = context.reloadEntity(item);
assertThat(nonVirtualMetadatas,hasSize(3));
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, two", null, 0, nonVirtualMetadatas.get(0)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, three", null, 1, nonVirtualMetadatas.get(1)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, one", null, 2, nonVirtualMetadatas.get(2)
);
context.turnOffAuthorisationSystem();
item = context.reloadEntity(item);
// now just add one metadata to be the last
this.itemService.addMetadata(
context, item, dcSchema, contributorElement, authorQualifier, Item.ANY, "test, latest", null, 0
);
// now just remove first metadata
this.itemService.removeMetadataValues(context, item, List.of(placeZero));
// now just add one metadata to place 0
this.itemService.addAndShiftRightMetadata(
context, item, dcSchema, contributorElement, authorQualifier, Item.ANY, "test, new", null, 0, 0
);
// check the metadata using method `getMetadata`
defaultMetadata =
this.itemService.getMetadata(item, dcSchema, contributorElement, authorQualifier, Item.ANY);
// check correct places
assertThat(defaultMetadata,hasSize(4));
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, new", null, 0, defaultMetadata.get(0)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, three", null, 1, defaultMetadata.get(1)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, one", null, 2, defaultMetadata.get(2)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, latest", null, 3, defaultMetadata.get(3)
);
// check metadata using nonVirtualMethod
nonVirtualMetadatas =
this.itemService.getMetadata(item, dcSchema, contributorElement, authorQualifier, Item.ANY, false);
// check correct places
assertThat(nonVirtualMetadatas,hasSize(4));
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, new", null, 0, nonVirtualMetadatas.get(0)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, three", null, 1, nonVirtualMetadatas.get(1)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, one", null, 2, nonVirtualMetadatas.get(2)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, latest", null, 3, nonVirtualMetadatas.get(3)
);
// check both lists
assertThat(defaultMetadata.size(), equalTo(nonVirtualMetadatas.size()));
assertThat(defaultMetadata.get(0), equalTo(nonVirtualMetadatas.get(0)));
assertThat(defaultMetadata.get(1), equalTo(nonVirtualMetadatas.get(1)));
assertThat(defaultMetadata.get(2), equalTo(nonVirtualMetadatas.get(2)));
assertThat(defaultMetadata.get(3), equalTo(nonVirtualMetadatas.get(3)));
context.commit();
context.restoreAuthSystemState();
item = context.reloadEntity(item);
// check after commit
defaultMetadata =
this.itemService.getMetadata(item, dcSchema, contributorElement, authorQualifier, Item.ANY);
// check correct places
assertThat(defaultMetadata,hasSize(4));
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, new", null, 0, defaultMetadata.get(0)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, three", null, 1, defaultMetadata.get(1)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, one", null, 2, defaultMetadata.get(2)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, latest", null, 3, defaultMetadata.get(3)
);
// check metadata using nonVirtualMethod
nonVirtualMetadatas =
this.itemService.getMetadata(item, dcSchema, contributorElement, authorQualifier, Item.ANY, false);
// check correct places
assertThat(nonVirtualMetadatas,hasSize(4));
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, new", null, 0, nonVirtualMetadatas.get(0)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, three", null, 1, nonVirtualMetadatas.get(1)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, one", null, 2, nonVirtualMetadatas.get(2)
);
assertMetadataValue(
authorQualifier, contributorElement, dcSchema, "test, latest", null, 3, nonVirtualMetadatas.get(3)
);
// check both lists
assertThat(defaultMetadata.size(), equalTo(nonVirtualMetadatas.size()));
assertThat(defaultMetadata.get(0), equalTo(nonVirtualMetadatas.get(0)));
assertThat(defaultMetadata.get(1), equalTo(nonVirtualMetadatas.get(1)));
assertThat(defaultMetadata.get(2), equalTo(nonVirtualMetadatas.get(2)));
assertThat(defaultMetadata.get(3), equalTo(nonVirtualMetadatas.get(3)));
}
@Test
public void InsertAndMoveMetadataShiftPlaceTest() throws Exception {
context.turnOffAuthorisationSystem();

View File

@@ -0,0 +1,115 @@
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.identifier;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import org.dspace.AbstractIntegrationTestWithDatabase;
import org.dspace.authorize.AuthorizeException;
import org.dspace.builder.CollectionBuilder;
import org.dspace.builder.CommunityBuilder;
import org.dspace.builder.ItemBuilder;
import org.dspace.builder.VersionBuilder;
import org.dspace.content.Collection;
import org.dspace.content.Item;
import org.dspace.kernel.ServiceManager;
import org.dspace.services.factory.DSpaceServicesFactory;
import org.junit.Before;
import org.junit.Test;
public class VersionedHandleIdentifierProviderTest extends AbstractIntegrationTestWithDatabase {
private ServiceManager serviceManager;
private IdentifierServiceImpl identifierService;
private String firstHandle;
private Collection collection;
private Item itemV1;
private Item itemV2;
private Item itemV3;
@Before
@Override
public void setUp() throws Exception {
super.setUp();
context.turnOffAuthorisationSystem();
serviceManager = DSpaceServicesFactory.getInstance().getServiceManager();
identifierService = serviceManager.getServicesByType(IdentifierServiceImpl.class).get(0);
// Clean out providers to avoid any being used for creation of community and collection
identifierService.setProviders(new ArrayList<>());
parentCommunity = CommunityBuilder.createCommunity(context)
.withName("Parent Community")
.build();
collection = CollectionBuilder.createCollection(context, parentCommunity)
.withName("Collection")
.build();
}
private void registerProvider(Class type) {
// Register our new provider
serviceManager.registerServiceClass(type.getName(), type);
IdentifierProvider identifierProvider =
(IdentifierProvider) serviceManager.getServiceByName(type.getName(), type);
// Overwrite the identifier-service's providers with the new one to ensure only this provider is used
identifierService.setProviders(List.of(identifierProvider));
}
private void createVersions() throws SQLException, AuthorizeException {
itemV1 = ItemBuilder.createItem(context, collection)
.withTitle("First version")
.build();
firstHandle = itemV1.getHandle();
itemV2 = VersionBuilder.createVersion(context, itemV1, "Second version").build().getItem();
itemV3 = VersionBuilder.createVersion(context, itemV1, "Third version").build().getItem();
}
@Test
public void testDefaultVersionedHandleProvider() throws Exception {
registerProvider(VersionedHandleIdentifierProvider.class);
createVersions();
// Confirm the original item only has its original handle
assertEquals(firstHandle, itemV1.getHandle());
assertEquals(1, itemV1.getHandles().size());
// Confirm the second item has the correct version handle
assertEquals(firstHandle + ".2", itemV2.getHandle());
assertEquals(1, itemV2.getHandles().size());
// Confirm the last item has the correct version handle
assertEquals(firstHandle + ".3", itemV3.getHandle());
assertEquals(1, itemV3.getHandles().size());
}
@Test
public void testCanonicalVersionedHandleProvider() throws Exception {
registerProvider(VersionedHandleIdentifierProviderWithCanonicalHandles.class);
createVersions();
// Confirm the original item only has a version handle
assertEquals(firstHandle + ".1", itemV1.getHandle());
assertEquals(1, itemV1.getHandles().size());
// Confirm the second item has the correct version handle
assertEquals(firstHandle + ".2", itemV2.getHandle());
assertEquals(1, itemV2.getHandles().size());
// Confirm the last item has both the correct version handle and the original handle
assertEquals(firstHandle, itemV3.getHandle());
assertEquals(2, itemV3.getHandles().size());
containsHandle(itemV3, firstHandle + ".3");
}
private void containsHandle(Item item, String handle) {
assertTrue(item.getHandles().stream().anyMatch(h -> handle.equals(h.getHandle())));
}
}

View File

@@ -215,6 +215,62 @@ public class OrcidBulkPushIT extends AbstractIntegrationTestWithDatabase {
}
@Test
public void testWithVeryLongTitleQueueRecords() throws Exception {
Item firstProfileItem = createProfileItemItem("0000-1111-2222-3333", eperson, BATCH);
Item firstEntity = createPublication("Publication with a very very very very very very very very very " +
"very very very very very very very very very very very very very very very very very very very very " +
"very very very very very very very very very very very very very very very very very even " +
"extremely long title");
when(orcidClientMock.push(any(), eq("0000-1111-2222-3333"), any()))
.thenReturn(createdResponse("12345"));
when(orcidClientMock.update(any(), eq("0000-1111-2222-3333"), any(), eq("98765")))
.thenReturn(updatedResponse("98765"));
when(orcidClientMock.deleteByPutCode(
any(),
eq("0000-1111-2222-3333"),
eq("22222"),
eq("/work"))
).thenReturn(deletedResponse());
createOrcidQueue(context, firstProfileItem, firstEntity);
createOrcidQueue(context, firstProfileItem, "Description", "Publication", "22222");
context.commit();
TestDSpaceRunnableHandler handler = runBulkSynchronization(false);
String firstProfileItemId = firstProfileItem.getID().toString();
assertThat(handler.getInfoMessages(), hasSize(5));
assertThat(handler.getInfoMessages(), containsInAnyOrder(
"Found 2 queue records to synchronize with ORCID",
"Addition of Publication for profile with ID: " + firstProfileItemId,
"History record created with status 201. The operation was completed successfully",
"Deletion of Publication for profile with ID: " + firstProfileItemId + " by put code 22222",
"History record created with status 204. The operation was completed successfully"));
assertThat(handler.getErrorMessages(), empty());
assertThat(handler.getWarningMessages(), empty());
verify(orcidClientMock).push(any(), eq("0000-1111-2222-3333"), any());
verify(orcidClientMock).deleteByPutCode(
any(),
eq("0000-1111-2222-3333"),
eq("22222"),
eq("/work"));
verifyNoMoreInteractions(orcidClientMock);
List<OrcidHistory> historyRecords = orcidHistoryService.findAll(context);
assertThat(historyRecords, hasSize(2));
assertThat(historyRecords, hasItem(matches(history(firstProfileItem, firstEntity, 201, INSERT))));
assertThat(historyRecords, hasItem(matches(history(firstProfileItem, 204, DELETE))));
}
@Test
public void testWithOneValidationError() throws Exception {

View File

@@ -39,6 +39,7 @@ import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
/**
@@ -69,6 +70,8 @@ public class ItemOwningCollectionUpdateRestController {
* moving the item to the new collection.
*
* @param uuid The UUID of the item that will be moved
* @param inheritCollectionPolicies Boolean flag whether to inherit the target collection policies when
* moving the item
* @param response The response object
* @param request The request object
* @return The wrapped resource containing the new owning collection or null when the item was not moved
@@ -79,7 +82,10 @@ public class ItemOwningCollectionUpdateRestController {
@RequestMapping(method = RequestMethod.PUT, consumes = {"text/uri-list"})
@PreAuthorize("hasPermission(#uuid, 'ITEM','WRITE')")
@PostAuthorize("returnObject != null")
public CollectionRest move(@PathVariable UUID uuid, HttpServletResponse response,
public CollectionRest move(@PathVariable UUID uuid,
@RequestParam(name = "inheritPolicies", defaultValue = "false")
Boolean inheritCollectionPolicies,
HttpServletResponse response,
HttpServletRequest request)
throws SQLException, IOException, AuthorizeException {
Context context = ContextUtil.obtainContext(request);
@@ -91,7 +97,8 @@ public class ItemOwningCollectionUpdateRestController {
"or the data cannot be resolved to a collection.");
}
Collection targetCollection = performItemMove(context, uuid, (Collection) dsoList.get(0));
Collection targetCollection = performItemMove(context, uuid, (Collection) dsoList.get(0),
inheritCollectionPolicies);
if (targetCollection == null) {
return null;
@@ -107,17 +114,19 @@ public class ItemOwningCollectionUpdateRestController {
* @param item The item to be moved
* @param currentCollection The current owning collection of the item
* @param targetCollection The target collection of the item
* @param inheritPolicies Boolean flag whether to inherit the target collection policies when moving the item
* @return The target collection
* @throws SQLException If something goes wrong
* @throws IOException If something goes wrong
* @throws AuthorizeException If the user is not authorized to perform the move action
*/
private Collection moveItem(final Context context, final Item item, final Collection currentCollection,
final Collection targetCollection)
final Collection targetCollection,
final boolean inheritPolicies)
throws SQLException, IOException, AuthorizeException {
itemService.move(context, item, currentCollection, targetCollection);
itemService.move(context, item, currentCollection, targetCollection, inheritPolicies);
//Necessary because Controller does not pass through general RestResourceController, and as such does not do its
// Necessary because Controller does not pass through general RestResourceController, and as such does not do
// commit in DSpaceRestRepository.createAndReturn() or similar
// its commit in DSpaceRestRepository.createAndReturn() or similar
context.commit();
return context.reloadEntity(targetCollection);
@@ -129,12 +138,14 @@ public class ItemOwningCollectionUpdateRestController {
* @param context The context Object
* @param itemUuid The uuid of the item to be moved
* @param targetCollection The target collection
* @param inheritPolicies Whether to inherit the target collection policies when moving the item
* @return The new owning collection of the item when authorized or null when not authorized
* @throws SQLException If something goes wrong
* @throws IOException If something goes wrong
* @throws AuthorizeException If the user is not authorized to perform the move action
*/
private Collection performItemMove(final Context context, final UUID itemUuid, final Collection targetCollection)
private Collection performItemMove(final Context context, final UUID itemUuid, final Collection targetCollection,
boolean inheritPolicies)
throws SQLException, IOException, AuthorizeException {
Item item = itemService.find(context, itemUuid);
@@ -153,7 +164,7 @@ public class ItemOwningCollectionUpdateRestController {
if (authorizeService.authorizeActionBoolean(context, currentCollection, Constants.ADMIN)) {
return moveItem(context, item, currentCollection, targetCollection);
return moveItem(context, item, currentCollection, targetCollection, inheritPolicies);
}
return null;
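For orientation (illustrative, not part of this change): with the new request parameter, a client can move an item and inherit the target collection's policies roughly as in the MockMvc fragment below. The endpoint path, base URL, item/collection variables and helper methods are assumed from the integration-test conventions used elsewhere in this diff.
// Illustrative sketch only; names and URLs are assumptions, not introduced by this change.
String adminToken = getAuthToken(admin.getEmail(), password);
getClient(adminToken).perform(
        put("/api/core/items/" + item.getID() + "/owningCollection")
            .param("inheritPolicies", "true")
            .contentType(parseMediaType(TEXT_URI_LIST_VALUE))
            .content("http://localhost:8080/server/api/core/collections/" + targetCollection.getID()))
    .andExpect(status().isOk());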

View File

@@ -42,6 +42,7 @@ import org.springframework.web.bind.annotation.ControllerAdvice;
import org.springframework.web.bind.annotation.ExceptionHandler;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.context.request.WebRequest;
import org.springframework.web.multipart.MaxUploadSizeExceededException;
import org.springframework.web.multipart.MultipartException;
import org.springframework.web.servlet.mvc.method.annotation.ResponseEntityExceptionHandler;
@@ -97,6 +98,13 @@ public class DSpaceApiExceptionControllerAdvice extends ResponseEntityExceptionH
sendErrorResponse(request, response, ex, "Request is invalid or incorrect", HttpServletResponse.SC_BAD_REQUEST);
}
@ExceptionHandler(MaxUploadSizeExceededException.class)
protected void handleMaxUploadSizeExceededException(HttpServletRequest request, HttpServletResponse response,
Exception ex) throws IOException {
sendErrorResponse(request, response, ex, "Request entity is too large",
HttpServletResponse.SC_REQUEST_ENTITY_TOO_LARGE);
}
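Illustrative only: with this handler in place, an upload that exceeds the configured multipart limit should surface as HTTP 413 rather than a generic 500. A hypothetical MockMvc fragment, where the bundle bitstream-upload endpoint and the 50 MB payload are assumptions used purely as an example:
// Hypothetical sketch; assumes the configured multipart limit is below 50 MB.
MockMultipartFile tooLarge = new MockMultipartFile(
        "file", "too-large.bin", "application/octet-stream", new byte[50 * 1024 * 1024]);
getClient(getAuthToken(admin.getEmail(), password))
        .perform(multipart("/api/core/bundles/" + bundle.getID() + "/bitstreams").file(tooLarge))
        .andExpect(status().isPayloadTooLarge());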
@ExceptionHandler(SQLException.class)
protected void handleSQLException(HttpServletRequest request, HttpServletResponse response, Exception ex)
throws IOException {

View File

@@ -249,4 +249,24 @@ public class OpenSearchControllerIT extends AbstractControllerIntegrationTest {
</OpenSearchDescription>
*/
}
@Test
public void emptyDescriptionTest() throws Exception {
context.turnOffAuthorisationSystem();
parentCommunity = CommunityBuilder.createCommunity(context)
.withName("Parent Community")
.build();
Community child1 = CommunityBuilder.createSubCommunity(context, parentCommunity)
.withName("Sub Community")
.build();
Collection collection1 = CollectionBuilder.createCollection(context, child1).withName("Collection 1")
.build();
getClient().perform(get("/opensearch/search")
.param("format", "rss")
.param("scope", collection1.getID().toString())
.param("query", "*"))
.andExpect(status().isOk())
.andExpect(xpath("rss/channel/description").string("No Description"));
}
}

View File

@@ -15,6 +15,7 @@ import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.delete;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.patch;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
@@ -30,6 +31,9 @@ import org.dspace.app.rest.matcher.BitstreamFormatMatcher;
import org.dspace.app.rest.matcher.BitstreamMatcher;
import org.dspace.app.rest.matcher.BundleMatcher;
import org.dspace.app.rest.matcher.HalMatcher;
import org.dspace.app.rest.matcher.MetadataMatcher;
import org.dspace.app.rest.model.patch.Operation;
import org.dspace.app.rest.model.patch.ReplaceOperation;
import org.dspace.app.rest.test.AbstractControllerIntegrationTest;
import org.dspace.app.rest.test.MetadataPatchSuite;
import org.dspace.authorize.service.ResourcePolicyService;
@@ -45,6 +49,7 @@ import org.dspace.content.Bundle;
import org.dspace.content.Collection;
import org.dspace.content.Community;
import org.dspace.content.Item;
import org.dspace.content.MetadataSchemaEnum;
import org.dspace.content.service.BitstreamFormatService;
import org.dspace.content.service.BitstreamService;
import org.dspace.content.service.ItemService;
@@ -1222,6 +1227,92 @@ public class BitstreamRestRepositoryIT extends AbstractControllerIntegrationTest
+ parentCommunity.getLogo().getID(), expectedStatus);
}
@Test
public void patchReplaceMultipleDescriptionBitstream() throws Exception {
context.turnOffAuthorisationSystem();
List<String> bitstreamDescriptions = List.of(
"FIRST",
"SECOND",
"THIRD"
);
parentCommunity = CommunityBuilder.createCommunity(context)
.withName("Parent Community")
.build();
Community child1 =
CommunityBuilder.createSubCommunity(context, parentCommunity).withName("Sub Community").build();
Collection col1 = CollectionBuilder.createCollection(context, child1).withName("Collection 1").build();
Item publicItem1 = ItemBuilder.createItem(context, col1).withTitle("Test").build();
String bitstreamContent = "ThisIsSomeDummyText";
Bitstream bitstream = null;
try (InputStream is = IOUtils.toInputStream(bitstreamContent, CharEncoding.UTF_8)) {
bitstream = BitstreamBuilder.
createBitstream(context, publicItem1, is)
.withName("Bitstream")
.withMimeType("text/plain")
.build();
}
this.bitstreamService
.addMetadata(
context, bitstream,
MetadataSchemaEnum.DC.getName(), "description", null,
Item.ANY, bitstreamDescriptions
);
context.restoreAuthSystemState();
String token = getAuthToken(admin.getEmail(), password);
getClient(token)
.perform(get("/api/core/bitstreams/" + bitstream.getID()))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", bitstreamDescriptions.get(0), 0),
MetadataMatcher.matchMetadata("dc.description", bitstreamDescriptions.get(1), 1),
MetadataMatcher.matchMetadata("dc.description", bitstreamDescriptions.get(2), 2)
)
)
);
List<Operation> ops = List.of(
new ReplaceOperation("/metadata/dc.description/0", bitstreamDescriptions.get(2)),
new ReplaceOperation("/metadata/dc.description/1", bitstreamDescriptions.get(0)),
new ReplaceOperation("/metadata/dc.description/2", bitstreamDescriptions.get(1))
);
String requestBody = getPatchContent(ops);
getClient(token)
.perform(patch("/api/core/bitstreams/" + bitstream.getID())
.content(requestBody)
.contentType(javax.ws.rs.core.MediaType.APPLICATION_JSON_PATCH_JSON))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", bitstreamDescriptions.get(2), 0),
MetadataMatcher.matchMetadata("dc.description", bitstreamDescriptions.get(0), 1),
MetadataMatcher.matchMetadata("dc.description", bitstreamDescriptions.get(1), 2)
)
)
);
getClient(token)
.perform(get("/api/core/bitstreams/" + bitstream.getID()))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", bitstreamDescriptions.get(2), 0),
MetadataMatcher.matchMetadata("dc.description", bitstreamDescriptions.get(0), 1),
MetadataMatcher.matchMetadata("dc.description", bitstreamDescriptions.get(1), 2)
)
)
);
}
@Test
public void testHiddenMetadataForAnonymousUser() throws Exception {

View File

import org.dspace.app.rest.model.MetadataRest;
import org.dspace.app.rest.model.MetadataValueRest;
import org.dspace.app.rest.model.patch.MoveOperation;
import org.dspace.app.rest.model.patch.Operation;
import org.dspace.app.rest.model.patch.ReplaceOperation;
import org.dspace.app.rest.test.AbstractControllerIntegrationTest;
import org.dspace.authorize.ResourcePolicy;
import org.dspace.authorize.service.ResourcePolicyService;
@@ -51,6 +52,8 @@ import org.dspace.content.Bitstream;
import org.dspace.content.Bundle;
import org.dspace.content.Collection;
import org.dspace.content.Item;
import org.dspace.content.MetadataSchemaEnum;
import org.dspace.content.service.BundleService;
import org.dspace.content.service.ItemService;
import org.dspace.core.Constants;
import org.dspace.eperson.EPerson;
@@ -68,6 +71,9 @@ public class BundleRestRepositoryIT extends AbstractControllerIntegrationTest {
@Autowired
ItemService itemService;
@Autowired
BundleService bundleService;
private Collection collection;
private Item item;
private Bundle bundle1;
@@ -515,6 +521,77 @@ public class BundleRestRepositoryIT extends AbstractControllerIntegrationTest {
)));
}
@Test
public void patchReplaceMultipleDescriptionBundle() throws Exception {
context.turnOffAuthorisationSystem();
List<String> bundleDescriptions = List.of(
"FIRST",
"SECOND",
"THIRD"
);
bundle1 = BundleBuilder.createBundle(context, item)
.withName("testname")
.build();
this.bundleService
.addMetadata(
context, bundle1,
MetadataSchemaEnum.DC.getName(), "description", null,
Item.ANY, bundleDescriptions
);
context.restoreAuthSystemState();
String token = getAuthToken(admin.getEmail(), password);
getClient(token)
.perform(get("/api/core/bundles/" + bundle1.getID()))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", bundleDescriptions.get(0), 0),
MetadataMatcher.matchMetadata("dc.description", bundleDescriptions.get(1), 1),
MetadataMatcher.matchMetadata("dc.description", bundleDescriptions.get(2), 2)
)
)
);
List<Operation> ops = List.of(
new ReplaceOperation("/metadata/dc.description/0", bundleDescriptions.get(2)),
new ReplaceOperation("/metadata/dc.description/1", bundleDescriptions.get(0)),
new ReplaceOperation("/metadata/dc.description/2", bundleDescriptions.get(1))
);
String requestBody = getPatchContent(ops);
getClient(token)
.perform(patch("/api/core/bundles/" + bundle1.getID())
.content(requestBody)
.contentType(javax.ws.rs.core.MediaType.APPLICATION_JSON_PATCH_JSON))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", bundleDescriptions.get(2), 0),
MetadataMatcher.matchMetadata("dc.description", bundleDescriptions.get(0), 1),
MetadataMatcher.matchMetadata("dc.description", bundleDescriptions.get(1), 2)
)
)
);
getClient(token)
.perform(get("/api/core/bundles/" + bundle1.getID()))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", bundleDescriptions.get(2), 0),
MetadataMatcher.matchMetadata("dc.description", bundleDescriptions.get(0), 1),
MetadataMatcher.matchMetadata("dc.description", bundleDescriptions.get(1), 2)
)
)
);
}
@Test
public void deleteBundle() throws Exception {
context.turnOffAuthorisationSystem();

View File

@@ -69,6 +69,7 @@ import org.dspace.content.Collection;
import org.dspace.content.Community;
import org.dspace.content.EntityType;
import org.dspace.content.Item;
import org.dspace.content.MetadataSchemaEnum;
import org.dspace.content.service.CollectionService;
import org.dspace.core.Constants;
import org.dspace.eperson.EPerson;
@@ -499,13 +500,13 @@ public class CollectionRestRepositoryIT extends AbstractControllerIntegrationTes
getClient(tokenParentAdmin).perform(get("/api/core/collections/" + col1.getID()))
.andExpect(status().isOk())
.andExpect(jsonPath("$",
Matchers.is((CollectionMatcher.matchCollection(col1)))));
Matchers.is(CollectionMatcher.matchCollection(col1))));
String tokenCol1Admin = getAuthToken(col1Admin.getEmail(), "qwerty02");
getClient(tokenCol1Admin).perform(get("/api/core/collections/" + col1.getID()))
.andExpect(status().isOk())
.andExpect(jsonPath("$",
Matchers.is((CollectionMatcher.matchCollection(col1)))));
Matchers.is(CollectionMatcher.matchCollection(col1))));
String tokenCol2Admin = getAuthToken(col2Admin.getEmail(), "qwerty03");
getClient(tokenCol2Admin).perform(get("/api/core/collections/" + col1.getID()))
@@ -1206,7 +1207,7 @@ public class CollectionRestRepositoryIT extends AbstractControllerIntegrationTes
)
)))
.andDo(result -> idRef
.set(UUID.fromString(read(result.getResponse().getContentAsString(), "$.id"))));;
.set(UUID.fromString(read(result.getResponse().getContentAsString(), "$.id"))));
getClient(authToken).perform(post("/api/core/collections")
@@ -3101,6 +3102,81 @@ public class CollectionRestRepositoryIT extends AbstractControllerIntegrationTes
.andExpect(status().isUnauthorized()); .andExpect(status().isUnauthorized());
} }
@Test
public void patchReplaceMultipleDescriptionCollection() throws Exception {
context.turnOffAuthorisationSystem();
List<String> collectionDescriptions = List.of(
"FIRST",
"SECOND",
"THIRD"
);
parentCommunity =
CommunityBuilder.createCommunity(context)
.withName("Parent Community")
.build();
Collection col =
CollectionBuilder.createCollection(context, parentCommunity)
.withName("MyTest")
.build();
this.collectionService
.addMetadata(
context, col, MetadataSchemaEnum.DC.getName(), "description", null, Item.ANY, collectionDescriptions
);
context.restoreAuthSystemState();
String token = getAuthToken(admin.getEmail(), password);
getClient(token)
.perform(get("/api/core/collections/" + col.getID()))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", collectionDescriptions.get(0), 0),
MetadataMatcher.matchMetadata("dc.description", collectionDescriptions.get(1), 1),
MetadataMatcher.matchMetadata("dc.description", collectionDescriptions.get(2), 2)
)
)
);
List<Operation> ops = List.of(
new ReplaceOperation("/metadata/dc.description/0", collectionDescriptions.get(2)),
new ReplaceOperation("/metadata/dc.description/1", collectionDescriptions.get(0)),
new ReplaceOperation("/metadata/dc.description/2", collectionDescriptions.get(1))
);
String requestBody = getPatchContent(ops);
getClient(token)
.perform(patch("/api/core/collections/" + col.getID())
.content(requestBody)
.contentType(javax.ws.rs.core.MediaType.APPLICATION_JSON_PATCH_JSON))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", collectionDescriptions.get(2), 0),
MetadataMatcher.matchMetadata("dc.description", collectionDescriptions.get(0), 1),
MetadataMatcher.matchMetadata("dc.description", collectionDescriptions.get(1), 2)
)
)
);
getClient(token)
.perform(get("/api/core/collections/" + col.getID()))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", collectionDescriptions.get(2), 0),
MetadataMatcher.matchMetadata("dc.description", collectionDescriptions.get(0), 1),
MetadataMatcher.matchMetadata("dc.description", collectionDescriptions.get(1), 2)
)
)
);
}
@Test
public void patchMetadataCheckReindexingTest() throws Exception {
context.turnOffAuthorisationSystem();

View File

@@ -20,6 +20,7 @@ import static org.springframework.data.rest.webmvc.RestMediaTypes.TEXT_URI_LIST_
import static org.springframework.http.MediaType.parseMediaType;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.delete;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.patch;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.put;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
@@ -44,6 +45,8 @@ import org.dspace.app.rest.model.CommunityRest;
import org.dspace.app.rest.model.GroupRest;
import org.dspace.app.rest.model.MetadataRest;
import org.dspace.app.rest.model.MetadataValueRest;
import org.dspace.app.rest.model.patch.Operation;
import org.dspace.app.rest.model.patch.ReplaceOperation;
import org.dspace.app.rest.projection.Projection;
import org.dspace.app.rest.test.AbstractControllerIntegrationTest;
import org.dspace.app.rest.test.MetadataPatchSuite;
@@ -56,6 +59,8 @@ import org.dspace.builder.GroupBuilder;
import org.dspace.builder.ResourcePolicyBuilder;
import org.dspace.content.Collection;
import org.dspace.content.Community;
import org.dspace.content.Item;
import org.dspace.content.MetadataSchemaEnum;
import org.dspace.content.service.CommunityService;
import org.dspace.core.Constants;
import org.dspace.eperson.EPerson;
@@ -1935,6 +1940,78 @@ public class CommunityRestRepositoryIT extends AbstractControllerIntegrationTest
runPatchMetadataTests(eperson, 403);
}
@Test
public void patchReplaceMultipleDescriptionCommunity() throws Exception {
context.turnOffAuthorisationSystem();
List<String> communityDescriptions = List.of(
"FIRST",
"SECOND",
"THIRD"
);
parentCommunity =
CommunityBuilder.createCommunity(context)
.withName("Parent Community")
.build();
this.communityService
.addMetadata(
context, parentCommunity,
MetadataSchemaEnum.DC.getName(), "description", null,
Item.ANY, communityDescriptions
);
context.restoreAuthSystemState();
String token = getAuthToken(admin.getEmail(), password);
getClient(token)
.perform(get("/api/core/communities/" + parentCommunity.getID()))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", communityDescriptions.get(0), 0),
MetadataMatcher.matchMetadata("dc.description", communityDescriptions.get(1), 1),
MetadataMatcher.matchMetadata("dc.description", communityDescriptions.get(2), 2)
)
)
);
List<Operation> ops = List.of(
new ReplaceOperation("/metadata/dc.description/0", communityDescriptions.get(2)),
new ReplaceOperation("/metadata/dc.description/1", communityDescriptions.get(0)),
new ReplaceOperation("/metadata/dc.description/2", communityDescriptions.get(1))
);
String requestBody = getPatchContent(ops);
getClient(token)
.perform(patch("/api/core/communities/" + parentCommunity.getID())
.content(requestBody)
.contentType(javax.ws.rs.core.MediaType.APPLICATION_JSON_PATCH_JSON))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", communityDescriptions.get(2), 0),
MetadataMatcher.matchMetadata("dc.description", communityDescriptions.get(0), 1),
MetadataMatcher.matchMetadata("dc.description", communityDescriptions.get(1), 2)
)
)
);
getClient(token)
.perform(get("/api/core/communities/" + parentCommunity.getID()))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", communityDescriptions.get(2), 0),
MetadataMatcher.matchMetadata("dc.description", communityDescriptions.get(0), 1),
MetadataMatcher.matchMetadata("dc.description", communityDescriptions.get(1), 2)
)
)
);
}
private void runPatchMetadataTests(EPerson asUser, int expectedStatus) throws Exception {
context.turnOffAuthorisationSystem();
parentCommunity = CommunityBuilder.createCommunity(context).withName("Community").build();

View File

@@ -72,6 +72,7 @@ import org.dspace.builder.GroupBuilder;
import org.dspace.builder.WorkflowItemBuilder;
import org.dspace.content.Collection;
import org.dspace.content.Community;
import org.dspace.content.Item;
import org.dspace.core.I18nUtil;
import org.dspace.eperson.EPerson;
import org.dspace.eperson.Group;
@@ -155,7 +156,7 @@ public class EPersonRestRepositoryIT extends AbstractControllerIntegrationTest {
.andExpect(content().contentType(contentType))
.andExpect(jsonPath("$", HalMatcher.matchNoEmbeds()))
.andDo(result -> idRefNoEmbeds
.set(UUID.fromString(read(result.getResponse().getContentAsString(), "$.id"))));;
.set(UUID.fromString(read(result.getResponse().getContentAsString(), "$.id"))));
} finally {
EPersonBuilder.deleteEPerson(idRef.get());
@@ -1217,7 +1218,7 @@ public class EPersonRestRepositoryIT extends AbstractControllerIntegrationTest {
.content(patchBody)
.contentType(MediaType.APPLICATION_JSON_PATCH_JSON))
.andExpect(status().isOk())
.andExpect(jsonPath("$.canLogIn", Matchers.is(true)));;
.andExpect(jsonPath("$.canLogIn", Matchers.is(true)));
List<Operation> ops2 = new ArrayList<Operation>();
@@ -1295,7 +1296,7 @@ public class EPersonRestRepositoryIT extends AbstractControllerIntegrationTest {
.content(patchBody)
.contentType(MediaType.APPLICATION_JSON_PATCH_JSON))
.andExpect(status().isOk())
.andExpect(jsonPath("$.requireCertificate", Matchers.is(true)));;
.andExpect(jsonPath("$.requireCertificate", Matchers.is(true)));
List<Operation> ops2 = new ArrayList<Operation>();
ReplaceOperation replaceOperation2 = new ReplaceOperation("/certificate",null);
@@ -1858,6 +1859,78 @@ public class EPersonRestRepositoryIT extends AbstractControllerIntegrationTest {
matchMetadata("eperson.firstname", newName)))));
}
@Test
public void patchMultipleReplaceMetadataByAdmin() throws Exception {
context.turnOffAuthorisationSystem();
String first = "First";
String second = "Second";
String third = "Third";
EPerson ePerson = EPersonBuilder.createEPerson(context)
.withEmail("Johndoe@example.com")
.build();
this.ePersonService
.addMetadata(context, ePerson, "eperson", "firstname", null, Item.ANY, List.of(first, second, third));
context.restoreAuthSystemState();
String token = getAuthToken(admin.getEmail(), password);
// The replacement of the eperson.firstname value is persisted
getClient(token).perform(get("/api/eperson/epersons/" + ePerson.getID()))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("eperson.firstname", first, 0),
MetadataMatcher.matchMetadata("eperson.firstname", second, 1),
MetadataMatcher.matchMetadata("eperson.firstname", third, 2)
)
)
);
List<Operation> ops = new ArrayList<Operation>();
ReplaceOperation replaceFirst = new ReplaceOperation("/metadata/eperson.firstname/0", third);
ReplaceOperation replaceSecond = new ReplaceOperation("/metadata/eperson.firstname/1", second);
ReplaceOperation replaceThird = new ReplaceOperation("/metadata/eperson.firstname/2", first);
ops.add(replaceFirst);
ops.add(replaceSecond);
ops.add(replaceThird);
String patchBody = getPatchContent(ops);
getClient(token).perform(patch("/api/eperson/epersons/" + ePerson.getID())
.content(patchBody)
.contentType(MediaType.APPLICATION_JSON_PATCH_JSON))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("eperson.firstname", third, 0),
MetadataMatcher.matchMetadata("eperson.firstname", second, 1),
MetadataMatcher.matchMetadata("eperson.firstname", first, 2)
)
)
);
getClient(token).perform(get("/api/eperson/epersons/" + ePerson.getID()))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("eperson.firstname", third, 0),
MetadataMatcher.matchMetadata("eperson.firstname", second, 1),
MetadataMatcher.matchMetadata("eperson.firstname", first, 2)
)
)
);
}
@Test
public void patchOwnMetadataByNonAdminUser() throws Exception {

View File

@@ -40,6 +40,7 @@ import org.dspace.app.rest.exception.GroupNameNotProvidedException;
import org.dspace.app.rest.matcher.EPersonMatcher;
import org.dspace.app.rest.matcher.GroupMatcher;
import org.dspace.app.rest.matcher.HalMatcher;
import org.dspace.app.rest.matcher.MetadataMatcher;
import org.dspace.app.rest.model.GroupRest;
import org.dspace.app.rest.model.MetadataRest;
import org.dspace.app.rest.model.MetadataValueRest;
@@ -56,6 +57,8 @@ import org.dspace.builder.GroupBuilder;
import org.dspace.builder.ResourcePolicyBuilder;
import org.dspace.content.Collection;
import org.dspace.content.Community;
import org.dspace.content.Item;
import org.dspace.content.MetadataSchemaEnum;
import org.dspace.content.factory.ContentServiceFactory;
import org.dspace.content.service.CollectionService;
import org.dspace.content.service.CommunityService;
@@ -558,6 +561,68 @@ public class GroupRestRepositoryIT extends AbstractControllerIntegrationTest {
));
}
@Test
public void patchReplaceMultipleDescriptionGroupName() throws Exception {
context.turnOffAuthorisationSystem();
List<String> groupDescription = List.of(
"FIRST",
"SECOND",
"THIRD"
);
Group group =
GroupBuilder.createGroup(context)
.build();
GroupService groupService = EPersonServiceFactory.getInstance().getGroupService();
groupService
.addMetadata(
context, group, MetadataSchemaEnum.DC.getName(), "description", null, Item.ANY, groupDescription
);
context.restoreAuthSystemState();
String token = getAuthToken(admin.getEmail(), password);
getClient(token)
.perform(get("/api/eperson/groups/" + group.getID()))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", groupDescription.get(0), 0),
MetadataMatcher.matchMetadata("dc.description", groupDescription.get(1), 1),
MetadataMatcher.matchMetadata("dc.description", groupDescription.get(2), 2)
)
)
);
List<Operation> ops = List.of(
new ReplaceOperation("/metadata/dc.description/0", groupDescription.get(2)),
new ReplaceOperation("/metadata/dc.description/1", groupDescription.get(0)),
new ReplaceOperation("/metadata/dc.description/2", groupDescription.get(1))
);
String requestBody = getPatchContent(ops);
getClient(token)
.perform(
patch("/api/eperson/groups/" + group.getID())
.content(requestBody)
.contentType(MediaType.APPLICATION_JSON_PATCH_JSON)
)
.andExpect(status().isOk());
getClient(token)
.perform(get("/api/eperson/groups/" + group.getID()))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", groupDescription.get(2), 0),
MetadataMatcher.matchMetadata("dc.description", groupDescription.get(0), 1),
MetadataMatcher.matchMetadata("dc.description", groupDescription.get(1), 2)
)
)
);
}
@Test
public void patchGroupWithParentUnprocessable() throws Exception {
context.turnOffAuthorisationSystem();

View File

@@ -11,8 +11,8 @@ import static com.jayway.jsonpath.JsonPath.read;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.CoreMatchers.startsWith;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.patch;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
@@ -23,8 +23,11 @@ import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.
import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.dspace.app.rest.matcher.MetadataMatcher;
@@ -50,6 +53,7 @@ import org.dspace.content.service.EntityTypeService;
import org.dspace.content.service.ItemService;
import org.dspace.content.service.RelationshipTypeService;
import org.dspace.content.service.WorkspaceItemService;
import org.dspace.services.ConfigurationService;
import org.hamcrest.Matcher;
import org.hamcrest.Matchers;
import org.junit.After;
@@ -63,6 +67,13 @@ import org.springframework.http.MediaType;
*/
public class PatchMetadataIT extends AbstractEntityIntegrationTest {
private static final String SECTIONS_TRADITIONALPAGEONE_DC_CONTRIBUTOR_AUTHOR =
"/sections/traditionalpageone/dc.contributor.author/%1$s";
private static final String getPath(Object element) {
return String.format(SECTIONS_TRADITIONALPAGEONE_DC_CONTRIBUTOR_AUTHOR, element);
}
@Autowired
private RelationshipTypeService relationshipTypeService;
@@ -75,6 +86,9 @@ public class PatchMetadataIT extends AbstractEntityIntegrationTest {
@Autowired
private WorkspaceItemService workspaceItemService;
@Autowired
private ConfigurationService configurationService;
private Collection collection;
private Collection collection2;
private WorkspaceItem publicationWorkspaceItem;
@@ -297,8 +311,6 @@ public class PatchMetadataIT extends AbstractEntityIntegrationTest {
.withEntityType("Publication")
.build();
String adminToken = getAuthToken(admin.getEmail(), password);
// Make sure we grab the latest instance of the Item from the database before adding a regular author
WorkspaceItem publication = workspaceItemService.find(context, publicationWorkspaceItem.getID());
itemService.addMetadata(context, publication.getItem(),
@@ -920,6 +932,41 @@ public class PatchMetadataIT extends AbstractEntityIntegrationTest {
replaceTraditionalPageOneAuthorTest(3, expectedOrder);
}
@Test
public void replaceMultipleTraditionalPageOnePlainTextAuthorTest() throws Exception {
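// Temporarily disable virtual metadata so plain-text authors can be replaced; the original setting is restored in the finally block below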
final boolean virtualMetadataEnabled =
configurationService.getBooleanProperty("item.enable-virtual-metadata", false);
configurationService.setProperty("item.enable-virtual-metadata", false);
try {
initPlainTextPublicationWorkspace();
Map<Integer, String> replacedAuthors =
Map.of(
0, authorsOriginalOrder.get(4),
1, authorsOriginalOrder.get(1),
2, authorsOriginalOrder.get(2),
3, authorsOriginalOrder.get(3),
4, authorsOriginalOrder.get(0)
);
List<String> expectedOrder =
List.of(
authorsOriginalOrder.get(4),
authorsOriginalOrder.get(1),
authorsOriginalOrder.get(2),
authorsOriginalOrder.get(3),
authorsOriginalOrder.get(0)
);
replaceTraditionalPageMultipleAuthorsTest(replacedAuthors, expectedOrder);
} catch (Exception e) {
throw e;
} finally {
configurationService.setProperty("item.enable-virtual-metadata", virtualMetadataEnabled);
}
}
/** /**
* This test will add an author (dc.contributor.author) within a workspace publication's "traditionalpageone" * This test will add an author (dc.contributor.author) within a workspace publication's "traditionalpageone"
@@ -1393,24 +1440,7 @@ public class PatchMetadataIT extends AbstractEntityIntegrationTest {
ops.add(moveOperation); ops.add(moveOperation);
String patchBody = getPatchContent(ops); String patchBody = getPatchContent(ops);
String token = getAuthToken(admin.getEmail(), password); assertReplacementOrder(expectedOrder, patchBody);
getClient(token).perform(patch("/api/submission/workspaceitems/" + publicationWorkspaceItem.getID())
.content(patchBody)
.contentType(javax.ws.rs.core.MediaType.APPLICATION_JSON_PATCH_JSON))
.andExpect(status().isOk());
String authorField = "dc.contributor.author";
getClient(token).perform(get("/api/submission/workspaceitems/" + publicationWorkspaceItem.getID()))
.andExpect(status().isOk())
.andExpect(content().contentType(contentType))
.andExpect(jsonPath("$.sections.traditionalpageone", Matchers.allOf(
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(0), 0)),
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(1), 1)),
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(2), 2)),
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(3), 3)),
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(4), 4))
)));
} }
/** /**
@@ -1450,33 +1480,66 @@ public class PatchMetadataIT extends AbstractEntityIntegrationTest {
* @param expectedOrder A list of author names sorted in the expected order * @param expectedOrder A list of author names sorted in the expected order
*/ */
private void replaceTraditionalPageOneAuthorTest(int path, List<String> expectedOrder) throws Exception { private void replaceTraditionalPageOneAuthorTest(int path, List<String> expectedOrder) throws Exception {
List<Operation> ops = new ArrayList<Operation>(); String patchBody =
MetadataValueRest value = new MetadataValueRest(); getPatchContent(
value.setValue(replacedAuthor); List.of(
this.mapToReplaceOperation(path, replacedAuthor)
)
);
assertReplacementOrder(expectedOrder, patchBody);
}
private void replaceTraditionalPageMultipleAuthorsTest(
Map<Integer, String> values, List<String> expectedOrder
) throws Exception {
List<Operation> ops =
values
.entrySet()
.stream()
.sorted(Comparator.comparing(Map.Entry::getKey))
.map(entry -> mapToReplaceOperation(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
ReplaceOperation replaceOperation = new ReplaceOperation("/sections/traditionalpageone/dc.contributor.author/"
+ path, value);
ops.add(replaceOperation);
String patchBody = getPatchContent(ops); String patchBody = getPatchContent(ops);
assertReplacementOrder(expectedOrder, patchBody);
}
private ReplaceOperation mapToReplaceOperation(int path, String author) {
return new ReplaceOperation(getPath(path), new MetadataValueRest(author));
}
private void assertReplacementOrder(List<String> expectedOrder, String patchBody) throws Exception, SQLException {
String token = getAuthToken(admin.getEmail(), password); String token = getAuthToken(admin.getEmail(), password);
getClient(token).perform(patch("/api/submission/workspaceitems/" + publicationWorkspaceItem.getID()) getClient(token)
.content(patchBody) .perform(
.contentType(javax.ws.rs.core.MediaType.APPLICATION_JSON_PATCH_JSON)) patch("/api/submission/workspaceitems/" + publicationWorkspaceItem.getID())
.andExpect(status().isOk()); .content(patchBody)
.contentType(javax.ws.rs.core.MediaType.APPLICATION_JSON_PATCH_JSON)
)
.andExpect(status().isOk());
String authorField = "dc.contributor.author"; String authorField = "dc.contributor.author";
getClient(token).perform(get("/api/submission/workspaceitems/" + publicationWorkspaceItem.getID())) getClient(token)
.andExpect(status().isOk()) .perform(get("/api/submission/workspaceitems/" + publicationWorkspaceItem.getID()))
.andExpect(content().contentType(contentType)) .andExpect(status().isOk())
.andExpect(jsonPath("$.sections.traditionalpageone", Matchers.allOf( .andExpect(
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(0), 0)), content().contentType(contentType)
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(1), 1)), )
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(2), 2)), .andExpect(
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(3), 3)), jsonPath(
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(4), 4)) "$.sections.traditionalpageone",
))); Matchers.allOf(
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(0), 0)),
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(1), 1)),
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(2), 2)),
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(3), 3)),
Matchers.is(MetadataMatcher.matchMetadata(authorField, expectedOrder.get(4), 4))
)
)
);
} }
/** /**
@@ -1490,8 +1553,7 @@ public class PatchMetadataIT extends AbstractEntityIntegrationTest {
List<Operation> ops = new ArrayList<Operation>(); List<Operation> ops = new ArrayList<Operation>();
MetadataValueRest value = new MetadataValueRest(); MetadataValueRest value = new MetadataValueRest();
value.setValue(addedAuthor); value.setValue(addedAuthor);
AddOperation addOperation = new AddOperation("/sections/traditionalpageone/dc.contributor.author/" + path, AddOperation addOperation = new AddOperation(getPath(path), value);
value);
ops.add(addOperation); ops.add(addOperation);
String patchBody = getPatchContent(ops); String patchBody = getPatchContent(ops);
@@ -1525,8 +1587,7 @@ public class PatchMetadataIT extends AbstractEntityIntegrationTest {
*/ */
private void removeTraditionalPageOneAuthorTest(int path, List<String> expectedOrder) throws Exception { private void removeTraditionalPageOneAuthorTest(int path, List<String> expectedOrder) throws Exception {
List<Operation> ops = new ArrayList<Operation>(); List<Operation> ops = new ArrayList<Operation>();
RemoveOperation removeOperation = new RemoveOperation("/sections/traditionalpageone/dc.contributor.author/" RemoveOperation removeOperation = new RemoveOperation(getPath(path));
+ path);
ops.add(removeOperation); ops.add(removeOperation);
String patchBody = getPatchContent(ops); String patchBody = getPatchContent(ops);
@@ -1600,8 +1661,10 @@ public class PatchMetadataIT extends AbstractEntityIntegrationTest {
* @param path The "path" index to use for the Move operation * @param path The "path" index to use for the Move operation
*/ */
private MoveOperation getTraditionalPageOneMoveAuthorOperation(int from, int path) { private MoveOperation getTraditionalPageOneMoveAuthorOperation(int from, int path) {
return new MoveOperation("/sections/traditionalpageone/dc.contributor.author/" + path, return new MoveOperation(
"/sections/traditionalpageone/dc.contributor.author/" + from); getPath(path),
getPath(from)
);
} }
/** /**
View File
@@ -9,22 +9,34 @@ package org.dspace.app.rest;
import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.is;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.patch;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
import java.util.List;
import java.util.UUID; import java.util.UUID;
import org.dspace.app.rest.matcher.MetadataMatcher;
import org.dspace.app.rest.matcher.SiteMatcher; import org.dspace.app.rest.matcher.SiteMatcher;
import org.dspace.app.rest.model.patch.Operation;
import org.dspace.app.rest.model.patch.ReplaceOperation;
import org.dspace.app.rest.test.AbstractControllerIntegrationTest; import org.dspace.app.rest.test.AbstractControllerIntegrationTest;
import org.dspace.app.rest.test.MetadataPatchSuite; import org.dspace.app.rest.test.MetadataPatchSuite;
import org.dspace.builder.SiteBuilder; import org.dspace.builder.SiteBuilder;
import org.dspace.content.Item;
import org.dspace.content.MetadataSchemaEnum;
import org.dspace.content.Site; import org.dspace.content.Site;
import org.dspace.content.service.SiteService;
import org.dspace.eperson.EPerson; import org.dspace.eperson.EPerson;
import org.hamcrest.Matchers; import org.hamcrest.Matchers;
import org.junit.Test; import org.junit.Test;
import org.springframework.beans.factory.annotation.Autowired;
public class SiteRestRepositoryIT extends AbstractControllerIntegrationTest { public class SiteRestRepositoryIT extends AbstractControllerIntegrationTest {
@Autowired
private SiteService siteService;
@Test @Test
public void findAll() throws Exception { public void findAll() throws Exception {
@@ -77,6 +89,75 @@ public class SiteRestRepositoryIT extends AbstractControllerIntegrationTest {
runPatchMetadataTests(eperson, 403); runPatchMetadataTests(eperson, 403);
} }
@Test
public void patchReplaceMultipleDescriptionSite() throws Exception {
context.turnOffAuthorisationSystem();
List<String> siteDescriptions = List.of(
"FIRST",
"SECOND",
"THIRD"
);
Site site = SiteBuilder.createSite(context).build();
this.siteService
.addMetadata(
context, site,
MetadataSchemaEnum.DC.getName(), "description", null,
Item.ANY, siteDescriptions
);
context.restoreAuthSystemState();
String token = getAuthToken(admin.getEmail(), password);
getClient(token)
.perform(get("/api/core/sites/" + site.getID()))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", siteDescriptions.get(0), 0),
MetadataMatcher.matchMetadata("dc.description", siteDescriptions.get(1), 1),
MetadataMatcher.matchMetadata("dc.description", siteDescriptions.get(2), 2)
)
)
);
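// Rotate the three dc.description values via JSON Patch replace operations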
List<Operation> ops = List.of(
new ReplaceOperation("/metadata/dc.description/0", siteDescriptions.get(2)),
new ReplaceOperation("/metadata/dc.description/1", siteDescriptions.get(0)),
new ReplaceOperation("/metadata/dc.description/2", siteDescriptions.get(1))
);
String requestBody = getPatchContent(ops);
getClient(token)
.perform(patch("/api/core/sites/" + site.getID())
.content(requestBody)
.contentType(javax.ws.rs.core.MediaType.APPLICATION_JSON_PATCH_JSON))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", siteDescriptions.get(2), 0),
MetadataMatcher.matchMetadata("dc.description", siteDescriptions.get(0), 1),
MetadataMatcher.matchMetadata("dc.description", siteDescriptions.get(1), 2)
)
)
);
getClient(token)
.perform(get("/api/core/sites/" + site.getID()))
.andExpect(status().isOk())
.andExpect(
jsonPath("$.metadata",
Matchers.allOf(
MetadataMatcher.matchMetadata("dc.description", siteDescriptions.get(2), 0),
MetadataMatcher.matchMetadata("dc.description", siteDescriptions.get(0), 1),
MetadataMatcher.matchMetadata("dc.description", siteDescriptions.get(1), 2)
)
)
);
}
private void runPatchMetadataTests(EPerson asUser, int expectedStatus) throws Exception { private void runPatchMetadataTests(EPerson asUser, int expectedStatus) throws Exception {
context.turnOffAuthorisationSystem(); context.turnOffAuthorisationSystem();
Site site = SiteBuilder.createSite(context).build(); Site site = SiteBuilder.createSite(context).build();

View File

@@ -843,7 +843,7 @@ plugin.single.org.dspace.embargo.EmbargoSetter = org.dspace.embargo.DefaultEmbar
plugin.single.org.dspace.embargo.EmbargoLifter = org.dspace.embargo.DefaultEmbargoLifter plugin.single.org.dspace.embargo.EmbargoLifter = org.dspace.embargo.DefaultEmbargoLifter
# values for the forever embargo date threshold # values for the forever embargo date threshold
# This threshold date is used in the default access status helper to dermine if an item is # This threshold date is used in the default access status helper to determine if an item is
# restricted or embargoed based on the start date of the primary (or first) file policies. # restricted or embargoed based on the start date of the primary (or first) file policies.
# In this case, if the policy start date is inferior to the threshold date, the status will # In this case, if the policy start date is inferior to the threshold date, the status will
# be embargo, else it will be restricted. # be embargo, else it will be restricted.
@@ -880,7 +880,7 @@ org.dspace.app.itemexport.life.span.hours = 48
# The maximum size in Megabytes the export should be. This is enforced before the # The maximum size in Megabytes the export should be. This is enforced before the
# compression. Each bitstream's size in each item being exported is added up, if their # compression. Each bitstream's size in each item being exported is added up, if their
# cummulative sizes are more than this entry the export is not kicked off # cumulative sizes are more than this entry the export is not kicked off
org.dspace.app.itemexport.max.size = 200 org.dspace.app.itemexport.max.size = 200
### Batch Item import settings ### ### Batch Item import settings ###
View File
@@ -56,10 +56,12 @@
<constructor-arg value="dc.description.abstract"/> <constructor-arg value="dc.description.abstract"/>
</bean> </bean>
<bean id="arxivPublishedContrib" class="org.dspace.importer.external.metadatamapping.contributor.SimpleXpathMetadatumContributor"> <bean id="arxivPublishedContrib" class="org.dspace.importer.external.metadatamapping.contributor.SimpleXpathDateFormatMetadataContributor">
<property name="field" ref="arxiv.published"/> <property name="field" ref="arxiv.published"/>
<property name="query" value="ns:published"/> <property name="query" value="ns:published"/>
<property name="prefixToNamespaceMapping" ref="arxivBasePrefixToNamespaceMapping"/> <property name="prefixToNamespaceMapping" ref="arxivBasePrefixToNamespaceMapping"/>
<property name="dateFormatFrom" value="yyyy-MM-dd'T'HH:mm:ss'Z'"/>
<property name="dateFormatTo" value="yyyy-MM-dd"></property>
</bean> </bean>
<bean id="arxiv.published" class="org.dspace.importer.external.metadatamapping.MetadataFieldConfig"> <bean id="arxiv.published" class="org.dspace.importer.external.metadatamapping.MetadataFieldConfig">
<constructor-arg value="dc.date.issued"/> <constructor-arg value="dc.date.issued"/>
View File
@@ -17,11 +17,9 @@
The VersionedHandleIdentifierProvider creates a new versioned The VersionedHandleIdentifierProvider creates a new versioned
handle for every new version. handle for every new version.
--> -->
<!--
<bean id="org.dspace.identifier.HandleIdentifierProvider" class="org.dspace.identifier.VersionedHandleIdentifierProvider" scope="singleton"> <bean id="org.dspace.identifier.HandleIdentifierProvider" class="org.dspace.identifier.VersionedHandleIdentifierProvider" scope="singleton">
<property name="configurationService" ref="org.dspace.services.ConfigurationService"/> <property name="configurationService" ref="org.dspace.services.ConfigurationService"/>
</bean> </bean>
-->
<!-- <!--
The VersionedHandleIdentifierProviderWithCanonicalHandles The VersionedHandleIdentifierProviderWithCanonicalHandles
preserves the first handle for every new version. Whenever preserves the first handle for every new version. Whenever
View File
@@ -38,6 +38,7 @@
<value>yyyy-MMM-dd</value> <value>yyyy-MMM-dd</value>
<value>yyyy-MMM</value> <value>yyyy-MMM</value>
<value>yyyy-MM-dd</value> <value>yyyy-MM-dd</value>
<value>yyyy</value>
</list> </list>
</property> </property>
<property name="year" ref="yearContrib"/> <property name="year" ref="yearContrib"/>
View File
@@ -1,4 +1,4 @@
# Docker Compose Resources # Docker Compose files for DSpace Backend
*** ***
:warning: **THESE IMAGES ARE NOT PRODUCTION READY** The below Docker Compose images/resources were built for development/testing only. Therefore, they may not be fully secured or up-to-date, and should not be used in production. :warning: **THESE IMAGES ARE NOT PRODUCTION READY** The below Docker Compose images/resources were built for development/testing only. Therefore, they may not be fully secured or up-to-date, and should not be used in production.
@@ -6,27 +6,51 @@
If you wish to run DSpace on Docker in production, we recommend building your own Docker images. You are welcome to borrow ideas/concepts from the below images in doing so. But, the below images should not be used "as is" in any production scenario. If you wish to run DSpace on Docker in production, we recommend building your own Docker images. You are welcome to borrow ideas/concepts from the below images in doing so. But, the below images should not be used "as is" in any production scenario.
*** ***
## root directory Resources
- docker-compose.yml
- Docker compose file to orchestrate DSpace 7 REST components
- docker-compose-cli
- Docker compose file to run DSpace CLI tasks within a running DSpace instance in Docker
## dspace/src/main/docker-compose resources ## Overview
The scripts in this directory can be used to start the DSpace REST API (backend) in Docker.
Optionally, the DSpace User Interface (frontend) may also be started in Docker.
For additional options/settings in starting the User Interface (frontend) in Docker, see the Docker Compose
documentation for the frontend: https://github.com/DSpace/dspace-angular/blob/main/docker/README.md
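For orientation, the two most common invocations (both repeated with more context in the sections below) are:
```
# Start only the DSpace REST API (backend)
docker-compose -p d7 up -d

# Start the backend plus the published User Interface (frontend) image
docker-compose -p d7 -f docker-compose.yml -f dspace/src/main/docker-compose/docker-compose-angular.yml up -d
```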
## Primary Docker Compose Scripts (in root directory)
The root directory of this project contains the primary Dockerfiles & Docker Compose scripts
which are used to start the backend.
- docker-compose.yml
- Docker compose file to orchestrate DSpace REST API (backend) components.
- Uses the `Dockerfile` in the same directory.
- docker-compose-cli.yml
- Docker compose file to run DSpace CLI (Command Line Interface) tasks within a running DSpace instance in Docker. See instructions below.
- Uses the `Dockerfile.cli` in the same directory.
Documentation for all Dockerfiles used by these compose scripts can be found in the ["docker" folder README](../docker/README.md)
## Additional Docker Compose tools (in ./dspace/src/main/docker-compose)
- cli.assetstore.yml - cli.assetstore.yml
- Docker compose file that will download and install a default assetstore. - Docker compose file that will download and install a default assetstore.
- The default assetstore is the configurable entities test dataset. Useful for [testing/demos of Entities](#Ingest Option 2 Ingest Entities Test Data).
- cli.ingest.yml - cli.ingest.yml
- Docker compose file that will run an AIP ingest into DSpace 7. - Docker compose file that will run an AIP ingest into DSpace 7. Useful for testing/demos with basic Items.
- db.entities.yml - db.entities.yml
  - Docker compose file that pre-populates a database instance using a SQL dump. The default dataset is the configurable entities test dataset.   - Docker compose file that pre-populates a database instance using a downloaded SQL dump.
- local.cfg - The default dataset is the configurable entities test dataset. Useful for [testing/demos of Entities](#Ingest Option 2 Ingest Entities Test Data).
- Sets the environment used across containers run with docker-compose - db.restore.yml
  - Docker compose file that pre-populates a database instance using a *local* SQL dump (hardcoded to `./pgdump.sql`)
- Useful for restoring data from a local backup, or [Upgrading PostgreSQL in Docker](#Upgrading PostgreSQL in Docker)
- docker-compose-angular.yml - docker-compose-angular.yml
- Docker compose file that will start a published DSpace angular container that interacts with the branch. - Docker compose file that will start a published DSpace User Interface container that interacts with the branch.
- docker-compose-shibboleth.yml - docker-compose-shibboleth.yml
- Docker compose file that will start a *test/demo* Shibboleth SP container (in Apache) that proxies requests to the DSpace container - Docker compose file that will start a *test/demo* Shibboleth SP container (in Apache) that proxies requests to the DSpace container
- ONLY useful for testing/development. NOT production ready. - ONLY useful for testing/development. NOT production ready.
- docker-compose-iiif.yml
- Docker compose file that will start a *test/demo* Cantaloupe image server container required for enabling IIIF support.
- ONLY useful for testing/development. NOT production ready.
Documentation for all Dockerfiles used by these compose scripts can be found in the ["docker" folder README](../docker/README.md)
## To refresh / pull DSpace images from Dockerhub ## To refresh / pull DSpace images from Dockerhub
``` ```
@@ -55,6 +79,12 @@ docker-compose -p d7 up -d
docker-compose -p d7 -f docker-compose.yml -f dspace/src/main/docker-compose/docker-compose-angular.yml up -d docker-compose -p d7 -f docker-compose.yml -f dspace/src/main/docker-compose/docker-compose-angular.yml up -d
``` ```
## Run DSpace REST and DSpace Angular from local branches
*Allows you to run the backend from the "DSpace/DSpace" codebase while also running the frontend from the "DSpace/dspace-angular" codebase.*
See documentation in [DSpace User Interface Docker instructions](https://github.com/DSpace/dspace-angular/blob/main/docker/README.md#run-dspace-rest-and-dspace-angular-from-local-branches).
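In short, the two codebases are started in two steps that share the same Docker network (a brief sketch retained from an earlier version of this section; the linked instructions above are authoritative):
```
# From your DSpace/DSpace (backend) checkout
docker-compose -p d7 up -d

# From your DSpace/dspace-angular (frontend) checkout, building as needed
docker-compose -p d7 -f docker/docker-compose.yml up -d
```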
## Run DSpace 7 REST with an IIIF Image Server from your branch ## Run DSpace 7 REST with an IIIF Image Server from your branch
*Only useful for testing IIIF support in a development environment* *Only useful for testing IIIF support in a development environment*
@@ -67,7 +97,6 @@ docker-compose -p d7 -f docker-compose.yml -f dspace/src/main/docker-compose/doc
``` ```
## Run DSpace 7 REST and Shibboleth SP (in Apache) from your branch ## Run DSpace 7 REST and Shibboleth SP (in Apache) from your branch
*Only useful for testing Shibboleth in a development environment* *Only useful for testing Shibboleth in a development environment*
This Shibboleth container uses https://samltest.id/ as an IdP (see `../docker/dspace-shibboleth/`). This Shibboleth container uses https://samltest.id/ as an IdP (see `../docker/dspace-shibboleth/`).
@@ -143,21 +172,11 @@ The remainder of these instructions assume you are using ngrok (though other pro
DSPACE_HOSTNAME=[subdomain].ngrok.io docker-compose -p d7 -f docker-compose.yml -f dspace/src/main/docker-compose/docker-compose-angular.yml -f dspace/src/main/docker-compose/docker-compose-shibboleth.yml up -d DSPACE_HOSTNAME=[subdomain].ngrok.io docker-compose -p d7 -f docker-compose.yml -f dspace/src/main/docker-compose/docker-compose-angular.yml -f dspace/src/main/docker-compose/docker-compose-shibboleth.yml up -d
``` ```
## Run DSpace 7 REST and Angular from local branches ## Sample Test Data
_The system will be started in 2 steps. Each step shares the same docker network._ ### Ingesting test content from AIP files
From DSpace/DSpace *Allows you to ingest a set of AIPs into your DSpace instance for testing/demo purposes.* These AIPs represent basic Communities, Collections and Items.
```
docker-compose -p d7 up -d
```
From DSpace/DSpace-angular (build as needed)
```
docker-compose -p d7 -f docker/docker-compose.yml up -d
```
## Ingest Option 1: Ingesting test content from AIP files into a running DSpace 7 instance
Prerequisites Prerequisites
- Start DSpace 7 using one of the options listed above - Start DSpace 7 using one of the options listed above
@@ -173,8 +192,14 @@ Download a Zip file of AIP content and ingest test data
docker-compose -p d7 -f docker-compose-cli.yml -f dspace/src/main/docker-compose/cli.ingest.yml run --rm dspace-cli docker-compose -p d7 -f docker-compose-cli.yml -f dspace/src/main/docker-compose/cli.ingest.yml run --rm dspace-cli
``` ```
## Ingest Option 2: Ingest Entities Test Data ### Ingest Entities Test Data
_Remove your d7 volumes if you already ingested content into your docker volumes_
*Allows you to load Configurable Entities test data for testing/demo purposes.*
Prerequisites
- Start DSpace 7 using one of the options listed above
- Build the DSpace CLI image if needed. See the instructions above.
- _Remove your d7 volumes if you already ingested content into your docker volumes_
Start DSpace REST with a postgres database dump downloaded from the internet. Start DSpace REST with a postgres database dump downloaded from the internet.
``` ```
@@ -212,3 +237,85 @@ Similarly, you can see the value of any DSpace configuration (in local.cfg or ds
# Output the value of `dspace.ui.url` from running Docker instance # Output the value of `dspace.ui.url` from running Docker instance
docker-compose -p d7 -f docker-compose-cli.yml run --rm dspace-cli dsprop -p dspace.ui.url docker-compose -p d7 -f docker-compose-cli.yml run --rm dspace-cli dsprop -p dspace.ui.url
``` ```
NOTE: It is also possible to run CLI scripts directly on the "dspace" container (where the backend runs)
This can be useful if you want to pass environment variables which override DSpace configs.
```
# Run the "./dspace database clean" command from the "dspace" container
# Before doing so, it sets "db.cleanDisabled=false".
# WARNING: This will delete all your data. It's just an example of how to do so.
docker-compose -p d7 exec -e "db__P__cleanDisabled=false" dspace /dspace/bin/dspace database clean
```
## Upgrading PostgreSQL in Docker
Occasionally, we update our `dspace-postgres-*` images to use a new version of PostgreSQL.
Simply using the new image will likely throw errors as the pgdata (postgres data) directory is incompatible
with the new version of PostgreSQL. These errors look like:
```
FATAL: database files are incompatible with server
DETAIL: The data directory was initialized by PostgreSQL version 11, which is not compatible with this version 13.10
```
Here's how to fix those issues by migrating your old Postgres data to the new version of Postgres:
1. First, you must start up the older PostgreSQL image (to dump your existing data to a `*.sql` file)
```
# This command assumes you are using the process described above to start all your containers
docker-compose -p d7 up -d
```
* If you've already accidentally updated to the new PostgreSQL image, you have a few options:
* Pull down an older version of the image from Dockerhub (using a tag)
* Or, temporarily rebuild your local image with the old version of Postgres. For example:
```
# This command will rebuild using PostgreSQL v11 & tag it locally as "dspace-7_x"
docker build --build-arg POSTGRES_VERSION=11 -t dspace/dspace-postgres-pgcrypto:dspace-7_x ./dspace/src/main/docker/dspace-postgres-pgcrypto/
# Then restart container with that image
docker-compose -p d7 up -d
```
2. Dump your entire "dspace" database out of the old "dspacedb" container to a local file named `pgdump.sql`
```
# NOTE: WE HIGHLY RECOMMEND LOGGING INTO THE CONTAINER and doing the pg_dump within the container.
# If you attempt to run pg_dump from your local machine via docker "exec" (or similar), sometimes
# UTF-8 characters can be corrupted in the export file. This may result in data loss.
# First login to the "dspacedb" container
docker exec -it dspacedb /bin/bash
# Dump the "dspace" database to a file named "/tmp/pgdump.sql" within the container
pg_dump -U dspace dspace > /tmp/pgdump.sql
# Exit the container
exit
# Download (copy) that /tmp/pgdump.sql backup file from container to your local machine
docker cp dspacedb:/tmp/pgdump.sql .
```
3. Now, stop all existing containers. This shuts down the old version of PostgreSQL
```
# This command assumes you are using the process described above to start/stop all your containers
docker-compose -p d7 down
```
4. Delete the `pgdata` volume. WARNING: This deletes all your old PostgreSQL data. Make sure you have that `pgdump.sql` file FIRST!
```
# Assumes you are using `-p d7` which prefixes all volumes with `d7_`
docker volume rm d7_pgdata
```
5. Now, pull down the latest PostgreSQL image with the NEW version of PostgreSQL.
```
docker-compose -f docker-compose.yml -f docker-compose-cli.yml pull
```
6. Start everything up using our `db.restore.yml` script. This script will recreate the database
using the local `./pgdump.sql` file. IMPORTANT: If you renamed that "pgdump.sql" file or stored it elsewhere,
then you MUST change the name/directory in the `db.restore.yml` script.
```
# Restore database from "./pgdump.sql" (this path is hardcoded in db.restore.yml)
docker-compose -p d7 -f docker-compose.yml -f dspace/src/main/docker-compose/db.restore.yml up -d
```
7. Finally, reindex all database contents into Solr (just to be sure Solr indexes are current).
```
# Run "./dspace index-discovery -b" using our CLI image
docker-compose -p d7 -f docker-compose-cli.yml run --rm dspace-cli index-discovery -b
```
At this point, all of your old database data should be migrated to the new version of Postgres,
and DSpace should again be running at http://localhost:8080/server/
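One quick way to sanity-check the migration (assuming the default project name, ports, and REST path used throughout this guide):
```
# Confirm the new PostgreSQL container came up cleanly
docker-compose -p d7 logs dspacedb

# Confirm the REST API is responding
curl http://localhost:8080/server/api
```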

View File
services: services:
dspacedb: dspacedb:
image: dspace/dspace-postgres-pgcrypto:loadsql image: dspace/dspace-postgres-pgcrypto:dspace-7_x-loadsql
environment: environment:
# This SQL is available from https://github.com/DSpace-Labs/AIP-Files/releases/tag/demo-entities-data # This SQL is available from https://github.com/DSpace-Labs/AIP-Files/releases/tag/demo-entities-data
- LOADSQL=https://github.com/DSpace-Labs/AIP-Files/releases/download/demo-entities-data/dspace7-entities-data.sql - LOADSQL=https://github.com/DSpace-Labs/AIP-Files/releases/download/demo-entities-data/dspace7-entities-data.sql
View File
@@ -0,0 +1,26 @@
#
# The contents of this file are subject to the license and copyright
# detailed in the LICENSE and NOTICE files at the root of the source
# tree and available online at
#
# http://www.dspace.org/license/
#
version: "3.7"
#
# Overrides the default "dspacedb" container behavior to load a local SQL file into PostgreSQL.
#
# This can be used to restore a "dspacedb" container from a pg_dump, or during upgrade to a new version of PostgreSQL.
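#
# Example usage (from the project root, combined with the primary compose file as described in this directory's README):
#   docker-compose -p d7 -f docker-compose.yml -f dspace/src/main/docker-compose/db.restore.yml up -d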
services:
dspacedb:
image: dspace/dspace-postgres-pgcrypto:dspace-7_x-loadsql
environment:
# Location where the dump SQL file will be available on the running container
- LOCALSQL=/tmp/pgdump.sql
volumes:
# Volume which shares a local SQL file at "./pgdump.sql" to the running container
# IF YOUR LOCAL FILE HAS A DIFFERENT NAME (or is in a different location), then change the "./pgdump.sql"
# portion of this line.
- ./pgdump.sql:/tmp/pgdump.sql
View File
@@ -1,4 +1,4 @@
# Docker images supporting DSpace # Docker images supporting DSpace Backend
*** ***
:warning: **THESE IMAGES ARE NOT PRODUCTION READY** The below Docker Compose images/resources were built for development/testing only. Therefore, they may not be fully secured or up-to-date, and should not be used in production. :warning: **THESE IMAGES ARE NOT PRODUCTION READY** The below Docker Compose images/resources were built for development/testing only. Therefore, they may not be fully secured or up-to-date, and should not be used in production.
@@ -6,9 +6,15 @@
If you wish to run DSpace on Docker in production, we recommend building your own Docker images. You are welcome to borrow ideas/concepts from the below images in doing so. But, the below images should not be used "as is" in any production scenario. If you wish to run DSpace on Docker in production, we recommend building your own Docker images. You are welcome to borrow ideas/concepts from the below images in doing so. But, the below images should not be used "as is" in any production scenario.
*** ***
## Dockerfile.dependencies ## Overview
The Dockerfiles in this directory (and subdirectories) are used by our [Docker Compose scripts](../docker-compose/README.md).
## Dockerfile.dependencies (in root folder)
This Dockerfile is used to pre-cache Maven dependency downloads that will be used in subsequent DSpace docker builds. This Dockerfile is used to pre-cache Maven dependency downloads that will be used in subsequent DSpace docker builds.
Caching these Maven dependencies provides a speed increase to all later builds by ensuring the dependencies
are only downloaded once.
``` ```
docker build -t dspace/dspace-dependencies:dspace-7_x -f Dockerfile.dependencies . docker build -t dspace/dspace-dependencies:dspace-7_x -f Dockerfile.dependencies .
``` ```
@@ -22,12 +28,13 @@ Admins to our DockerHub repo can manually publish with the following command.
docker push dspace/dspace-dependencies:dspace-7_x docker push dspace/dspace-dependencies:dspace-7_x
``` ```
## Dockerfile.test ## Dockerfile.test (in root folder)
This Dockerfile builds a DSpace 7 Tomcat image (for testing/development). This Dockerfile builds a DSpace 7 backend image (for testing/development).
This image deploys two DSpace webapps: This image deploys two DSpace webapps to Tomcat running in Docker:
1. The DSpace 7 REST API (at `http://localhost:8080/server`) 1. The DSpace 7 REST API (at `http://localhost:8080/server`)
2. The legacy (v6) REST API (at `http://localhost:8080//rest`), deployed without requiring HTTPS access. 2. The legacy (v6) REST API (at `http://localhost:8080/rest`), deployed without requiring HTTPS access.
This image also sets up debugging in Tomcat for development.
``` ```
docker build -t dspace/dspace:dspace-7_x-test -f Dockerfile.test . docker build -t dspace/dspace:dspace-7_x-test -f Dockerfile.test .
@@ -42,12 +49,12 @@ Admins to our DockerHub repo can manually publish with the following command.
docker push dspace/dspace:dspace-7_x-test docker push dspace/dspace:dspace-7_x-test
``` ```
## Dockerfile ## Dockerfile (in root folder)
This Dockerfile builds a DSpace 7 tomcat image. This Dockerfile builds a DSpace 7 backend image.
This image deploys two DSpace webapps: This image deploys one DSpace webapp to Tomcat running in Docker:
1. The DSpace 7 REST API (at `http://localhost:8080/server`) 1. The DSpace 7 REST API (at `http://localhost:8080/server`)
2. The legacy (v6) REST API (at `http://localhost:8080//rest`), deployed *requiring* HTTPS access.
``` ```
docker build -t dspace/dspace:dspace-7_x -f Dockerfile . docker build -t dspace/dspace:dspace-7_x -f Dockerfile .
``` ```
@@ -61,9 +68,9 @@ Admins to our DockerHub repo can publish with the following command.
docker push dspace/dspace:dspace-7_x docker push dspace/dspace:dspace-7_x
``` ```
## Dockefile.cli ## Dockerfile.cli (in root folder)
This Dockerfile builds a DSpace 7 CLI image, which can be used to run commandline tools via Docker. This Dockerfile builds a DSpace 7 CLI (command line interface) image, which can be used to run DSpace's commandline tools via Docker.
``` ```
docker build -t dspace/dspace-cli:dspace-7_x -f Dockerfile.cli . docker build -t dspace/dspace-cli:dspace-7_x -f Dockerfile.cli .
``` ```
@@ -77,46 +84,60 @@ Admins to our DockerHub repo can publish with the following command.
docker push dspace/dspace-cli:dspace-7_x docker push dspace/dspace-cli:dspace-7_x
``` ```
## dspace/src/main/docker/dspace-postgres-pgcrypto/Dockerfile ## ./dspace-postgres-pgcrypto/Dockerfile
This is a PostgreSQL Docker image containing the `pgcrypto` extension required by DSpace 6+. This is a PostgreSQL Docker image containing the `pgcrypto` extension required by DSpace 6+.
This image is built *automatically* after each commit is made to the `main` branch.
How to build manually:
``` ```
cd dspace/src/main/docker/dspace-postgres-pgcrypto cd dspace/src/main/docker/dspace-postgres-pgcrypto
docker build -t dspace/dspace-postgres-pgcrypto . docker build -t dspace/dspace-postgres-pgcrypto:dspace-7_x .
``` ```
**This image is built manually.** It should be rebuilt as needed. It is also possible to change the version of PostgreSQL or the PostgreSQL user's password during the build:
```
cd dspace/src/main/docker/dspace-postgres-pgcrypto
docker build -t dspace/dspace-postgres-pgcrypto:dspace-7_x --build-arg POSTGRES_VERSION=11 --build-arg POSTGRES_PASSWORD=mypass .
```
A copy of this file exists in the DSpace 6 branch. A specialized version of this file exists for DSpace 4 in DSpace-Docker-Images. A copy of this file exists in the DSpace 6 branch. A specialized version of this file exists for DSpace 4 in DSpace-Docker-Images.
Admins to our DockerHub repo can publish with the following command. Admins to our DockerHub repo can (manually) publish with the following command.
``` ```
docker push dspace/dspace-postgres-pgcrypto docker push dspace/dspace-postgres-pgcrypto:dspace-7_x
``` ```
## dspace/src/main/docker/dspace-postgres-pgcrypto-curl/Dockerfile ## ./dspace-postgres-pgcrypto-curl/Dockerfile
This is a PostgreSQL Docker image containing the `pgcrypto` extension required by DSpace 6+. This is a PostgreSQL Docker image containing the `pgcrypto` extension required by DSpace 6+.
This image also contains `curl`. The image is pre-configured to load a Postgres database dump on initialization. This image also contains `curl`. The image is pre-configured to load a Postgres database dump on initialization.
This image is built *automatically* after each commit is made to the `main` branch.
How to build manually:
``` ```
cd dspace/src/main/docker/dspace-postgres-pgcrypto-curl cd dspace/src/main/docker/dspace-postgres-pgcrypto-curl
docker build -t dspace/dspace-postgres-pgcrypto:loadsql . docker build -t dspace/dspace-postgres-pgcrypto:dspace-7_x-loadsql .
``` ```
**This image is built manually.** It should be rebuilt as needed. Similar to `dspace-postgres-pgcrypto` above, you can also modify the version of PostgreSQL or the PostgreSQL user's password.
See examples above.
A copy of this file exists in the DSpace 6 branch. A copy of this file exists in the DSpace 6 branch.
Admins to our DockerHub repo can publish with the following command. Admins to our DockerHub repo can (manually) publish with the following command.
``` ```
docker push dspace/dspace-postgres-pgcrypto:loadsql docker push dspace/dspace-postgres-pgcrypto:dspace-7_x-loadsql
``` ```
## dspace/src/main/docker/dspace-shibboleth/Dockerfile ## ./dspace-shibboleth/Dockerfile
This is a test / demo image which provides an Apache HTTPD proxy (in front of Tomcat) This is a test / demo image which provides an Apache HTTPD proxy (in front of Tomcat)
with mod_shib & Shibboleth installed. It is primarily for usage for with `mod_shib` & Shibboleth installed based on the
testing DSpace's Shibboleth integration. It uses https://samltest.id/ as the Shibboleth IDP [DSpace Shibboleth configuration instructions](https://wiki.lyrasis.org/display/DSDOC7x/Authentication+Plugins#AuthenticationPlugins-ShibbolethAuthentication).
It is primarily intended for testing DSpace's Shibboleth integration.
It uses https://samltest.id/ as the Shibboleth IdP.
**This image is built manually.** It should be rebuilt as needed. **This image is built manually.** It should be rebuilt as needed.
@@ -130,10 +151,28 @@ docker run -i -t -d -p 80:80 -p 443:443 dspace/dspace-shibboleth
This image can also be rebuilt using the `../docker-compose/docker-compose-shibboleth.yml` script. This image can also be rebuilt using the `../docker-compose/docker-compose-shibboleth.yml` script.
## ./dspace-solr/Dockerfile
## test/ folder This Dockerfile builds a Solr image with DSpace Solr configsets included. It
can be pulled / built following the [docker compose resources](../docker-compose/README.md)
documentation. Or, to just build and/or run Solr:
```bash
docker-compose build dspacesolr
docker-compose -p d7 up -d dspacesolr
```
If you're making iterative changes to the DSpace Solr configsets you'll need to rebuild /
restart the `dspacesolr` container for the changes to be deployed. From DSpace root:
```bash
docker-compose -p d7 up --detach --build dspacesolr
```
## ./test/ folder
These resources are bundled into the `dspace/dspace:dspace-*-test` image at build time. These resources are bundled into the `dspace/dspace:dspace-*-test` image at build time.
See the `Dockerfile.test` section above for more information about the test image.
## Debugging Docker builds ## Debugging Docker builds
View File
@@ -6,14 +6,21 @@
# http://www.dspace.org/license/ # http://www.dspace.org/license/
# #
# This will be deployed as dspace/dspace-postgres-pgcrpyto:loadsql # To build for example use:
FROM postgres:11 # docker build --build-arg POSTGRES_VERSION=13 --build-arg POSTGRES_PASSWORD=mypass ./dspace/src/main/docker/dspace-postgres-pgcrypto-curl/
# This will be published as dspace/dspace-postgres-pgcrypto:$DSPACE_VERSION-loadsql
ARG POSTGRES_VERSION=13
ARG POSTGRES_PASSWORD=dspace
FROM postgres:${POSTGRES_VERSION}
ENV POSTGRES_DB dspace ENV POSTGRES_DB dspace
ENV POSTGRES_USER dspace ENV POSTGRES_USER dspace
ENV POSTGRES_PASSWORD dspace ENV POSTGRES_PASSWORD ${POSTGRES_PASSWORD}
# Install curl which is necessary to load SQL file
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
# Load a SQL dump. Set LOADSQL to a URL for the sql dump file. # Load a SQL dump. Set LOADSQL to a URL for the sql dump file.
RUN apt-get update && apt-get install -y curl
COPY install-pgcrypto.sh /docker-entrypoint-initdb.d/ COPY install-pgcrypto.sh /docker-entrypoint-initdb.d/
View File
@@ -11,15 +11,33 @@ set -e
CHECKFILE=/pgdata/ingest.hasrun.flag CHECKFILE=/pgdata/ingest.hasrun.flag
# If $LOADSQL environment variable set, use 'curl' to download that SQL and run it in PostgreSQL
# This can be used to initialize a database based on test data available on the web.
if [ ! -f $CHECKFILE -a ! -z ${LOADSQL} ] if [ ! -f $CHECKFILE -a ! -z ${LOADSQL} ]
then then
curl ${LOADSQL} -L -s --output /tmp/dspace.sql # Download SQL file to /tmp/dspace-db-init.sql
psql -U $POSTGRES_USER < /tmp/dspace.sql curl ${LOADSQL} -L -s --output /tmp/dspace-db-init.sql
# Load into PostgreSQL
psql -U $POSTGRES_USER < /tmp/dspace-db-init.sql
# Remove downloaded file
rm /tmp/dspace-db-init.sql
touch $CHECKFILE touch $CHECKFILE
exit exit
fi fi
# If $LOCALSQL environment variable set, then simply run it in PostgreSQL
# This can be used to restore data from a pg_dump or similar.
if [ ! -f $CHECKFILE -a ! -z ${LOCALSQL} ]
then
# Load into PostgreSQL
psql -U $POSTGRES_USER < ${LOCALSQL}
touch $CHECKFILE
exit
fi
# Then, setup pgcrypto on this database
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL
-- Create a new schema in this database named "extensions" (or whatever you want to name it) -- Create a new schema in this database named "extensions" (or whatever you want to name it)
CREATE SCHEMA extensions; CREATE SCHEMA extensions;
View File
@@ -6,13 +6,18 @@
# http://www.dspace.org/license/ # http://www.dspace.org/license/
# #
# This will be deployed as dspace/dspace-postgres-pgcrpyto:latest # To build for example use:
FROM postgres:11 # docker build --build-arg POSTGRES_VERSION=13 --build-arg POSTGRES_PASSWORD=mypass ./dspace/src/main/docker/dspace-postgres-pgcrypto/
# This will be published as dspace/dspace-postgres-pgcrypto:$DSPACE_VERSION
ARG POSTGRES_VERSION=13
ARG POSTGRES_PASSWORD=dspace
FROM postgres:${POSTGRES_VERSION}
ENV POSTGRES_DB dspace ENV POSTGRES_DB dspace
ENV POSTGRES_USER dspace ENV POSTGRES_USER dspace
ENV POSTGRES_PASSWORD dspace ENV POSTGRES_PASSWORD ${POSTGRES_PASSWORD}
RUN apt-get update
# Copy over script which will initialize database and install pgcrypto extension
COPY install-pgcrypto.sh /docker-entrypoint-initdb.d/ COPY install-pgcrypto.sh /docker-entrypoint-initdb.d/
View File
@@ -0,0 +1,36 @@
#
# The contents of this file are subject to the license and copyright
# detailed in the LICENSE and NOTICE files at the root of the source
# tree and available online at
#
# http://www.dspace.org/license/
#
# To build, use the repository root as the build context for (easier) access to the Solr configs
# docker build --build-arg SOLR_VERSION=8.11 -f ./dspace/src/main/docker/dspace-solr/Dockerfile .
# This will be published as dspace/dspace-solr:$DSPACE_VERSION
ARG SOLR_VERSION=8.11
FROM solr:${SOLR_VERSION}-slim
ENV AUTHORITY_CONFIGSET_PATH=/opt/solr/server/solr/configsets/authority/conf \
OAI_CONFIGSET_PATH=/opt/solr/server/solr/configsets/oai/conf \
SEARCH_CONFIGSET_PATH=/opt/solr/server/solr/configsets/search/conf \
STATISTICS_CONFIGSET_PATH=/opt/solr/server/solr/configsets/statistics/conf
USER root
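# Create the configset directories and copy DSpace's Solr configuration into them (requires root)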
RUN mkdir -p $AUTHORITY_CONFIGSET_PATH && \
mkdir -p $OAI_CONFIGSET_PATH && \
mkdir -p $SEARCH_CONFIGSET_PATH && \
mkdir -p $STATISTICS_CONFIGSET_PATH
COPY dspace/solr/authority/conf/* $AUTHORITY_CONFIGSET_PATH/
COPY dspace/solr/oai/conf/* $OAI_CONFIGSET_PATH/
COPY dspace/solr/search/conf/* $SEARCH_CONFIGSET_PATH/
COPY dspace/solr/statistics/conf/* $STATISTICS_CONFIGSET_PATH/
RUN chown -R solr:solr /opt/solr/server/solr/configsets
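# Switch back to the unprivileged solr user so Solr does not run as root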
USER solr
View File
@@ -37,7 +37,7 @@
<jaxb-runtime.version>2.3.1</jaxb-runtime.version> <jaxb-runtime.version>2.3.1</jaxb-runtime.version>
<jcache-version>1.1.0</jcache-version> <jcache-version>1.1.0</jcache-version>
<!-- NOTE: Jetty needed for Solr, Handle Server & tests --> <!-- NOTE: Jetty needed for Solr, Handle Server & tests -->
<jetty.version>9.4.48.v20220622</jetty.version> <jetty.version>9.4.51.v20230217</jetty.version>
<log4j.version>2.17.1</log4j.version> <log4j.version>2.17.1</log4j.version>
<pdfbox-version>2.0.27</pdfbox-version> <pdfbox-version>2.0.27</pdfbox-version>
<rome.version>1.18.0</rome.version> <rome.version>1.18.0</rome.version>