diff --git a/.dockerignore b/.dockerignore
index 0e42960dc9..7d3bdc2b4b 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -6,6 +6,5 @@ dspace/modules/*/target/
Dockerfile.*
dspace/src/main/docker/dspace-postgres-pgcrypto
dspace/src/main/docker/dspace-postgres-pgcrypto-curl
-dspace/src/main/docker/solr
dspace/src/main/docker/README.md
dspace/src/main/docker-compose/
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 52714a8ba2..99c9efe019 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -79,6 +79,39 @@ jobs:
name: ${{ matrix.type }} results
path: ${{ matrix.resultsdir }}
- # https://github.com/codecov/codecov-action
+ # Upload code coverage report to artifact, so that it can be shared with the 'codecov' job (see below)
+ - name: Upload code coverage report to Artifact
+ uses: actions/upload-artifact@v3
+ with:
+ name: ${{ matrix.type }} coverage report
+ path: 'dspace/target/site/jacoco-aggregate/jacoco.xml'
+ retention-days: 14
+
+ # Codecov upload is a separate job in order to allow us to restart this separately from the entire build/test
+ # job above. This is necessary because Codecov uploads seem to randomly fail at times.
+ # See https://community.codecov.com/t/upload-issues-unable-to-locate-build-via-github-actions-api/3954
+ codecov:
+ # Must run after 'tests' job above
+ needs: tests
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ # Download artifacts from previous 'tests' job
+ - name: Download coverage artifacts
+ uses: actions/download-artifact@v3
+
+ # Now attempt upload to Codecov using its action.
+ # NOTE: We use a retry action to retry the Codecov upload if it fails the first time.
+ #
+ # Retry action: https://github.com/marketplace/actions/retry-action
+ # Codecov action: https://github.com/codecov/codecov-action
- name: Upload coverage to Codecov.io
- uses: codecov/codecov-action@v3
+ uses: Wandalen/wretry.action@v1.0.36
+ with:
+ action: codecov/codecov-action@v3
+ # Try upload 5 times max
+ attempt_limit: 5
+ # Run again in 30 seconds
+ attempt_delay: 30000
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 64e12f01aa..faa50ac8dc 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -170,3 +170,29 @@ jobs:
# Use tags / labels provided by 'docker/metadata-action' above
tags: ${{ steps.meta_build_cli.outputs.tags }}
labels: ${{ steps.meta_build_cli.outputs.labels }}
+
+ ###########################################
+ # Build/Push the 'dspace/dspace-solr' image
+ ###########################################
+ # Get Metadata for docker_build_solr step below
+ - name: Sync metadata (tags, labels) from GitHub to Docker for 'dspace-solr' image
+ id: meta_build_solr
+ uses: docker/metadata-action@v4
+ with:
+ images: dspace/dspace-solr
+ tags: ${{ env.IMAGE_TAGS }}
+ flavor: ${{ env.TAGS_FLAVOR }}
+
+ - name: Build and push 'dspace-solr' image
+ id: docker_build_solr
+ uses: docker/build-push-action@v3
+ with:
+ context: .
+ file: ./dspace/src/main/docker/dspace-solr/Dockerfile
+ platforms: ${{ env.PLATFORMS }}
+ # For pull requests, we run the Docker build (to ensure no PR changes break the build),
+ # but we ONLY do an image push to DockerHub if it's NOT a PR
+ push: ${{ github.event_name != 'pull_request' }}
+ # Use tags / labels provided by 'docker/metadata-action' above
+ tags: ${{ steps.meta_build_solr.outputs.tags }}
+ labels: ${{ steps.meta_build_solr.outputs.labels }}
diff --git a/.github/workflows/issue_opened.yml b/.github/workflows/issue_opened.yml
index 5d7c1c30f7..b4436dca3a 100644
--- a/.github/workflows/issue_opened.yml
+++ b/.github/workflows/issue_opened.yml
@@ -16,7 +16,7 @@ jobs:
# Only add to project board if issue is flagged as "needs triage" or has no labels
# NOTE: By default we flag new issues as "needs triage" in our issue template
if: (contains(github.event.issue.labels.*.name, 'needs triage') || join(github.event.issue.labels.*.name) == '')
- uses: actions/add-to-project@v0.3.0
+ uses: actions/add-to-project@v0.5.0
# Note, the authentication token below is an ORG level Secret.
# It must be created/recreated manually via a personal access token with admin:org, project, public_repo permissions
# See: https://docs.github.com/en/actions/configuring-and-managing-workflows/authenticating-with-the-github_token#permissions-for-the-github_token
diff --git a/.github/workflows/label_merge_conflicts.yml b/.github/workflows/label_merge_conflicts.yml
index d71d244c2b..cc0c7099f4 100644
--- a/.github/workflows/label_merge_conflicts.yml
+++ b/.github/workflows/label_merge_conflicts.yml
@@ -23,7 +23,7 @@ jobs:
steps:
# See: https://github.com/prince-chrismc/label-merge-conflicts-action
- name: Auto-label PRs with merge conflicts
- uses: prince-chrismc/label-merge-conflicts-action@v2
+ uses: prince-chrismc/label-merge-conflicts-action@v3
# Add "merge conflict" label if a merge conflict is detected. Remove it when resolved.
# Note, the authentication token is created automatically
# See: https://docs.github.com/en/actions/configuring-and-managing-workflows/authenticating-with-the-github_token
diff --git a/docker-compose.yml b/docker-compose.yml
index 6008b873ae..40b4ce064e 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -81,8 +81,12 @@ services:
# DSpace Solr container
dspacesolr:
container_name: dspacesolr
- # Uses official Solr image at https://hub.docker.com/_/solr/
- image: solr:8.11-slim
+ image: "${DOCKER_OWNER:-dspace}/dspace-solr:${DSPACE_VER:-dspace-7_x}"
+ build:
+ context: .
+ dockerfile: ./dspace/src/main/docker/dspace-solr/Dockerfile
+ args:
+ SOLR_VERSION: "${SOLR_VER:-8.11}"
networks:
dspacenet:
ports:
@@ -92,30 +96,25 @@ services:
tty: true
working_dir: /var/solr/data
volumes:
- # Mount our local Solr core configs so that they are available as Solr configsets on container
- - ./dspace/solr/authority:/opt/solr/server/solr/configsets/authority
- - ./dspace/solr/oai:/opt/solr/server/solr/configsets/oai
- - ./dspace/solr/search:/opt/solr/server/solr/configsets/search
- - ./dspace/solr/statistics:/opt/solr/server/solr/configsets/statistics
# Keep Solr data directory between reboots
- solr_data:/var/solr/data
- # Initialize all DSpace Solr cores using the mounted local configsets (see above), then start Solr
+ # Initialize all DSpace Solr cores then start Solr:
# * First, run precreate-core to create the core (if it doesn't yet exist). If exists already, this is a no-op
- # * Second, copy updated configs from mounted configsets to this core. If it already existed, this updates core
- # to the latest configs. If it's a newly created core, this is a no-op.
+ # * Second, copy configsets to this core:
+ # Updates to Solr configs require the container to be rebuilt/restarted: `docker compose -p d7 up -d --build dspacesolr`
entrypoint:
- /bin/bash
- '-c'
- |
init-var-solr
precreate-core authority /opt/solr/server/solr/configsets/authority
- cp -r -u /opt/solr/server/solr/configsets/authority/* authority
+ cp -r /opt/solr/server/solr/configsets/authority/* authority
precreate-core oai /opt/solr/server/solr/configsets/oai
- cp -r -u /opt/solr/server/solr/configsets/oai/* oai
+ cp -r /opt/solr/server/solr/configsets/oai/* oai
precreate-core search /opt/solr/server/solr/configsets/search
- cp -r -u /opt/solr/server/solr/configsets/search/* search
+ cp -r /opt/solr/server/solr/configsets/search/* search
precreate-core statistics /opt/solr/server/solr/configsets/statistics
- cp -r -u /opt/solr/server/solr/configsets/statistics/* statistics
+ cp -r /opt/solr/server/solr/configsets/statistics/* statistics
exec solr -f
volumes:
assetstore:
diff --git a/dspace-api/pom.xml b/dspace-api/pom.xml
index e12aabc956..8f20f423aa 100644
--- a/dspace-api/pom.xml
+++ b/dspace-api/pom.xml
@@ -776,7 +776,7 @@
org.json
json
- 20180130
+ 20230227
diff --git a/dspace-api/src/main/java/org/dspace/content/BitstreamServiceImpl.java b/dspace-api/src/main/java/org/dspace/content/BitstreamServiceImpl.java
index 071bf3972f..cc89cea33a 100644
--- a/dspace-api/src/main/java/org/dspace/content/BitstreamServiceImpl.java
+++ b/dspace-api/src/main/java/org/dspace/content/BitstreamServiceImpl.java
@@ -332,8 +332,8 @@ public class BitstreamServiceImpl extends DSpaceObjectServiceImpl imp
}
@Override
- public List findDeletedBitstreams(Context context) throws SQLException {
- return bitstreamDAO.findDeletedBitstreams(context);
+ public List findDeletedBitstreams(Context context, int limit, int offset) throws SQLException {
+ return bitstreamDAO.findDeletedBitstreams(context, limit, offset);
}
@Override
diff --git a/dspace-api/src/main/java/org/dspace/content/dao/BitstreamDAO.java b/dspace-api/src/main/java/org/dspace/content/dao/BitstreamDAO.java
index c1ef923131..0d7afaa3cd 100644
--- a/dspace-api/src/main/java/org/dspace/content/dao/BitstreamDAO.java
+++ b/dspace-api/src/main/java/org/dspace/content/dao/BitstreamDAO.java
@@ -29,7 +29,7 @@ public interface BitstreamDAO extends DSpaceObjectLegacySupportDAO {
public Iterator findAll(Context context, int limit, int offset) throws SQLException;
- public List findDeletedBitstreams(Context context) throws SQLException;
+ public List findDeletedBitstreams(Context context, int limit, int offset) throws SQLException;
public List findDuplicateInternalIdentifier(Context context, Bitstream bitstream) throws SQLException;
diff --git a/dspace-api/src/main/java/org/dspace/content/dao/impl/BitstreamDAOImpl.java b/dspace-api/src/main/java/org/dspace/content/dao/impl/BitstreamDAOImpl.java
index 02e3509c31..d6d77fe7f0 100644
--- a/dspace-api/src/main/java/org/dspace/content/dao/impl/BitstreamDAOImpl.java
+++ b/dspace-api/src/main/java/org/dspace/content/dao/impl/BitstreamDAOImpl.java
@@ -41,13 +41,14 @@ public class BitstreamDAOImpl extends AbstractHibernateDSODAO impleme
}
@Override
- public List findDeletedBitstreams(Context context) throws SQLException {
+ public List findDeletedBitstreams(Context context, int limit, int offset) throws SQLException {
CriteriaBuilder criteriaBuilder = getCriteriaBuilder(context);
CriteriaQuery criteriaQuery = getCriteriaQuery(criteriaBuilder, Bitstream.class);
Root bitstreamRoot = criteriaQuery.from(Bitstream.class);
criteriaQuery.select(bitstreamRoot);
+ criteriaQuery.orderBy(criteriaBuilder.desc(bitstreamRoot.get(Bitstream_.ID)));
criteriaQuery.where(criteriaBuilder.equal(bitstreamRoot.get(Bitstream_.deleted), true));
- return list(context, criteriaQuery, false, Bitstream.class, -1, -1);
+ return list(context, criteriaQuery, false, Bitstream.class, limit, offset);
}
diff --git a/dspace-api/src/main/java/org/dspace/content/service/BitstreamService.java b/dspace-api/src/main/java/org/dspace/content/service/BitstreamService.java
index 4621c95e7c..8effabf284 100644
--- a/dspace-api/src/main/java/org/dspace/content/service/BitstreamService.java
+++ b/dspace-api/src/main/java/org/dspace/content/service/BitstreamService.java
@@ -183,7 +183,7 @@ public interface BitstreamService extends DSpaceObjectService, DSpace
* @return a list of all bitstreams that have been "deleted"
* @throws SQLException if database error
*/
- public List findDeletedBitstreams(Context context) throws SQLException;
+ public List findDeletedBitstreams(Context context, int limit, int offset) throws SQLException;
/**
diff --git a/dspace-api/src/main/java/org/dspace/identifier/doi/DOIConsumer.java b/dspace-api/src/main/java/org/dspace/identifier/doi/DOIConsumer.java
index 1961ce8274..33ef058e16 100644
--- a/dspace-api/src/main/java/org/dspace/identifier/doi/DOIConsumer.java
+++ b/dspace-api/src/main/java/org/dspace/identifier/doi/DOIConsumer.java
@@ -141,7 +141,6 @@ public class DOIConsumer implements Consumer {
+ item.getID() + " and DOI " + doi + ".", ex);
}
}
- ctx.commit();
}
}
diff --git a/dspace-api/src/main/java/org/dspace/storage/bitstore/BitstreamStorageServiceImpl.java b/dspace-api/src/main/java/org/dspace/storage/bitstore/BitstreamStorageServiceImpl.java
index fcdaa516ed..956ac5a7f8 100644
--- a/dspace-api/src/main/java/org/dspace/storage/bitstore/BitstreamStorageServiceImpl.java
+++ b/dspace-api/src/main/java/org/dspace/storage/bitstore/BitstreamStorageServiceImpl.java
@@ -17,6 +17,7 @@ import java.util.Map;
import java.util.UUID;
import javax.annotation.Nullable;
+import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections4.MapUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -216,24 +217,61 @@ public class BitstreamStorageServiceImpl implements BitstreamStorageService, Ini
@Override
public void cleanup(boolean deleteDbRecords, boolean verbose) throws SQLException, IOException, AuthorizeException {
Context context = new Context(Context.Mode.BATCH_EDIT);
- int commitCounter = 0;
+
+ int offset = 0;
+ int limit = 100;
+
+ int cleanedBitstreamCount = 0;
+
+ int deletedBitstreamCount = bitstreamService.countDeletedBitstreams(context);
System.out.println("Found " + deletedBitstreamCount + " deleted bitstreams to cleanup");
try {
context.turnOffAuthorisationSystem();
- List storage = bitstreamService.findDeletedBitstreams(context);
- for (Bitstream bitstream : storage) {
- UUID bid = bitstream.getID();
- List wantedMetadata = List.of("size_bytes", "modified");
- Map receivedMetadata = this.getStore(bitstream.getStoreNumber())
- .about(bitstream, wantedMetadata);
+ while (cleanedBitstreamCount < deletedBitstreamCount) {
+
+ List storage = bitstreamService.findDeletedBitstreams(context, limit, offset);
+
+ if (CollectionUtils.isEmpty(storage)) {
+ break;
+ }
+
+ for (Bitstream bitstream : storage) {
+ UUID bid = bitstream.getID();
+ List wantedMetadata = List.of("size_bytes", "modified");
+ Map receivedMetadata = this.getStore(bitstream.getStoreNumber())
+ .about(bitstream, wantedMetadata);
- // Make sure entries which do not exist are removed
- if (MapUtils.isEmpty(receivedMetadata)) {
- log.debug("bitstore.about is empty, so file is not present");
+ // Make sure entries which do not exist are removed
+ if (MapUtils.isEmpty(receivedMetadata)) {
+ log.debug("bitstore.about is empty, so file is not present");
+ if (deleteDbRecords) {
+ log.debug("deleting record");
+ if (verbose) {
+ System.out.println(" - Deleting bitstream information (ID: " + bid + ")");
+ }
+ checksumHistoryService.deleteByBitstream(context, bitstream);
+ if (verbose) {
+ System.out.println(" - Deleting bitstream record from database (ID: " + bid + ")");
+ }
+ bitstreamService.expunge(context, bitstream);
+ }
+ context.uncacheEntity(bitstream);
+ continue;
+ }
+
+ // This is a small chance that this is a file which is
+ // being stored -- get it next time.
+ if (isRecent(Long.valueOf(receivedMetadata.get("modified").toString()))) {
+ log.debug("file is recent");
+ context.uncacheEntity(bitstream);
+ continue;
+ }
+
if (deleteDbRecords) {
- log.debug("deleting record");
+ log.debug("deleting db record");
if (verbose) {
System.out.println(" - Deleting bitstream information (ID: " + bid + ")");
}
@@ -243,64 +281,42 @@ public class BitstreamStorageServiceImpl implements BitstreamStorageService, Ini
}
bitstreamService.expunge(context, bitstream);
}
+
+ if (isRegisteredBitstream(bitstream.getInternalId())) {
+ context.uncacheEntity(bitstream);
+ continue; // do not delete registered bitstreams
+ }
+
+
+ // Since versioning allows for multiple bitstreams, check if the internal
+ // identifier isn't used on
+ // another place
+ if (bitstreamService.findDuplicateInternalIdentifier(context, bitstream).isEmpty()) {
+ this.getStore(bitstream.getStoreNumber()).remove(bitstream);
+
+ String message = ("Deleted bitstreamID " + bid + ", internalID " + bitstream.getInternalId());
+ if (log.isDebugEnabled()) {
+ log.debug(message);
+ }
+ if (verbose) {
+ System.out.println(message);
+ }
+ }
+
context.uncacheEntity(bitstream);
- continue;
}
- // This is a small chance that this is a file which is
- // being stored -- get it next time.
- if (isRecent(Long.valueOf(receivedMetadata.get("modified").toString()))) {
- log.debug("file is recent");
- context.uncacheEntity(bitstream);
- continue;
+ // Commit actual changes to DB after dispatch events
+ System.out.print("Performing incremental commit to the database...");
+ context.commit();
+ System.out.println(" Incremental commit done!");
+
+ cleanedBitstreamCount = cleanedBitstreamCount + storage.size();
+
+ if (!deleteDbRecords) {
+ offset = offset + limit;
}
- if (deleteDbRecords) {
- log.debug("deleting db record");
- if (verbose) {
- System.out.println(" - Deleting bitstream information (ID: " + bid + ")");
- }
- checksumHistoryService.deleteByBitstream(context, bitstream);
- if (verbose) {
- System.out.println(" - Deleting bitstream record from database (ID: " + bid + ")");
- }
- bitstreamService.expunge(context, bitstream);
- }
-
- if (isRegisteredBitstream(bitstream.getInternalId())) {
- context.uncacheEntity(bitstream);
- continue; // do not delete registered bitstreams
- }
-
-
- // Since versioning allows for multiple bitstreams, check if the internal identifier isn't used on
- // another place
- if (bitstreamService.findDuplicateInternalIdentifier(context, bitstream).isEmpty()) {
- this.getStore(bitstream.getStoreNumber()).remove(bitstream);
-
- String message = ("Deleted bitstreamID " + bid + ", internalID " + bitstream.getInternalId());
- if (log.isDebugEnabled()) {
- log.debug(message);
- }
- if (verbose) {
- System.out.println(message);
- }
- }
-
- // Make sure to commit our outstanding work every 100
- // iterations. Otherwise you risk losing the entire transaction
- // if we hit an exception, which isn't useful at all for large
- // amounts of bitstreams.
- commitCounter++;
- if (commitCounter % 100 == 0) {
- context.dispatchEvents();
- // Commit actual changes to DB after dispatch events
- System.out.print("Performing incremental commit to the database...");
- context.commit();
- System.out.println(" Incremental commit done!");
- }
-
- context.uncacheEntity(bitstream);
}
System.out.print("Committing changes to the database...");
diff --git a/dspace-api/src/main/resources/org/dspace/storage/rdbms/sqlmigration/h2/V7.6_2023.03.24__Update_PNG_in_bitstream_format_registry.sql b/dspace-api/src/main/resources/org/dspace/storage/rdbms/sqlmigration/h2/V7.6_2023.03.24__Update_PNG_in_bitstream_format_registry.sql
new file mode 100644
index 0000000000..8aec44a7f6
--- /dev/null
+++ b/dspace-api/src/main/resources/org/dspace/storage/rdbms/sqlmigration/h2/V7.6_2023.03.24__Update_PNG_in_bitstream_format_registry.sql
@@ -0,0 +1,17 @@
+--
+-- The contents of this file are subject to the license and copyright
+-- detailed in the LICENSE and NOTICE files at the root of the source
+-- tree and available online at
+--
+-- http://www.dspace.org/license/
+--
+
+-----------------------------------------------------------------------------------
+-- Update short description for PNG mimetype in the bitstream format registry
+-- See: https://github.com/DSpace/DSpace/pull/8722
+-----------------------------------------------------------------------------------
+
+UPDATE bitstreamformatregistry
+SET short_description='PNG'
+WHERE short_description='image/png'
+ AND mimetype='image/png';
diff --git a/dspace-api/src/main/resources/org/dspace/storage/rdbms/sqlmigration/oracle/V7.6_2023.03.24__Update_PNG_in_bitstream_format_registry.sql b/dspace-api/src/main/resources/org/dspace/storage/rdbms/sqlmigration/oracle/V7.6_2023.03.24__Update_PNG_in_bitstream_format_registry.sql
new file mode 100644
index 0000000000..8aec44a7f6
--- /dev/null
+++ b/dspace-api/src/main/resources/org/dspace/storage/rdbms/sqlmigration/oracle/V7.6_2023.03.24__Update_PNG_in_bitstream_format_registry.sql
@@ -0,0 +1,17 @@
+--
+-- The contents of this file are subject to the license and copyright
+-- detailed in the LICENSE and NOTICE files at the root of the source
+-- tree and available online at
+--
+-- http://www.dspace.org/license/
+--
+
+-----------------------------------------------------------------------------------
+-- Update short description for PNG mimetype in the bitstream format registry
+-- See: https://github.com/DSpace/DSpace/pull/8722
+-----------------------------------------------------------------------------------
+
+UPDATE bitstreamformatregistry
+SET short_description='PNG'
+WHERE short_description='image/png'
+ AND mimetype='image/png';
diff --git a/dspace-api/src/main/resources/org/dspace/storage/rdbms/sqlmigration/postgres/V7.6_2023.03.24__Update_PNG_in_bitstream_format_registry.sql b/dspace-api/src/main/resources/org/dspace/storage/rdbms/sqlmigration/postgres/V7.6_2023.03.24__Update_PNG_in_bitstream_format_registry.sql
new file mode 100644
index 0000000000..8aec44a7f6
--- /dev/null
+++ b/dspace-api/src/main/resources/org/dspace/storage/rdbms/sqlmigration/postgres/V7.6_2023.03.24__Update_PNG_in_bitstream_format_registry.sql
@@ -0,0 +1,17 @@
+--
+-- The contents of this file are subject to the license and copyright
+-- detailed in the LICENSE and NOTICE files at the root of the source
+-- tree and available online at
+--
+-- http://www.dspace.org/license/
+--
+
+-----------------------------------------------------------------------------------
+-- Update short description for PNG mimetype in the bitstream format registry
+-- See: https://github.com/DSpace/DSpace/pull/8722
+-----------------------------------------------------------------------------------
+
+UPDATE bitstreamformatregistry
+SET short_description='PNG'
+WHERE short_description='image/png'
+ AND mimetype='image/png';
diff --git a/dspace-server-webapp/src/main/java/org/dspace/app/rest/ItemOwningCollectionUpdateRestController.java b/dspace-server-webapp/src/main/java/org/dspace/app/rest/ItemOwningCollectionUpdateRestController.java
index b06360ee1d..b5a0c957f2 100644
--- a/dspace-server-webapp/src/main/java/org/dspace/app/rest/ItemOwningCollectionUpdateRestController.java
+++ b/dspace-server-webapp/src/main/java/org/dspace/app/rest/ItemOwningCollectionUpdateRestController.java
@@ -39,6 +39,7 @@ import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
/**
@@ -69,6 +70,8 @@ public class ItemOwningCollectionUpdateRestController {
* moving the item to the new collection.
*
* @param uuid The UUID of the item that will be moved
+ * @param inheritCollectionPolicies Boolean flag whether to inherit the target collection policies when
+ * moving the item
* @param response The response object
* @param request The request object
* @return The wrapped resource containing the new owning collection or null when the item was not moved
@@ -79,7 +82,10 @@ public class ItemOwningCollectionUpdateRestController {
@RequestMapping(method = RequestMethod.PUT, consumes = {"text/uri-list"})
@PreAuthorize("hasPermission(#uuid, 'ITEM','WRITE')")
@PostAuthorize("returnObject != null")
- public CollectionRest move(@PathVariable UUID uuid, HttpServletResponse response,
+ public CollectionRest move(@PathVariable UUID uuid,
+ @RequestParam(name = "inheritPolicies", defaultValue = "false")
+ Boolean inheritCollectionPolicies,
+ HttpServletResponse response,
HttpServletRequest request)
throws SQLException, IOException, AuthorizeException {
Context context = ContextUtil.obtainContext(request);
@@ -91,7 +97,8 @@ public class ItemOwningCollectionUpdateRestController {
"or the data cannot be resolved to a collection.");
}
- Collection targetCollection = performItemMove(context, uuid, (Collection) dsoList.get(0));
+ Collection targetCollection = performItemMove(context, uuid, (Collection) dsoList.get(0),
+ inheritCollectionPolicies);
if (targetCollection == null) {
return null;
@@ -107,17 +114,19 @@ public class ItemOwningCollectionUpdateRestController {
* @param item The item to be moved
* @param currentCollection The current owning collection of the item
* @param targetCollection The target collection of the item
+ * @param inheritPolicies Boolean flag whether to inherit the target collection policies when moving the item
* @return The target collection
* @throws SQLException If something goes wrong
* @throws IOException If something goes wrong
* @throws AuthorizeException If the user is not authorized to perform the move action
*/
private Collection moveItem(final Context context, final Item item, final Collection currentCollection,
- final Collection targetCollection)
+ final Collection targetCollection,
+ final boolean inheritPolicies)
throws SQLException, IOException, AuthorizeException {
- itemService.move(context, item, currentCollection, targetCollection);
- //Necessary because Controller does not pass through general RestResourceController, and as such does not do its
- // commit in DSpaceRestRepository.createAndReturn() or similar
+ itemService.move(context, item, currentCollection, targetCollection, inheritPolicies);
+ // Necessary because Controller does not pass through general RestResourceController, and as such does not do
+ // its commit in DSpaceRestRepository.createAndReturn() or similar
context.commit();
return context.reloadEntity(targetCollection);
@@ -129,12 +138,14 @@ public class ItemOwningCollectionUpdateRestController {
* @param context The context Object
* @param itemUuid The uuid of the item to be moved
* @param targetCollection The target collection
+ * @param inheritPolicies Whether to inherit the target collection policies when moving the item
* @return The new owning collection of the item when authorized or null when not authorized
* @throws SQLException If something goes wrong
* @throws IOException If something goes wrong
* @throws AuthorizeException If the user is not authorized to perform the move action
*/
- private Collection performItemMove(final Context context, final UUID itemUuid, final Collection targetCollection)
+ private Collection performItemMove(final Context context, final UUID itemUuid, final Collection targetCollection,
+ boolean inheritPolicies)
throws SQLException, IOException, AuthorizeException {
Item item = itemService.find(context, itemUuid);
@@ -153,7 +164,7 @@ public class ItemOwningCollectionUpdateRestController {
if (authorizeService.authorizeActionBoolean(context, currentCollection, Constants.ADMIN)) {
- return moveItem(context, item, currentCollection, targetCollection);
+ return moveItem(context, item, currentCollection, targetCollection, inheritPolicies);
}
return null;
diff --git a/dspace-server-webapp/src/main/java/org/dspace/app/rest/exception/DSpaceApiExceptionControllerAdvice.java b/dspace-server-webapp/src/main/java/org/dspace/app/rest/exception/DSpaceApiExceptionControllerAdvice.java
index 1cbfd5c632..5e32247ee4 100644
--- a/dspace-server-webapp/src/main/java/org/dspace/app/rest/exception/DSpaceApiExceptionControllerAdvice.java
+++ b/dspace-server-webapp/src/main/java/org/dspace/app/rest/exception/DSpaceApiExceptionControllerAdvice.java
@@ -42,6 +42,7 @@ import org.springframework.web.bind.annotation.ControllerAdvice;
import org.springframework.web.bind.annotation.ExceptionHandler;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.context.request.WebRequest;
+import org.springframework.web.multipart.MaxUploadSizeExceededException;
import org.springframework.web.multipart.MultipartException;
import org.springframework.web.servlet.mvc.method.annotation.ResponseEntityExceptionHandler;
@@ -97,6 +98,13 @@ public class DSpaceApiExceptionControllerAdvice extends ResponseEntityExceptionH
sendErrorResponse(request, response, ex, "Request is invalid or incorrect", HttpServletResponse.SC_BAD_REQUEST);
}
+ @ExceptionHandler(MaxUploadSizeExceededException.class)
+ protected void handleMaxUploadSizeExceededException(HttpServletRequest request, HttpServletResponse response,
+ Exception ex) throws IOException {
+ sendErrorResponse(request, response, ex, "Request entity is too large",
+ HttpServletResponse.SC_REQUEST_ENTITY_TOO_LARGE);
+ }
+
@ExceptionHandler(SQLException.class)
protected void handleSQLException(HttpServletRequest request, HttpServletResponse response, Exception ex)
throws IOException {
diff --git a/dspace-server-webapp/src/test/java/org/dspace/app/rest/BitstreamFormatRestRepositoryIT.java b/dspace-server-webapp/src/test/java/org/dspace/app/rest/BitstreamFormatRestRepositoryIT.java
index d5798ba5a3..fd12826930 100644
--- a/dspace-server-webapp/src/test/java/org/dspace/app/rest/BitstreamFormatRestRepositoryIT.java
+++ b/dspace-server-webapp/src/test/java/org/dspace/app/rest/BitstreamFormatRestRepositoryIT.java
@@ -56,7 +56,7 @@ public class BitstreamFormatRestRepositoryIT extends AbstractControllerIntegrati
@Autowired
private BitstreamFormatConverter bitstreamFormatConverter;
- private final int DEFAULT_AMOUNT_FORMATS = 82;
+ private final int DEFAULT_AMOUNT_FORMATS = 85;
@Test
public void findAllPaginationTest() throws Exception {
diff --git a/dspace/config/dspace.cfg b/dspace/config/dspace.cfg
index 2a35e89459..65b1f951fa 100644
--- a/dspace/config/dspace.cfg
+++ b/dspace/config/dspace.cfg
@@ -492,9 +492,9 @@ filter.org.dspace.app.mediafilter.TikaTextExtractionFilter.inputFormats = OpenDo
filter.org.dspace.app.mediafilter.TikaTextExtractionFilter.inputFormats = OpenDocument Text
filter.org.dspace.app.mediafilter.TikaTextExtractionFilter.inputFormats = RTF
filter.org.dspace.app.mediafilter.TikaTextExtractionFilter.inputFormats = Text
-filter.org.dspace.app.mediafilter.JPEGFilter.inputFormats = BMP, GIF, JPEG, image/png
-filter.org.dspace.app.mediafilter.BrandedPreviewJPEGFilter.inputFormats = BMP, GIF, JPEG, image/png
-filter.org.dspace.app.mediafilter.ImageMagickImageThumbnailFilter.inputFormats = BMP, GIF, image/png, JPG, TIFF, JPEG, JPEG 2000
+filter.org.dspace.app.mediafilter.JPEGFilter.inputFormats = BMP, GIF, JPEG, PNG
+filter.org.dspace.app.mediafilter.BrandedPreviewJPEGFilter.inputFormats = BMP, GIF, JPEG, PNG
+filter.org.dspace.app.mediafilter.ImageMagickImageThumbnailFilter.inputFormats = BMP, GIF, PNG, JPG, TIFF, JPEG, JPEG 2000
filter.org.dspace.app.mediafilter.ImageMagickPdfThumbnailFilter.inputFormats = Adobe PDF
filter.org.dspace.app.mediafilter.PDFBoxThumbnail.inputFormats = Adobe PDF
@@ -843,7 +843,7 @@ plugin.single.org.dspace.embargo.EmbargoSetter = org.dspace.embargo.DefaultEmbar
plugin.single.org.dspace.embargo.EmbargoLifter = org.dspace.embargo.DefaultEmbargoLifter
# values for the forever embargo date threshold
-# This threshold date is used in the default access status helper to dermine if an item is
+# This threshold date is used in the default access status helper to determine if an item is
# restricted or embargoed based on the start date of the primary (or first) file policies.
# In this case, if the policy start date is inferior to the threshold date, the status will
# be embargo, else it will be restricted.
@@ -880,7 +880,7 @@ org.dspace.app.itemexport.life.span.hours = 48
# The maximum size in Megabytes the export should be. This is enforced before the
# compression. Each bitstream's size in each item being exported is added up, if their
-# cummulative sizes are more than this entry the export is not kicked off
+# cumulative sizes are more than this entry the export is not kicked off
org.dspace.app.itemexport.max.size = 200
### Batch Item import settings ###
diff --git a/dspace/config/registries/bitstream-formats.xml b/dspace/config/registries/bitstream-formats.xml
index 076959a319..3515773fd7 100644
--- a/dspace/config/registries/bitstream-formats.xml
+++ b/dspace/config/registries/bitstream-formats.xml
@@ -115,6 +115,15 @@
csv
+
+ text/vtt
+ WebVTT
+ Web Video Text Tracks Format
+ 1
+ false
+ vtt
+
+
application/msword
Microsoft Word
@@ -201,7 +210,7 @@
image/png
- image/png
+ PNG
Portable Network Graphics
1
false
@@ -800,4 +809,22 @@
mp3
+
+ image/webp
+ WebP
+ WebP is a modern image format that provides superior lossless and lossy compression for images on the web.
+ 1
+ false
+ webp
+
+
+
+ image/avif
+ AVIF
+ AV1 Image File Format (AVIF) is an open, royalty-free image file format specification for storing images or image sequences compressed with AV1 in the HEIF container format.
+ 1
+ false
+ avif
+
+
diff --git a/dspace/src/main/docker/README.md b/dspace/src/main/docker/README.md
index 6c9da0190c..f2a944f608 100644
--- a/dspace/src/main/docker/README.md
+++ b/dspace/src/main/docker/README.md
@@ -130,6 +130,23 @@ docker run -i -t -d -p 80:80 -p 443:443 dspace/dspace-shibboleth
This image can also be rebuilt using the `../docker-compose/docker-compose-shibboleth.yml` script.
+## dspace/src/main/docker/dspace-solr/Dockerfile
+
+This Dockerfile builds a Solr image with DSpace Solr configsets included. It
+can be pulled / built following the [docker compose resources](../docker-compose/README.md)
+documentation. Or, to just build and/or run Solr:
+
+```bash
+docker-compose build dspacesolr
+docker-compose -p d7 up -d dspacesolr
+```
+
+If you're making iterative changes to the DSpace Solr configsets you'll need to rebuild /
+restart the `dspacesolr` container for the changes to be deployed. From DSpace root:
+
+```bash
+docker-compose -p d7 up --detach --build dspacesolr
+```
## test/ folder
diff --git a/dspace/src/main/docker/dspace-solr/Dockerfile b/dspace/src/main/docker/dspace-solr/Dockerfile
new file mode 100644
index 0000000000..9fe9adf944
--- /dev/null
+++ b/dspace/src/main/docker/dspace-solr/Dockerfile
@@ -0,0 +1,36 @@
+#
+# The contents of this file are subject to the license and copyright
+# detailed in the LICENSE and NOTICE files at the root of the source
+# tree and available online at
+#
+# http://www.dspace.org/license/
+#
+
+# To build use root as context for (easier) access to solr cfgs
+# docker build --build-arg SOLR_VERSION=8.11 -f ./dspace/src/main/docker/dspace-solr/Dockerfile .
+# This will be published as dspace/dspace-solr:$DSPACE_VERSION
+
+ARG SOLR_VERSION=8.11
+
+FROM solr:${SOLR_VERSION}-slim
+
+ENV AUTHORITY_CONFIGSET_PATH=/opt/solr/server/solr/configsets/authority/conf \
+ OAI_CONFIGSET_PATH=/opt/solr/server/solr/configsets/oai/conf \
+ SEARCH_CONFIGSET_PATH=/opt/solr/server/solr/configsets/search/conf \
+ STATISTICS_CONFIGSET_PATH=/opt/solr/server/solr/configsets/statistics/conf
+
+USER root
+
+RUN mkdir -p $AUTHORITY_CONFIGSET_PATH && \
+ mkdir -p $OAI_CONFIGSET_PATH && \
+ mkdir -p $SEARCH_CONFIGSET_PATH && \
+ mkdir -p $STATISTICS_CONFIGSET_PATH
+
+COPY dspace/solr/authority/conf/* $AUTHORITY_CONFIGSET_PATH/
+COPY dspace/solr/oai/conf/* $OAI_CONFIGSET_PATH/
+COPY dspace/solr/search/conf/* $SEARCH_CONFIGSET_PATH/
+COPY dspace/solr/statistics/conf/* $STATISTICS_CONFIGSET_PATH/
+
+RUN chown -R solr:solr /opt/solr/server/solr/configsets
+
+USER solr