Merge branch 'main' into DURACOM-92

Author: Luca Giamminonni
Date:   2023-04-20 14:51:35 +02:00

23 changed files with 329 additions and 106 deletions

View File

@@ -6,6 +6,5 @@ dspace/modules/*/target/
 Dockerfile.*
 dspace/src/main/docker/dspace-postgres-pgcrypto
 dspace/src/main/docker/dspace-postgres-pgcrypto-curl
-dspace/src/main/docker/solr
 dspace/src/main/docker/README.md
 dspace/src/main/docker-compose/

View File

@@ -79,6 +79,39 @@ jobs:
           name: ${{ matrix.type }} results
           path: ${{ matrix.resultsdir }}

-      # https://github.com/codecov/codecov-action
-      - name: Upload coverage to Codecov.io
-        uses: codecov/codecov-action@v3
+      # Upload code coverage report to artifact, so that it can be shared with the 'codecov' job (see below)
+      - name: Upload code coverage report to Artifact
+        uses: actions/upload-artifact@v3
+        with:
+          name: ${{ matrix.type }} coverage report
+          path: 'dspace/target/site/jacoco-aggregate/jacoco.xml'
+          retention-days: 14
+
+  # Codecov upload is a separate job in order to allow us to restart this separate from the entire build/test
+  # job above. This is necessary because Codecov uploads seem to randomly fail at times.
+  # See https://community.codecov.com/t/upload-issues-unable-to-locate-build-via-github-actions-api/3954
+  codecov:
+    # Must run after 'tests' job above
+    needs: tests
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      # Download artifacts from previous 'tests' job
+      - name: Download coverage artifacts
+        uses: actions/download-artifact@v3
+
+      # Now attempt upload to Codecov using its action.
+      # NOTE: We use a retry action to retry the Codecov upload if it fails the first time.
+      #
+      # Retry action: https://github.com/marketplace/actions/retry-action
+      # Codecov action: https://github.com/codecov/codecov-action
+      - name: Upload coverage to Codecov.io
+        uses: Wandalen/wretry.action@v1.0.36
+        with:
+          action: codecov/codecov-action@v3
+          # Try upload 5 times max
+          attempt_limit: 5
+          # Run again in 30 seconds
+          attempt_delay: 30000

View File

@@ -170,3 +170,29 @@ jobs:
           # Use tags / labels provided by 'docker/metadata-action' above
           tags: ${{ steps.meta_build_cli.outputs.tags }}
           labels: ${{ steps.meta_build_cli.outputs.labels }}
+
+      ###########################################
+      # Build/Push the 'dspace/dspace-solr' image
+      ###########################################
+      # Get Metadata for docker_build_solr step below
+      - name: Sync metadata (tags, labels) from GitHub to Docker for 'dspace-solr' image
+        id: meta_build_solr
+        uses: docker/metadata-action@v4
+        with:
+          images: dspace/dspace-solr
+          tags: ${{ env.IMAGE_TAGS }}
+          flavor: ${{ env.TAGS_FLAVOR }}
+
+      - name: Build and push 'dspace-solr' image
+        id: docker_build_solr
+        uses: docker/build-push-action@v3
+        with:
+          context: .
+          file: ./dspace/src/main/docker/dspace-solr/Dockerfile
+          platforms: ${{ env.PLATFORMS }}
+          # For pull requests, we run the Docker build (to ensure no PR changes break the build),
+          # but we ONLY do an image push to DockerHub if it's NOT a PR
+          push: ${{ github.event_name != 'pull_request' }}
+          # Use tags / labels provided by 'docker/metadata-action' above
+          tags: ${{ steps.meta_build_solr.outputs.tags }}
+          labels: ${{ steps.meta_build_solr.outputs.labels }}

View File

@@ -16,7 +16,7 @@ jobs:
       # Only add to project board if issue is flagged as "needs triage" or has no labels
       # NOTE: By default we flag new issues as "needs triage" in our issue template
        if: (contains(github.event.issue.labels.*.name, 'needs triage') || join(github.event.issue.labels.*.name) == '')
-       uses: actions/add-to-project@v0.3.0
+       uses: actions/add-to-project@v0.5.0
        # Note, the authentication token below is an ORG level Secret.
        # It must be created/recreated manually via a personal access token with admin:org, project, public_repo permissions
        # See: https://docs.github.com/en/actions/configuring-and-managing-workflows/authenticating-with-the-github_token#permissions-for-the-github_token

View File

@@ -23,7 +23,7 @@ jobs:
     steps:
       # See: https://github.com/prince-chrismc/label-merge-conflicts-action
       - name: Auto-label PRs with merge conflicts
-        uses: prince-chrismc/label-merge-conflicts-action@v2
+        uses: prince-chrismc/label-merge-conflicts-action@v3
         # Add "merge conflict" label if a merge conflict is detected. Remove it when resolved.
         # Note, the authentication token is created automatically
         # See: https://docs.github.com/en/actions/configuring-and-managing-workflows/authenticating-with-the-github_token

View File

@@ -81,8 +81,12 @@ services:
   # DSpace Solr container
   dspacesolr:
     container_name: dspacesolr
-    # Uses official Solr image at https://hub.docker.com/_/solr/
-    image: solr:8.11-slim
+    image: "${DOCKER_OWNER:-dspace}/dspace-solr:${DSPACE_VER:-dspace-7_x}"
+    build:
+      context: .
+      dockerfile: ./dspace/src/main/docker/dspace-solr/Dockerfile
+      args:
+        SOLR_VERSION: "${SOLR_VER:-8.11}"
     networks:
       dspacenet:
     ports:
@@ -92,30 +96,25 @@
     tty: true
     working_dir: /var/solr/data
     volumes:
-      # Mount our local Solr core configs so that they are available as Solr configsets on container
-      - ./dspace/solr/authority:/opt/solr/server/solr/configsets/authority
-      - ./dspace/solr/oai:/opt/solr/server/solr/configsets/oai
-      - ./dspace/solr/search:/opt/solr/server/solr/configsets/search
-      - ./dspace/solr/statistics:/opt/solr/server/solr/configsets/statistics
       # Keep Solr data directory between reboots
       - solr_data:/var/solr/data
-    # Initialize all DSpace Solr cores using the mounted local configsets (see above), then start Solr
+    # Initialize all DSpace Solr cores then start Solr:
     # * First, run precreate-core to create the core (if it doesn't yet exist). If exists already, this is a no-op
-    # * Second, copy updated configs from mounted configsets to this core. If it already existed, this updates core
-    # to the latest configs. If it's a newly created core, this is a no-op.
+    # * Second, copy configsets to this core:
+    #   Updates to Solr configs require the container to be rebuilt/restarted: `docker compose -p d7 up -d --build dspacesolr`
     entrypoint:
     - /bin/bash
     - '-c'
     - |
       init-var-solr
       precreate-core authority /opt/solr/server/solr/configsets/authority
-      cp -r -u /opt/solr/server/solr/configsets/authority/* authority
+      cp -r /opt/solr/server/solr/configsets/authority/* authority
       precreate-core oai /opt/solr/server/solr/configsets/oai
-      cp -r -u /opt/solr/server/solr/configsets/oai/* oai
+      cp -r /opt/solr/server/solr/configsets/oai/* oai
      precreate-core search /opt/solr/server/solr/configsets/search
-      cp -r -u /opt/solr/server/solr/configsets/search/* search
+      cp -r /opt/solr/server/solr/configsets/search/* search
       precreate-core statistics /opt/solr/server/solr/configsets/statistics
-      cp -r -u /opt/solr/server/solr/configsets/statistics/* statistics
+      cp -r /opt/solr/server/solr/configsets/statistics/* statistics
       exec solr -f
 volumes:
   assetstore:

View File

@@ -776,7 +776,7 @@
         <dependency>
             <groupId>org.json</groupId>
             <artifactId>json</artifactId>
-            <version>20180130</version>
+            <version>20230227</version>
         </dependency>
         <!-- Useful for testing command-line tools -->

View File

@@ -332,8 +332,8 @@ public class BitstreamServiceImpl extends DSpaceObjectServiceImpl<Bitstream> imp
     }

     @Override
-    public List<Bitstream> findDeletedBitstreams(Context context) throws SQLException {
-        return bitstreamDAO.findDeletedBitstreams(context);
+    public List<Bitstream> findDeletedBitstreams(Context context, int limit, int offset) throws SQLException {
+        return bitstreamDAO.findDeletedBitstreams(context, limit, offset);
     }

     @Override

View File

@@ -29,7 +29,7 @@ public interface BitstreamDAO extends DSpaceObjectLegacySupportDAO<Bitstream> {
     public Iterator<Bitstream> findAll(Context context, int limit, int offset) throws SQLException;

-    public List<Bitstream> findDeletedBitstreams(Context context) throws SQLException;
+    public List<Bitstream> findDeletedBitstreams(Context context, int limit, int offset) throws SQLException;

     public List<Bitstream> findDuplicateInternalIdentifier(Context context, Bitstream bitstream) throws SQLException;

View File

@@ -41,13 +41,14 @@ public class BitstreamDAOImpl extends AbstractHibernateDSODAO<Bitstream> impleme
     }

     @Override
-    public List<Bitstream> findDeletedBitstreams(Context context) throws SQLException {
+    public List<Bitstream> findDeletedBitstreams(Context context, int limit, int offset) throws SQLException {
         CriteriaBuilder criteriaBuilder = getCriteriaBuilder(context);
         CriteriaQuery criteriaQuery = getCriteriaQuery(criteriaBuilder, Bitstream.class);
         Root<Bitstream> bitstreamRoot = criteriaQuery.from(Bitstream.class);
         criteriaQuery.select(bitstreamRoot);
+        criteriaQuery.orderBy(criteriaBuilder.desc(bitstreamRoot.get(Bitstream_.ID)));
         criteriaQuery.where(criteriaBuilder.equal(bitstreamRoot.get(Bitstream_.deleted), true));
-        return list(context, criteriaQuery, false, Bitstream.class, -1, -1);
+        return list(context, criteriaQuery, false, Bitstream.class, limit, offset);
     }
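Taken together, the DAO, interface, and implementation changes above turn `findDeletedBitstreams` into a paged query (now ordered by ID descending). A minimal caller-side sketch of how the new signature can be consumed — illustrative only, assuming a `bitstreamService` and `context` as in other DSpace service code:

```java
// Hypothetical usage sketch (not part of this commit): page through deleted
// bitstreams in bounded chunks instead of loading the whole list at once.
int limit = 100;
int offset = 0;
List<Bitstream> page;
do {
    page = bitstreamService.findDeletedBitstreams(context, limit, offset);
    for (Bitstream bitstream : page) {
        // ... process a single bitstream; memory stays bounded by 'limit'
    }
    offset += limit;
} while (!page.isEmpty());
```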

View File

@@ -183,7 +183,7 @@ public interface BitstreamService extends DSpaceObjectService<Bitstream>, DSpace
      * @return a list of all bitstreams that have been "deleted"
      * @throws SQLException if database error
      */
-    public List<Bitstream> findDeletedBitstreams(Context context) throws SQLException;
+    public List<Bitstream> findDeletedBitstreams(Context context, int limit, int offset) throws SQLException;

     /**

View File

@@ -141,7 +141,6 @@ public class DOIConsumer implements Consumer {
                     + item.getID() + " and DOI " + doi + ".", ex);
             }
         }
-        ctx.commit();
     }
 }

View File

@@ -17,6 +17,7 @@ import java.util.Map;
 import java.util.UUID;
 import javax.annotation.Nullable;

+import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.collections4.MapUtils;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -216,24 +217,61 @@ public class BitstreamStorageServiceImpl implements BitstreamStorageService, Ini
     @Override
     public void cleanup(boolean deleteDbRecords, boolean verbose) throws SQLException, IOException, AuthorizeException {
         Context context = new Context(Context.Mode.BATCH_EDIT);
-        int commitCounter = 0;
+
+        int offset = 0;
+        int limit = 100;
+
+        int cleanedBitstreamCount = 0;
+
+        int deletedBitstreamCount = bitstreamService.countDeletedBitstreams(context);
+        System.out.println("Found " + deletedBitstreamCount + " deleted bistream to cleanup");

         try {
             context.turnOffAuthorisationSystem();

-            List<Bitstream> storage = bitstreamService.findDeletedBitstreams(context);
-            for (Bitstream bitstream : storage) {
-                UUID bid = bitstream.getID();
-                List<String> wantedMetadata = List.of("size_bytes", "modified");
-                Map<String, Object> receivedMetadata = this.getStore(bitstream.getStoreNumber())
-                                                           .about(bitstream, wantedMetadata);
-
-                // Make sure entries which do not exist are removed
-                if (MapUtils.isEmpty(receivedMetadata)) {
-                    log.debug("bitstore.about is empty, so file is not present");
-                    if (deleteDbRecords) {
-                        log.debug("deleting record");
-                        if (verbose) {
-                            System.out.println(" - Deleting bitstream information (ID: " + bid + ")");
-                        }
+            while (cleanedBitstreamCount < deletedBitstreamCount) {
+
+                List<Bitstream> storage = bitstreamService.findDeletedBitstreams(context, limit, offset);
+
+                if (CollectionUtils.isEmpty(storage)) {
+                    break;
+                }
+
+                for (Bitstream bitstream : storage) {
+                    UUID bid = bitstream.getID();
+                    List<String> wantedMetadata = List.of("size_bytes", "modified");
+                    Map<String, Object> receivedMetadata = this.getStore(bitstream.getStoreNumber())
+                                                               .about(bitstream, wantedMetadata);
+
+                    // Make sure entries which do not exist are removed
+                    if (MapUtils.isEmpty(receivedMetadata)) {
+                        log.debug("bitstore.about is empty, so file is not present");
+                        if (deleteDbRecords) {
+                            log.debug("deleting record");
+                            if (verbose) {
+                                System.out.println(" - Deleting bitstream information (ID: " + bid + ")");
+                            }
@@ -243,64 +281,42 @@ public class BitstreamStorageServiceImpl implements BitstreamStorageService, Ini
-                        checksumHistoryService.deleteByBitstream(context, bitstream);
-                        if (verbose) {
-                            System.out.println(" - Deleting bitstream record from database (ID: " + bid + ")");
-                        }
-                        bitstreamService.expunge(context, bitstream);
-                    }
-                    context.uncacheEntity(bitstream);
-                    continue;
-                }
-
-                // This is a small chance that this is a file which is
-                // being stored -- get it next time.
-                if (isRecent(Long.valueOf(receivedMetadata.get("modified").toString()))) {
-                    log.debug("file is recent");
-                    context.uncacheEntity(bitstream);
-                    continue;
-                }
-
-                if (deleteDbRecords) {
-                    log.debug("deleting db record");
-                    if (verbose) {
-                        System.out.println(" - Deleting bitstream information (ID: " + bid + ")");
-                    }
-                    checksumHistoryService.deleteByBitstream(context, bitstream);
-                    if (verbose) {
-                        System.out.println(" - Deleting bitstream record from database (ID: " + bid + ")");
-                    }
-                    bitstreamService.expunge(context, bitstream);
-                }
-
-                if (isRegisteredBitstream(bitstream.getInternalId())) {
-                    context.uncacheEntity(bitstream);
-                    continue; // do not delete registered bitstreams
-                }
-
-                // Since versioning allows for multiple bitstreams, check if the internal identifier isn't used on
-                // another place
-                if (bitstreamService.findDuplicateInternalIdentifier(context, bitstream).isEmpty()) {
-                    this.getStore(bitstream.getStoreNumber()).remove(bitstream);
-
-                    String message = ("Deleted bitstreamID " + bid + ", internalID " + bitstream.getInternalId());
-                    if (log.isDebugEnabled()) {
-                        log.debug(message);
-                    }
-                    if (verbose) {
-                        System.out.println(message);
-                    }
-                }
-
-                // Make sure to commit our outstanding work every 100
-                // iterations. Otherwise you risk losing the entire transaction
-                // if we hit an exception, which isn't useful at all for large
-                // amounts of bitstreams.
-                commitCounter++;
-                if (commitCounter % 100 == 0) {
-                    context.dispatchEvents();
-                    // Commit actual changes to DB after dispatch events
-                    System.out.print("Performing incremental commit to the database...");
-                    context.commit();
-                    System.out.println(" Incremental commit done!");
-                }
-
-                context.uncacheEntity(bitstream);
-            }
+                            checksumHistoryService.deleteByBitstream(context, bitstream);
+                            if (verbose) {
+                                System.out.println(" - Deleting bitstream record from database (ID: " + bid + ")");
+                            }
+                            bitstreamService.expunge(context, bitstream);
+                        }
+                        context.uncacheEntity(bitstream);
+                        continue;
+                    }
+
+                    // This is a small chance that this is a file which is
+                    // being stored -- get it next time.
+                    if (isRecent(Long.valueOf(receivedMetadata.get("modified").toString()))) {
+                        log.debug("file is recent");
+                        context.uncacheEntity(bitstream);
+                        continue;
+                    }
+
+                    if (deleteDbRecords) {
+                        log.debug("deleting db record");
+                        if (verbose) {
+                            System.out.println(" - Deleting bitstream information (ID: " + bid + ")");
+                        }
+                        checksumHistoryService.deleteByBitstream(context, bitstream);
+                        if (verbose) {
+                            System.out.println(" - Deleting bitstream record from database (ID: " + bid + ")");
+                        }
+                        bitstreamService.expunge(context, bitstream);
+                    }
+
+                    if (isRegisteredBitstream(bitstream.getInternalId())) {
+                        context.uncacheEntity(bitstream);
+                        continue; // do not delete registered bitstreams
+                    }
+
+                    // Since versioning allows for multiple bitstreams, check if the internal
+                    // identifier isn't used on
+                    // another place
+                    if (bitstreamService.findDuplicateInternalIdentifier(context, bitstream).isEmpty()) {
+                        this.getStore(bitstream.getStoreNumber()).remove(bitstream);
+
+                        String message = ("Deleted bitstreamID " + bid + ", internalID " + bitstream.getInternalId());
+                        if (log.isDebugEnabled()) {
+                            log.debug(message);
+                        }
+                        if (verbose) {
+                            System.out.println(message);
+                        }
+                    }
+
+                    context.uncacheEntity(bitstream);
+                }
+
+                // Commit actual changes to DB after dispatch events
+                System.out.print("Performing incremental commit to the database...");
+                context.commit();
+                System.out.println(" Incremental commit done!");
+
+                cleanedBitstreamCount = cleanedBitstreamCount + storage.size();
+
+                if (!deleteDbRecords) {
+                    offset = offset + limit;
+                }
+            }

             System.out.print("Committing changes to the database...");

View File

@@ -0,0 +1,17 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-----------------------------------------------------------------------------------
-- Update short description for PNG mimetype in the bitstream format registry
-- See: https://github.com/DSpace/DSpace/pull/8722
-----------------------------------------------------------------------------------
UPDATE bitstreamformatregistry
SET short_description='PNG'
WHERE short_description='image/png'
AND mimetype='image/png';

View File

@@ -0,0 +1,17 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-----------------------------------------------------------------------------------
-- Update short description for PNG mimetype in the bitstream format registry
-- See: https://github.com/DSpace/DSpace/pull/8722
-----------------------------------------------------------------------------------
UPDATE bitstreamformatregistry
SET short_description='PNG'
WHERE short_description='image/png'
AND mimetype='image/png';

View File

@@ -0,0 +1,17 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-----------------------------------------------------------------------------------
-- Update short description for PNG mimetype in the bitstream format registry
-- See: https://github.com/DSpace/DSpace/pull/8722
-----------------------------------------------------------------------------------
UPDATE bitstreamformatregistry
SET short_description='PNG'
WHERE short_description='image/png'
AND mimetype='image/png';

View File

@@ -39,6 +39,7 @@ import org.springframework.security.access.prepost.PreAuthorize;
 import org.springframework.web.bind.annotation.PathVariable;
 import org.springframework.web.bind.annotation.RequestMapping;
 import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RequestParam;
 import org.springframework.web.bind.annotation.RestController;

 /**
@@ -69,6 +70,8 @@ public class ItemOwningCollectionUpdateRestController {
      * moving the item to the new collection.
      *
      * @param uuid        The UUID of the item that will be moved
+     * @param inheritCollectionPolicies Boolean flag whether to inherit the target collection policies when
+     *                    moving the item
      * @param response    The response object
      * @param request     The request object
      * @return The wrapped resource containing the new owning collection or null when the item was not moved
@@ -79,7 +82,10 @@ public class ItemOwningCollectionUpdateRestController {
     @RequestMapping(method = RequestMethod.PUT, consumes = {"text/uri-list"})
     @PreAuthorize("hasPermission(#uuid, 'ITEM','WRITE')")
     @PostAuthorize("returnObject != null")
-    public CollectionRest move(@PathVariable UUID uuid, HttpServletResponse response,
+    public CollectionRest move(@PathVariable UUID uuid,
+                               @RequestParam(name = "inheritPolicies", defaultValue = "false")
+                               Boolean inheritCollectionPolicies,
+                               HttpServletResponse response,
                                HttpServletRequest request)
         throws SQLException, IOException, AuthorizeException {
         Context context = ContextUtil.obtainContext(request);
@@ -91,7 +97,8 @@ public class ItemOwningCollectionUpdateRestController {
"or the data cannot be resolved to a collection."); "or the data cannot be resolved to a collection.");
} }
Collection targetCollection = performItemMove(context, uuid, (Collection) dsoList.get(0)); Collection targetCollection = performItemMove(context, uuid, (Collection) dsoList.get(0),
inheritCollectionPolicies);
if (targetCollection == null) { if (targetCollection == null) {
return null; return null;
@@ -107,17 +114,19 @@ public class ItemOwningCollectionUpdateRestController {
      * @param item              The item to be moved
      * @param currentCollection The current owning collection of the item
      * @param targetCollection  The target collection of the item
+     * @param inheritPolicies   Boolean flag whether to inherit the target collection policies when moving the item
      * @return The target collection
      * @throws SQLException       If something goes wrong
      * @throws IOException        If something goes wrong
      * @throws AuthorizeException If the user is not authorized to perform the move action
      */
     private Collection moveItem(final Context context, final Item item, final Collection currentCollection,
-                                final Collection targetCollection)
+                                final Collection targetCollection,
+                                final boolean inheritPolicies)
         throws SQLException, IOException, AuthorizeException {
-        itemService.move(context, item, currentCollection, targetCollection);
-        //Necessary because Controller does not pass through general RestResourceController, and as such does not do its
-        // commit in DSpaceRestRepository.createAndReturn() or similar
+        itemService.move(context, item, currentCollection, targetCollection, inheritPolicies);
+        // Necessary because Controller does not pass through general RestResourceController, and as such does not do
+        // its commit in DSpaceRestRepository.createAndReturn() or similar
         context.commit();

         return context.reloadEntity(targetCollection);
@@ -129,12 +138,14 @@ public class ItemOwningCollectionUpdateRestController {
      * @param context          The context Object
      * @param itemUuid         The uuid of the item to be moved
      * @param targetCollection The target collection
+     * @param inheritPolicies  Whether to inherit the target collection policies when moving the item
      * @return The new owning collection of the item when authorized or null when not authorized
      * @throws SQLException       If something goes wrong
      * @throws IOException        If something goes wrong
      * @throws AuthorizeException If the user is not authorized to perform the move action
      */
-    private Collection performItemMove(final Context context, final UUID itemUuid, final Collection targetCollection)
+    private Collection performItemMove(final Context context, final UUID itemUuid, final Collection targetCollection,
+                                       boolean inheritPolicies)
         throws SQLException, IOException, AuthorizeException {

         Item item = itemService.find(context, itemUuid);
@@ -153,7 +164,7 @@ public class ItemOwningCollectionUpdateRestController {
         if (authorizeService.authorizeActionBoolean(context, currentCollection, Constants.ADMIN)) {
-            return moveItem(context, item, currentCollection, targetCollection);
+            return moveItem(context, item, currentCollection, targetCollection, inheritPolicies);
         }

         return null;
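For reference, a hypothetical client call against the updated endpoint — the `inheritPolicies` parameter name and its `false` default come from the diff; the URL shape and `text/uri-list` body follow DSpace's existing owning-collection contract, and the host, token, and UUID variables are placeholders:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Hypothetical sketch: move an item and inherit the target collection's policies.
HttpClient client = HttpClient.newHttpClient();
HttpRequest put = HttpRequest.newBuilder()
        .uri(URI.create("https://demo.dspace.org/server/api/core/items/"
                + itemUuid + "/owningCollection?inheritPolicies=true"))
        .header("Content-Type", "text/uri-list")
        .header("Authorization", "Bearer " + jwt) // assumed auth token
        .PUT(HttpRequest.BodyPublishers.ofString(
                "https://demo.dspace.org/server/api/core/collections/" + targetCollectionUuid))
        .build();
HttpResponse<String> response = client.send(put, HttpResponse.BodyHandlers.ofString());
```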

View File

@@ -42,6 +42,7 @@ import org.springframework.web.bind.annotation.ControllerAdvice;
 import org.springframework.web.bind.annotation.ExceptionHandler;
 import org.springframework.web.bind.annotation.ResponseStatus;
 import org.springframework.web.context.request.WebRequest;
+import org.springframework.web.multipart.MaxUploadSizeExceededException;
 import org.springframework.web.multipart.MultipartException;
 import org.springframework.web.servlet.mvc.method.annotation.ResponseEntityExceptionHandler;
@@ -97,6 +98,13 @@ public class DSpaceApiExceptionControllerAdvice extends ResponseEntityExceptionH
         sendErrorResponse(request, response, ex, "Request is invalid or incorrect", HttpServletResponse.SC_BAD_REQUEST);
     }

+    @ExceptionHandler(MaxUploadSizeExceededException.class)
+    protected void handleMaxUploadSizeExceededException(HttpServletRequest request, HttpServletResponse response,
+                                                        Exception ex) throws IOException {
+        sendErrorResponse(request, response, ex, "Request entity is too large",
+                          HttpServletResponse.SC_REQUEST_ENTITY_TOO_LARGE);
+    }
+
     @ExceptionHandler(SQLException.class)
     protected void handleSQLException(HttpServletRequest request, HttpServletResponse response, Exception ex)
         throws IOException {
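Spring raises `MaxUploadSizeExceededException` when a multipart request exceeds the configured size limits; the new handler maps it to HTTP 413 rather than a generic 500. A hedged sketch of the kind of Spring Boot multipart limit this handler pairs with — the class name and values are illustrative, not part of this diff:

```java
import javax.servlet.MultipartConfigElement;
import org.springframework.boot.web.servlet.MultipartConfigFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.util.unit.DataSize;

@Configuration
public class UploadLimitConfig {
    // Uploads beyond these caps trigger MaxUploadSizeExceededException,
    // which the handler above converts to 413 Request Entity Too Large.
    @Bean
    MultipartConfigElement multipartConfigElement() {
        MultipartConfigFactory factory = new MultipartConfigFactory();
        factory.setMaxFileSize(DataSize.ofMegabytes(512));
        factory.setMaxRequestSize(DataSize.ofMegabytes(512));
        return factory.createMultipartConfig();
    }
}
```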

View File

@@ -56,7 +56,7 @@ public class BitstreamFormatRestRepositoryIT extends AbstractControllerIntegrati
     @Autowired
     private BitstreamFormatConverter bitstreamFormatConverter;

-    private final int DEFAULT_AMOUNT_FORMATS = 82;
+    private final int DEFAULT_AMOUNT_FORMATS = 85;

     @Test
     public void findAllPaginationTest() throws Exception {

View File

@@ -492,9 +492,9 @@ filter.org.dspace.app.mediafilter.TikaTextExtractionFilter.inputFormats = OpenDo
 filter.org.dspace.app.mediafilter.TikaTextExtractionFilter.inputFormats = OpenDocument Text
 filter.org.dspace.app.mediafilter.TikaTextExtractionFilter.inputFormats = RTF
 filter.org.dspace.app.mediafilter.TikaTextExtractionFilter.inputFormats = Text
-filter.org.dspace.app.mediafilter.JPEGFilter.inputFormats = BMP, GIF, JPEG, image/png
-filter.org.dspace.app.mediafilter.BrandedPreviewJPEGFilter.inputFormats = BMP, GIF, JPEG, image/png
-filter.org.dspace.app.mediafilter.ImageMagickImageThumbnailFilter.inputFormats = BMP, GIF, image/png, JPG, TIFF, JPEG, JPEG 2000
+filter.org.dspace.app.mediafilter.JPEGFilter.inputFormats = BMP, GIF, JPEG, PNG
+filter.org.dspace.app.mediafilter.BrandedPreviewJPEGFilter.inputFormats = BMP, GIF, JPEG, PNG
+filter.org.dspace.app.mediafilter.ImageMagickImageThumbnailFilter.inputFormats = BMP, GIF, PNG, JPG, TIFF, JPEG, JPEG 2000
 filter.org.dspace.app.mediafilter.ImageMagickPdfThumbnailFilter.inputFormats = Adobe PDF
 filter.org.dspace.app.mediafilter.PDFBoxThumbnail.inputFormats = Adobe PDF
@@ -843,7 +843,7 @@ plugin.single.org.dspace.embargo.EmbargoSetter = org.dspace.embargo.DefaultEmbar
 plugin.single.org.dspace.embargo.EmbargoLifter = org.dspace.embargo.DefaultEmbargoLifter

 # values for the forever embargo date threshold
-# This threshold date is used in the default access status helper to dermine if an item is
+# This threshold date is used in the default access status helper to determine if an item is
 # restricted or embargoed based on the start date of the primary (or first) file policies.
 # In this case, if the policy start date is inferior to the threshold date, the status will
 # be embargo, else it will be restricted.
@@ -880,7 +880,7 @@ org.dspace.app.itemexport.life.span.hours = 48
 # The maximum size in Megabytes the export should be. This is enforced before the
 # compression. Each bitstream's size in each item being exported is added up, if their
-# cummulative sizes are more than this entry the export is not kicked off
+# cumulative sizes are more than this entry the export is not kicked off
 org.dspace.app.itemexport.max.size = 200

 ### Batch Item import settings ###

View File

@@ -115,6 +115,15 @@
     <extension>csv</extension>
   </bitstream-type>

+  <bitstream-type>
+    <mimetype>text/vtt</mimetype>
+    <short_description>WebVTT</short_description>
+    <description>Web Video Text Tracks Format</description>
+    <support_level>1</support_level>
+    <internal>false</internal>
+    <extension>vtt</extension>
+  </bitstream-type>
+
   <bitstream-type>
     <mimetype>application/msword</mimetype>
     <short_description>Microsoft Word</short_description>
@@ -201,7 +210,7 @@
   <bitstream-type>
     <mimetype>image/png</mimetype>
-    <short_description>image/png</short_description>
+    <short_description>PNG</short_description>
     <description>Portable Network Graphics</description>
     <support_level>1</support_level>
     <internal>false</internal>
@@ -800,4 +809,22 @@
     <extension>mp3</extension>
   </bitstream-type>

+  <bitstream-type>
+    <mimetype>image/webp</mimetype>
+    <short_description>WebP</short_description>
+    <description>WebP is a modern image format that provides superior lossless and lossy compression for images on the web.</description>
+    <support_level>1</support_level>
+    <internal>false</internal>
+    <extension>webp</extension>
+  </bitstream-type>
+
+  <bitstream-type>
+    <mimetype>image/avif</mimetype>
+    <short_description>AVIF</short_description>
+    <description>AV1 Image File Format (AVIF) is an open, royalty-free image file format specification for storing images or image sequences compressed with AV1 in the HEIF container format.</description>
+    <support_level>1</support_level>
+    <internal>false</internal>
+    <extension>avif</extension>
+  </bitstream-type>
+
 </dspace-bitstream-types>

View File

@@ -130,6 +130,23 @@ docker run -i -t -d -p 80:80 -p 443:443 dspace/dspace-shibboleth
 This image can also be rebuilt using the `../docker-compose/docker-compose-shibboleth.yml` script.

+## dspace/src/main/docker/dspace-solr/Dockerfile
+
+This Dockerfile builds a Solr image with DSpace Solr configsets included. It
+can be pulled / built following the [docker compose resources](../docker-compose/README.md)
+documentation. Or, to just build and/or run Solr:
+
+```bash
+docker-compose build dspacesolr
+docker-compose -p d7 up -d dspacesolr
+```
+
+If you're making iterative changes to the DSpace Solr configsets you'll need to rebuild /
+restart the `dspacesolr` container for the changes to be deployed. From DSpace root:
+
+```bash
+docker-compose -p d7 up --detach --build dspacesolr
+```
+
 ## test/ folder

View File

@@ -0,0 +1,36 @@
#
# The contents of this file are subject to the license and copyright
# detailed in the LICENSE and NOTICE files at the root of the source
# tree and available online at
#
# http://www.dspace.org/license/
#
# To build use root as context for (easier) access to solr cfgs
# docker build --build-arg SOLR_VERSION=8.11 -f ./dspace/src/main/docker/dspace-solr/Dockerfile .
# This will be published as dspace/dspace-solr:$DSPACE_VERSION
ARG SOLR_VERSION=8.11
FROM solr:${SOLR_VERSION}-slim
ENV AUTHORITY_CONFIGSET_PATH=/opt/solr/server/solr/configsets/authority/conf \
OAI_CONFIGSET_PATH=/opt/solr/server/solr/configsets/oai/conf \
SEARCH_CONFIGSET_PATH=/opt/solr/server/solr/configsets/search/conf \
STATISTICS_CONFIGSET_PATH=/opt/solr/server/solr/configsets/statistics/conf
USER root
RUN mkdir -p $AUTHORITY_CONFIGSET_PATH && \
mkdir -p $OAI_CONFIGSET_PATH && \
mkdir -p $SEARCH_CONFIGSET_PATH && \
mkdir -p $STATISTICS_CONFIGSET_PATH
COPY dspace/solr/authority/conf/* $AUTHORITY_CONFIGSET_PATH/
COPY dspace/solr/oai/conf/* $OAI_CONFIGSET_PATH/
COPY dspace/solr/search/conf/* $SEARCH_CONFIGSET_PATH/
COPY dspace/solr/statistics/conf/* $STATISTICS_CONFIGSET_PATH/
RUN chown -R solr:solr /opt/solr/server/solr/configsets
USER solr