Merge branch 'main' into 1787-coll-comm-item-counts

# Conflicts:
#	dspace-api/src/main/java/org/dspace/content/CollectionServiceImpl.java
#	dspace-api/src/main/java/org/dspace/content/service/CollectionService.java
damian committed 2023-06-09 12:18:38 +02:00
270 changed files with 6936 additions and 7367 deletions

File: .codecov.yml

@@ -4,13 +4,6 @@
 # Can be validated via instructions at:
 # https://docs.codecov.io/docs/codecov-yaml#validate-your-repository-yaml
-# Tell Codecov not to send a coverage notification until (at least) 2 builds are completed
-# Since we run Unit & Integration tests in parallel, this lets Codecov know that coverage
-# needs to be merged across those builds
-codecov:
-  notify:
-    after_n_builds: 2
 # Settings related to code coverage analysis
 coverage:
   status:

File: org/dspace/administer/ProcessCleanerConfiguration.java

@@ -7,33 +7,16 @@
  */
 package org.dspace.administer;

-import java.sql.SQLException;
-
 import org.apache.commons.cli.Options;
-import org.dspace.authorize.service.AuthorizeService;
-import org.dspace.core.Context;
 import org.dspace.scripts.configuration.ScriptConfiguration;
-import org.springframework.beans.factory.annotation.Autowired;

 /**
  * The {@link ScriptConfiguration} for the {@link ProcessCleaner} script.
  */
 public class ProcessCleanerConfiguration<T extends ProcessCleaner> extends ScriptConfiguration<T> {

-    @Autowired
-    private AuthorizeService authorizeService;
-
     private Class<T> dspaceRunnableClass;

-    @Override
-    public boolean isAllowedToExecute(Context context) {
-        try {
-            return authorizeService.isAdmin(context);
-        } catch (SQLException e) {
-            throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
-        }
-    }
-
     @Override
     public Options getOptions() {
         if (options == null) {
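The removal above is the first instance of a pattern repeated across every script configuration in this commit: the per-script isAllowedToExecute override and its AuthorizeService wiring are deleted. The check is presumably consolidated into the shared ScriptConfiguration base class rather than dropped; a minimal sketch of such a consolidation, reusing the exact body removed here (the class name and placement are assumptions, since the base class itself is not shown in this diff):

    import java.sql.SQLException;

    import org.dspace.authorize.service.AuthorizeService;
    import org.dspace.core.Context;
    import org.springframework.beans.factory.annotation.Autowired;

    // Hypothetical shared base: one admin check instead of a copy in every subclass.
    public abstract class BaseScriptConfigurationSketch {

        @Autowired
        protected AuthorizeService authorizeService;

        public boolean isAllowedToExecute(Context context) {
            try {
                return authorizeService.isAdmin(context);
            } catch (SQLException e) {
                throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
            }
        }
    }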

File: org/dspace/app/bulkedit/MetadataDeletionScriptConfiguration.java

@@ -7,33 +7,16 @@
  */
 package org.dspace.app.bulkedit;

-import java.sql.SQLException;
-
 import org.apache.commons.cli.Options;
-import org.dspace.authorize.service.AuthorizeService;
-import org.dspace.core.Context;
 import org.dspace.scripts.configuration.ScriptConfiguration;
-import org.springframework.beans.factory.annotation.Autowired;

 /**
  * The {@link ScriptConfiguration} for the {@link MetadataDeletion} script.
  */
 public class MetadataDeletionScriptConfiguration<T extends MetadataDeletion> extends ScriptConfiguration<T> {

-    @Autowired
-    private AuthorizeService authorizeService;
-
     private Class<T> dspaceRunnableClass;

-    @Override
-    public boolean isAllowedToExecute(Context context) {
-        try {
-            return authorizeService.isAdmin(context);
-        } catch (SQLException e) {
-            throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
-        }
-    }
-
     @Override
     public Options getOptions() {
         if (options == null) {

File: org/dspace/app/bulkedit/MetadataExportScriptConfiguration.java

@@ -7,22 +7,14 @@
  */
 package org.dspace.app.bulkedit;

-import java.sql.SQLException;
-
 import org.apache.commons.cli.Options;
-import org.dspace.authorize.service.AuthorizeService;
-import org.dspace.core.Context;
 import org.dspace.scripts.configuration.ScriptConfiguration;
-import org.springframework.beans.factory.annotation.Autowired;

 /**
  * The {@link ScriptConfiguration} for the {@link MetadataExport} script
  */
 public class MetadataExportScriptConfiguration<T extends MetadataExport> extends ScriptConfiguration<T> {

-    @Autowired
-    private AuthorizeService authorizeService;
-
     private Class<T> dspaceRunnableClass;

     @Override
@@ -39,15 +31,6 @@ public class MetadataExportScriptConfiguration<T extends MetadataExport> extends
         this.dspaceRunnableClass = dspaceRunnableClass;
     }

-    @Override
-    public boolean isAllowedToExecute(Context context) {
-        try {
-            return authorizeService.isAdmin(context);
-        } catch (SQLException e) {
-            throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
-        }
-    }
-
     @Override
     public Options getOptions() {
         if (options == null) {

File: org/dspace/app/bulkedit/MetadataExportSearchScriptConfiguration.java

@@ -9,7 +9,6 @@
 package org.dspace.app.bulkedit;

 import org.apache.commons.cli.Options;
-import org.dspace.core.Context;
 import org.dspace.scripts.configuration.ScriptConfiguration;

 /**
@@ -29,11 +28,6 @@ public class MetadataExportSearchScriptConfiguration<T extends MetadataExportSea
         this.dspaceRunnableclass = dspaceRunnableClass;
     }

-    @Override
-    public boolean isAllowedToExecute(Context context) {
-        return true;
-    }
-
     @Override
     public Options getOptions() {
         if (options == null) {

File: org/dspace/app/bulkedit/MetadataImportScriptConfiguration.java

@@ -8,22 +8,15 @@
 package org.dspace.app.bulkedit;

 import java.io.InputStream;
-import java.sql.SQLException;

 import org.apache.commons.cli.Options;
-import org.dspace.authorize.service.AuthorizeService;
-import org.dspace.core.Context;
 import org.dspace.scripts.configuration.ScriptConfiguration;
-import org.springframework.beans.factory.annotation.Autowired;

 /**
  * The {@link ScriptConfiguration} for the {@link MetadataImport} script
  */
 public class MetadataImportScriptConfiguration<T extends MetadataImport> extends ScriptConfiguration<T> {

-    @Autowired
-    private AuthorizeService authorizeService;
-
     private Class<T> dspaceRunnableClass;

     @Override
@@ -40,15 +33,6 @@ public class MetadataImportScriptConfiguration<T extends MetadataImport> extends
         this.dspaceRunnableClass = dspaceRunnableClass;
     }

-    @Override
-    public boolean isAllowedToExecute(Context context) {
-        try {
-            return authorizeService.isAdmin(context);
-        } catch (SQLException e) {
-            throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
-        }
-    }
-
     @Override
     public Options getOptions() {
         if (options == null) {

File: org/dspace/app/harvest/HarvestScriptConfiguration.java

@@ -7,18 +7,11 @@
  */
 package org.dspace.app.harvest;

-import java.sql.SQLException;
-
 import org.apache.commons.cli.Options;
-import org.dspace.authorize.service.AuthorizeService;
-import org.dspace.core.Context;
 import org.dspace.scripts.configuration.ScriptConfiguration;
-import org.springframework.beans.factory.annotation.Autowired;

 public class HarvestScriptConfiguration<T extends Harvest> extends ScriptConfiguration<T> {

-    @Autowired
-    private AuthorizeService authorizeService;
-
     private Class<T> dspaceRunnableClass;

@@ -32,13 +25,6 @@ public class HarvestScriptConfiguration<T extends Harvest> extends ScriptConfigu
         this.dspaceRunnableClass = dspaceRunnableClass;
     }

-    public boolean isAllowedToExecute(final Context context) {
-        try {
-            return authorizeService.isAdmin(context);
-        } catch (SQLException e) {
-            throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
-        }
-    }
-
     public Options getOptions() {
         Options options = new Options();

File: org/dspace/app/itemexport/ItemExportScriptConfiguration.java

@@ -7,14 +7,9 @@
  */
 package org.dspace.app.itemexport;

-import java.sql.SQLException;
-
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
-import org.dspace.authorize.service.AuthorizeService;
-import org.dspace.core.Context;
 import org.dspace.scripts.configuration.ScriptConfiguration;
-import org.springframework.beans.factory.annotation.Autowired;

 /**
  * The {@link ScriptConfiguration} for the {@link ItemExport} script
@@ -23,9 +18,6 @@ import org.springframework.beans.factory.annotation.Autowired;
  */
 public class ItemExportScriptConfiguration<T extends ItemExport> extends ScriptConfiguration<T> {

-    @Autowired
-    private AuthorizeService authorizeService;
-
     private Class<T> dspaceRunnableClass;

     @Override
@@ -38,15 +30,6 @@ public class ItemExportScriptConfiguration<T extends ItemExport> extends ScriptC
         this.dspaceRunnableClass = dspaceRunnableClass;
     }

-    @Override
-    public boolean isAllowedToExecute(final Context context) {
-        try {
-            return authorizeService.isAdmin(context);
-        } catch (SQLException e) {
-            throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
-        }
-    }
-
     @Override
     public Options getOptions() {
         Options options = new Options();

File: org/dspace/app/itemimport/ItemImport.java

@@ -23,6 +23,7 @@ import java.util.UUID;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.tika.Tika;
 import org.dspace.app.itemimport.factory.ItemImportServiceFactory;
 import org.dspace.app.itemimport.service.ItemImportService;
 import org.dspace.authorize.AuthorizeException;
@@ -77,6 +78,7 @@ public class ItemImport extends DSpaceRunnable<ItemImportScriptConfiguration> {
     protected boolean zip = false;
     protected boolean remoteUrl = false;
     protected String zipfilename = null;
+    protected boolean zipvalid = false;
     protected boolean help = false;
     protected File workDir = null;
     protected File workFile = null;
@@ -235,11 +237,19 @@ public class ItemImport extends DSpaceRunnable<ItemImportScriptConfiguration> {
                 handler.logInfo("***End of Test Run***");
             }
         } finally {
+            // clean work dir
             if (zip) {
+                // if zip file was valid then clean sourcedir
+                if (zipvalid && sourcedir != null && new File(sourcedir).exists()) {
                     FileUtils.deleteDirectory(new File(sourcedir));
+                }
+                // clean workdir
+                if (workDir != null && workDir.exists()) {
                     FileUtils.deleteDirectory(workDir);
-                if (remoteUrl && workFile != null && workFile.exists()) {
+                }
+                // conditionally clean workFile if import was done in the UI or via a URL and it still exists
+                if (workFile != null && workFile.exists()) {
                     workFile.delete();
                 }
             }
@@ -322,14 +332,23 @@ public class ItemImport extends DSpaceRunnable<ItemImportScriptConfiguration> {
      */
     protected void readZip(Context context, ItemImportService itemImportService) throws Exception {
         Optional<InputStream> optionalFileStream = Optional.empty();
+        Optional<InputStream> validationFileStream = Optional.empty();
         if (!remoteUrl) {
             // manage zip via upload
             optionalFileStream = handler.getFileStream(context, zipfilename);
+            validationFileStream = handler.getFileStream(context, zipfilename);
         } else {
             // manage zip via remote url
             optionalFileStream = Optional.ofNullable(new URL(zipfilename).openStream());
+            validationFileStream = Optional.ofNullable(new URL(zipfilename).openStream());
         }
-        if (optionalFileStream.isPresent()) {
+
+        if (validationFileStream.isPresent()) {
+            // validate zip file
+            if (validationFileStream.isPresent()) {
+                validateZip(validationFileStream.get());
+            }
+
             workFile = new File(itemImportService.getTempWorkDir() + File.separator
                     + zipfilename + "-" + context.getCurrentUser().getID());
             FileUtils.copyInputStreamToFile(optionalFileStream.get(), workFile);
@@ -337,10 +356,32 @@ public class ItemImport extends DSpaceRunnable<ItemImportScriptConfiguration> {
             throw new IllegalArgumentException(
                     "Error reading file, the file couldn't be found for filename: " + zipfilename);
         }
-        workDir = new File(itemImportService.getTempWorkDir() + File.separator + TEMP_DIR);
+
+        workDir = new File(itemImportService.getTempWorkDir() + File.separator + TEMP_DIR
+                + File.separator + context.getCurrentUser().getID());
         sourcedir = itemImportService.unzip(workFile, workDir.getAbsolutePath());
     }

+    /**
+     * Confirm that the zip file has the correct MIME type
+     * @param inputStream
+     */
+    protected void validateZip(InputStream inputStream) {
+        Tika tika = new Tika();
+        try {
+            String mimeType = tika.detect(inputStream);
+            if (mimeType.equals("application/zip")) {
+                zipvalid = true;
+            } else {
+                handler.logError("A valid zip file must be supplied. The provided file has mimetype: " + mimeType);
+                throw new UnsupportedOperationException("A valid zip file must be supplied");
+            }
+        } catch (IOException e) {
+            throw new IllegalArgumentException(
+                    "There was an error while reading the zip file: " + zipfilename);
+        }
+    }
+
     /**
      * Read the mapfile
      * @param context
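The new validateZip above hinges on Apache Tika's content-type detection, which sniffs the stream's leading bytes rather than trusting the file extension. A self-contained sketch of that detection step (the file path is illustrative):

    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;

    import org.apache.tika.Tika;

    public class ZipSniff {
        public static void main(String[] args) throws IOException {
            Path p = Path.of("upload.zip"); // illustrative path
            try (InputStream in = Files.newInputStream(p)) {
                // Tika inspects the magic bytes; a renamed .txt would not pass.
                String mimeType = new Tika().detect(in);
                System.out.println(mimeType.equals("application/zip")
                        ? "looks like a real zip"
                        : "rejected, detected " + mimeType);
            }
        }
    }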

File: org/dspace/app/itemimport/ItemImportCLI.java

@@ -8,6 +8,7 @@
 package org.dspace.app.itemimport;

 import java.io.File;
+import java.io.FileInputStream;
 import java.io.InputStream;
 import java.net.URL;
 import java.sql.SQLException;
@@ -101,6 +102,17 @@ public class ItemImportCLI extends ItemImport {
         // If this is a zip archive, unzip it first
         if (zip) {
             if (!remoteUrl) {
+                // confirm zip file exists
+                File myZipFile = new File(sourcedir + File.separator + zipfilename);
+                if ((!myZipFile.exists()) || (!myZipFile.isFile())) {
+                    throw new IllegalArgumentException(
+                            "Error reading file, the file couldn't be found for filename: " + zipfilename);
+                }
+
+                // validate zip file
+                InputStream validationFileStream = new FileInputStream(myZipFile);
+                validateZip(validationFileStream);
+
                 workDir = new File(itemImportService.getTempWorkDir() + File.separator + TEMP_DIR
                         + File.separator + context.getCurrentUser().getID());
                 sourcedir = itemImportService.unzip(
@@ -109,15 +121,22 @@ public class ItemImportCLI extends ItemImport {
                 // manage zip via remote url
                 Optional<InputStream> optionalFileStream = Optional.ofNullable(new URL(zipfilename).openStream());
                 if (optionalFileStream.isPresent()) {
+                    // validate zip file via url
+                    Optional<InputStream> validationFileStream = Optional.ofNullable(new URL(zipfilename).openStream());
+                    if (validationFileStream.isPresent()) {
+                        validateZip(validationFileStream.get());
+                    }
+
                     workFile = new File(itemImportService.getTempWorkDir() + File.separator
                             + zipfilename + "-" + context.getCurrentUser().getID());
                     FileUtils.copyInputStreamToFile(optionalFileStream.get(), workFile);
+                    workDir = new File(itemImportService.getTempWorkDir() + File.separator + TEMP_DIR
+                            + File.separator + context.getCurrentUser().getID());
+                    sourcedir = itemImportService.unzip(workFile, workDir.getAbsolutePath());
                 } else {
                     throw new IllegalArgumentException(
                             "Error reading file, the file couldn't be found for filename: " + zipfilename);
                 }
-                workDir = new File(itemImportService.getTempWorkDir() + File.separator + TEMP_DIR);
-                sourcedir = itemImportService.unzip(workFile, workDir.getAbsolutePath());
             }
         }
     }

File: org/dspace/app/itemimport/ItemImportScriptConfiguration.java

@@ -8,14 +8,10 @@
 package org.dspace.app.itemimport;

 import java.io.InputStream;
-import java.sql.SQLException;

 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
-import org.dspace.authorize.service.AuthorizeService;
-import org.dspace.core.Context;
 import org.dspace.scripts.configuration.ScriptConfiguration;
-import org.springframework.beans.factory.annotation.Autowired;

 /**
  * The {@link ScriptConfiguration} for the {@link ItemImport} script
@@ -24,9 +20,6 @@ import org.springframework.beans.factory.annotation.Autowired;
  */
 public class ItemImportScriptConfiguration<T extends ItemImport> extends ScriptConfiguration<T> {

-    @Autowired
-    private AuthorizeService authorizeService;
-
     private Class<T> dspaceRunnableClass;

     @Override
@@ -39,15 +32,6 @@ public class ItemImportScriptConfiguration<T extends ItemImport> extends ScriptC
         this.dspaceRunnableClass = dspaceRunnableClass;
     }

-    @Override
-    public boolean isAllowedToExecute(final Context context) {
-        try {
-            return authorizeService.isAdmin(context);
-        } catch (SQLException e) {
-            throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
-        }
-    }
-
     @Override
     public Options getOptions() {
         Options options = new Options();

File: org/dspace/app/mediafilter/ImageMagickPdfThumbnailFilter.java

@@ -22,7 +22,9 @@ public class ImageMagickPdfThumbnailFilter extends ImageMagickThumbnailFilter {
         File f2 = null;
         File f3 = null;
         try {
-            f2 = getImageFile(f, 0, verbose);
+            // Step 1: get an image from our PDF file, with PDF-specific processing options
+            f2 = getImageFile(f, verbose);
+            // Step 2: use the image above to create the final resized and rotated thumbnail
             f3 = getThumbnailFile(f2, verbose);
             byte[] bytes = Files.readAllBytes(f3.toPath());
             return new ByteArrayInputStream(bytes);

File: org/dspace/app/mediafilter/ImageMagickThumbnailFilter.java

@@ -116,9 +116,17 @@ public abstract class ImageMagickThumbnailFilter extends MediaFilter {
         return f2;
     }

-    public File getImageFile(File f, int page, boolean verbose)
+    /**
+     * Return an image from a bitstream with specific processing options for
+     * PDFs. This is only used by ImageMagickPdfThumbnailFilter in order to
+     * generate an intermediate image file for use with getThumbnailFile.
+     */
+    public File getImageFile(File f, boolean verbose)
             throws IOException, InterruptedException, IM4JavaException {
-        File f2 = new File(f.getParentFile(), f.getName() + ".jpg");
+        // Writing an intermediate file to disk is inefficient, but since we're
+        // doing it anyway, we should use a lossless format. IM's internal MIFF
+        // is lossless like PNG and TIFF, but much faster.
+        File f2 = new File(f.getParentFile(), f.getName() + ".miff");
         f2.deleteOnExit();
         ConvertCmd cmd = new ConvertCmd();
         IMOperation op = new IMOperation();
@@ -155,7 +163,7 @@ public abstract class ImageMagickThumbnailFilter extends MediaFilter {
             op.define("pdf:use-cropbox=true");
         }

-        String s = "[" + page + "]";
+        String s = "[0]";
         op.addImage(f.getAbsolutePath() + s);
         if (configurationService.getBooleanProperty(PRE + ".flatten", true)) {
             op.flatten();
@@ -208,20 +216,20 @@ public abstract class ImageMagickThumbnailFilter extends MediaFilter {
             if (description != null) {
                 if (replaceRegex.matcher(description).matches()) {
                     if (verbose) {
-                        System.out.format("%s %s matches pattern and is replacable.%n",
-                                description, nsrc);
+                        System.out.format("%s %s matches pattern and is replaceable.%n",
+                                description, n);
                     }
                     continue;
                 }
                 if (description.equals(getDescription())) {
                     if (verbose) {
                         System.out.format("%s %s is replaceable.%n",
-                                getDescription(), nsrc);
+                                getDescription(), n);
                     }
                     continue;
                 }
             }
-            System.out.format("Custom Thumbnail exists for %s for item %s. Thumbnail will not be generated.%n",
+            System.out.format("Custom thumbnail exists for %s for item %s. Thumbnail will not be generated.%n",
                     nsrc, item.getHandle());
             return false;
         }
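Two details in this hunk are easy to miss: ImageMagick selects a single PDF page with an `[n]` suffix on the input path (the hardcoded `[0]` always takes the first page, where the removed `page` parameter used to be interpolated), and the intermediate file now uses ImageMagick's native MIFF format to avoid a lossy JPEG step. A minimal im4java sketch of that page-selection syntax (paths are illustrative; ImageMagick plus Ghostscript must be installed for PDF input):

    import java.io.IOException;

    import org.im4java.core.ConvertCmd;
    import org.im4java.core.IM4JavaException;
    import org.im4java.core.IMOperation;

    public class FirstPageToMiff {
        public static void main(String[] args)
                throws IOException, InterruptedException, IM4JavaException {
            IMOperation op = new IMOperation();
            op.addImage("input.pdf[0]");   // "[0]" = first page only
            op.addImage("input.pdf.miff"); // lossless intermediate, as in the diff
            new ConvertCmd().run(op);
        }
    }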

File: org/dspace/app/mediafilter/MediaFilterScriptConfiguration.java

@@ -7,25 +7,16 @@
  */
 package org.dspace.app.mediafilter;

-import java.sql.SQLException;
-
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
-import org.dspace.authorize.service.AuthorizeService;
-import org.dspace.core.Context;
 import org.dspace.scripts.configuration.ScriptConfiguration;
-import org.springframework.beans.factory.annotation.Autowired;

 public class MediaFilterScriptConfiguration<T extends MediaFilterScript> extends ScriptConfiguration<T> {

-    @Autowired
-    private AuthorizeService authorizeService;
-
     private Class<T> dspaceRunnableClass;

     private static final String MEDIA_FILTER_PLUGINS_KEY = "filter.plugins";

     @Override
     public Class<T> getDspaceRunnableClass() {
         return dspaceRunnableClass;
@@ -36,16 +27,6 @@ public class MediaFilterScriptConfiguration<T extends MediaFilterScript> extends
         this.dspaceRunnableClass = dspaceRunnableClass;
     }

-    @Override
-    public boolean isAllowedToExecute(final Context context) {
-        try {
-            return authorizeService.isAdmin(context);
-        } catch (SQLException e) {
-            throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
-        }
-    }
-
     @Override
     public Options getOptions() {
         Options options = new Options();

File: org/dspace/app/requestitem/RequestItemEmailNotifier.java

@@ -167,17 +167,24 @@ public class RequestItemEmailNotifier {
                     if (!bitstream.getFormat(context).isInternal() &&
                             requestItemService.isRestricted(context,
                             bitstream)) {
+                        // #8636 Anyone receiving the email can respond to the
+                        // request without authenticating into DSpace
+                        context.turnOffAuthorisationSystem();
                         email.addAttachment(bitstreamService.retrieve(context,
                                 bitstream), bitstream.getName(),
                                 bitstream.getFormat(context).getMIMEType());
+                        context.restoreAuthSystemState();
                     }
                 }
             }
         } else {
             Bitstream bitstream = ri.getBitstream();
+            // #8636 Anyone receiving the email can respond to the request without authenticating into DSpace
+            context.turnOffAuthorisationSystem();
             email.addAttachment(bitstreamService.retrieve(context, bitstream),
                     bitstream.getName(),
                     bitstream.getFormat(context).getMIMEType());
+            context.restoreAuthSystemState();
         }
         email.send();
     } else {
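turnOffAuthorisationSystem() / restoreAuthSystemState() is DSpace's standard escape hatch for privileged reads; here it lets the attachment be fetched for a requester who never authenticates (#8636). One caveat worth noting: if retrieve or addAttachment throws between the two calls, the context stays privileged. A try/finally variant, a sketch rather than what the diff does, reusing the variables in scope in the hunk above, makes the restore unconditional:

    // Sketch: same calls as the diff, but the restore is guaranteed.
    context.turnOffAuthorisationSystem();
    try {
        email.addAttachment(bitstreamService.retrieve(context, bitstream),
                bitstream.getName(),
                bitstream.getFormat(context).getMIMEType());
    } finally {
        context.restoreAuthSystemState();
    }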

File: org/dspace/app/solrdatabaseresync/SolrDatabaseResyncCliScriptConfiguration.java

@@ -8,7 +8,6 @@
 package org.dspace.app.solrdatabaseresync;

 import org.apache.commons.cli.Options;
-import org.dspace.core.Context;
 import org.dspace.scripts.configuration.ScriptConfiguration;

 /**
@@ -27,11 +26,6 @@ public class SolrDatabaseResyncCliScriptConfiguration extends ScriptConfiguratio
         this.dspaceRunnableClass = dspaceRunnableClass;
     }

-    @Override
-    public boolean isAllowedToExecute(Context context) {
-        return true;
-    }
-
     @Override
     public Options getOptions() {
         if (options == null) {

File: org/dspace/app/util/SubmissionConfigReader.java

@@ -22,7 +22,10 @@ import org.apache.commons.lang3.StringUtils;
 import org.apache.logging.log4j.Logger;
 import org.dspace.content.Collection;
 import org.dspace.content.DSpaceObject;
+import org.dspace.content.factory.ContentServiceFactory;
+import org.dspace.content.service.CollectionService;
 import org.dspace.core.Context;
+import org.dspace.discovery.SearchServiceException;
 import org.dspace.handle.factory.HandleServiceFactory;
 import org.dspace.services.factory.DSpaceServicesFactory;
 import org.w3c.dom.Document;
@@ -105,6 +108,13 @@ public class SubmissionConfigReader {
      */
     private SubmissionConfig lastSubmissionConfig = null;

+    /**
+     * Collection Service instance, needed to interact with collection's
+     * stored data
+     */
+    protected static final CollectionService collectionService
+            = ContentServiceFactory.getInstance().getCollectionService();
+
     /**
      * Load Submission Configuration from the
      * item-submission.xml configuration file
@@ -152,6 +162,9 @@ public class SubmissionConfigReader {
         } catch (FactoryConfigurationError fe) {
             throw new SubmissionConfigReaderException(
                 "Cannot create Item Submission Configuration parser", fe);
+        } catch (SearchServiceException se) {
+            throw new SubmissionConfigReaderException(
+                "Cannot perform a discovery search for Item Submission Configuration", se);
         } catch (Exception e) {
             throw new SubmissionConfigReaderException(
                 "Error creating Item Submission Configuration: " + e);
@@ -287,7 +300,7 @@ public class SubmissionConfigReader {
      * should correspond to the collection-form maps, the form definitions, and
      * the display/storage word pairs.
      */
-    private void doNodes(Node n) throws SAXException, SubmissionConfigReaderException {
+    private void doNodes(Node n) throws SAXException, SearchServiceException, SubmissionConfigReaderException {
         if (n == null) {
             return;
         }
@@ -334,18 +347,23 @@ public class SubmissionConfigReader {
      * the collection handle and item submission name, put name in hashmap keyed
      * by the collection handle.
      */
-    private void processMap(Node e) throws SAXException {
+    private void processMap(Node e) throws SAXException, SearchServiceException {
+        // create a context
+        Context context = new Context();
+
         NodeList nl = e.getChildNodes();
         int len = nl.getLength();
         for (int i = 0; i < len; i++) {
             Node nd = nl.item(i);
             if (nd.getNodeName().equals("name-map")) {
                 String id = getAttribute(nd, "collection-handle");
+                String entityType = getAttribute(nd, "collection-entity-type");
                 String value = getAttribute(nd, "submission-name");
                 String content = getValue(nd);
-                if (id == null) {
+                if (id == null && entityType == null) {
                     throw new SAXException(
-                        "name-map element is missing collection-handle attribute in 'item-submission.xml'");
+                        "name-map element is missing collection-handle or collection-entity-type attribute " +
+                            "in 'item-submission.xml'");
                 }
                 if (value == null) {
                     throw new SAXException(
@@ -355,7 +373,17 @@ public class SubmissionConfigReader {
                     throw new SAXException(
                         "name-map element has content in 'item-submission.xml', it should be empty.");
                 }
+                if (id != null) {
                     collectionToSubmissionConfig.put(id, value);
+                } else {
+                    // get all collections for this entity-type
+                    List<Collection> collections = collectionService.findAllCollectionsByEntityType(context,
+                            entityType);
+                    for (Collection collection : collections) {
+                        collectionToSubmissionConfig.putIfAbsent(collection.getHandle(), value);
+                    }
+                }
             } // ignore any child node that isn't a "name-map"
         }
     }
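Note the asymmetry in the new mapping code: handle-based name-maps use put, while entity-type maps use putIfAbsent. Because putIfAbsent never overwrites, an explicit collection-handle mapping wins over a broader collection-entity-type one regardless of the order in which the name-map elements appear. A small self-contained demonstration of the two Map operations (the keys are illustrative handles):

    import java.util.HashMap;
    import java.util.Map;

    public class MapPrecedenceDemo {
        public static void main(String[] args) {
            Map<String, String> config = new HashMap<>();
            config.put("123456789/1", "custom");           // explicit handle mapping
            config.putIfAbsent("123456789/1", "default");  // entity-type sweep: no effect
            config.putIfAbsent("123456789/2", "default");  // unmapped collection: filled in
            // the explicit mapping keeps "custom"; only the unmapped handle got "default"
            System.out.println(config);
        }
    }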

File: org/dspace/app/util/SubmissionStepConfig.java

@@ -11,6 +11,9 @@ import java.io.Serializable;
 import java.util.Map;

 import org.apache.commons.lang3.BooleanUtils;
+import org.dspace.content.InProgressSubmission;
+import org.dspace.content.WorkspaceItem;
+import org.hibernate.proxy.HibernateProxyHelper;

 /**
  * Class representing configuration for a single step within an Item Submission
@@ -173,6 +176,38 @@ public class SubmissionStepConfig implements Serializable {
         return visibilityOutside;
     }

+    /**
+     * Check if given submission section object is hidden for the current submission scope
+     *
+     * @param obj the InProgressSubmission to check
+     * @return true if the submission section is hidden, false otherwise
+     */
+    public boolean isHiddenForInProgressSubmission(InProgressSubmission obj) {
+
+        String scopeToCheck = getScope(obj);
+
+        if (scope == null || scopeToCheck == null) {
+            return false;
+        }
+
+        String visibility = getVisibility();
+        String visibilityOutside = getVisibilityOutside();
+
+        if (scope.equalsIgnoreCase(scopeToCheck)) {
+            return "hidden".equalsIgnoreCase(visibility);
+        } else {
+            return visibilityOutside == null || "hidden".equalsIgnoreCase(visibilityOutside);
+        }
+    }
+
+    private String getScope(InProgressSubmission obj) {
+        if (HibernateProxyHelper.getClassWithoutInitializingProxy(obj).equals(WorkspaceItem.class)) {
+            return "submission";
+        }
+        return "workflow";
+    }
+
     /**
      * Get the number of this step in the current Submission process config.
      * Step numbers start with #0 (although step #0 is ALWAYS the special
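A short usage fragment for the new visibility check, reusing only what the hunk defines (the surrounding steps, inProgressSubmission, and render names are assumptions for illustration): a WorkspaceItem resolves to the "submission" scope, anything else to "workflow", and hidden sections are skipped.

    // Hypothetical caller: skip sections the new method marks hidden
    for (SubmissionStepConfig step : steps) {
        if (step.isHiddenForInProgressSubmission(inProgressSubmission)) {
            continue; // do not render this section
        }
        render(step);
    }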

File: org/dspace/authenticate/LDAPAuthentication.java

@@ -11,9 +11,11 @@ import static org.dspace.eperson.service.EPersonService.MD_PHONE;

 import java.io.IOException;
 import java.sql.SQLException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Hashtable;
+import java.util.Iterator;
 import java.util.List;

 import javax.naming.NamingEnumeration;
 import javax.naming.NamingException;
@@ -64,6 +66,7 @@ import org.dspace.services.factory.DSpaceServicesFactory;
 * @author Reuben Pasquini
 * @author Samuel Ottenhoff
 * @author Ivan Masár
+ * @author Michael Plate
 */
 public class LDAPAuthentication
     implements AuthenticationMethod {
@@ -391,7 +394,7 @@ public class LDAPAuthentication
         protected String ldapGivenName = null;
         protected String ldapSurname = null;
         protected String ldapPhone = null;
-        protected String ldapGroup = null;
+        protected ArrayList<String> ldapGroup = null;

         /**
          * LDAP settings
@@ -406,9 +409,9 @@ public class LDAPAuthentication
         final String ldap_surname_field;
         final String ldap_phone_field;
         final String ldap_group_field;
         final boolean useTLS;

         SpeakerToLDAP(Logger thelog) {
             ConfigurationService configurationService
                 = DSpaceServicesFactory.getInstance().getConfigurationService();
@@ -547,7 +550,11 @@ public class LDAPAuthentication
                                 if (attlist[4] != null) {
                                     att = atts.get(attlist[4]);
                                     if (att != null) {
-                                        ldapGroup = (String) att.get();
+                                        // loop through all groups returned by LDAP
+                                        ldapGroup = new ArrayList<String>();
+                                        for (NamingEnumeration val = att.getAll(); val.hasMoreElements(); ) {
+                                            ldapGroup.add((String) val.next());
+                                        }
                                     }
                                 }
@@ -693,24 +700,44 @@ public class LDAPAuthentication
         /*
          * Add authenticated users to the group defined in dspace.cfg by
          * the authentication-ldap.login.groupmap.* key.
+         *
+         * @param dn
+         *  The string containing distinguished name of the user
+         *
+         * @param group
+         *  List of strings with LDAP dn of groups
+         *
+         * @param context
+         *  DSpace context
          */
-        private void assignGroups(String dn, String group, Context context) {
+        private void assignGroups(String dn, ArrayList<String> group, Context context) {
             if (StringUtils.isNotBlank(dn)) {
                 System.out.println("dn:" + dn);
                 int i = 1;
                 String groupMap = configurationService.getProperty("authentication-ldap.login.groupmap." + i);

                 boolean cmp;

+                // groupmap contains the mapping of LDAP groups to DSpace groups
+                // outer loop with the DSpace groups
                 while (groupMap != null) {
                     String t[] = groupMap.split(":");
                     String ldapSearchString = t[0];
                     String dspaceGroupName = t[1];

-                    if (group == null) {
-                        cmp = StringUtils.containsIgnoreCase(dn, ldapSearchString + ",");
-                    } else {
-                        cmp = StringUtils.equalsIgnoreCase(group, ldapSearchString);
-                    }
+                    // list of strings with dn from LDAP groups
+                    // inner loop
+                    Iterator<String> groupIterator = group.iterator();
+                    while (groupIterator.hasNext()) {
+
+                        // save the current entry from iterator for further use
+                        String currentGroup = groupIterator.next();
+
+                        // very much the old code from DSpace <= 7.5
+                        if (currentGroup == null) {
+                            cmp = StringUtils.containsIgnoreCase(dn, ldapSearchString + ",");
+                        } else {
+                            cmp = StringUtils.equalsIgnoreCase(currentGroup, ldapSearchString);
+                        }

                         if (cmp) {
@@ -737,6 +764,7 @@ public class LDAPAuthentication
                                     dspaceGroupName));
                             }
                         }
+                    }
                     groupMap = configurationService.getProperty("authentication-ldap.login.groupmap." + ++i);
                 }
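The reworked loop compares every LDAP group DN of the user against each authentication-ldap.login.groupmap.N entry, whose format (visible in the split(":")) is an LDAP search string and a DSpace group name separated by a colon. A self-contained sketch of that matching, with illustrative values:

    import java.util.List;

    public class GroupmapDemo {
        public static void main(String[] args) {
            // illustrative groupmap entry: "<ldap search string>:<DSpace group>"
            String groupMap = "cn=dspace-admins,ou=groups,dc=example,dc=org:Administrator";
            String[] t = groupMap.split(":");
            String ldapSearchString = t[0];
            String dspaceGroupName = t[1];

            // groups returned by LDAP for the user, as in the new ArrayList<String> field
            List<String> ldapGroup = List.of(
                    "cn=staff,ou=groups,dc=example,dc=org",
                    "cn=dspace-admins,ou=groups,dc=example,dc=org");

            for (String currentGroup : ldapGroup) {
                if (currentGroup.equalsIgnoreCase(ldapSearchString)) {
                    System.out.println("would add user to DSpace group: " + dspaceGroupName);
                }
            }
        }
    }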

File: org/dspace/authorize/AuthorizeServiceImpl.java

@@ -43,6 +43,7 @@ import org.dspace.discovery.SearchService;
 import org.dspace.discovery.SearchServiceException;
 import org.dspace.discovery.indexobject.IndexableCollection;
 import org.dspace.discovery.indexobject.IndexableCommunity;
+import org.dspace.discovery.indexobject.IndexableItem;
 import org.dspace.eperson.EPerson;
 import org.dspace.eperson.Group;
 import org.dspace.eperson.service.GroupService;
@@ -654,60 +655,6 @@ public class AuthorizeServiceImpl implements AuthorizeService {
         }
     }

-    /**
-     * Generate Policies policies READ for the date in input adding reason. New policies are assigned automatically
-     * at the groups that
-     * have right on the collection. E.g., if the anonymous can access the collection policies are assigned to
-     * anonymous.
-     *
-     * @param context          The relevant DSpace Context.
-     * @param embargoDate      embargo end date
-     * @param reason           embargo reason
-     * @param dso              DSpace object
-     * @param owningCollection collection to get group policies from
-     * @throws SQLException       if database error
-     * @throws AuthorizeException if authorization error
-     */
-    @Override
-    public void generateAutomaticPolicies(Context context, Date embargoDate,
-                                          String reason, DSpaceObject dso, Collection owningCollection)
-        throws SQLException, AuthorizeException {
-
-        if (embargoDate != null || (embargoDate == null && dso instanceof Bitstream)) {
-
-            List<Group> authorizedGroups = getAuthorizedGroups(context, owningCollection, Constants.DEFAULT_ITEM_READ);
-
-            removeAllPoliciesByDSOAndType(context, dso, ResourcePolicy.TYPE_CUSTOM);
-
-            // look for anonymous
-            boolean isAnonymousInPlace = false;
-            for (Group g : authorizedGroups) {
-                if (StringUtils.equals(g.getName(), Group.ANONYMOUS)) {
-                    isAnonymousInPlace = true;
-                }
-            }
-            if (!isAnonymousInPlace) {
-                // add policies for all the groups
-                for (Group g : authorizedGroups) {
-                    ResourcePolicy rp = createOrModifyPolicy(null, context, null, g, null, embargoDate, Constants.READ,
-                                                             reason, dso);
-                    if (rp != null) {
-                        resourcePolicyService.update(context, rp);
-                    }
-                }
-            } else {
-                // add policy just for anonymous
-                ResourcePolicy rp = createOrModifyPolicy(null, context, null,
-                                                         groupService.findByName(context, Group.ANONYMOUS), null,
-                                                         embargoDate, Constants.READ, reason, dso);
-                if (rp != null) {
-                    resourcePolicyService.update(context, rp);
-                }
-            }
-        }
-    }
-
     @Override
     public ResourcePolicy createResourcePolicy(Context context, DSpaceObject dso, Group group, EPerson eperson,
                                                int type, String rpType) throws SQLException, AuthorizeException {
@@ -809,6 +756,19 @@ public class AuthorizeServiceImpl implements AuthorizeService {
         return performCheck(context, "search.resourcetype:" + IndexableCollection.TYPE);
     }

+    /**
+     * Checks that the context's current user is an item admin in the site by querying the solr database.
+     *
+     * @param context context with the current user
+     * @return true if the current user is an item admin in the site
+     * false when this is not the case, or an exception occurred
+     * @throws java.sql.SQLException passed through.
+     */
+    @Override
+    public boolean isItemAdmin(Context context) throws SQLException {
+        return performCheck(context, "search.resourcetype:" + IndexableItem.TYPE);
+    }
+
     /**
      * Checks that the context's current user is a community or collection admin in the site.
      *

File: org/dspace/authorize/ResourcePolicy.java

@@ -100,7 +100,7 @@ public class ResourcePolicy implements ReloadableEntity<Integer> {
     private String rptype;

     @Lob
-    @Type(type = "org.dspace.storage.rdbms.hibernate.DatabaseAwareLobType")
+    @Type(type = "org.hibernate.type.TextType")
     @Column(name = "rpdescription")
     private String rpdescription;

File: org/dspace/authorize/service/AuthorizeService.java

@@ -470,24 +470,6 @@ public interface AuthorizeService {
     public ResourcePolicy findByTypeGroupAction(Context c, DSpaceObject dso, Group group, int action)
         throws SQLException;

-    /**
-     * Generate Policies policies READ for the date in input adding reason. New policies are assigned automatically
-     * at the groups that
-     * have right on the collection. E.g., if the anonymous can access the collection policies are assigned to
-     * anonymous.
-     *
-     * @param context          current context
-     * @param embargoDate      date
-     * @param reason           reason
-     * @param dso              DSpaceObject
-     * @param owningCollection collection
-     * @throws SQLException       if database error
-     * @throws AuthorizeException if authorization error
-     */
-    public void generateAutomaticPolicies(Context context, Date embargoDate, String reason, DSpaceObject dso,
-                                          Collection owningCollection) throws SQLException, AuthorizeException;
-
     public ResourcePolicy createResourcePolicy(Context context, DSpaceObject dso, Group group, EPerson eperson,
                                                int type, String rpType) throws SQLException, AuthorizeException;

@@ -532,6 +514,15 @@ public interface AuthorizeService {
      */
     boolean isCollectionAdmin(Context context) throws SQLException;

+    /**
+     * Checks that the context's current user is an item admin in the site by querying the solr database.
+     *
+     * @param context context with the current user
+     * @return true if the current user is an item admin in the site
+     * false when this is not the case, or an exception occurred
+     */
+    boolean isItemAdmin(Context context) throws SQLException;
+
     /**
      * Checks that the context's current user is a community or collection admin in the site.
     *

File: org/dspace/browse/BrowseIndex.java

@@ -22,11 +22,13 @@ import org.dspace.sort.SortOption;
 * This class holds all the information about a specifically configured
 * BrowseIndex. It is responsible for parsing the configuration, understanding
 * about what sort options are available, and what the names of the database
- * tables that hold all the information are actually called.
+ * tables that hold all the information are actually called. Hierarchical browse
+ * indexes also contain information about the vocabulary they're using, see:
+ * {@link org.dspace.content.authority.DSpaceControlledVocabularyIndex}
 *
 * @author Richard Jones
 */
-public final class BrowseIndex {
+public class BrowseIndex {
     /** the configuration number, as specified in the config */
     /**
      * used for single metadata browse tables for generating the table name
@@ -102,7 +104,7 @@ public final class BrowseIndex {
      *
      * @param baseName The base of the table name
      */
-    private BrowseIndex(String baseName) {
+    protected BrowseIndex(String baseName) {
        try {
            number = -1;
            tableBaseName = baseName;

File: org/dspace/browse/SolrBrowseDAO.java

@@ -239,7 +239,7 @@ public class SolrBrowseDAO implements BrowseDAO {
    }

    private void addDefaultFilterQueries(DiscoverQuery query) {
-        DiscoveryConfiguration discoveryConfiguration = SearchUtils.getDiscoveryConfiguration(container);
+        DiscoveryConfiguration discoveryConfiguration = SearchUtils.getDiscoveryConfiguration(context, container);
        discoveryConfiguration.getDefaultFilterQueries().forEach(query::addFilterQueries);
    }

File: org/dspace/content/BundleServiceImpl.java

@@ -8,6 +8,7 @@
 package org.dspace.content;

 import static org.dspace.core.Constants.ADD;
+import static org.dspace.core.Constants.READ;
 import static org.dspace.core.Constants.REMOVE;
 import static org.dspace.core.Constants.WRITE;

@@ -34,6 +35,7 @@ import org.dspace.content.service.ItemService;
 import org.dspace.core.Constants;
 import org.dspace.core.Context;
 import org.dspace.core.LogHelper;
+import org.dspace.eperson.Group;
 import org.dspace.event.Event;
 import org.springframework.beans.factory.annotation.Autowired;

@@ -173,6 +175,39 @@ public class BundleServiceImpl extends DSpaceObjectServiceImpl<Bundle> implement
         // copy authorization policies from bundle to bitstream
         // FIXME: multiple inclusion is affected by this...
         authorizeService.inheritPolicies(context, bundle, bitstream);
+
+        // The next logic is a bit overly cautious but ensures that if there are any future start dates
+        // on the item or bitstream read policies, that we'll skip inheriting anything from the owning collection
+        // just in case. In practice, the item install process would overwrite these anyway but it may satisfy
+        // some other bitstream creation methods and integration tests
+        boolean isEmbargoed = false;
+        for (ResourcePolicy resourcePolicy : authorizeService.getPoliciesActionFilter(context, owningItem, READ)) {
+            if (!resourcePolicyService.isDateValid(resourcePolicy)) {
+                isEmbargoed = true;
+                break;
+            }
+        }
+        if (owningItem != null && !isEmbargoed) {
+            // Resolve owning collection
+            Collection owningCollection = owningItem.getOwningCollection();
+            if (owningCollection != null) {
+                // Get DEFAULT_BITSTREAM_READ policy from the collection
+                List<Group> defaultBitstreamReadGroups =
+                        authorizeService.getAuthorizedGroups(context, owningCollection,
+                                Constants.DEFAULT_BITSTREAM_READ);
+                log.info(defaultBitstreamReadGroups.size());
+                // If this collection is configured with a DEFAULT_BITSTREAM_READ group, overwrite the READ policy
+                // inherited from the bundle with this policy.
+                if (!defaultBitstreamReadGroups.isEmpty()) {
+                    // Remove read policies from the bitstream
+                    authorizeService.removePoliciesActionFilter(context, bitstream, Constants.READ);
+                    for (Group defaultBitstreamReadGroup : defaultBitstreamReadGroups) {
+                        // Inherit this policy as READ, directly from the collection roles
+                        authorizeService.addPolicy(context, bitstream,
+                                Constants.READ, defaultBitstreamReadGroup, ResourcePolicy.TYPE_INHERITED);
+                    }
+                }
+            }
+        }
+
         bitstreamService.update(context, bitstream);
     }

File: org/dspace/content/CollectionServiceImpl.java

@@ -1049,6 +1049,26 @@ public class CollectionServiceImpl extends DSpaceObjectServiceImpl<Collection> i
         return (int) resp.getTotalSearchResults();
     }

+    @Override
+    @SuppressWarnings("rawtypes")
+    public List<Collection> findAllCollectionsByEntityType(Context context, String entityType)
+            throws SearchServiceException {
+        List<Collection> collectionList = new ArrayList<>();
+
+        DiscoverQuery discoverQuery = new DiscoverQuery();
+        discoverQuery.setDSpaceObjectFilter(IndexableCollection.TYPE);
+        discoverQuery.addFilterQueries("dspace.entity.type:" + entityType);
+
+        DiscoverResult discoverResult = searchService.search(context, discoverQuery);
+        List<IndexableObject> solrIndexableObjects = discoverResult.getIndexableObjects();
+
+        for (IndexableObject solrCollection : solrIndexableObjects) {
+            Collection c = ((IndexableCollection) solrCollection).getIndexedObject();
+            collectionList.add(c);
+        }
+        return collectionList;
+    }
+
     @Override
     public int countArchivedItem(Collection collection) throws ItemCountException {
         return ItemCounter.getInstance().getCount(collection);
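The first caller of this new method appears earlier in this same diff: SubmissionConfigReader.processMap uses it to expand a collection-entity-type name-map across every matching collection. A short usage fragment (the context and collectionService acquisition follow the reader's pattern; "Publication" is an illustrative entity type):

    // Discovery-backed lookup of all collections configured with a given entity type
    List<Collection> collections =
            collectionService.findAllCollectionsByEntityType(context, "Publication");
    for (Collection collection : collections) {
        System.out.println(collection.getHandle());
    }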

File: org/dspace/content/ItemServiceImpl.java

@@ -64,7 +64,9 @@ import org.dspace.eperson.service.SubscribeService;
 import org.dspace.event.Event;
 import org.dspace.harvest.HarvestedItem;
 import org.dspace.harvest.service.HarvestedItemService;
+import org.dspace.identifier.DOI;
 import org.dspace.identifier.IdentifierException;
+import org.dspace.identifier.service.DOIService;
 import org.dspace.identifier.service.IdentifierService;
 import org.dspace.orcid.OrcidHistory;
 import org.dspace.orcid.OrcidQueue;
@@ -123,6 +125,8 @@ public class ItemServiceImpl extends DSpaceObjectServiceImpl<Item> implements It
     @Autowired(required = true)
     protected IdentifierService identifierService;
     @Autowired(required = true)
+    protected DOIService doiService;
+    @Autowired(required = true)
     protected VersioningService versioningService;
     @Autowired(required = true)
     protected HarvestedItemService harvestedItemService;
@@ -786,6 +790,16 @@ public class ItemServiceImpl extends DSpaceObjectServiceImpl<Item> implements It
         // Remove any Handle
         handleService.unbindHandle(context, item);

+        // Delete a DOI if linked to the item.
+        // If no DOI consumer or provider is configured, but a DOI remains linked to this item's uuid,
+        // hibernate will throw a foreign constraint exception.
+        // Here we use the DOI service directly as it is able to manage DOIs even without any configured
+        // consumer or provider.
+        DOI doi = doiService.findDOIByDSpaceObject(context, item);
+        if (doi != null) {
+            doi.setDSpaceObject(null);
+        }
+
         // remove version attached to the item
         removeVersion(context, item);

File: org/dspace/content/MetadataValue.java

@@ -60,7 +60,7 @@ public class MetadataValue implements ReloadableEntity<Integer> {
      * The value of the field
      */
     @Lob
-    @Type(type = "org.dspace.storage.rdbms.hibernate.DatabaseAwareLobType")
+    @Type(type = "org.hibernate.type.TextType")
     @Column(name = "text_value")
     private String value;

File: org/dspace/content/authority/ChoiceAuthorityServiceImpl.java

@@ -15,6 +15,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.stream.Collectors;

 import org.apache.commons.lang3.StringUtils;
 import org.apache.logging.log4j.Logger;
@@ -30,6 +31,8 @@ import org.dspace.content.MetadataValue;
 import org.dspace.content.authority.service.ChoiceAuthorityService;
 import org.dspace.core.Utils;
 import org.dspace.core.service.PluginService;
+import org.dspace.discovery.configuration.DiscoveryConfigurationService;
+import org.dspace.discovery.configuration.DiscoverySearchFilterFacet;
 import org.dspace.services.ConfigurationService;
 import org.springframework.beans.factory.annotation.Autowired;

@@ -80,6 +83,9 @@ public final class ChoiceAuthorityServiceImpl implements ChoiceAuthorityService
     protected Map<String, Map<String, List<String>>> authoritiesFormDefinitions =
             new HashMap<String, Map<String, List<String>>>();

+    // Map of vocabulary authorities to and their index info equivalent
+    protected Map<String, DSpaceControlledVocabularyIndex> vocabularyIndexMap = new HashMap<>();
+
     // the item submission reader
     private SubmissionConfigReader itemSubmissionConfigReader;

@@ -87,6 +93,8 @@ public final class ChoiceAuthorityServiceImpl implements ChoiceAuthorityService
     protected ConfigurationService configurationService;
     @Autowired(required = true)
     protected PluginService pluginService;
+    @Autowired
+    private DiscoveryConfigurationService searchConfigurationService;

     final static String CHOICES_PLUGIN_PREFIX = "choices.plugin.";
     final static String CHOICES_PRESENTATION_PREFIX = "choices.presentation.";
@@ -540,4 +548,50 @@ public final class ChoiceAuthorityServiceImpl implements ChoiceAuthorityService
         HierarchicalAuthority ma = (HierarchicalAuthority) getChoiceAuthorityByAuthorityName(authorityName);
         return ma.getParentChoice(authorityName, vocabularyId, locale);
     }
+
+    @Override
+    public DSpaceControlledVocabularyIndex getVocabularyIndex(String nameVocab) {
+        if (this.vocabularyIndexMap.containsKey(nameVocab)) {
+            return this.vocabularyIndexMap.get(nameVocab);
+        } else {
+            init();
+            ChoiceAuthority source = this.getChoiceAuthorityByAuthorityName(nameVocab);
+            if (source != null && source instanceof DSpaceControlledVocabulary) {
+                Set<String> metadataFields = new HashSet<>();
+                Map<String, List<String>> formsToFields = this.authoritiesFormDefinitions.get(nameVocab);
+                for (Map.Entry<String, List<String>> formToField : formsToFields.entrySet()) {
+                    metadataFields.addAll(formToField.getValue().stream().map(value ->
+                            StringUtils.replace(value, "_", "."))
+                            .collect(Collectors.toList()));
+                }
+                DiscoverySearchFilterFacet matchingFacet = null;
+                for (DiscoverySearchFilterFacet facetConfig : searchConfigurationService.getAllFacetsConfig()) {
+                    boolean coversAllFieldsFromVocab = true;
+                    for (String fieldFromVocab : metadataFields) {
+                        boolean coversFieldFromVocab = false;
+                        for (String facetMdField : facetConfig.getMetadataFields()) {
+                            if (facetMdField.startsWith(fieldFromVocab)) {
+                                coversFieldFromVocab = true;
+                                break;
+                            }
+                        }
+                        if (!coversFieldFromVocab) {
+                            coversAllFieldsFromVocab = false;
+                            break;
+                        }
+                    }
+                    if (coversAllFieldsFromVocab) {
+                        matchingFacet = facetConfig;
+                        break;
+                    }
+                }
+                DSpaceControlledVocabularyIndex vocabularyIndex =
+                        new DSpaceControlledVocabularyIndex((DSpaceControlledVocabulary) source, metadataFields,
+                                matchingFacet);
+                this.vocabularyIndexMap.put(nameVocab, vocabularyIndex);
+                return vocabularyIndex;
+            }
+            return null;
+        }
+    }
 }

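A hedged usage sketch for the new lookup; the factory access is the conventional DSpace route, and "srsc" is only an illustrative vocabulary name:

// Resolve a controlled vocabulary to its browse-index view.
ChoiceAuthorityService choiceAuthorityService =
        ContentAuthorityServiceFactory.getInstance().getChoiceAuthorityService();
DSpaceControlledVocabularyIndex index = choiceAuthorityService.getVocabularyIndex("srsc");
if (index != null) {
    Set<String> fields = index.getMetadataFields();            // e.g. "dc.subject"
    DiscoverySearchFilterFacet facet = index.getFacetConfig(); // null when no facet covers every field
}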
View File

@@ -0,0 +1,47 @@
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.authority;
import java.util.Set;
import org.dspace.browse.BrowseIndex;
import org.dspace.discovery.configuration.DiscoverySearchFilterFacet;
/**
* Helper class to transform a {@link org.dspace.content.authority.DSpaceControlledVocabulary} into a
* {@code BrowseIndexRest}
* cached by {@link org.dspace.content.authority.service.ChoiceAuthorityService#getVocabularyIndex(String)}
*
* @author Marie Verdonck (Atmire) on 04/05/2023
*/
public class DSpaceControlledVocabularyIndex extends BrowseIndex {
protected DSpaceControlledVocabulary vocabulary;
protected Set<String> metadataFields;
protected DiscoverySearchFilterFacet facetConfig;
public DSpaceControlledVocabularyIndex(DSpaceControlledVocabulary controlledVocabulary, Set<String> metadataFields,
DiscoverySearchFilterFacet facetConfig) {
super(controlledVocabulary.vocabularyName);
this.vocabulary = controlledVocabulary;
this.metadataFields = metadataFields;
this.facetConfig = facetConfig;
}
public DSpaceControlledVocabulary getVocabulary() {
return vocabulary;
}
public Set<String> getMetadataFields() {
return this.metadataFields;
}
public DiscoverySearchFilterFacet getFacetConfig() {
return this.facetConfig;
}
}

View File

@@ -200,8 +200,8 @@ public class SolrAuthority implements ChoiceAuthority {
} }
private String toQuery(String searchField, String text) { private String toQuery(String searchField, String text) {
return searchField + ":(" + text.toLowerCase().replaceAll(":", "\\:") + "*) or " + searchField + ":(" + text return searchField + ":(" + text.toLowerCase().replaceAll(":", "\\\\:") + "*) or " + searchField + ":(" + text
.toLowerCase().replaceAll(":", "\\:") + ")"; .toLowerCase().replaceAll(":", "\\\\:") + ")";
} }
@Override @Override
@@ -225,7 +225,7 @@ public class SolrAuthority implements ChoiceAuthority {
log.debug("requesting label for key " + key + " using locale " + locale); log.debug("requesting label for key " + key + " using locale " + locale);
} }
SolrQuery queryArgs = new SolrQuery(); SolrQuery queryArgs = new SolrQuery();
queryArgs.setQuery("id:" + key); queryArgs.setQuery("id:" + key.replaceAll(":", "\\\\:"));
queryArgs.setRows(1); queryArgs.setRows(1);
QueryResponse searchResponse = getSearchService().search(queryArgs); QueryResponse searchResponse = getSearchService().search(queryArgs);
SolrDocumentList docs = searchResponse.getResults(); SolrDocumentList docs = searchResponse.getResults();

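The doubled escaping above is easy to misread, so here is a small standalone illustration of why four source-level backslashes are needed; this is plain java.lang.String#replaceAll behavior, no DSpace types involved:

// replaceAll() interprets both arguments: the first as a regex, the second as a
// replacement template in which "\\" denotes one literal backslash.
// Source "\\\\:"  ->  runtime string "\\:"  ->  emitted text "\:".
String key = "vocab:term";
String escaped = key.replaceAll(":", "\\\\:");
System.out.println(escaped);          // prints: vocab\:term
System.out.println("id:" + escaped);  // prints: id:vocab\:term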
View File

@@ -15,6 +15,7 @@ import org.dspace.content.MetadataValue;
import org.dspace.content.authority.Choice; import org.dspace.content.authority.Choice;
import org.dspace.content.authority.ChoiceAuthority; import org.dspace.content.authority.ChoiceAuthority;
import org.dspace.content.authority.Choices; import org.dspace.content.authority.Choices;
import org.dspace.content.authority.DSpaceControlledVocabularyIndex;
/** /**
* Broker for ChoiceAuthority plugins, and for other information configured * Broker for ChoiceAuthority plugins, and for other information configured
@@ -220,4 +221,7 @@ public interface ChoiceAuthorityService {
* @return the parent Choice object if any * @return the parent Choice object if any
*/ */
public Choice getParentChoice(String authorityName, String vocabularyId, String locale); public Choice getParentChoice(String authorityName, String vocabularyId, String locale);
public DSpaceControlledVocabularyIndex getVocabularyIndex(String nameVocab);
} }

View File

@@ -14,6 +14,7 @@ import java.util.List;
import org.dspace.content.ProcessStatus; import org.dspace.content.ProcessStatus;
import org.dspace.core.Context; import org.dspace.core.Context;
import org.dspace.core.GenericDAO; import org.dspace.core.GenericDAO;
import org.dspace.eperson.EPerson;
import org.dspace.scripts.Process; import org.dspace.scripts.Process;
import org.dspace.scripts.ProcessQueryParameterContainer; import org.dspace.scripts.ProcessQueryParameterContainer;
@@ -97,4 +98,26 @@ public interface ProcessDAO extends GenericDAO<Process> {
List<Process> findByStatusAndCreationTimeOlderThan(Context context, List<ProcessStatus> statuses, Date date) List<Process> findByStatusAndCreationTimeOlderThan(Context context, List<ProcessStatus> statuses, Date date)
throws SQLException; throws SQLException;
/**
* Returns a list of all Process objects in the database by the given user.
*
* @param context The relevant DSpace context
* @param user The user to search for
* @param limit The limit for the amount of Processes returned
* @param offset The offset for the Processes to be returned
* @return The list of all Process objects in the Database
* @throws SQLException If something goes wrong
*/
List<Process> findByUser(Context context, EPerson user, int limit, int offset) throws SQLException;
/**
* Count all the processes that are related to the given user.
*
* @param context The relevant DSpace context
* @param user The user to search for
* @return The number of results matching the query
* @throws SQLException If something goes wrong
*/
int countByUser(Context context, EPerson user) throws SQLException;
} }

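A hedged paging sketch for the two new DAO methods; real callers would normally go through ProcessService, and the accessors on Process are assumed from the surrounding codebase:

// Page through one user's processes; findByUser orders by process ID
// descending, so the newest processes come first.
int pageSize = 20;
int total = processDAO.countByUser(context, eperson);
for (int offset = 0; offset < total; offset += pageSize) {
    for (Process process : processDAO.findByUser(context, eperson, pageSize, offset)) {
        System.out.println(process.getID() + " -> " + process.getProcessStatus());
    }
}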
View File

@@ -24,6 +24,7 @@ import org.dspace.content.ProcessStatus;
import org.dspace.content.dao.ProcessDAO; import org.dspace.content.dao.ProcessDAO;
import org.dspace.core.AbstractHibernateDAO; import org.dspace.core.AbstractHibernateDAO;
import org.dspace.core.Context; import org.dspace.core.Context;
import org.dspace.eperson.EPerson;
import org.dspace.scripts.Process; import org.dspace.scripts.Process;
import org.dspace.scripts.ProcessQueryParameterContainer; import org.dspace.scripts.ProcessQueryParameterContainer;
import org.dspace.scripts.Process_; import org.dspace.scripts.Process_;
@@ -168,6 +169,33 @@ public class ProcessDAOImpl extends AbstractHibernateDAO<Process> implements Pro
return list(context, criteriaQuery, false, Process.class, -1, -1); return list(context, criteriaQuery, false, Process.class, -1, -1);
} }
@Override
public List<Process> findByUser(Context context, EPerson user, int limit, int offset) throws SQLException {
CriteriaBuilder criteriaBuilder = getCriteriaBuilder(context);
CriteriaQuery<Process> criteriaQuery = getCriteriaQuery(criteriaBuilder, Process.class);
Root<Process> processRoot = criteriaQuery.from(Process.class);
criteriaQuery.select(processRoot);
criteriaQuery.where(criteriaBuilder.equal(processRoot.get(Process_.E_PERSON), user));
List<javax.persistence.criteria.Order> orderList = new LinkedList<>();
orderList.add(criteriaBuilder.desc(processRoot.get(Process_.PROCESS_ID)));
criteriaQuery.orderBy(orderList);
return list(context, criteriaQuery, false, Process.class, limit, offset);
}
@Override
public int countByUser(Context context, EPerson user) throws SQLException {
CriteriaBuilder criteriaBuilder = getCriteriaBuilder(context);
CriteriaQuery<Process> criteriaQuery = getCriteriaQuery(criteriaBuilder, Process.class);
Root<Process> processRoot = criteriaQuery.from(Process.class);
criteriaQuery.select(processRoot);
criteriaQuery.where(criteriaBuilder.equal(processRoot.get(Process_.E_PERSON), user));
return count(context, criteriaQuery, criteriaBuilder, processRoot);
}
} }

View File

@@ -456,5 +456,19 @@ public interface CollectionService
public int countCollectionsWithSubmit(String q, Context context, Community community, String entityType) public int countCollectionsWithSubmit(String q, Context context, Community community, String entityType)
throws SQLException, SearchServiceException; throws SQLException, SearchServiceException;
/**
* Returns a list of all collections for a specific entity type.
* NOTE: for better performance, this method retrieves its results from an index (cache)
* and does not query the database directly.
* This means that results may be stale or outdated until
* https://github.com/DSpace/DSpace/issues/2853 is resolved.
*
* @param context DSpace Context
* @param entityType limit the returned collections to those related to the given entity type
* @return list of collections found
* @throws SearchServiceException if a search error occurs
*/
public List<Collection> findAllCollectionsByEntityType(Context context, String entityType)
throws SearchServiceException;
int countArchivedItem(Collection collection) throws ItemCountException; int countArchivedItem(Collection collection) throws ItemCountException;
} }

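A short usage sketch, with "Publication" as a stand-in entity type; the staleness caveat from the javadoc applies to anything read this way:

// List every collection configured for a given entity type.
CollectionService collectionService = ContentServiceFactory.getInstance().getCollectionService();
try {
    for (Collection collection : collectionService.findAllCollectionsByEntityType(context, "Publication")) {
        System.out.println(collection.getName());
    }
} catch (SearchServiceException e) {
    // the Solr index was unreachable or the query failed
}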
View File

@@ -8,12 +8,15 @@
package org.dspace.curate; package org.dspace.curate;
import java.sql.SQLException; import java.sql.SQLException;
import java.util.List;
import org.apache.commons.cli.Options; import org.apache.commons.cli.Options;
import org.dspace.authorize.service.AuthorizeService; import org.dspace.content.DSpaceObject;
import org.dspace.core.Context; import org.dspace.core.Context;
import org.dspace.handle.factory.HandleServiceFactory;
import org.dspace.handle.service.HandleService;
import org.dspace.scripts.DSpaceCommandLineParameter;
import org.dspace.scripts.configuration.ScriptConfiguration; import org.dspace.scripts.configuration.ScriptConfiguration;
import org.springframework.beans.factory.annotation.Autowired;
/** /**
* The {@link ScriptConfiguration} for the {@link Curation} script * The {@link ScriptConfiguration} for the {@link Curation} script
@@ -22,9 +25,6 @@ import org.springframework.beans.factory.annotation.Autowired;
*/ */
public class CurationScriptConfiguration<T extends Curation> extends ScriptConfiguration<T> { public class CurationScriptConfiguration<T extends Curation> extends ScriptConfiguration<T> {
@Autowired
private AuthorizeService authorizeService;
private Class<T> dspaceRunnableClass; private Class<T> dspaceRunnableClass;
@Override @Override
@@ -38,16 +38,37 @@ public class CurationScriptConfiguration<T extends Curation> extends ScriptConfi
} }
/** /**
* Only admin can run Curation script via the scripts and processes endpoints. * Only repository admins or admins of the target object can run Curation script via the scripts
* and processes endpoints.
*
* @param context The relevant DSpace context * @param context The relevant DSpace context
* @return True if currentUser is admin, otherwise false * @param commandLineParameters the parameters that will be used to start the process if known,
* <code>null</code> otherwise
* @return true if the currentUser is allowed to run the script with the specified parameters or
* at least in some cases when the parameters are not yet known
*/ */
@Override @Override
public boolean isAllowedToExecute(Context context) { public boolean isAllowedToExecute(Context context, List<DSpaceCommandLineParameter> commandLineParameters) {
try { try {
if (commandLineParameters == null) {
return authorizeService.isAdmin(context) || authorizeService.isComColAdmin(context)
|| authorizeService.isItemAdmin(context);
} else if (commandLineParameters.stream()
.map(DSpaceCommandLineParameter::getName)
.noneMatch("-i"::equals)) {
return authorizeService.isAdmin(context); return authorizeService.isAdmin(context);
} else {
String dspaceObjectID = commandLineParameters.stream()
.filter(parameter -> "-i".equals(parameter.getName()))
.map(DSpaceCommandLineParameter::getValue)
.findFirst()
.get();
HandleService handleService = HandleServiceFactory.getInstance().getHandleService();
DSpaceObject dso = handleService.resolveToObject(context, dspaceObjectID);
return authorizeService.isAdmin(context, dso);
}
} catch (SQLException e) { } catch (SQLException e) {
throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e); throw new RuntimeException(e);
} }
} }

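The parallel removals of isAllowedToExecute in the other script configurations in this commit suggest the plain admin check has moved up into the ScriptConfiguration base class, which would also explain why authorizeService is used here without a local field. To make the new rule concrete, a hedged sketch of the three paths; the parameter values are illustrative, and the DSpaceCommandLineParameter constructor is assumed to take a name and a value:

// Case 1: parameters not yet known, allowed for any admin level (site, com/col, item).
boolean maybeAllowed = configuration.isAllowedToExecute(context, null);

// Case 2: parameters without "-i": only a repository-wide admin may run the task.
// Case 3: "-i <handle>" present: allowed for an admin of that specific object.
List<DSpaceCommandLineParameter> params = List.of(
        new DSpaceCommandLineParameter("-t", "vscan"),          // illustrative task name
        new DSpaceCommandLineParameter("-i", "123456789/42"));  // illustrative handle
boolean scopedAllowed = configuration.isAllowedToExecute(context, params);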
View File

@@ -7,22 +7,14 @@
*/ */
package org.dspace.discovery; package org.dspace.discovery;
import java.sql.SQLException;
import org.apache.commons.cli.Options; import org.apache.commons.cli.Options;
import org.dspace.authorize.service.AuthorizeService;
import org.dspace.core.Context;
import org.dspace.scripts.configuration.ScriptConfiguration; import org.dspace.scripts.configuration.ScriptConfiguration;
import org.springframework.beans.factory.annotation.Autowired;
/** /**
* The {@link ScriptConfiguration} for the {@link IndexClient} script * The {@link ScriptConfiguration} for the {@link IndexClient} script
*/ */
public class IndexDiscoveryScriptConfiguration<T extends IndexClient> extends ScriptConfiguration<T> { public class IndexDiscoveryScriptConfiguration<T extends IndexClient> extends ScriptConfiguration<T> {
@Autowired
private AuthorizeService authorizeService;
private Class<T> dspaceRunnableClass; private Class<T> dspaceRunnableClass;
@Override @Override
@@ -30,15 +22,6 @@ public class IndexDiscoveryScriptConfiguration<T extends IndexClient> extends Sc
return dspaceRunnableClass; return dspaceRunnableClass;
} }
@Override
public boolean isAllowedToExecute(Context context) {
try {
return authorizeService.isAdmin(context);
} catch (SQLException e) {
throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
}
}
@Override @Override
public Options getOptions() { public Options getOptions() {
if (options == null) { if (options == null) {

View File

@@ -18,6 +18,9 @@ import org.dspace.content.Collection;
import org.dspace.content.DSpaceObject; import org.dspace.content.DSpaceObject;
import org.dspace.content.Item; import org.dspace.content.Item;
import org.dspace.content.WorkspaceItem; import org.dspace.content.WorkspaceItem;
import org.dspace.content.factory.ContentServiceFactory;
import org.dspace.content.service.DSpaceObjectService;
import org.dspace.core.Context;
import org.dspace.discovery.configuration.DiscoveryConfiguration; import org.dspace.discovery.configuration.DiscoveryConfiguration;
import org.dspace.discovery.configuration.DiscoveryConfigurationService; import org.dspace.discovery.configuration.DiscoveryConfigurationService;
import org.dspace.discovery.utils.DiscoverQueryBuilder; import org.dspace.discovery.utils.DiscoverQueryBuilder;
@@ -73,32 +76,77 @@ public class SearchUtils {
searchService = null; searchService = null;
} }
/**
* Retrieves the Discovery Configuration for a null context, prefix and DSpace object.
* This will result in returning the default configuration
* @return the default configuration
*/
public static DiscoveryConfiguration getDiscoveryConfiguration() { public static DiscoveryConfiguration getDiscoveryConfiguration() {
return getDiscoveryConfiguration(null, null); return getDiscoveryConfiguration(null, null, null);
} }
public static DiscoveryConfiguration getDiscoveryConfiguration(DSpaceObject dso) { /**
return getDiscoveryConfiguration(null, dso); * Retrieves the Discovery Configuration with a null prefix for a DSpace object.
* @param context
* the database context
* @param dso
* the DSpace object
* @return the Discovery Configuration for the specified DSpace object
*/
public static DiscoveryConfiguration getDiscoveryConfiguration(Context context, DSpaceObject dso) {
return getDiscoveryConfiguration(context, null, dso);
} }
/** /**
* Return the discovery configuration to use in a specific scope for the kind of search identified by the prefix. A * Return the discovery configuration to use in a specific scope for the kind of search identified by the prefix. A
* null prefix means the normal query, other predefined values are workspace or workflow * null prefix means the normal query, other predefined values are workspace or workflow
* *
*
* @param context
* the database context
* @param prefix * @param prefix
* the namespace of the configuration to lookup if any * the namespace of the configuration to lookup if any
* @param dso * @param dso
* the DSpaceObject * the DSpaceObject
* @return the discovery configuration for the specified scope * @return the discovery configuration for the specified scope
*/ */
public static DiscoveryConfiguration getDiscoveryConfiguration(String prefix, DSpaceObject dso) { public static DiscoveryConfiguration getDiscoveryConfiguration(Context context, String prefix,
DSpaceObject dso) {
if (prefix != null) { if (prefix != null) {
return getDiscoveryConfigurationByName(dso != null ? prefix + "." + dso.getHandle() : prefix); return getDiscoveryConfigurationByName(dso != null ? prefix + "." + dso.getHandle() : prefix);
} else { } else {
return getDiscoveryConfigurationByName(dso != null ? dso.getHandle() : null); return getDiscoveryConfigurationByDSO(context, dso);
} }
} }
/**
* Retrieve the configuration for the given DSpace object and all its parents and add them to the provided set
* @param context - The database context
* @param configurations - The set of configurations to add the retrieved configurations to
* @param prefix - The namespace of the configuration to lookup if any
* @param dso - The DSpace Object
* @return the set of configurations, extended with those retrieved for the DSpace object and its parents
* @throws SQLException
*/
public static Set<DiscoveryConfiguration> addDiscoveryConfigurationForParents(
Context context, Set<DiscoveryConfiguration> configurations, String prefix, DSpaceObject dso)
throws SQLException {
if (dso == null) {
configurations.add(getDiscoveryConfigurationByName(null));
return configurations;
}
if (prefix != null) {
configurations.add(getDiscoveryConfigurationByName(prefix + "." + dso.getHandle()));
} else {
configurations.add(getDiscoveryConfigurationByName(dso.getHandle()));
}
DSpaceObjectService<DSpaceObject> dSpaceObjectService = ContentServiceFactory.getInstance()
.getDSpaceObjectService(dso);
DSpaceObject parentObject = dSpaceObjectService.getParentObject(context, dso);
return addDiscoveryConfigurationForParents(context, configurations, prefix, parentObject);
}
/** /**
* Return the discovery configuration identified by the specified name * Return the discovery configuration identified by the specified name
* *
@@ -113,6 +161,18 @@ public class SearchUtils {
return configurationService.getDiscoveryConfiguration(configurationName); return configurationService.getDiscoveryConfiguration(configurationName);
} }
/**
* Return the discovery configuration for the provided DSO
* @param context - The database context
* @param dso - The DSpace object to retrieve the configuration for
* @return the discovery configuration for the provided DSO
*/
public static DiscoveryConfiguration getDiscoveryConfigurationByDSO(
Context context, DSpaceObject dso) {
DiscoveryConfigurationService configurationService = getConfigurationService();
return configurationService.getDiscoveryDSOConfiguration(context, dso);
}
public static DiscoveryConfigurationService getConfigurationService() { public static DiscoveryConfigurationService getConfigurationService() {
ServiceManager manager = DSpaceServicesFactory.getInstance().getServiceManager(); ServiceManager manager = DSpaceServicesFactory.getInstance().getServiceManager();
return manager return manager
@@ -127,47 +187,55 @@ public class SearchUtils {
* Method that retrieves a list of all the configuration objects from the given item * Method that retrieves a list of all the configuration objects from the given item
* A configuration object can be returned for each parent community/collection * A configuration object can be returned for each parent community/collection
* *
* @param context the database context
* @param item the DSpace item * @param item the DSpace item
* @return a list of configuration objects * @return a list of configuration objects
* @throws SQLException An exception that provides information on a database access error or other errors. * @throws SQLException An exception that provides information on a database access error or other errors.
*/ */
public static List<DiscoveryConfiguration> getAllDiscoveryConfigurations(Item item) throws SQLException { public static List<DiscoveryConfiguration> getAllDiscoveryConfigurations(Context context, Item item)
throws SQLException {
List<Collection> collections = item.getCollections(); List<Collection> collections = item.getCollections();
return getAllDiscoveryConfigurations(null, collections, item); return getAllDiscoveryConfigurations(context, null, collections, item);
} }
/** /**
* Return all the discovery configuration applicable to the provided workspace item * Return all the discovery configuration applicable to the provided workspace item
*
* @param context
* @param witem a workspace item * @param witem a workspace item
* @return a list of discovery configuration * @return a list of discovery configuration
* @throws SQLException * @throws SQLException
*/ */
public static List<DiscoveryConfiguration> getAllDiscoveryConfigurations(WorkspaceItem witem) throws SQLException { public static List<DiscoveryConfiguration> getAllDiscoveryConfigurations(final Context context,
WorkspaceItem witem) throws SQLException {
List<Collection> collections = new ArrayList<Collection>(); List<Collection> collections = new ArrayList<Collection>();
collections.add(witem.getCollection()); collections.add(witem.getCollection());
return getAllDiscoveryConfigurations("workspace", collections, witem.getItem()); return getAllDiscoveryConfigurations(context, "workspace", collections, witem.getItem());
} }
/** /**
* Return all the discovery configuration applicable to the provided workflow item * Return all the discovery configuration applicable to the provided workflow item
*
* @param context
* @param witem a workflow item * @param witem a workflow item
* @return a list of discovery configuration * @return a list of discovery configuration
* @throws SQLException * @throws SQLException
*/ */
public static List<DiscoveryConfiguration> getAllDiscoveryConfigurations(WorkflowItem witem) throws SQLException { public static List<DiscoveryConfiguration> getAllDiscoveryConfigurations(final Context context,
WorkflowItem witem) throws SQLException {
List<Collection> collections = new ArrayList<Collection>(); List<Collection> collections = new ArrayList<Collection>();
collections.add(witem.getCollection()); collections.add(witem.getCollection());
return getAllDiscoveryConfigurations("workflow", collections, witem.getItem()); return getAllDiscoveryConfigurations(context, "workflow", collections, witem.getItem());
} }
private static List<DiscoveryConfiguration> getAllDiscoveryConfigurations(String prefix, private static List<DiscoveryConfiguration> getAllDiscoveryConfigurations(final Context context,
String prefix,
List<Collection> collections, Item item) List<Collection> collections, Item item)
throws SQLException { throws SQLException {
Set<DiscoveryConfiguration> result = new HashSet<>(); Set<DiscoveryConfiguration> result = new HashSet<>();
for (Collection collection : collections) { for (Collection collection : collections) {
DiscoveryConfiguration configuration = getDiscoveryConfiguration(prefix, collection); addDiscoveryConfigurationForParents(context, result, prefix, collection);
result.add(configuration);
} }
//Add alwaysIndex configurations //Add alwaysIndex configurations

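A minimal sketch of the new parent-aware lookup, assuming an open Context and a Collection at hand:

// Gathers the collection's own configuration (matched by handle), then each
// parent community's, and finally the "default" configuration when the
// recursion reaches a null parent.
Set<DiscoveryConfiguration> configurations = new HashSet<>();
SearchUtils.addDiscoveryConfigurationForParents(context, configurations, null, collection);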
View File

@@ -53,10 +53,20 @@ public class SolrServiceFileInfoPlugin implements SolrServiceIndexPlugin {
if (bitstreams != null) { if (bitstreams != null) {
for (Bitstream bitstream : bitstreams) { for (Bitstream bitstream : bitstreams) {
document.addField(SOLR_FIELD_NAME_FOR_FILENAMES, bitstream.getName()); document.addField(SOLR_FIELD_NAME_FOR_FILENAMES, bitstream.getName());
// Add _keyword and _filter fields which are necessary to support filtering and faceting
// for the file names
document.addField(SOLR_FIELD_NAME_FOR_FILENAMES + "_keyword", bitstream.getName());
document.addField(SOLR_FIELD_NAME_FOR_FILENAMES + "_filter", bitstream.getName());
String description = bitstream.getDescription(); String description = bitstream.getDescription();
if ((description != null) && !description.isEmpty()) { if ((description != null) && !description.isEmpty()) {
document.addField(SOLR_FIELD_NAME_FOR_DESCRIPTIONS, description); document.addField(SOLR_FIELD_NAME_FOR_DESCRIPTIONS, description);
// Add _keyword and _filter fields which are necessary to support filtering and
// faceting for the descriptions
document.addField(SOLR_FIELD_NAME_FOR_DESCRIPTIONS + "_keyword",
description);
document.addField(SOLR_FIELD_NAME_FOR_DESCRIPTIONS + "_filter",
description);
} }
} }
} }

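A hedged sketch of the per-bitstream output, assuming the two field-name constants resolve to their conventional values (not shown in this hunk):

document.addField("original_bundle_filenames", bitstream.getName());          // search, as before
document.addField("original_bundle_filenames_keyword", bitstream.getName()); // exact-match filtering
document.addField("original_bundle_filenames_filter", bitstream.getName());  // sidebar faceting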
View File

@@ -7,12 +7,23 @@
*/ */
package org.dspace.discovery.configuration; package org.dspace.discovery.configuration;
import java.sql.SQLException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.dspace.content.Collection;
import org.dspace.content.Community;
import org.dspace.content.DSpaceObject;
import org.dspace.content.factory.ContentServiceFactory;
import org.dspace.content.service.DSpaceObjectService;
import org.dspace.core.Context;
import org.dspace.discovery.IndexableObject; import org.dspace.discovery.IndexableObject;
import org.dspace.discovery.indexobject.IndexableDSpaceObject; import org.dspace.discovery.indexobject.IndexableDSpaceObject;
import org.dspace.services.factory.DSpaceServicesFactory; import org.dspace.services.factory.DSpaceServicesFactory;
@@ -22,9 +33,18 @@ import org.dspace.services.factory.DSpaceServicesFactory;
*/ */
public class DiscoveryConfigurationService { public class DiscoveryConfigurationService {
private static final Logger log = LogManager.getLogger();
private Map<String, DiscoveryConfiguration> map; private Map<String, DiscoveryConfiguration> map;
private Map<Integer, List<String>> toIgnoreMetadataFields = new HashMap<>(); private Map<Integer, List<String>> toIgnoreMetadataFields = new HashMap<>();
/**
* Discovery configurations, cached by Community/Collection UUID. When a Community or Collection does not have its
* own configuration, we use that of the nearest ancestor that does.
* This cache ensures we do not have to go up the hierarchy every time.
*/
private final Map<UUID, DiscoveryConfiguration> comColToDiscoveryConfigurationMap = new ConcurrentHashMap<>();
public Map<String, DiscoveryConfiguration> getMap() { public Map<String, DiscoveryConfiguration> getMap() {
return map; return map;
} }
@@ -41,25 +61,98 @@ public class DiscoveryConfigurationService {
this.toIgnoreMetadataFields = toIgnoreMetadataFields; this.toIgnoreMetadataFields = toIgnoreMetadataFields;
} }
public DiscoveryConfiguration getDiscoveryConfiguration(IndexableObject dso) { /**
* Retrieve the discovery configuration for the provided IndexableObject. When a DSpace Object can be retrieved from
* the IndexableObject, the discovery configuration will be returned for the DSpace Object. Otherwise, a check will
* be done to look for the unique index ID of the IndexableObject. When the IndexableObject is null, the default
* configuration will be retrieved
*
* When no direct match is found, the parent object will
* be checked until there is no parent left, in which case the "default" configuration will be returned.
* @param context - The database context
* @param indexableObject - The IndexableObject to retrieve the configuration for
* @return the discovery configuration for the provided IndexableObject.
*/
public DiscoveryConfiguration getDiscoveryConfiguration(Context context, IndexableObject indexableObject) {
String name; String name;
if (dso == null) { if (indexableObject == null) {
name = "default"; return getDiscoveryConfiguration(null);
} else if (dso instanceof IndexableDSpaceObject) { } else if (indexableObject instanceof IndexableDSpaceObject) {
name = ((IndexableDSpaceObject) dso).getIndexedObject().getHandle(); return getDiscoveryDSOConfiguration(context, ((IndexableDSpaceObject) indexableObject).getIndexedObject());
} else { } else {
name = dso.getUniqueIndexID(); name = indexableObject.getUniqueIndexID();
} }
return getDiscoveryConfiguration(name); return getDiscoveryConfiguration(name);
} }
public DiscoveryConfiguration getDiscoveryConfiguration(final String name) { /**
* Retrieve the discovery configuration for the provided DSO. When no direct match is found, the parent object will
* be checked until there is no parent left, in which case the "default" configuration will be returned.
* @param context - The database context
* @param dso - The DSpace object to retrieve the configuration for
* @return the discovery configuration for the provided DSO.
*/
public DiscoveryConfiguration getDiscoveryDSOConfiguration(final Context context, DSpaceObject dso) {
// Fall back to default configuration
if (dso == null) {
return getDiscoveryConfiguration(null, true);
}
// Attempt to retrieve cached configuration by UUID
if (comColToDiscoveryConfigurationMap.containsKey(dso.getID())) {
return comColToDiscoveryConfigurationMap.get(dso.getID());
}
DiscoveryConfiguration configuration;
// Attempt to retrieve configuration by DSO handle
configuration = getDiscoveryConfiguration(dso.getHandle(), false);
if (configuration == null) {
// Recurse up the Comm/Coll hierarchy until a configuration is found
DSpaceObjectService<DSpaceObject> dSpaceObjectService =
ContentServiceFactory.getInstance().getDSpaceObjectService(dso);
DSpaceObject parentObject = null;
try {
parentObject = dSpaceObjectService.getParentObject(context, dso);
} catch (SQLException e) {
log.error(e);
}
configuration = getDiscoveryDSOConfiguration(context, parentObject);
}
// Cache the resulting configuration when the DSO is a Community or Collection
if (dso instanceof Community || dso instanceof Collection) {
comColToDiscoveryConfigurationMap.put(dso.getID(), configuration);
}
return configuration;
}
/**
* Retrieve the Discovery Configuration for the provided name. When no configuration can be found for the name, the
* default configuration will be returned.
* @param name - The name of the configuration to be retrieved
* @return the Discovery Configuration for the provided name, or default when none was found.
*/
public DiscoveryConfiguration getDiscoveryConfiguration(String name) {
return getDiscoveryConfiguration(name, true);
}
/**
* Retrieve the configuration for the provided name. When useDefault is set to true, the "default" configuration
* will be returned when no match is found. When useDefault is set to false, null will be returned when no match is
* found.
* @param name - The name of the configuration to retrieve
* @param useDefault - Whether the default configuration should be used when no match is found
* @return the configuration for the provided name
*/
public DiscoveryConfiguration getDiscoveryConfiguration(final String name, boolean useDefault) {
DiscoveryConfiguration result; DiscoveryConfiguration result;
result = StringUtils.isBlank(name) ? null : getMap().get(name); result = StringUtils.isBlank(name) ? null : getMap().get(name);
if (result == null) { if (result == null && useDefault) {
//No specific configuration, get the default one //No specific configuration, get the default one
result = getMap().get("default"); result = getMap().get("default");
} }
@@ -67,12 +160,23 @@ public class DiscoveryConfigurationService {
return result; return result;
} }
public DiscoveryConfiguration getDiscoveryConfigurationByNameOrDso(final String configurationName, /**
final IndexableObject dso) { * Retrieve the Discovery configuration for the provided name or IndexableObject. The configuration will first be
* checked for the provided name. When no match is found for the name, the configuration will be retrieved for the
* IndexableObject
*
* @param context - The database context
* @param configurationName - The name of the configuration to be retrieved
* @param indexableObject - The indexable object to retrieve the configuration for
* @return the Discovery configuration for the provided name or, when no match is found, for the provided IndexableObject
*/
public DiscoveryConfiguration getDiscoveryConfigurationByNameOrIndexableObject(Context context,
String configurationName,
IndexableObject indexableObject) {
if (StringUtils.isNotBlank(configurationName) && getMap().containsKey(configurationName)) { if (StringUtils.isNotBlank(configurationName) && getMap().containsKey(configurationName)) {
return getMap().get(configurationName); return getMap().get(configurationName);
} else { } else {
return getDiscoveryConfiguration(dso); return getDiscoveryConfiguration(context, indexableObject);
} }
} }
@@ -92,6 +196,18 @@ public class DiscoveryConfigurationService {
return configs; return configs;
} }
/**
* @return All configurations for {@link org.dspace.discovery.configuration.DiscoverySearchFilterFacet}
*/
public List<DiscoverySearchFilterFacet> getAllFacetsConfig() {
List<DiscoverySearchFilterFacet> configs = new ArrayList<>();
for (String key : map.keySet()) {
DiscoveryConfiguration config = map.get(key);
configs.addAll(config.getSidebarFacets());
}
return configs;
}
public static void main(String[] args) { public static void main(String[] args) {
System.out.println(DSpaceServicesFactory.getInstance().getServiceManager().getServicesNames().size()); System.out.println(DSpaceServicesFactory.getInstance().getServiceManager().getServicesNames().size());
DiscoveryConfigurationService mainService = DSpaceServicesFactory.getInstance().getServiceManager() DiscoveryConfigurationService mainService = DSpaceServicesFactory.getInstance().getServiceManager()

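A hedged sketch of the three lookup flavours introduced above, assuming an open Context and a Collection:

DiscoveryConfigurationService configurationService = SearchUtils.getConfigurationService();

// Walks collection -> parent community -> ... -> "default", then caches the
// result per Community/Collection UUID in comColToDiscoveryConfigurationMap.
DiscoveryConfiguration byDso = configurationService.getDiscoveryDSOConfiguration(context, collection);

// Name lookup falling back to "default" for unknown names:
DiscoveryConfiguration byName = configurationService.getDiscoveryConfiguration("workspace");

// Strict name lookup returning null instead of the default:
DiscoveryConfiguration strictOrNull = configurationService.getDiscoveryConfiguration("workspace", false);

Note that nothing in this hunk invalidates the cache, so moving a Collection under a different Community presumably requires a restart before it picks up a newly inherited configuration.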
View File

@@ -9,6 +9,7 @@ package org.dspace.discovery.configuration;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import javax.annotation.Nullable;
import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.StringUtils;
@@ -22,6 +23,11 @@ public class DiscoverySortConfiguration {
private List<DiscoverySortFieldConfiguration> sortFields = new ArrayList<DiscoverySortFieldConfiguration>(); private List<DiscoverySortFieldConfiguration> sortFields = new ArrayList<DiscoverySortFieldConfiguration>();
/**
* Default sort configuration to use when needed
*/
@Nullable private DiscoverySortFieldConfiguration defaultSortField;
public List<DiscoverySortFieldConfiguration> getSortFields() { public List<DiscoverySortFieldConfiguration> getSortFields() {
return sortFields; return sortFields;
} }
@@ -30,6 +36,14 @@ public class DiscoverySortConfiguration {
this.sortFields = sortFields; this.sortFields = sortFields;
} }
public DiscoverySortFieldConfiguration getDefaultSortField() {
return defaultSortField;
}
public void setDefaultSortField(DiscoverySortFieldConfiguration configuration) {
this.defaultSortField = configuration;
}
public DiscoverySortFieldConfiguration getSortFieldConfiguration(String sortField) { public DiscoverySortFieldConfiguration getSortFieldConfiguration(String sortField) {
if (StringUtils.isBlank(sortField)) { if (StringUtils.isBlank(sortField)) {
return null; return null;

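A hedged construction sketch; the setter names follow the bean property above, and the metadata field value is illustrative:

// Configure an explicit default sort instead of relying on position 0 of sortFields.
DiscoverySortFieldConfiguration dateSort = new DiscoverySortFieldConfiguration();
dateSort.setMetadataField("dc.date.accessioned");   // illustrative field

DiscoverySortConfiguration sortConfiguration = new DiscoverySortConfiguration();
sortConfiguration.setSortFields(List.of(dateSort));
sortConfiguration.setDefaultSortField(dateSort);

DiscoverQueryBuilder, changed later in this commit, consults getDefaultSortField() first, then falls back to the first entry of sortFields, then to "score".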
View File

@@ -86,7 +86,7 @@ public class CollectionIndexFactoryImpl extends DSpaceObjectIndexFactoryImpl<Ind
final Collection collection = indexableCollection.getIndexedObject(); final Collection collection = indexableCollection.getIndexedObject();
// Retrieve configuration // Retrieve configuration
DiscoveryConfiguration discoveryConfiguration = SearchUtils.getDiscoveryConfiguration(collection); DiscoveryConfiguration discoveryConfiguration = SearchUtils.getDiscoveryConfiguration(context, collection);
DiscoveryHitHighlightingConfiguration highlightingConfiguration = discoveryConfiguration DiscoveryHitHighlightingConfiguration highlightingConfiguration = discoveryConfiguration
.getHitHighlightingConfiguration(); .getHitHighlightingConfiguration();
List<String> highlightedMetadataFields = new ArrayList<>(); List<String> highlightedMetadataFields = new ArrayList<>();

View File

@@ -69,7 +69,7 @@ public class CommunityIndexFactoryImpl extends DSpaceObjectIndexFactoryImpl<Inde
final Community community = indexableObject.getIndexedObject(); final Community community = indexableObject.getIndexedObject();
// Retrieve configuration // Retrieve configuration
DiscoveryConfiguration discoveryConfiguration = SearchUtils.getDiscoveryConfiguration(community); DiscoveryConfiguration discoveryConfiguration = SearchUtils.getDiscoveryConfiguration(context, community);
DiscoveryHitHighlightingConfiguration highlightingConfiguration = discoveryConfiguration DiscoveryHitHighlightingConfiguration highlightingConfiguration = discoveryConfiguration
.getHitHighlightingConfiguration(); .getHitHighlightingConfiguration();
List<String> highlightedMetadataFields = new ArrayList<>(); List<String> highlightedMetadataFields = new ArrayList<>();

View File

@@ -80,11 +80,13 @@ public abstract class InprogressSubmissionIndexFactoryImpl
// Add item metadata // Add item metadata
List<DiscoveryConfiguration> discoveryConfigurations; List<DiscoveryConfiguration> discoveryConfigurations;
if (inProgressSubmission instanceof WorkflowItem) { if (inProgressSubmission instanceof WorkflowItem) {
discoveryConfigurations = SearchUtils.getAllDiscoveryConfigurations((WorkflowItem) inProgressSubmission); discoveryConfigurations = SearchUtils.getAllDiscoveryConfigurations(context,
(WorkflowItem) inProgressSubmission);
} else if (inProgressSubmission instanceof WorkspaceItem) { } else if (inProgressSubmission instanceof WorkspaceItem) {
discoveryConfigurations = SearchUtils.getAllDiscoveryConfigurations((WorkspaceItem) inProgressSubmission); discoveryConfigurations = SearchUtils.getAllDiscoveryConfigurations(context,
(WorkspaceItem) inProgressSubmission);
} else { } else {
discoveryConfigurations = SearchUtils.getAllDiscoveryConfigurations(item); discoveryConfigurations = SearchUtils.getAllDiscoveryConfigurations(context, item);
} }
indexableItemService.addDiscoveryFields(doc, context, item, discoveryConfigurations); indexableItemService.addDiscoveryFields(doc, context, item, discoveryConfigurations);
indexableCollectionService.storeCommunityCollectionLocations(doc, locations); indexableCollectionService.storeCommunityCollectionLocations(doc, locations);

View File

@@ -31,6 +31,7 @@ import org.apache.commons.lang3.time.DateFormatUtils;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.apache.solr.client.solrj.SolrServerException; import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.common.SolrInputDocument; import org.apache.solr.common.SolrInputDocument;
import org.dspace.authority.service.AuthorityValueService;
import org.dspace.content.Collection; import org.dspace.content.Collection;
import org.dspace.content.Community; import org.dspace.content.Community;
import org.dspace.content.Item; import org.dspace.content.Item;
@@ -93,6 +94,8 @@ public class ItemIndexFactoryImpl extends DSpaceObjectIndexFactoryImpl<Indexable
protected ItemService itemService; protected ItemService itemService;
@Autowired(required = true) @Autowired(required = true)
protected ChoiceAuthorityService choiceAuthorityService; protected ChoiceAuthorityService choiceAuthorityService;
@Autowired(required = true)
protected AuthorityValueService authorityValueService;
@Autowired @Autowired
protected MetadataAuthorityService metadataAuthorityService; protected MetadataAuthorityService metadataAuthorityService;
@Autowired @Autowired
@@ -157,7 +160,7 @@ public class ItemIndexFactoryImpl extends DSpaceObjectIndexFactoryImpl<Indexable
} }
// Add the item metadata // Add the item metadata
List<DiscoveryConfiguration> discoveryConfigurations = SearchUtils.getAllDiscoveryConfigurations(item); List<DiscoveryConfiguration> discoveryConfigurations = SearchUtils.getAllDiscoveryConfigurations(context, item);
addDiscoveryFields(doc, context, indexableItem.getIndexedObject(), discoveryConfigurations); addDiscoveryFields(doc, context, indexableItem.getIndexedObject(), discoveryConfigurations);
//mandatory facet to show status on mydspace //mandatory facet to show status on mydspace
@@ -415,7 +418,7 @@ public class ItemIndexFactoryImpl extends DSpaceObjectIndexFactoryImpl<Indexable
Boolean.FALSE), Boolean.FALSE),
true); true);
if (!ignorePrefered) { if (!ignorePrefered && !authority.startsWith(AuthorityValueService.GENERATE)) {
try { try {
preferedLabel = choiceAuthorityService.getLabel(meta, collection, meta.getLanguage()); preferedLabel = choiceAuthorityService.getLabel(meta, collection, meta.getLanguage());
} catch (Exception e) { } catch (Exception e) {

View File

@@ -64,6 +64,7 @@ public class MetadataFieldIndexFactoryImpl extends IndexFactoryImpl<IndexableMet
Group anonymousGroup = groupService.findByName(context, Group.ANONYMOUS); Group anonymousGroup = groupService.findByName(context, Group.ANONYMOUS);
// add read permission on doc for anonymous group // add read permission on doc for anonymous group
doc.addField("read", "g" + anonymousGroup.getID()); doc.addField("read", "g" + anonymousGroup.getID());
doc.addField(FIELD_NAME_VARIATIONS + "_sort", fieldName);
return doc; return doc;
} }

View File

@@ -332,7 +332,9 @@ public class DiscoverQueryBuilder implements InitializingBean {
} }
private String getDefaultSortDirection(DiscoverySortConfiguration searchSortConfiguration, String sortOrder) { private String getDefaultSortDirection(DiscoverySortConfiguration searchSortConfiguration, String sortOrder) {
if (Objects.nonNull(searchSortConfiguration.getSortFields()) && if (searchSortConfiguration.getDefaultSortField() != null) {
sortOrder = searchSortConfiguration.getDefaultSortField().getDefaultSortOrder().name();
} else if (Objects.nonNull(searchSortConfiguration.getSortFields()) &&
!searchSortConfiguration.getSortFields().isEmpty()) { !searchSortConfiguration.getSortFields().isEmpty()) {
sortOrder = searchSortConfiguration.getSortFields().get(0).getDefaultSortOrder().name(); sortOrder = searchSortConfiguration.getSortFields().get(0).getDefaultSortOrder().name();
} }
@@ -342,7 +344,9 @@ public class DiscoverQueryBuilder implements InitializingBean {
private String getDefaultSortField(DiscoverySortConfiguration searchSortConfiguration) { private String getDefaultSortField(DiscoverySortConfiguration searchSortConfiguration) {
String sortBy;// Attempt to find the default one, if none found we use SCORE String sortBy;// Attempt to find the default one, if none found we use SCORE
sortBy = "score"; sortBy = "score";
if (Objects.nonNull(searchSortConfiguration.getSortFields()) && if (searchSortConfiguration.getDefaultSortField() != null) {
sortBy = searchSortConfiguration.getDefaultSortField().getMetadataField();
} else if (Objects.nonNull(searchSortConfiguration.getSortFields()) &&
!searchSortConfiguration.getSortFields().isEmpty()) { !searchSortConfiguration.getSortFields().isEmpty()) {
DiscoverySortFieldConfiguration defaultSort = searchSortConfiguration.getSortFields().get(0); DiscoverySortFieldConfiguration defaultSort = searchSortConfiguration.getSortFields().get(0);
if (StringUtils.isBlank(defaultSort.getMetadataField())) { if (StringUtils.isBlank(defaultSort.getMetadataField())) {

View File

@@ -353,8 +353,6 @@ public class GroupServiceImpl extends DSpaceObjectServiceImpl<Group> implements
List<Group2GroupCache> groupCache = group2GroupCacheDAO.findByChildren(context, groups); List<Group2GroupCache> groupCache = group2GroupCacheDAO.findByChildren(context, groups);
// now we have all owning groups, also grab all parents of owning groups // now we have all owning groups, also grab all parents of owning groups
// yes, I know this could have been done as one big query and a union,
// but doing the Oracle port taught me to keep to simple SQL!
for (Group2GroupCache group2GroupCache : groupCache) { for (Group2GroupCache group2GroupCache : groupCache) {
groups.add(group2GroupCache.getParent()); groups.add(group2GroupCache.getParent());
} }

View File

@@ -27,13 +27,14 @@ import org.dspace.versioning.Version;
import org.dspace.versioning.VersionHistory; import org.dspace.versioning.VersionHistory;
import org.dspace.versioning.service.VersionHistoryService; import org.dspace.versioning.service.VersionHistoryService;
import org.dspace.versioning.service.VersioningService; import org.dspace.versioning.service.VersioningService;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Autowired;
/** /**
* @author Marsa Haoua * @author Marsa Haoua
* @author Pascal-Nicolas Becker (dspace at pascal dash becker dot de) * @author Pascal-Nicolas Becker (dspace at pascal dash becker dot de)
*/ */
public class VersionedDOIIdentifierProvider extends DOIIdentifierProvider { public class VersionedDOIIdentifierProvider extends DOIIdentifierProvider implements InitializingBean {
/** /**
* log4j category * log4j category
*/ */
@@ -49,6 +50,19 @@ public class VersionedDOIIdentifierProvider extends DOIIdentifierProvider {
@Autowired(required = true) @Autowired(required = true)
protected VersionHistoryService versionHistoryService; protected VersionHistoryService versionHistoryService;
/**
* After all properties are set, check that versioning is enabled.
*
* @throws Exception throws an exception if this isn't the case
*/
@Override
public void afterPropertiesSet() throws Exception {
if (!configurationService.getBooleanProperty("versioning.enabled", true)) {
throw new RuntimeException("the " + VersionedDOIIdentifierProvider.class.getName() +
" is enabled, but the versioning is disabled.");
}
}
@Override @Override
public String mint(Context context, DSpaceObject dso) throws IdentifierException { public String mint(Context context, DSpaceObject dso) throws IdentifierException {
return mint(context, dso, this.filter); return mint(context, dso, this.filter);
@@ -66,7 +80,7 @@ public class VersionedDOIIdentifierProvider extends DOIIdentifierProvider {
try { try {
history = versionHistoryService.findByItem(context, item); history = versionHistoryService.findByItem(context, item);
} catch (SQLException ex) { } catch (SQLException ex) {
throw new RuntimeException("A problem occured while accessing the database.", ex); throw new RuntimeException("A problem occurred while accessing the database.", ex);
} }
String doi = null; String doi = null;
@@ -76,7 +90,7 @@ public class VersionedDOIIdentifierProvider extends DOIIdentifierProvider {
return doi; return doi;
} }
} catch (SQLException ex) { } catch (SQLException ex) {
log.error("Error while attemping to retrieve information about a DOI for " log.error("Error while attempting to retrieve information about a DOI for "
+ contentServiceFactory.getDSpaceObjectService(dso).getTypeText(dso) + contentServiceFactory.getDSpaceObjectService(dso).getTypeText(dso)
+ " with ID " + dso.getID() + ".", ex); + " with ID " + dso.getID() + ".", ex);
throw new RuntimeException("Error while attempting to retrieve " throw new RuntimeException("Error while attempting to retrieve "
@@ -134,7 +148,7 @@ public class VersionedDOIIdentifierProvider extends DOIIdentifierProvider {
if (history != null) { if (history != null) {
// versioning is currently supported for items only // versioning is currently supported for items only
// if we have a history, we have a item // if we have a history, we have a item
doi = makeIdentifierBasedOnHistory(context, dso, history); doi = makeIdentifierBasedOnHistory(context, dso, history, filter);
} else { } else {
doi = loadOrCreateDOI(context, dso, null, filter).getDoi(); doi = loadOrCreateDOI(context, dso, null, filter).getDoi();
} }
@@ -145,7 +159,7 @@ public class VersionedDOIIdentifierProvider extends DOIIdentifierProvider {
log.error("AuthorizationException while creating a new DOI: ", ex); log.error("AuthorizationException while creating a new DOI: ", ex);
throw new IdentifierException(ex); throw new IdentifierException(ex);
} }
return doi; return doi.startsWith(DOI.SCHEME) ? doi : DOI.SCHEME + doi;
} }
@Override @Override
@@ -153,6 +167,21 @@ public class VersionedDOIIdentifierProvider extends DOIIdentifierProvider {
register(context, dso, identifier, this.filter); register(context, dso, identifier, this.filter);
} }
@Override
public String register(Context context, DSpaceObject dso, Filter filter)
throws IdentifierException {
if (!(dso instanceof Item)) {
// DOIs are currently assigned only to Items
return null;
}
String doi = mint(context, dso, filter);
register(context, dso, doi, filter);
return doi;
}
@Override @Override
public void register(Context context, DSpaceObject dso, String identifier, Filter filter) public void register(Context context, DSpaceObject dso, String identifier, Filter filter)
throws IdentifierException { throws IdentifierException {
@@ -162,7 +191,7 @@ public class VersionedDOIIdentifierProvider extends DOIIdentifierProvider {
Item item = (Item) dso; Item item = (Item) dso;
if (StringUtils.isEmpty(identifier)) { if (StringUtils.isEmpty(identifier)) {
identifier = mint(context, dso); identifier = mint(context, dso, filter);
} }
String doiIdentifier = doiService.formatIdentifier(identifier); String doiIdentifier = doiService.formatIdentifier(identifier);
@@ -170,10 +199,10 @@ public class VersionedDOIIdentifierProvider extends DOIIdentifierProvider {
// search DOI in our db // search DOI in our db
try { try {
doi = loadOrCreateDOI(context, dso, doiIdentifier); doi = loadOrCreateDOI(context, dso, doiIdentifier, filter);
} catch (SQLException ex) { } catch (SQLException ex) {
log.error("Error in databse connection: " + ex.getMessage(), ex); log.error("Error in database connection: " + ex.getMessage(), ex);
throw new RuntimeException("Error in database conncetion.", ex); throw new RuntimeException("Error in database connection.", ex);
} }
if (DELETED.equals(doi.getStatus()) || if (DELETED.equals(doi.getStatus()) ||

View File

@@ -35,6 +35,7 @@ import org.dspace.versioning.Version;
import org.dspace.versioning.VersionHistory; import org.dspace.versioning.VersionHistory;
import org.dspace.versioning.service.VersionHistoryService; import org.dspace.versioning.service.VersionHistoryService;
import org.dspace.versioning.service.VersioningService; import org.dspace.versioning.service.VersioningService;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component; import org.springframework.stereotype.Component;
@@ -45,7 +46,7 @@ import org.springframework.stereotype.Component;
* @author Pascal-Nicolas Becker (dspace at pascal dash becker dot de) * @author Pascal-Nicolas Becker (dspace at pascal dash becker dot de)
*/ */
@Component @Component
public class VersionedHandleIdentifierProvider extends IdentifierProvider { public class VersionedHandleIdentifierProvider extends IdentifierProvider implements InitializingBean {
/** /**
* log4j category * log4j category
*/ */
@@ -71,6 +72,19 @@ public class VersionedHandleIdentifierProvider extends IdentifierProvider {
@Autowired(required = true) @Autowired(required = true)
protected ContentServiceFactory contentServiceFactory; protected ContentServiceFactory contentServiceFactory;
/**
* After all properties are set, check that versioning is enabled.
*
* @throws Exception throws an exception if this isn't the case
*/
@Override
public void afterPropertiesSet() throws Exception {
if (!configurationService.getBooleanProperty("versioning.enabled", true)) {
throw new RuntimeException("the " + VersionedHandleIdentifierProvider.class.getName() +
" is enabled, but the versioning is disabled.");
}
}
@Override @Override
public boolean supports(Class<? extends Identifier> identifier) { public boolean supports(Class<? extends Identifier> identifier) {
return Handle.class.isAssignableFrom(identifier); return Handle.class.isAssignableFrom(identifier);

View File

@@ -30,6 +30,7 @@ import org.dspace.versioning.Version;
import org.dspace.versioning.VersionHistory; import org.dspace.versioning.VersionHistory;
import org.dspace.versioning.service.VersionHistoryService; import org.dspace.versioning.service.VersionHistoryService;
import org.dspace.versioning.service.VersioningService; import org.dspace.versioning.service.VersioningService;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component; import org.springframework.stereotype.Component;
@@ -39,7 +40,8 @@ import org.springframework.stereotype.Component;
* @author Ben Bosman (ben at atmire dot com) * @author Ben Bosman (ben at atmire dot com)
*/ */
@Component @Component
public class VersionedHandleIdentifierProviderWithCanonicalHandles extends IdentifierProvider { public class VersionedHandleIdentifierProviderWithCanonicalHandles extends IdentifierProvider
implements InitializingBean {
/** /**
* log4j category * log4j category
*/ */
@@ -65,6 +67,19 @@ public class VersionedHandleIdentifierProviderWithCanonicalHandles extends Ident
@Autowired(required = true) @Autowired(required = true)
private ItemService itemService; private ItemService itemService;
/**
* After all properties are set, check that versioning is enabled.
*
* @throws Exception throws an exception if this isn't the case
*/
@Override
public void afterPropertiesSet() throws Exception {
if (!configurationService.getBooleanProperty("versioning.enabled", true)) {
throw new RuntimeException("the " + VersionedHandleIdentifierProviderWithCanonicalHandles.class.getName() +
" is enabled, but the versioning is disabled.");
}
}
@Override @Override
public boolean supports(Class<? extends Identifier> identifier) { public boolean supports(Class<? extends Identifier> identifier) {
return Handle.class.isAssignableFrom(identifier); return Handle.class.isAssignableFrom(identifier);

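All three versioned identifier providers in this commit gain the same InitializingBean guard, turning a silent mis-configuration into a fail-fast startup error. A minimal reading of the check, assuming an injected ConfigurationService:

// The lookup defaults to true, so an absent "versioning.enabled" key keeps the
// providers usable; an explicit false aborts Spring context startup for any of
// VersionedDOIIdentifierProvider, VersionedHandleIdentifierProvider and
// VersionedHandleIdentifierProviderWithCanonicalHandles.
boolean versioningEnabled = configurationService.getBooleanProperty("versioning.enabled", true);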
View File

@@ -0,0 +1,79 @@
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.importer.external.crossref;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.dspace.importer.external.metadatamapping.contributor.JsonPathMetadataProcessor;
import org.joda.time.LocalDate;
/**
* This class is used for CrossRef's Live-Import to extract the
* issued attribute.
* Beans are configured in the crossref-integration.xml file.
*
* @author Francesco Pio Scognamiglio (francescopio.scognamiglio at 4science.com)
*/
public class CrossRefDateMetadataProcessor implements JsonPathMetadataProcessor {
private final static Logger log = LogManager.getLogger();
private String pathToArray;
@Override
public Collection<String> processMetadata(String json) {
JsonNode rootNode = convertStringJsonToJsonNode(json);
Iterator<JsonNode> dates = rootNode.at(pathToArray).iterator();
Collection<String> values = new ArrayList<>();
while (dates.hasNext()) {
JsonNode date = dates.next();
LocalDate issuedDate = null;
SimpleDateFormat issuedDateFormat = null;
if (date.has(0) && date.has(1) && date.has(2)) {
issuedDate = new LocalDate(
date.get(0).numberValue().intValue(),
date.get(1).numberValue().intValue(),
date.get(2).numberValue().intValue());
issuedDateFormat = new SimpleDateFormat("yyyy-MM-dd");
} else if (date.has(0) && date.has(1)) {
issuedDate = new LocalDate().withYear(date.get(0).numberValue().intValue())
.withMonthOfYear(date.get(1).numberValue().intValue());
issuedDateFormat = new SimpleDateFormat("yyyy-MM");
} else if (date.has(0)) {
issuedDate = new LocalDate().withYear(date.get(0).numberValue().intValue());
issuedDateFormat = new SimpleDateFormat("yyyy");
}
if (issuedDate == null) {
// Skip malformed or empty date-parts entries instead of risking an NPE below.
continue;
}
values.add(issuedDateFormat.format(issuedDate.toDate()));
}
return values;
}
private JsonNode convertStringJsonToJsonNode(String json) {
ObjectMapper mapper = new ObjectMapper();
JsonNode body = null;
try {
body = mapper.readTree(json);
} catch (JsonProcessingException e) {
log.error("Unable to process json response.", e);
}
return body;
}
public void setPathToArray(String pathToArray) {
this.pathToArray = pathToArray;
}
}

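A usage sketch for the processor above. The JSON shape follows CrossRef's "date-parts" convention, and the pointer path is illustrative rather than copied from crossref-integration.xml:

    CrossRefDateMetadataProcessor processor = new CrossRefDateMetadataProcessor();
    processor.setPathToArray("/issued/date-parts");
    Collection<String> issued = processor.processMetadata(
            "{\"issued\":{\"date-parts\":[[2021,3],[2020]]}}");
    // issued now contains "2021-03" (year and month) and "2020" (year only)
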
View File

@@ -15,8 +15,8 @@ import java.util.Date;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.dspace.content.DCDate;
import org.dspace.importer.external.metadatamapping.MetadataFieldConfig; import org.dspace.importer.external.metadatamapping.MetadataFieldConfig;
import org.dspace.importer.external.metadatamapping.MetadataFieldMapping; import org.dspace.importer.external.metadatamapping.MetadataFieldMapping;
import org.dspace.importer.external.metadatamapping.MetadatumDTO; import org.dspace.importer.external.metadatamapping.MetadatumDTO;
@@ -107,28 +107,30 @@ public class PubmedDateMetadatumContributor<T> implements MetadataContributor<T>
LinkedList<MetadatumDTO> dayList = (LinkedList<MetadatumDTO>) day.contributeMetadata(t); LinkedList<MetadatumDTO> dayList = (LinkedList<MetadatumDTO>) day.contributeMetadata(t);
for (int i = 0; i < yearList.size(); i++) { for (int i = 0; i < yearList.size(); i++) {
DCDate dcDate = null; String resultDateString = "";
String dateString = ""; String dateString = "";
SimpleDateFormat resultFormatter = null;
if (monthList.size() > i && dayList.size() > i) { if (monthList.size() > i && dayList.size() > i) {
dateString = yearList.get(i).getValue() + "-" + monthList.get(i).getValue() + dateString = yearList.get(i).getValue() + "-" + monthList.get(i).getValue() +
"-" + dayList.get(i).getValue(); "-" + dayList.get(i).getValue();
resultFormatter = new SimpleDateFormat("yyyy-MM-dd");
} else if (monthList.size() > i) { } else if (monthList.size() > i) {
dateString = yearList.get(i).getValue() + "-" + monthList.get(i).getValue(); dateString = yearList.get(i).getValue() + "-" + monthList.get(i).getValue();
resultFormatter = new SimpleDateFormat("yyyy-MM");
} else { } else {
dateString = yearList.get(i).getValue(); dateString = yearList.get(i).getValue();
resultFormatter = new SimpleDateFormat("yyyy");
} }
int j = 0; int j = 0;
// Use the first dcDate that has been formatted (Config should go from most specific to most lenient) // Use the first dcDate that has been formatted (Config should go from most specific to most lenient)
while (j < dateFormatsToAttempt.size()) { while (j < dateFormatsToAttempt.size() && StringUtils.isBlank(resultDateString)) {
String dateFormat = dateFormatsToAttempt.get(j); String dateFormat = dateFormatsToAttempt.get(j);
try { try {
SimpleDateFormat formatter = new SimpleDateFormat(dateFormat); SimpleDateFormat formatter = new SimpleDateFormat(dateFormat);
Date date = formatter.parse(dateString); Date date = formatter.parse(dateString);
dcDate = new DCDate(date); resultDateString = resultFormatter.format(date);
values.add(metadataFieldMapping.toDCValue(field, formatter.format(date)));
break;
} catch (ParseException e) { } catch (ParseException e) {
// Multiple dateformats can be configured, we don't want to print the entire stacktrace every // Multiple dateformats can be configured, we don't want to print the entire stacktrace every
// time one of those formats fails. // time one of those formats fails.
@@ -138,7 +140,9 @@ public class PubmedDateMetadatumContributor<T> implements MetadataContributor<T>
} }
j++; j++;
} }
if (dcDate == null) { if (StringUtils.isNotBlank(resultDateString)) {
values.add(metadataFieldMapping.toDCValue(field, resultDateString));
} else {
log.info( log.info(
"Failed parsing " + dateString + ", check " + "Failed parsing " + dateString + ", check " +
"the configured dataformats in config/spring/api/pubmed-integration.xml"); "the configured dataformats in config/spring/api/pubmed-integration.xml");

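The reworked loop normalizes the first configured format that parses into a result string whose granularity matches the available year/month/day parts, and only emits a value when parsing succeeded. A standalone sketch of the fallback idea, with an illustrative format list (the real list comes from pubmed-integration.xml):

    List<String> dateFormatsToAttempt = Arrays.asList("yyyy-MM-dd", "yyyy-MM", "yyyy", "MMM yyyy");
    SimpleDateFormat resultFormatter = new SimpleDateFormat("yyyy-MM");
    String resultDateString = "";
    for (String dateFormat : dateFormatsToAttempt) {
        try {
            Date parsed = new SimpleDateFormat(dateFormat).parse("Spring 2000");
            resultDateString = resultFormatter.format(parsed);
            break;
        } catch (ParseException e) {
            // fall through to the next, more lenient format
        }
    }
    // "Spring 2000" matches none of the formats, so resultDateString stays blank
    // and the contributor logs the failure instead of emitting a bogus date.
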
View File

@@ -80,7 +80,7 @@ public class OrcidHistory implements ReloadableEntity<Integer> {
* A description of the synchronized resource. * A description of the synchronized resource.
*/ */
@Lob @Lob
@Type(type = "org.dspace.storage.rdbms.hibernate.DatabaseAwareLobType") @Type(type = "org.hibernate.type.TextType")
@Column(name = "description") @Column(name = "description")
private String description; private String description;
@@ -89,7 +89,7 @@ public class OrcidHistory implements ReloadableEntity<Integer> {
* the owner itself. * the owner itself.
*/ */
@Lob @Lob
@Type(type = "org.dspace.storage.rdbms.hibernate.DatabaseAwareLobType") @Type(type = "org.hibernate.type.TextType")
@Column(name = "metadata") @Column(name = "metadata")
private String metadata; private String metadata;
@@ -104,7 +104,7 @@ public class OrcidHistory implements ReloadableEntity<Integer> {
* The response message incoming from ORCID. * The response message incoming from ORCID.
*/ */
@Lob @Lob
@Type(type = "org.dspace.storage.rdbms.hibernate.DatabaseAwareLobType") @Type(type = "org.hibernate.type.TextType")
@Column(name = "response_message") @Column(name = "response_message")
private String responseMessage; private String responseMessage;

View File

@@ -65,7 +65,7 @@ public class OrcidQueue implements ReloadableEntity<Integer> {
* A description of the resource to be synchronized. * A description of the resource to be synchronized.
*/ */
@Lob @Lob
@Type(type = "org.dspace.storage.rdbms.hibernate.DatabaseAwareLobType") @Type(type = "org.hibernate.type.TextType")
@Column(name = "description") @Column(name = "description")
private String description; private String description;
@@ -89,7 +89,7 @@ public class OrcidQueue implements ReloadableEntity<Integer> {
*/ */
@Lob @Lob
@Column(name = "metadata") @Column(name = "metadata")
@Type(type = "org.dspace.storage.rdbms.hibernate.DatabaseAwareLobType") @Type(type = "org.hibernate.type.TextType")
private String metadata; private String metadata;
/** /**

View File

@@ -7,13 +7,8 @@
*/ */
package org.dspace.orcid.script; package org.dspace.orcid.script;
import java.sql.SQLException;
import org.apache.commons.cli.Options; import org.apache.commons.cli.Options;
import org.dspace.authorize.service.AuthorizeService;
import org.dspace.core.Context;
import org.dspace.scripts.configuration.ScriptConfiguration; import org.dspace.scripts.configuration.ScriptConfiguration;
import org.springframework.beans.factory.annotation.Autowired;
/** /**
* Script configuration for {@link OrcidBulkPush}. * Script configuration for {@link OrcidBulkPush}.
@@ -24,20 +19,8 @@ import org.springframework.beans.factory.annotation.Autowired;
*/ */
public class OrcidBulkPushScriptConfiguration<T extends OrcidBulkPush> extends ScriptConfiguration<T> { public class OrcidBulkPushScriptConfiguration<T extends OrcidBulkPush> extends ScriptConfiguration<T> {
@Autowired
private AuthorizeService authorizeService;
private Class<T> dspaceRunnableClass; private Class<T> dspaceRunnableClass;
@Override
public boolean isAllowedToExecute(Context context) {
try {
return authorizeService.isAdmin(context);
} catch (SQLException e) {
throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
}
}
@Override @Override
public Class<T> getDspaceRunnableClass() { public Class<T> getDspaceRunnableClass() {
return dspaceRunnableClass; return dspaceRunnableClass;

View File

@@ -71,7 +71,7 @@ public class Process implements ReloadableEntity<Integer> {
private ProcessStatus processStatus; private ProcessStatus processStatus;
@Lob @Lob
@Type(type = "org.dspace.storage.rdbms.hibernate.DatabaseAwareLobType") @Type(type = "org.hibernate.type.TextType")
@Column(name = "parameters") @Column(name = "parameters")
private String parameters; private String parameters;

View File

@@ -129,6 +129,11 @@ public class ProcessServiceImpl implements ProcessService {
return processes; return processes;
} }
@Override
public List<Process> findByUser(Context context, EPerson eperson, int limit, int offset) throws SQLException {
return processDAO.findByUser(context, eperson, limit, offset);
}
@Override @Override
public void start(Context context, Process process) throws SQLException { public void start(Context context, Process process) throws SQLException {
process.setProcessStatus(ProcessStatus.RUNNING); process.setProcessStatus(ProcessStatus.RUNNING);
@@ -311,6 +316,11 @@ public class ProcessServiceImpl implements ProcessService {
return this.processDAO.findByStatusAndCreationTimeOlderThan(context, statuses, date); return this.processDAO.findByStatusAndCreationTimeOlderThan(context, statuses, date);
} }
@Override
public int countByUser(Context context, EPerson user) throws SQLException {
return processDAO.countByUser(context, user);
}
private String formatLogLine(int processId, String scriptName, String output, ProcessLogLevel processLogLevel) { private String formatLogLine(int processId, String scriptName, String output, ProcessLogLevel processLogLevel) {
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
StringBuilder sb = new StringBuilder(); StringBuilder sb = new StringBuilder();

View File

@@ -37,7 +37,7 @@ public class ScriptServiceImpl implements ScriptService {
@Override @Override
public List<ScriptConfiguration> getScriptConfigurations(Context context) { public List<ScriptConfiguration> getScriptConfigurations(Context context) {
return serviceManager.getServicesByType(ScriptConfiguration.class).stream().filter( return serviceManager.getServicesByType(ScriptConfiguration.class).stream().filter(
scriptConfiguration -> scriptConfiguration.isAllowedToExecute(context)) scriptConfiguration -> scriptConfiguration.isAllowedToExecute(context, null))
.sorted(Comparator.comparing(ScriptConfiguration::getName)) .sorted(Comparator.comparing(ScriptConfiguration::getName))
.collect(Collectors.toList()); .collect(Collectors.toList());
} }

View File

@@ -7,17 +7,28 @@
*/ */
package org.dspace.scripts.configuration; package org.dspace.scripts.configuration;
import java.sql.SQLException;
import java.util.List;
import org.apache.commons.cli.Options; import org.apache.commons.cli.Options;
import org.dspace.authorize.service.AuthorizeService;
import org.dspace.core.Context; import org.dspace.core.Context;
import org.dspace.scripts.DSpaceCommandLineParameter;
import org.dspace.scripts.DSpaceRunnable; import org.dspace.scripts.DSpaceRunnable;
import org.springframework.beans.factory.BeanNameAware; import org.springframework.beans.factory.BeanNameAware;
import org.springframework.beans.factory.annotation.Autowired;
/** /**
* This class represents an Abstract class that a ScriptConfiguration can inherit to further implement this * This class represents an Abstract class that a ScriptConfiguration can inherit to further implement this
* and represent a script's configuration * and represent a script's configuration.
* By default, scripts are available only to repository administrators; scripts that have a broader
* audience must override the {@link #isAllowedToExecute(Context, List)} method.
*/ */
public abstract class ScriptConfiguration<T extends DSpaceRunnable> implements BeanNameAware { public abstract class ScriptConfiguration<T extends DSpaceRunnable> implements BeanNameAware {
@Autowired
protected AuthorizeService authorizeService;
/** /**
* The possible options for this script * The possible options for this script
*/ */
@@ -70,14 +81,23 @@ public abstract class ScriptConfiguration<T extends DSpaceRunnable> implements B
* @param dspaceRunnableClass The dspaceRunnableClass to be set on this IndexDiscoveryScriptConfiguration * @param dspaceRunnableClass The dspaceRunnableClass to be set on this IndexDiscoveryScriptConfiguration
*/ */
public abstract void setDspaceRunnableClass(Class<T> dspaceRunnableClass); public abstract void setDspaceRunnableClass(Class<T> dspaceRunnableClass);
/** /**
* This method returns whether the script is allowed to execute in the given context. By default it requires * This method returns whether the script is allowed to execute in the given context. By default it requires
* the currentUser in the context to be an admin; however, each script can override this individually * the currentUser in the context to be an admin; however, each script can override this individually
* if different rules apply * if different rules apply
* @param context The relevant DSpace context * @param context The relevant DSpace context
* @param commandLineParameters the parameters that will be used to start the process if known,
* <code>null</code> otherwise
* @return A boolean indicating whether the script is allowed to execute or not * @return A boolean indicating whether the script is allowed to execute or not
*/ */
public abstract boolean isAllowedToExecute(Context context); public boolean isAllowedToExecute(Context context, List<DSpaceCommandLineParameter> commandLineParameters) {
try {
return authorizeService.isAdmin(context);
} catch (SQLException e) {
throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
}
}
/** /**
* The getter for the options of the Script * The getter for the options of the Script

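With this default in place, admin-only scripts need no authorization code at all; only scripts with a broader audience override the method. A hypothetical subclass sketch (ExampleScriptConfiguration is not part of the codebase):

    public class ExampleScriptConfiguration<T extends DSpaceRunnable> extends ScriptConfiguration<T> {

        private Class<T> dspaceRunnableClass;

        @Override
        public boolean isAllowedToExecute(Context context, List<DSpaceCommandLineParameter> commandLineParameters) {
            // Broader audience: any authenticated user may run this script.
            return context.getCurrentUser() != null;
        }

        @Override
        public Class<T> getDspaceRunnableClass() {
            return dspaceRunnableClass;
        }

        @Override
        public void setDspaceRunnableClass(Class<T> dspaceRunnableClass) {
            this.dspaceRunnableClass = dspaceRunnableClass;
        }

        @Override
        public Options getOptions() {
            if (options == null) {
                options = new Options();
            }
            return options;
        }
    }
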
View File

@@ -255,4 +255,26 @@ public interface ProcessService {
*/ */
List<Process> findByStatusAndCreationTimeOlderThan(Context context, List<ProcessStatus> statuses, Date date) List<Process> findByStatusAndCreationTimeOlderThan(Context context, List<ProcessStatus> statuses, Date date)
throws SQLException; throws SQLException;
/**
* Returns a list of all Process objects in the database created by the given user.
*
* @param context The relevant DSpace context
* @param user The user to search for
* @param limit The limit for the amount of Processes returned
* @param offset The offset for the Processes to be returned
* @return The list of Process objects in the database belonging to the given user
* @throws SQLException If something goes wrong
*/
List<Process> findByUser(Context context, EPerson user, int limit, int offset) throws SQLException;
/**
* Count all the processes which are related to the given user.
*
* @param context The relevant DSpace context
* @param user The user to search for
* @return The number of results matching the query
* @throws SQLException If something goes wrong
*/
int countByUser(Context context, EPerson user) throws SQLException;
} }

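A usage sketch for the two new user-scoped queries, assuming a ProcessService instance is already at hand and paging through results twenty at a time:

    int total = processService.countByUser(context, eperson);
    int pageSize = 20;
    for (int offset = 0; offset < total; offset += pageSize) {
        List<Process> page = processService.findByUser(context, eperson, pageSize, offset);
        // render or inspect one page of this user's processes
    }
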
View File

@@ -7,13 +7,8 @@
*/ */
package org.dspace.statistics.export; package org.dspace.statistics.export;
import java.sql.SQLException;
import org.apache.commons.cli.Options; import org.apache.commons.cli.Options;
import org.dspace.authorize.service.AuthorizeService;
import org.dspace.core.Context;
import org.dspace.scripts.configuration.ScriptConfiguration; import org.dspace.scripts.configuration.ScriptConfiguration;
import org.springframework.beans.factory.annotation.Autowired;
/** /**
* The {@link ScriptConfiguration} for the {@link RetryFailedOpenUrlTracker} script * The {@link ScriptConfiguration} for the {@link RetryFailedOpenUrlTracker} script
@@ -21,9 +16,6 @@ import org.springframework.beans.factory.annotation.Autowired;
public class RetryFailedOpenUrlTrackerScriptConfiguration<T extends RetryFailedOpenUrlTracker> public class RetryFailedOpenUrlTrackerScriptConfiguration<T extends RetryFailedOpenUrlTracker>
extends ScriptConfiguration<T> { extends ScriptConfiguration<T> {
@Autowired
private AuthorizeService authorizeService;
private Class<T> dspaceRunnableClass; private Class<T> dspaceRunnableClass;
@Override @Override
@@ -41,15 +33,6 @@ public class RetryFailedOpenUrlTrackerScriptConfiguration<T extends RetryFailedO
this.dspaceRunnableClass = dspaceRunnableClass; this.dspaceRunnableClass = dspaceRunnableClass;
} }
@Override
public boolean isAllowedToExecute(Context context) {
try {
return authorizeService.isAdmin(context);
} catch (SQLException e) {
throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
}
}
@Override @Override
public Options getOptions() { public Options getOptions() {
if (options == null) { if (options == null) {

View File

@@ -75,7 +75,6 @@ public class DatabaseUtils {
// Types of databases supported by DSpace. See getDbType() // Types of databases supported by DSpace. See getDbType()
public static final String DBMS_POSTGRES = "postgres"; public static final String DBMS_POSTGRES = "postgres";
public static final String DBMS_ORACLE = "oracle";
public static final String DBMS_H2 = "h2"; public static final String DBMS_H2 = "h2";
// Name of the table that Flyway uses for its migration history // Name of the table that Flyway uses for its migration history
@@ -369,9 +368,7 @@ public class DatabaseUtils {
.println("\nWARNING: ALL DATA AND TABLES IN YOUR DATABASE WILL BE PERMANENTLY DELETED.\n"); .println("\nWARNING: ALL DATA AND TABLES IN YOUR DATABASE WILL BE PERMANENTLY DELETED.\n");
System.out.println("There is NO turning back from this action. Backup your DB before " + System.out.println("There is NO turning back from this action. Backup your DB before " +
"continuing."); "continuing.");
if (dbType.equals(DBMS_ORACLE)) { if (dbType.equals(DBMS_POSTGRES)) {
System.out.println("\nORACLE WARNING: your RECYCLEBIN will also be PURGED.\n");
} else if (dbType.equals(DBMS_POSTGRES)) {
System.out.println( System.out.println(
"\nPOSTGRES WARNING: the '" + PostgresUtils.PGCRYPTO + "' extension will be dropped " + "\nPOSTGRES WARNING: the '" + PostgresUtils.PGCRYPTO + "' extension will be dropped " +
"if it is in the same schema as the DSpace database.\n"); "if it is in the same schema as the DSpace database.\n");
@@ -467,11 +464,10 @@ public class DatabaseUtils {
DatabaseMetaData meta = connection.getMetaData(); DatabaseMetaData meta = connection.getMetaData();
String dbType = getDbType(connection); String dbType = getDbType(connection);
System.out.println("\nDatabase Type: " + dbType); System.out.println("\nDatabase Type: " + dbType);
if (dbType.equals(DBMS_ORACLE)) { if (!dbType.equals(DBMS_POSTGRES) && !dbType.equals(DBMS_H2)) {
System.out.println("===================================="); System.err.println("====================================");
System.out.println("WARNING: Oracle support is deprecated!"); System.err.println("ERROR: Database type " + dbType + " is UNSUPPORTED!");
System.out.println("See https://github.com/DSpace/DSpace/issues/8214"); System.err.println("=====================================");
System.out.println("=====================================");
} }
System.out.println("Database URL: " + meta.getURL()); System.out.println("Database URL: " + meta.getURL());
System.out.println("Database Schema: " + getSchemaName(connection)); System.out.println("Database Schema: " + getSchemaName(connection));
@@ -606,10 +602,6 @@ public class DatabaseUtils {
String dbType = getDbType(connection); String dbType = getDbType(connection);
connection.close(); connection.close();
if (dbType.equals(DBMS_ORACLE)) {
log.warn("ORACLE SUPPORT IS DEPRECATED! See https://github.com/DSpace/DSpace/issues/8214");
}
// Determine location(s) where Flyway will load all DB migrations // Determine location(s) where Flyway will load all DB migrations
ArrayList<String> scriptLocations = new ArrayList<>(); ArrayList<String> scriptLocations = new ArrayList<>();
@@ -946,26 +938,6 @@ public class DatabaseUtils {
// First, run Flyway's clean command on database. // First, run Flyway's clean command on database.
// For MOST database types, this takes care of everything // For MOST database types, this takes care of everything
flyway.clean(); flyway.clean();
try (Connection connection = dataSource.getConnection()) {
// Get info about which database type we are using
String dbType = getDbType(connection);
// If this is Oracle, the only way to entirely clean the database
// is to also purge the "Recyclebin". See:
// http://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_9018.htm
if (dbType.equals(DBMS_ORACLE)) {
PreparedStatement statement = null;
try {
statement = connection.prepareStatement("PURGE RECYCLEBIN");
statement.executeQuery();
} finally {
if (statement != null && !statement.isClosed()) {
statement.close();
}
}
}
}
} catch (FlywayException fe) { } catch (FlywayException fe) {
// If any FlywayException (Runtime) is thrown, change it to a SQLException // If any FlywayException (Runtime) is thrown, change it to a SQLException
throw new SQLException("Flyway clean error occurred", fe); throw new SQLException("Flyway clean error occurred", fe);
@@ -1214,11 +1186,6 @@ public class DatabaseUtils {
// We need to filter by schema in PostgreSQL // We need to filter by schema in PostgreSQL
schemaFilter = true; schemaFilter = true;
break; break;
case DBMS_ORACLE:
// Oracle specific query for a sequence owned by our current DSpace user
// NOTE: No need to filter by schema for Oracle, as Schema = User
sequenceSQL = "SELECT COUNT(1) FROM user_sequences WHERE sequence_name=?";
break;
case DBMS_H2: case DBMS_H2:
// In H2, sequences are listed in the "information_schema.sequences" table // In H2, sequences are listed in the "information_schema.sequences" table
// SEE: http://www.h2database.com/html/grammar.html#information_schema // SEE: http://www.h2database.com/html/grammar.html#information_schema
@@ -1322,11 +1289,6 @@ public class DatabaseUtils {
// For PostgreSQL, the default schema is named "public" // For PostgreSQL, the default schema is named "public"
// See: http://www.postgresql.org/docs/9.0/static/ddl-schemas.html // See: http://www.postgresql.org/docs/9.0/static/ddl-schemas.html
schema = "public"; schema = "public";
} else if (dbType.equals(DBMS_ORACLE)) {
// For Oracle, default schema is actually the user account
// See: http://stackoverflow.com/a/13341390
DatabaseMetaData meta = connection.getMetaData();
schema = meta.getUserName();
} else { } else {
// For H2 (in memory), there is no such thing as a schema // For H2 (in memory), there is no such thing as a schema
schema = null; schema = null;
@@ -1552,8 +1514,6 @@ public class DatabaseUtils {
String dbms_lc = prodName.toLowerCase(Locale.ROOT); String dbms_lc = prodName.toLowerCase(Locale.ROOT);
if (dbms_lc.contains("postgresql")) { if (dbms_lc.contains("postgresql")) {
return DBMS_POSTGRES; return DBMS_POSTGRES;
} else if (dbms_lc.contains("oracle")) {
return DBMS_ORACLE;
} else if (dbms_lc.contains("h2")) { } else if (dbms_lc.contains("h2")) {
// Used for unit testing only // Used for unit testing only
return DBMS_H2; return DBMS_H2;

View File

@@ -1,57 +0,0 @@
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.storage.rdbms.hibernate;
import org.apache.commons.lang.StringUtils;
import org.dspace.services.ConfigurationService;
import org.dspace.services.factory.DSpaceServicesFactory;
import org.hibernate.type.AbstractSingleColumnStandardBasicType;
import org.hibernate.type.descriptor.java.StringTypeDescriptor;
import org.hibernate.type.descriptor.sql.ClobTypeDescriptor;
import org.hibernate.type.descriptor.sql.LongVarcharTypeDescriptor;
import org.hibernate.type.descriptor.sql.SqlTypeDescriptor;
/**
* A Hibernate @Type used to properly support the CLOB in both Postgres and Oracle.
* PostgreSQL doesn't have a CLOB type, instead it's a TEXT field.
* Normally, you'd use org.hibernate.type.TextType to support TEXT, but that won't work for Oracle.
* https://github.com/hibernate/hibernate-orm/blob/5.6/hibernate-core/src/main/java/org/hibernate/type/TextType.java
*
* This Type checks if we are using PostgreSQL.
* If so, it configures Hibernate to map CLOB to LongVarChar (same as org.hibernate.type.TextType)
* If not, it uses default CLOB (which works for other databases).
*/
public class DatabaseAwareLobType extends AbstractSingleColumnStandardBasicType<String> {
public static final DatabaseAwareLobType INSTANCE = new DatabaseAwareLobType();
public DatabaseAwareLobType() {
super( getDbDescriptor(), StringTypeDescriptor.INSTANCE );
}
public static SqlTypeDescriptor getDbDescriptor() {
if ( isPostgres() ) {
return LongVarcharTypeDescriptor.INSTANCE;
} else {
return ClobTypeDescriptor.DEFAULT;
}
}
private static boolean isPostgres() {
ConfigurationService configurationService = DSpaceServicesFactory.getInstance().getConfigurationService();
String dbDialect = configurationService.getProperty("db.dialect");
return StringUtils.containsIgnoreCase(dbDialect, "PostgreSQL");
}
@Override
public String getName() {
return "database_aware_lob";
}
}

View File

@@ -78,13 +78,6 @@ public class MigrationUtils {
constraintName += "_" + StringUtils.lowerCase(constraintSuffix); constraintName += "_" + StringUtils.lowerCase(constraintSuffix);
cascade = true; cascade = true;
break; break;
case "oracle":
// In Oracle, constraints are listed in the USER_CONS_COLUMNS table
constraintNameSQL = "SELECT CONSTRAINT_NAME " +
"FROM USER_CONS_COLUMNS " +
"WHERE TABLE_NAME = ? AND COLUMN_NAME = ?";
cascade = true;
break;
case "h2": case "h2":
// In H2, column constraints are listed in the "INFORMATION_SCHEMA.KEY_COLUMN_USAGE" table // In H2, column constraints are listed in the "INFORMATION_SCHEMA.KEY_COLUMN_USAGE" table
constraintNameSQL = "SELECT DISTINCT CONSTRAINT_NAME " + constraintNameSQL = "SELECT DISTINCT CONSTRAINT_NAME " +
@@ -160,9 +153,6 @@ public class MigrationUtils {
case "postgresql": case "postgresql":
dropTableSQL = "DROP TABLE IF EXISTS " + tableName + " CASCADE"; dropTableSQL = "DROP TABLE IF EXISTS " + tableName + " CASCADE";
break; break;
case "oracle":
dropTableSQL = "DROP TABLE " + tableName + " CASCADE CONSTRAINTS";
break;
case "h2": case "h2":
dropTableSQL = "DROP TABLE IF EXISTS " + tableName + " CASCADE"; dropTableSQL = "DROP TABLE IF EXISTS " + tableName + " CASCADE";
break; break;
@@ -208,9 +198,6 @@ public class MigrationUtils {
case "postgresql": case "postgresql":
dropSequenceSQL = "DROP SEQUENCE IF EXISTS " + sequenceName; dropSequenceSQL = "DROP SEQUENCE IF EXISTS " + sequenceName;
break; break;
case "oracle":
dropSequenceSQL = "DROP SEQUENCE " + sequenceName;
break;
case "h2": case "h2":
dropSequenceSQL = "DROP SEQUENCE IF EXISTS " + sequenceName; dropSequenceSQL = "DROP SEQUENCE IF EXISTS " + sequenceName;
break; break;
@@ -256,9 +243,6 @@ public class MigrationUtils {
case "postgresql": case "postgresql":
dropViewSQL = "DROP VIEW IF EXISTS " + viewName + " CASCADE"; dropViewSQL = "DROP VIEW IF EXISTS " + viewName + " CASCADE";
break; break;
case "oracle":
dropViewSQL = "DROP VIEW " + viewName + " CASCADE CONSTRAINTS";
break;
case "h2": case "h2":
dropViewSQL = "DROP VIEW IF EXISTS " + viewName + " CASCADE"; dropViewSQL = "DROP VIEW IF EXISTS " + viewName + " CASCADE";
break; break;

View File

@@ -19,10 +19,9 @@ import org.flywaydb.core.api.migration.Context;
* of the "community" table. This is necessary for the upgrade from 1.3 to 1.4 * of the "community" table. This is necessary for the upgrade from 1.3 to 1.4
* <P> * <P>
* This class was created because the names of database constraints differs based * This class was created because the names of database constraints differs based
* on the type of database (Postgres vs. Oracle vs. H2). As such, it becomes difficult * on the type of database (Postgres vs. H2). As such, it becomes difficult
* to write simple SQL which will work for multiple database types (especially * to write simple SQL which will work for multiple database types (especially
* since unit tests require H2 and the syntax for H2 is different from either * since unit tests require H2 and the syntax for H2 is different from Postgres).
* Oracle or Postgres).
* <P> * <P>
* NOTE: This migration class is very simple because it is meant to be used * NOTE: This migration class is very simple because it is meant to be used
* in conjunction with the corresponding SQL script: * in conjunction with the corresponding SQL script:

View File

@@ -19,10 +19,9 @@ import org.flywaydb.core.api.migration.Context;
* from 1.5 to 1.6 * from 1.5 to 1.6
* <P> * <P>
* This class was created because the names of database constraints differs based * This class was created because the names of database constraints differs based
* on the type of database (Postgres vs. Oracle vs. H2). As such, it becomes difficult * on the type of database (Postgres vs. H2). As such, it becomes difficult
* to write simple SQL which will work for multiple database types (especially * to write simple SQL which will work for multiple database types (especially
* since unit tests require H2 and the syntax for H2 is different from either * since unit tests require H2 and the syntax for H2 is different from Postgres).
* Oracle or Postgres).
* <P> * <P>
* NOTE: This migration class is very simple because it is meant to be used * NOTE: This migration class is very simple because it is meant to be used
* in conjunction with the corresponding SQL script: * in conjunction with the corresponding SQL script:

View File

@@ -20,10 +20,9 @@ import org.flywaydb.core.api.migration.Context;
* this column must be renamed to "resource_id". * this column must be renamed to "resource_id".
* <P> * <P>
* This class was created because the names of database constraints differs based * This class was created because the names of database constraints differs based
* on the type of database (Postgres vs. Oracle vs. H2). As such, it becomes difficult * on the type of database (Postgres vs. H2). As such, it becomes difficult
* to write simple SQL which will work for multiple database types (especially * to write simple SQL which will work for multiple database types (especially
* since unit tests require H2 and the syntax for H2 is different from either * since unit tests require H2 and the syntax for H2 is different from Postgres).
* Oracle or Postgres).
* <P> * <P>
* NOTE: This migration class is very simple because it is meant to be used * NOTE: This migration class is very simple because it is meant to be used
* in conjunction with the corresponding SQL script: * in conjunction with the corresponding SQL script:

View File

@@ -67,8 +67,6 @@ public class V5_0_2014_11_04__Enable_XMLWorkflow_Migration
String dbFileLocation = null; String dbFileLocation = null;
if (dbtype.toLowerCase().contains("postgres")) { if (dbtype.toLowerCase().contains("postgres")) {
dbFileLocation = "postgres"; dbFileLocation = "postgres";
} else if (dbtype.toLowerCase().contains("oracle")) {
dbFileLocation = "oracle";
} else if (dbtype.toLowerCase().contains("h2")) { } else if (dbtype.toLowerCase().contains("h2")) {
dbFileLocation = "h2"; dbFileLocation = "h2";
} }

View File

@@ -46,8 +46,6 @@ public class V6_0_2015_09_01__DS_2701_Enable_XMLWorkflow_Migration extends BaseJ
String dbFileLocation = null; String dbFileLocation = null;
if (dbtype.toLowerCase().contains("postgres")) { if (dbtype.toLowerCase().contains("postgres")) {
dbFileLocation = "postgres"; dbFileLocation = "postgres";
} else if (dbtype.toLowerCase().contains("oracle")) {
dbFileLocation = "oracle";
} else if (dbtype.toLowerCase().contains("h2")) { } else if (dbtype.toLowerCase().contains("h2")) {
dbFileLocation = "h2"; dbFileLocation = "h2";
} }

View File

@@ -7,13 +7,8 @@
*/ */
package org.dspace.submit.migration; package org.dspace.submit.migration;
import java.sql.SQLException;
import org.apache.commons.cli.Options; import org.apache.commons.cli.Options;
import org.dspace.authorize.service.AuthorizeService;
import org.dspace.core.Context;
import org.dspace.scripts.configuration.ScriptConfiguration; import org.dspace.scripts.configuration.ScriptConfiguration;
import org.springframework.beans.factory.annotation.Autowired;
/** /**
* The {@link ScriptConfiguration} for the {@link SubmissionFormsMigration} script * The {@link ScriptConfiguration} for the {@link SubmissionFormsMigration} script
@@ -23,9 +18,6 @@ import org.springframework.beans.factory.annotation.Autowired;
public class SubmissionFormsMigrationCliScriptConfiguration<T extends SubmissionFormsMigration> public class SubmissionFormsMigrationCliScriptConfiguration<T extends SubmissionFormsMigration>
extends ScriptConfiguration<T> { extends ScriptConfiguration<T> {
@Autowired
private AuthorizeService authorizeService;
private Class<T> dspaceRunnableClass; private Class<T> dspaceRunnableClass;
@Override @Override
@@ -38,15 +30,6 @@ public class SubmissionFormsMigrationCliScriptConfiguration<T extends Submission
this.dspaceRunnableClass = dspaceRunnableClass; this.dspaceRunnableClass = dspaceRunnableClass;
} }
@Override
public boolean isAllowedToExecute(Context context) {
try {
return authorizeService.isAdmin(context);
} catch (SQLException e) {
throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
}
}
@Override @Override
public Options getOptions() { public Options getOptions() {
if (options == null) { if (options == null) {

View File

@@ -7,7 +7,12 @@
*/ */
package org.dspace.submit.migration; package org.dspace.submit.migration;
import java.util.List;
import org.apache.commons.cli.Options;
import org.dspace.core.Context; import org.dspace.core.Context;
import org.dspace.scripts.DSpaceCommandLineParameter;
import org.dspace.scripts.configuration.ScriptConfiguration;
/** /**
* Subclass of {@link SubmissionFormsMigrationCliScriptConfiguration} to be use in rest/scripts.xml configuration so * Subclass of {@link SubmissionFormsMigrationCliScriptConfiguration} to be use in rest/scripts.xml configuration so
@@ -15,10 +20,37 @@ import org.dspace.core.Context;
* *
* @author Maria Verdonck (Atmire) on 05/01/2021 * @author Maria Verdonck (Atmire) on 05/01/2021
*/ */
public class SubmissionFormsMigrationScriptConfiguration extends SubmissionFormsMigrationCliScriptConfiguration { public class SubmissionFormsMigrationScriptConfiguration<T extends SubmissionFormsMigration>
extends ScriptConfiguration<T> {
private Class<T> dspaceRunnableClass;
@Override @Override
public boolean isAllowedToExecute(Context context) { public Class<T> getDspaceRunnableClass() {
return this.dspaceRunnableClass;
}
@Override
public void setDspaceRunnableClass(Class<T> dspaceRunnableClass) {
this.dspaceRunnableClass = dspaceRunnableClass;
}
@Override
public Options getOptions() {
if (options == null) {
Options options = new Options();
options.addOption("f", "input-forms", true, "Path to source input-forms.xml file location");
options.addOption("s", "item-submission", true, "Path to source item-submission.xml file location");
options.addOption("h", "help", false, "help");
super.options = options;
}
return options;
}
@Override
public boolean isAllowedToExecute(Context context, List<DSpaceCommandLineParameter> commandLineParameters) {
// Script is not allowed to be executed from REST side // Script is not allowed to be executed from REST side
return false; return false;
} }

View File

@@ -11,6 +11,8 @@ import java.text.ParseException;
import java.util.Date; import java.util.Date;
import java.util.Objects; import java.util.Objects;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.dspace.authorize.AuthorizeException; import org.dspace.authorize.AuthorizeException;
import org.dspace.authorize.ResourcePolicy; import org.dspace.authorize.ResourcePolicy;
import org.dspace.authorize.service.AuthorizeService; import org.dspace.authorize.service.AuthorizeService;
@@ -21,6 +23,7 @@ import org.dspace.core.Context;
import org.dspace.eperson.Group; import org.dspace.eperson.Group;
import org.dspace.eperson.service.GroupService; import org.dspace.eperson.service.GroupService;
import org.dspace.util.DateMathParser; import org.dspace.util.DateMathParser;
import org.dspace.util.TimeHelpers;
import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Autowired;
/** /**
@@ -28,8 +31,7 @@ import org.springframework.beans.factory.annotation.Autowired;
* set permission on a file. An option is defined by a name such as "open * set permission on a file. An option is defined by a name such as "open
* access", "embargo", "restricted access", etc. and some optional attributes to * access", "embargo", "restricted access", etc. and some optional attributes to
* better clarify the constraints and input available to the user. For instance * better clarify the constraints and input available to the user. For instance
* an embargo option could allow to set a start date not longer than 3 years, * an embargo option could allow a start date no more than 3 years in the future.
* etc
* *
* @author Luigi Andrea Pascarelli (luigiandrea.pascarelli at 4science.it) * @author Luigi Andrea Pascarelli (luigiandrea.pascarelli at 4science.it)
*/ */
@@ -44,9 +46,9 @@ public class AccessConditionOption {
@Autowired @Autowired
private ResourcePolicyService resourcePolicyService; private ResourcePolicyService resourcePolicyService;
DateMathParser dateMathParser = new DateMathParser(); private static final Logger LOG = LogManager.getLogger();
/** An unique name identifying the access contion option **/ /** A unique name identifying the access condition option. **/
private String name; private String name;
/** /**
@@ -147,6 +149,9 @@ public class AccessConditionOption {
* startDate should be null. Otherwise startDate may not be null. * startDate should be null. Otherwise startDate may not be null.
* @param endDate end date of the resource policy. If {@link #getHasEndDate()} returns false, * @param endDate end date of the resource policy. If {@link #getHasEndDate()} returns false,
* endDate should be null. Otherwise endDate may not be null. * endDate should be null. Otherwise endDate may not be null.
* @throws SQLException passed through.
* @throws AuthorizeException passed through.
* @throws ParseException passed through (indicates problem with a date).
*/ */
public void createResourcePolicy(Context context, DSpaceObject obj, String name, String description, public void createResourcePolicy(Context context, DSpaceObject obj, String name, String description,
Date startDate, Date endDate) Date startDate, Date endDate)
@@ -175,17 +180,25 @@ public class AccessConditionOption {
} }
/** /**
* Validate the policy properties, throws exceptions if any is not valid * Validate the policy properties; throw exceptions if any are invalid.
* *
* @param context DSpace context * @param context DSpace context.
* @param name Name of the resource policy * @param name Name of the resource policy.
* @param startDate Start date of the resource policy. If {@link #getHasStartDate()} * @param startDate Start date of the resource policy. If
* returns false, startDate should be null. Otherwise startDate may not be null. * {@link #getHasStartDate()} returns false, startDate
* @param endDate End date of the resource policy. If {@link #getHasEndDate()} * should be null. Otherwise startDate may not be null.
* returns false, endDate should be null. Otherwise endDate may not be null. * @param endDate End date of the resource policy. If
* {@link #getHasEndDate()} returns false, endDate should
* be null. Otherwise endDate may not be null.
* @throws IllegalStateException if a date is required and absent,
* a date is not required and present, or a date exceeds its
* configured maximum.
* @throws ParseException passed through.
*/ */
private void validateResourcePolicy(Context context, String name, Date startDate, Date endDate) private void validateResourcePolicy(Context context, String name, Date startDate, Date endDate)
throws SQLException, AuthorizeException, ParseException { throws IllegalStateException, ParseException {
LOG.debug("Validate policy dates: name '{}', startDate {}, endDate {}",
name, startDate, endDate);
if (getHasStartDate() && Objects.isNull(startDate)) { if (getHasStartDate() && Objects.isNull(startDate)) {
throw new IllegalStateException("The access condition " + getName() + " requires a start date."); throw new IllegalStateException("The access condition " + getName() + " requires a start date.");
} }
@@ -199,29 +212,33 @@ public class AccessConditionOption {
throw new IllegalStateException("The access condition " + getName() + " cannot contain an end date."); throw new IllegalStateException("The access condition " + getName() + " cannot contain an end date.");
} }
DateMathParser dateMathParser = new DateMathParser();
Date latestStartDate = null; Date latestStartDate = null;
if (Objects.nonNull(getStartDateLimit())) { if (Objects.nonNull(getStartDateLimit())) {
latestStartDate = dateMathParser.parseMath(getStartDateLimit()); latestStartDate = TimeHelpers.toMidnightUTC(dateMathParser.parseMath(getStartDateLimit()));
} }
Date latestEndDate = null; Date latestEndDate = null;
if (Objects.nonNull(getEndDateLimit())) { if (Objects.nonNull(getEndDateLimit())) {
latestEndDate = dateMathParser.parseMath(getEndDateLimit()); latestEndDate = TimeHelpers.toMidnightUTC(dateMathParser.parseMath(getEndDateLimit()));
} }
LOG.debug(" latestStartDate {}, latestEndDate {}",
latestStartDate, latestEndDate);
// throw if startDate after latestStartDate // throw if startDate after latestStartDate
if (Objects.nonNull(startDate) && Objects.nonNull(latestStartDate) && startDate.after(latestStartDate)) { if (Objects.nonNull(startDate) && Objects.nonNull(latestStartDate) && startDate.after(latestStartDate)) {
throw new IllegalStateException(String.format( throw new IllegalStateException(String.format(
"The start date of access condition %s should be earlier than %s from now.", "The start date of access condition %s should be earlier than %s from now (%s).",
getName(), getStartDateLimit() getName(), getStartDateLimit(), dateMathParser.getNow()
)); ));
} }
// throw if endDate after latestEndDate // throw if endDate after latestEndDate
if (Objects.nonNull(endDate) && Objects.nonNull(latestEndDate) && endDate.after(latestEndDate)) { if (Objects.nonNull(endDate) && Objects.nonNull(latestEndDate) && endDate.after(latestEndDate)) {
throw new IllegalStateException(String.format( throw new IllegalStateException(String.format(
"The end date of access condition %s should be earlier than %s from now.", "The end date of access condition %s should be earlier than %s from now (%s).",
getName(), getEndDateLimit() getName(), getEndDateLimit(), dateMathParser.getNow()
)); ));
} }
} }

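The start/end limits are date-math expressions evaluated against "now" and, with this change, truncated to midnight UTC before comparison. A sketch of the check, assuming an access condition configured with endDateLimit = "+3YEARS":

    DateMathParser dateMathParser = new DateMathParser();
    // Latest acceptable end date: three years from now, normalized to midnight UTC
    Date latestEndDate = TimeHelpers.toMidnightUTC(dateMathParser.parseMath("+3YEARS"));
    Date requestedEndDate = TimeHelpers.toMidnightUTC(dateMathParser.parseMath("+4YEARS")); // too far out
    if (requestedEndDate.after(latestEndDate)) {
        throw new IllegalStateException(String.format(
                "The end date of access condition %s should be earlier than %s from now (%s).",
                "embargo", "+3YEARS", dateMathParser.getNow()));
    }
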
View File

@@ -8,15 +8,11 @@
package org.dspace.subscriptions; package org.dspace.subscriptions;
import java.sql.SQLException;
import java.util.Objects; import java.util.Objects;
import org.apache.commons.cli.Options; import org.apache.commons.cli.Options;
import org.dspace.authorize.AuthorizeServiceImpl;
import org.dspace.core.Context;
import org.dspace.scripts.DSpaceRunnable; import org.dspace.scripts.DSpaceRunnable;
import org.dspace.scripts.configuration.ScriptConfiguration; import org.dspace.scripts.configuration.ScriptConfiguration;
import org.springframework.beans.factory.annotation.Autowired;
/** /**
* Implementation of {@link DSpaceRunnable} to find subscribed objects and send notification mails about them * Implementation of {@link DSpaceRunnable} to find subscribed objects and send notification mails about them
@@ -26,18 +22,6 @@ public class SubscriptionEmailNotificationConfiguration<T
private Class<T> dspaceRunnableClass; private Class<T> dspaceRunnableClass;
@Autowired
private AuthorizeServiceImpl authorizeService;
@Override
public boolean isAllowedToExecute(Context context) {
try {
return authorizeService.isAdmin(context);
} catch (SQLException e) {
throw new RuntimeException("SQLException occurred when checking if the current user is an admin", e);
}
}
@Override @Override
public Options getOptions() { public Options getOptions() {
if (Objects.isNull(options)) { if (Objects.isNull(options)) {

View File

@@ -26,12 +26,15 @@ import java.util.Map;
import java.util.TimeZone; import java.util.TimeZone;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/** /**
* This class (Apache license) is copied from Apache Solr and add some tweaks to resolve unneeded dependency: * This class (Apache license) is copied from Apache Solr, adding some tweaks to
* https://raw.githubusercontent.com/apache/lucene-solr/releases/lucene-solr/7.1.0/solr/core/src/java/org/apache/solr * resolve an unneeded dependency. See
* /util/DateMathParser.java * <a href='https://raw.githubusercontent.com/apache/lucene-solr/releases/lucene-solr/7.1.0/solr/core/src/java/org/apache/solr/util/DateMathParser.java'>the original</a>.
* *
* <p>
* A Simple Utility class for parsing "math" like strings relating to Dates. * A Simple Utility class for parsing "math" like strings relating to Dates.
* *
* <p> * <p>
@@ -78,7 +81,7 @@ import java.util.regex.Pattern;
* "<code>setNow</code>" in the interim). The default value of 'now' is * "<code>setNow</code>" in the interim). The default value of 'now' is
* the time at the moment the <code>DateMathParser</code> instance is * the time at the moment the <code>DateMathParser</code> instance is
* constructed, unless overridden by the {@link CommonParams#NOW NOW} * constructed, unless overridden by the {@link CommonParams#NOW NOW}
* request param. * request parameter.
* </p> * </p>
* *
* <p> * <p>
@@ -88,7 +91,7 @@ import java.util.regex.Pattern;
* cascades to rounding of HOUR, MIN, MONTH, YEAR as well. The default * cascades to rounding of HOUR, MIN, MONTH, YEAR as well. The default
* <code>TimeZone</code> used is <code>UTC</code> unless overridden by the * <code>TimeZone</code> used is <code>UTC</code> unless overridden by the
* {@link CommonParams#TZ TZ} * {@link CommonParams#TZ TZ}
* request param. * request parameter.
* </p> * </p>
* *
* <p> * <p>
@@ -102,6 +105,8 @@ import java.util.regex.Pattern;
*/ */
public class DateMathParser { public class DateMathParser {
private static final Logger LOG = LogManager.getLogger();
public static final TimeZone UTC = TimeZone.getTimeZone("UTC"); public static final TimeZone UTC = TimeZone.getTimeZone("UTC");
/** /**
@@ -119,12 +124,12 @@ public class DateMathParser {
/** /**
* A mapping from (uppercased) String labels identifying time units, * A mapping from (uppercased) String labels identifying time units,
* to the corresponding {@link ChronoUnit} enum (e.g. "YEARS") used to * to the corresponding {@link ChronoUnit} value (e.g. "YEARS") used to
* set/add/roll that unit of measurement. * set/add/roll that unit of measurement.
* *
* <p> * <p>
* A single logical unit of time might be represented by multiple labels * A single logical unit of time might be represented by multiple labels
* for convenience (ie: <code>DATE==DAYS</code>, * for convenience (i.e. <code>DATE==DAYS</code>,
* <code>MILLI==MILLIS</code>) * <code>MILLI==MILLIS</code>)
* </p> * </p>
* *
@@ -220,6 +225,7 @@ public class DateMathParser {
* *
* @param now an optional fixed date to use as "NOW" * @param now an optional fixed date to use as "NOW"
* @param val the string to parse * @param val the string to parse
* @return result of applying the parsed expression to "NOW".
* @throws Exception * @throws Exception
*/ */
public static Date parseMath(Date now, String val) throws Exception { public static Date parseMath(Date now, String val) throws Exception {
@@ -308,6 +314,7 @@ public class DateMathParser {
/** /**
* Defines this instance's concept of "now". * Defines this instance's concept of "now".
* *
* @param n new value of "now".
* @see #getNow * @see #getNow
*/ */
public void setNow(Date n) { public void setNow(Date n) {
@@ -316,12 +323,12 @@ public class DateMathParser {
/** /**
* Returns a clone of this instance's concept of "now" (never null). * Returns a clone of this instance's concept of "now" (never null).
*
* If setNow was never called (or if null was specified) then this method * If setNow was never called (or if null was specified) then this method
* first defines 'now' as the value dictated by the SolrRequestInfo if it * first defines 'now' as the value dictated by the SolrRequestInfo if it
* exists -- otherwise it uses a new Date instance at the moment getNow() * exists -- otherwise it uses a new Date instance at the moment getNow()
* is first called. * is first called.
* *
* @return "now".
* @see #setNow * @see #setNow
* @see SolrRequestInfo#getNOW * @see SolrRequestInfo#getNOW
*/ */
@@ -334,9 +341,12 @@ public class DateMathParser {
} }
/** /**
* Parses a string of commands relative "now" are returns the resulting Date. * Parses a date expression relative to "now".
* *
* @throws ParseException positions in ParseExceptions are token positions, not character positions. * @param math a date expression such as "+24MONTHS".
* @return the result of applying the expression to the current time.
* @throws ParseException positions in ParseExceptions are token positions,
* not character positions.
*/ */
public Date parseMath(String math) throws ParseException { public Date parseMath(String math) throws ParseException {
/* check for No-Op */ /* check for No-Op */
@@ -344,6 +354,8 @@ public class DateMathParser {
return getNow(); return getNow();
} }
LOG.debug("parsing {}", math);
ZoneId zoneId = zone.toZoneId(); ZoneId zoneId = zone.toZoneId();
// localDateTime is a date and time local to the timezone specified // localDateTime is a date and time local to the timezone specified
LocalDateTime localDateTime = ZonedDateTime.ofInstant(getNow().toInstant(), zoneId).toLocalDateTime(); LocalDateTime localDateTime = ZonedDateTime.ofInstant(getNow().toInstant(), zoneId).toLocalDateTime();
@@ -394,11 +406,44 @@ public class DateMathParser {
} }
} }
LOG.debug("returning {}", localDateTime);
return Date.from(ZonedDateTime.of(localDateTime, zoneId).toInstant()); return Date.from(ZonedDateTime.of(localDateTime, zoneId).toInstant());
} }
private static Pattern splitter = Pattern.compile("\\b|(?<=\\d)(?=\\D)"); private static Pattern splitter = Pattern.compile("\\b|(?<=\\d)(?=\\D)");
/**
* For manual testing. With one argument, test one-argument parseMath.
* With two (or more) arguments, test two-argument parseMath.
*
* @param argv date math expressions.
* @throws java.lang.Exception passed through.
*/
public static void main(String[] argv)
throws Exception {
DateMathParser parser = new DateMathParser();
try {
Date parsed;
if (argv.length <= 0) {
System.err.println("Date math expression(s) expected.");
}
if (argv.length > 0) {
parsed = parser.parseMath(argv[0]);
System.out.format("Applied %s to implicit current time: %s%n",
argv[0], parsed.toString());
}
if (argv.length > 1) {
parsed = DateMathParser.parseMath(new Date(), argv[1]);
System.out.format("Applied %s to explicit current time: %s%n",
argv[1], parsed.toString());
}
} catch (ParseException ex) {
System.err.format("Oops: %s%n", ex.getMessage());
}
}
} }

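The expression grammar is inherited from Solr date math: units can be added or subtracted, and "/" rounds down. A few illustrative calls:

    DateMathParser parser = new DateMathParser();
    Date inSixMonths = parser.parseMath("+6MONTHS");           // now plus six months
    Date startOfToday = parser.parseMath("/DAY");              // rounded down to midnight (UTC by default)
    Date tomorrowNoon = parser.parseMath("/DAY+1DAY+12HOURS"); // midnight tonight plus twelve hours
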
View File

@@ -0,0 +1,42 @@
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.util;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.TimeZone;
/**
* Various manipulations of dates and times.
*
* @author mwood
*/
public class TimeHelpers {
private static final TimeZone UTC = TimeZone.getTimeZone("UTC");
/**
* Never instantiate this class.
*/
private TimeHelpers() {}
/**
* Set a Date's time to midnight UTC.
*
* @param from some date-time.
* @return midnight UTC of the supplied date-time.
*/
public static Date toMidnightUTC(Date from) {
GregorianCalendar calendar = new GregorianCalendar(UTC);
calendar.setTime(from);
calendar.set(GregorianCalendar.HOUR_OF_DAY, 0);
calendar.set(GregorianCalendar.MINUTE, 0);
calendar.set(GregorianCalendar.SECOND, 0);
calendar.set(GregorianCalendar.MILLISECOND, 0);
return calendar.getTime();
}
}

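Example: normalizing a computed limit before comparing it with user-supplied dates, so that a time-of-day difference cannot push a date past its configured maximum:

    Date limit = TimeHelpers.toMidnightUTC(new Date());
    // e.g. 2023-06-09T14:30:05.123Z becomes 2023-06-09T00:00:00.000Z
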
View File

@@ -122,3 +122,5 @@ org.dspace.app.rest.exception.EPersonNameNotProvidedException.message = The eper
org.dspace.app.rest.exception.GroupNameNotProvidedException.message = Cannot create group, no group name is provided org.dspace.app.rest.exception.GroupNameNotProvidedException.message = Cannot create group, no group name is provided
org.dspace.app.rest.exception.GroupHasPendingWorkflowTasksException.message = Cannot delete group, the associated workflow role still has pending tasks org.dspace.app.rest.exception.GroupHasPendingWorkflowTasksException.message = Cannot delete group, the associated workflow role still has pending tasks
org.dspace.app.rest.exception.PasswordNotValidException.message = New password is invalid. Valid passwords must be at least 8 characters long! org.dspace.app.rest.exception.PasswordNotValidException.message = New password is invalid. Valid passwords must be at least 8 characters long!
org.dspace.app.rest.exception.RESTBitstreamNotFoundException.message = Bitstream with uuid {0} could not be found in \
the repository

View File

@@ -1,29 +0,0 @@
--
-- Copyright 2010-2017 Boxfuse GmbH
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-----------------
-- This is the Oracle upgrade script from Flyway v4.2.0, copied/borrowed from:
-- https://github.com/flyway/flyway/blob/flyway-4.2.0/flyway-core/src/main/resources/org/flywaydb/core/internal/dbsupport/oracle/upgradeMetaDataTable.sql
--
-- The variables in this script are replaced in FlywayUpgradeUtils.upgradeFlywayTable()
------------------
DROP INDEX "${schema}"."${table}_vr_idx";
DROP INDEX "${schema}"."${table}_ir_idx";
ALTER TABLE "${schema}"."${table}" DROP COLUMN "version_rank";
ALTER TABLE "${schema}"."${table}" DROP PRIMARY KEY DROP INDEX;
ALTER TABLE "${schema}"."${table}" MODIFY "version" NULL;
ALTER TABLE "${schema}"."${table}" ADD CONSTRAINT "${table}_pk" PRIMARY KEY ("installed_rank");
UPDATE "${schema}"."${table}" SET "type"='BASELINE' WHERE "type"='INIT';

View File

@@ -15,7 +15,7 @@
-- --
----------------- -----------------
-- This is the PostgreSQL upgrade script from Flyway v4.2.0, copied/borrowed from: -- This is the PostgreSQL upgrade script from Flyway v4.2.0, copied/borrowed from:
-- https://github.com/flyway/flyway/blob/flyway-4.2.0/flyway-core/src/main/resources/org/flywaydb/core/internal/dbsupport/oracle/upgradeMetaDataTable.sql -- https://github.com/flyway/flyway/blob/flyway-4.2.0/flyway-core/src/main/resources/org/flywaydb/core/internal/dbsupport/postgresql/upgradeMetaDataTable.sql
-- --
-- The variables in this script are replaced in FlywayUpgradeUtils.upgradeFlywayTable() -- The variables in this script are replaced in FlywayUpgradeUtils.upgradeFlywayTable()
------------------ ------------------

View File

@@ -4,33 +4,25 @@
 in Production. Instead, DSpace uses the H2 Database to perform Unit Testing
 during development.
-By default, the DSpace Unit Testing environment configures H2 to run in
-"Oracle Mode" and initializes the H2 database using the scripts in this directory.
+By default, the DSpace Unit Testing environment configures H2 to run in memory
+and initializes the H2 database using the scripts in this directory. See
+`[src]/dspace-api/src/test/data/dspaceFolder/config/local.cfg`.
 These database migrations are automatically called by [Flyway](http://flywaydb.org/)
-when the `DatabaseManager` initializes itself (see `initializeDatabase()` method).
-The H2 migrations in this directory are *based on* the Oracle Migrations, but
-with some modifications in order to be valid in H2.
-## Oracle vs H2 script differences
-One of the primary differences between the Oracle scripts and these H2 ones
-is in the syntax of the `ALTER TABLE` command. Unfortunately, H2's syntax for
-that command differs greatly from Oracle (and PostgreSQL as well).
-Most of the remainder of the scripts contain the exact Oracle syntax (which is
-usually valid in H2), but you can always `diff` scripts of the same name
-for further syntax differences.
-For additional info see the [H2 SQL Grammar](http://www.h2database.com/html/grammar.html).
+in `DatabaseUtils`.
+The H2 migrations in this directory all use H2's grammar/syntax.
+For additional info see the [H2 SQL Grammar](https://www.h2database.com/html/grammar.html).
 ## More Information on Flyway
 The SQL scripts in this directory are H2-specific database migrations. They are
 used to automatically upgrade your DSpace database using [Flyway](http://flywaydb.org/).
 As such, these scripts are automatically called by Flyway when the DSpace
-`DatabaseManager` initializes itself (see `initializeDatabase()` method). During
-that process, Flyway determines which version of DSpace your database is using
+`DatabaseUtils` initializes.
+During that process, Flyway determines which version of DSpace your database is using
 and then executes the appropriate upgrade script(s) to bring it up to the latest
 version.

View File

@@ -1,90 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-- ===============================================================
-- WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
--
-- DO NOT MANUALLY RUN THIS DATABASE MIGRATION. IT WILL BE EXECUTED
-- AUTOMATICALLY (IF NEEDED) BY "FLYWAY" WHEN YOU STARTUP DSPACE.
-- http://flywaydb.org/
-- ===============================================================
-------------------------------------------------------------
-- This will create COMMUNITY handle metadata
-------------------------------------------------------------
insert into metadatavalue (metadata_field_id, text_value, text_lang, place, authority, confidence, dspace_object_id)
select distinct
T1.metadata_field_id as metadata_field_id,
concat('${handle.canonical.prefix}', h.handle) as text_value,
null as text_lang, 0 as place,
null as authority,
-1 as confidence,
c.uuid as dspace_object_id
from community c
left outer join handle h on h.resource_id = c.uuid
left outer join metadatavalue mv on mv.dspace_object_id = c.uuid
left outer join metadatafieldregistry mfr on mv.metadata_field_id = mfr.metadata_field_id
left outer join metadataschemaregistry msr on mfr.metadata_schema_id = msr.metadata_schema_id
cross join (select mfr.metadata_field_id as metadata_field_id from metadatafieldregistry mfr
left outer join metadataschemaregistry msr on mfr.metadata_schema_id = msr.metadata_schema_id
where msr.short_id = 'dc'
and mfr.element = 'identifier'
and mfr.qualifier = 'uri') T1
where uuid not in (
select c.uuid as uuid from community c
left outer join handle h on h.resource_id = c.uuid
left outer join metadatavalue mv on mv.dspace_object_id = c.uuid
left outer join metadatafieldregistry mfr on mv.metadata_field_id = mfr.metadata_field_id
left outer join metadataschemaregistry msr on mfr.metadata_schema_id = msr.metadata_schema_id
where msr.short_id = 'dc'
and mfr.element = 'identifier'
and mfr.qualifier = 'uri'
)
;
-------------------------------------------------------------
-- This will create COLLECTION handle metadata
-------------------------------------------------------------
insert into metadatavalue (metadata_field_id, text_value, text_lang, place, authority, confidence, dspace_object_id)
select distinct
T1.metadata_field_id as metadata_field_id,
concat('${handle.canonical.prefix}', h.handle) as text_value,
null as text_lang, 0 as place,
null as authority,
-1 as confidence,
c.uuid as dspace_object_id
from collection c
left outer join handle h on h.resource_id = c.uuid
left outer join metadatavalue mv on mv.dspace_object_id = c.uuid
left outer join metadatafieldregistry mfr on mv.metadata_field_id = mfr.metadata_field_id
left outer join metadataschemaregistry msr on mfr.metadata_schema_id = msr.metadata_schema_id
cross join (select mfr.metadata_field_id as metadata_field_id from metadatafieldregistry mfr
left outer join metadataschemaregistry msr on mfr.metadata_schema_id = msr.metadata_schema_id
where msr.short_id = 'dc'
and mfr.element = 'identifier'
and mfr.qualifier = 'uri') T1
where uuid not in (
select c.uuid as uuid from collection c
left outer join handle h on h.resource_id = c.uuid
left outer join metadatavalue mv on mv.dspace_object_id = c.uuid
left outer join metadatafieldregistry mfr on mv.metadata_field_id = mfr.metadata_field_id
left outer join metadataschemaregistry msr on mfr.metadata_schema_id = msr.metadata_schema_id
where msr.short_id = 'dc'
and mfr.element = 'identifier'
and mfr.qualifier = 'uri'
)
;

View File

@@ -1,84 +0,0 @@
# Oracle Flyway Database Migrations (i.e. Upgrades)
---
WARNING: Oracle Support is deprecated.
See https://github.com/DSpace/DSpace/issues/8214
---
The SQL scripts in this directory are Oracle-specific database migrations. They are
used to automatically upgrade your DSpace database using [Flyway](http://flywaydb.org/).
As such, these scripts are automatically called by Flyway when the DSpace
`DatabaseManager` initializes itself (see `initializeDatabase()` method). During
that process, Flyway determines which version of DSpace your database is using
and then executes the appropriate upgrade script(s) to bring it up to the latest
version.
If any failures occur, Flyway will roll back the upgrade script which resulted
in an error and log the issue in the DSpace log file at `[dspace]/log/dspace.log.[date]`.
**WARNING:** IT IS NOT RECOMMENDED TO RUN THESE SCRIPTS MANUALLY. If you do so,
Flyway may throw failures the next time you start up DSpace, as Flyway will
not realize you manually ran one or more scripts.
Please see the Flyway Documentation for more information: http://flywaydb.org/
## Oracle Porting Notes for the Curious
Oracle is missing quite a number of cool features found in Postgres, so
workarounds had to be found, most of which are hidden behind tests in
DatabaseManager. If Oracle is your DBMS, the workarounds are activated:
Oracle doesn't like ';' characters in JDBC SQL - they have all been removed
from the DSpace source, including code in the .sql file reader to strip ;'s.
browse code - LIMIT and OFFSET are used to limit browse results, and an
Oracle hack is used to limit the result set to a given size
Oracle has no boolean data type, so a new schema file was created that
uses NUMBER(1) (AKA 'integers') and code is inserted everywhere to use 0 for
false and 1 for true if DSpace is using Oracle.
Oracle doesn't have a TEXT data type either, so TEXT columns are defined
as VARCHAR2 in the Oracle-specific schema.
Oracle doesn't allow dynamic naming for objects, so our cute trick to
derive the name of the sequence by appending _seq to the table name
in a function doesn't work in Oracle - the workaround is to insert Oracle
code to generate the name of the sequence and then place that into
our SQL calls to generate a new ID (see the sketch below).
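For illustration, a minimal sketch of that workaround (using a hypothetical
`item` table, not taken from the original notes): the sequence name is built
in Java ("item" + "_seq") and spliced into the statement sent to Oracle:

```sql
-- "item_seq" was derived in Java by appending _seq to the table name
SELECT item_seq.nextval FROM dual;
```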
Oracle doesn't let you directly set the value of sequences, so
update-sequences.sql is forced to use a special script sequpdate.sql
to update the sequences.
Bitstream had a column 'size' which is a reserved word in Oracle,
so this had to be changed to 'size_bytes' with corresponding code changes.
VARCHAR2 has a limit of 4000 characters, so DSpace text data is limited to 4k.
Going to the CLOB data type can get around that, but it seemed like too much effort
for now. Note that with UTF-8 encoding, that 4k could translate to roughly 1300
characters in the worst case (every character taking up 3 bytes).
### UPDATE 5 April 2007
CLOBs are now used as follows:
MetadataValue:text_value
Community:introductory_text
Community:copyright_text
Collection:introductory_text
Collection:license
Collection:copyright_text
DatabaseManager had to have some of its type checking changed, because Oracle's
JDBC driver reports INTEGERs as type DECIMAL.
Oracle doesn't like it when you reference table names in lower case when
getting JDBC metadata for the tables, so they are converted in TableRow
to upper case.
### UPDATE 27 November 2012
Oracle complains with ORA-01408 if you attempt to create an index on a column which
has already had the UNIQUE constraint added (such an index is implicit in maintaining the uniqueness
of the column). See [DS-1370](https://jira.duraspace.org/browse/DS-1370) for details.
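A minimal sketch of that failure mode, with hypothetical table and index names:

```sql
CREATE TABLE example_t (code VARCHAR2(32) UNIQUE);
-- The UNIQUE constraint above already created an implicit index on "code",
-- so this explicit index fails with ORA-01408:
CREATE INDEX example_code_idx ON example_t(code);
```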

View File

@@ -1,550 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-- ===============================================================
-- WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
--
-- DO NOT MANUALLY RUN THIS DATABASE MIGRATION. IT WILL BE EXECUTED
-- AUTOMATICALLY (IF NEEDED) BY "FLYWAY" WHEN YOU STARTUP DSPACE.
-- http://flywaydb.org/
-- ===============================================================
CREATE SEQUENCE bitstreamformatregistry_seq;
CREATE SEQUENCE fileextension_seq;
CREATE SEQUENCE bitstream_seq;
CREATE SEQUENCE eperson_seq;
-- start group sequence at 0, since Anonymous group = 0
CREATE SEQUENCE epersongroup_seq MINVALUE 0 START WITH 0;
CREATE SEQUENCE item_seq;
CREATE SEQUENCE bundle_seq;
CREATE SEQUENCE item2bundle_seq;
CREATE SEQUENCE bundle2bitstream_seq;
CREATE SEQUENCE dctyperegistry_seq;
CREATE SEQUENCE dcvalue_seq;
CREATE SEQUENCE community_seq;
CREATE SEQUENCE collection_seq;
CREATE SEQUENCE community2community_seq;
CREATE SEQUENCE community2collection_seq;
CREATE SEQUENCE collection2item_seq;
CREATE SEQUENCE resourcepolicy_seq;
CREATE SEQUENCE epersongroup2eperson_seq;
CREATE SEQUENCE handle_seq;
CREATE SEQUENCE workspaceitem_seq;
CREATE SEQUENCE workflowitem_seq;
CREATE SEQUENCE tasklistitem_seq;
CREATE SEQUENCE registrationdata_seq;
CREATE SEQUENCE subscription_seq;
CREATE SEQUENCE history_seq;
CREATE SEQUENCE historystate_seq;
CREATE SEQUENCE communities2item_seq;
CREATE SEQUENCE itemsbyauthor_seq;
CREATE SEQUENCE itemsbytitle_seq;
CREATE SEQUENCE itemsbydate_seq;
CREATE SEQUENCE itemsbydateaccessioned_seq;
-------------------------------------------------------
-- BitstreamFormatRegistry table
-------------------------------------------------------
CREATE TABLE BitstreamFormatRegistry
(
bitstream_format_id INTEGER PRIMARY KEY,
mimetype VARCHAR2(48),
short_description VARCHAR2(128) UNIQUE,
description VARCHAR2(2000),
support_level INTEGER,
-- Identifies internal types
internal NUMBER(1)
);
-------------------------------------------------------
-- FileExtension table
-------------------------------------------------------
CREATE TABLE FileExtension
(
file_extension_id INTEGER PRIMARY KEY,
bitstream_format_id INTEGER REFERENCES BitstreamFormatRegistry(bitstream_format_id),
extension VARCHAR2(16)
);
-------------------------------------------------------
-- Bitstream table
-------------------------------------------------------
CREATE TABLE Bitstream
(
bitstream_id INTEGER PRIMARY KEY,
bitstream_format_id INTEGER REFERENCES BitstreamFormatRegistry(bitstream_format_id),
name VARCHAR2(256),
size_bytes INTEGER,
checksum VARCHAR2(64),
checksum_algorithm VARCHAR2(32),
description VARCHAR2(2000),
user_format_description VARCHAR2(2000),
source VARCHAR2(256),
internal_id VARCHAR2(256),
deleted NUMBER(1),
store_number INTEGER,
sequence_id INTEGER
);
-------------------------------------------------------
-- EPerson table
-------------------------------------------------------
CREATE TABLE EPerson
(
eperson_id INTEGER PRIMARY KEY,
email VARCHAR2(64) UNIQUE,
password VARCHAR2(64),
firstname VARCHAR2(64),
lastname VARCHAR2(64),
can_log_in NUMBER(1),
require_certificate NUMBER(1),
self_registered NUMBER(1),
last_active TIMESTAMP,
sub_frequency INTEGER,
phone VARCHAR2(32)
);
-------------------------------------------------------
-- EPersonGroup table
-------------------------------------------------------
CREATE TABLE EPersonGroup
(
eperson_group_id INTEGER PRIMARY KEY,
name VARCHAR2(256) UNIQUE
);
-------------------------------------------------------
-- Item table
-------------------------------------------------------
CREATE TABLE Item
(
item_id INTEGER PRIMARY KEY,
submitter_id INTEGER REFERENCES EPerson(eperson_id),
in_archive NUMBER(1),
withdrawn NUMBER(1),
last_modified TIMESTAMP,
owning_collection INTEGER
);
-------------------------------------------------------
-- Bundle table
-------------------------------------------------------
CREATE TABLE Bundle
(
bundle_id INTEGER PRIMARY KEY,
mets_bitstream_id INTEGER REFERENCES Bitstream(bitstream_id),
name VARCHAR2(16), -- ORIGINAL | THUMBNAIL | TEXT
primary_bitstream_id INTEGER REFERENCES Bitstream(bitstream_id)
);
-------------------------------------------------------
-- Item2Bundle table
-------------------------------------------------------
CREATE TABLE Item2Bundle
(
id INTEGER PRIMARY KEY,
item_id INTEGER REFERENCES Item(item_id),
bundle_id INTEGER REFERENCES Bundle(bundle_id)
);
-- index by item_id
CREATE INDEX item2bundle_item_idx on Item2Bundle(item_id);
-------------------------------------------------------
-- Bundle2Bitstream table
-------------------------------------------------------
CREATE TABLE Bundle2Bitstream
(
id INTEGER PRIMARY KEY,
bundle_id INTEGER REFERENCES Bundle(bundle_id),
bitstream_id INTEGER REFERENCES Bitstream(bitstream_id)
);
-- index by bundle_id
CREATE INDEX bundle2bitstream_bundle_idx ON Bundle2Bitstream(bundle_id);
-------------------------------------------------------
-- DCTypeRegistry table
-------------------------------------------------------
CREATE TABLE DCTypeRegistry
(
dc_type_id INTEGER PRIMARY KEY,
element VARCHAR2(64),
qualifier VARCHAR2(64),
scope_note VARCHAR2(2000),
UNIQUE(element, qualifier)
);
-------------------------------------------------------
-- DCValue table
-------------------------------------------------------
CREATE TABLE DCValue
(
dc_value_id INTEGER PRIMARY KEY,
item_id INTEGER REFERENCES Item(item_id),
dc_type_id INTEGER REFERENCES DCTypeRegistry(dc_type_id),
text_value VARCHAR2(2000),
text_lang VARCHAR2(24),
place INTEGER,
source_id INTEGER
);
-- An index for item_id - almost all access is based on
-- instantiating the item object, which grabs all dcvalues
-- related to that item
CREATE INDEX dcvalue_item_idx on DCValue(item_id);
-------------------------------------------------------
-- Community table
-------------------------------------------------------
CREATE TABLE Community
(
community_id INTEGER PRIMARY KEY,
name VARCHAR2(128) UNIQUE,
short_description VARCHAR2(512),
introductory_text VARCHAR2(2000),
logo_bitstream_id INTEGER REFERENCES Bitstream(bitstream_id),
copyright_text VARCHAR2(2000),
side_bar_text VARCHAR2(2000)
);
-------------------------------------------------------
-- Collection table
-------------------------------------------------------
CREATE TABLE Collection
(
collection_id INTEGER PRIMARY KEY,
name VARCHAR2(128),
short_description VARCHAR2(512),
introductory_text VARCHAR2(2000),
logo_bitstream_id INTEGER REFERENCES Bitstream(bitstream_id),
template_item_id INTEGER REFERENCES Item(item_id),
provenance_description VARCHAR2(2000),
license VARCHAR2(2000),
copyright_text VARCHAR2(2000),
side_bar_text VARCHAR2(2000),
workflow_step_1 INTEGER REFERENCES EPersonGroup( eperson_group_id ),
workflow_step_2 INTEGER REFERENCES EPersonGroup( eperson_group_id ),
workflow_step_3 INTEGER REFERENCES EPersonGroup( eperson_group_id )
);
-------------------------------------------------------
-- Community2Community table
-------------------------------------------------------
CREATE TABLE Community2Community
(
id INTEGER PRIMARY KEY,
parent_comm_id INTEGER REFERENCES Community(community_id),
child_comm_id INTEGER REFERENCES Community(community_id)
);
-------------------------------------------------------
-- Community2Collection table
-------------------------------------------------------
CREATE TABLE Community2Collection
(
id INTEGER PRIMARY KEY,
community_id INTEGER REFERENCES Community(community_id),
collection_id INTEGER REFERENCES Collection(collection_id)
);
-------------------------------------------------------
-- Collection2Item table
-------------------------------------------------------
CREATE TABLE Collection2Item
(
id INTEGER PRIMARY KEY,
collection_id INTEGER REFERENCES Collection(collection_id),
item_id INTEGER REFERENCES Item(item_id)
);
-- index by collection_id
CREATE INDEX collection2item_collection_idx ON Collection2Item(collection_id);
-------------------------------------------------------
-- ResourcePolicy table
-------------------------------------------------------
CREATE TABLE ResourcePolicy
(
policy_id INTEGER PRIMARY KEY,
resource_type_id INTEGER,
resource_id INTEGER,
action_id INTEGER,
eperson_id INTEGER REFERENCES EPerson(eperson_id),
epersongroup_id INTEGER REFERENCES EPersonGroup(eperson_group_id),
start_date DATE,
end_date DATE
);
-- index by resource_type,resource_id - all queries by
-- authorization manager are select type=x, id=y, action=z
CREATE INDEX resourcepolicy_type_id_idx ON ResourcePolicy(resource_type_id,resource_id);
-------------------------------------------------------
-- EPersonGroup2EPerson table
-------------------------------------------------------
CREATE TABLE EPersonGroup2EPerson
(
id INTEGER PRIMARY KEY,
eperson_group_id INTEGER REFERENCES EPersonGroup(eperson_group_id),
eperson_id INTEGER REFERENCES EPerson(eperson_id)
);
-- Index by group ID (used heavily by AuthorizeManager)
CREATE INDEX epersongroup2eperson_group_idx on EPersonGroup2EPerson(eperson_group_id);
-------------------------------------------------------
-- Handle table
-------------------------------------------------------
CREATE TABLE Handle
(
handle_id INTEGER PRIMARY KEY,
handle VARCHAR2(256) UNIQUE,
resource_type_id INTEGER,
resource_id INTEGER
);
-------------------------------------------------------
-- WorkspaceItem table
-------------------------------------------------------
CREATE TABLE WorkspaceItem
(
workspace_item_id INTEGER PRIMARY KEY,
item_id INTEGER REFERENCES Item(item_id),
collection_id INTEGER REFERENCES Collection(collection_id),
-- Answers to questions on first page of submit UI
multiple_titles NUMBER(1), -- boolean
published_before NUMBER(1),
multiple_files NUMBER(1),
  -- How far the user has gotten in the submission process
stage_reached INTEGER
);
-------------------------------------------------------
-- WorkflowItem table
-------------------------------------------------------
CREATE TABLE WorkflowItem
(
workflow_id INTEGER PRIMARY KEY,
item_id INTEGER UNIQUE REFERENCES Item(item_id),
collection_id INTEGER REFERENCES Collection(collection_id),
state INTEGER,
owner INTEGER REFERENCES EPerson(eperson_id),
-- Answers to questions on first page of submit UI
multiple_titles NUMBER(1),
published_before NUMBER(1),
multiple_files NUMBER(1)
-- Note: stage reached not applicable here - people involved in workflow
-- can always jump around submission UI
);
-------------------------------------------------------
-- TasklistItem table
-------------------------------------------------------
CREATE TABLE TasklistItem
(
tasklist_id INTEGER PRIMARY KEY,
eperson_id INTEGER REFERENCES EPerson(eperson_id),
workflow_id INTEGER REFERENCES WorkflowItem(workflow_id)
);
-------------------------------------------------------
-- RegistrationData table
-------------------------------------------------------
CREATE TABLE RegistrationData
(
registrationdata_id INTEGER PRIMARY KEY,
email VARCHAR2(64) UNIQUE,
token VARCHAR2(48),
expires TIMESTAMP
);
-------------------------------------------------------
-- Subscription table
-------------------------------------------------------
CREATE TABLE Subscription
(
subscription_id INTEGER PRIMARY KEY,
eperson_id INTEGER REFERENCES EPerson(eperson_id),
collection_id INTEGER REFERENCES Collection(collection_id)
);
-------------------------------------------------------
-- History table
-------------------------------------------------------
CREATE TABLE History
(
history_id INTEGER PRIMARY KEY,
-- When it was stored
creation_date TIMESTAMP,
  -- A checksum to keep serializations from being stored more than once
checksum VARCHAR2(32) UNIQUE
);
-------------------------------------------------------
-- HistoryState table
-------------------------------------------------------
CREATE TABLE HistoryState
(
history_state_id INTEGER PRIMARY KEY,
object_id VARCHAR2(64)
);
------------------------------------------------------------
-- Browse subsystem tables and views
------------------------------------------------------------
-------------------------------------------------------
-- Communities2Item table
-------------------------------------------------------
CREATE TABLE Communities2Item
(
id INTEGER PRIMARY KEY,
community_id INTEGER REFERENCES Community(community_id),
item_id INTEGER REFERENCES Item(item_id)
);
-------------------------------------------------------
-- Community2Item view
------------------------------------------------------
CREATE VIEW Community2Item as
SELECT Community2Collection.community_id, Collection2Item.item_id
FROM Community2Collection, Collection2Item
WHERE Collection2Item.collection_id = Community2Collection.collection_id
;
-------------------------------------------------------
-- ItemsByAuthor table
-------------------------------------------------------
CREATE TABLE ItemsByAuthor
(
items_by_author_id INTEGER PRIMARY KEY,
item_id INTEGER REFERENCES Item(item_id),
author VARCHAR2(2000),
sort_author VARCHAR2(2000)
);
-- index by sort_author, of course!
CREATE INDEX sort_author_idx on ItemsByAuthor(sort_author);
-------------------------------------------------------
-- CollectionItemsByAuthor view
-------------------------------------------------------
CREATE VIEW CollectionItemsByAuthor as
SELECT Collection2Item.collection_id, ItemsByAuthor.*
FROM ItemsByAuthor, Collection2Item
WHERE ItemsByAuthor.item_id = Collection2Item.item_id
;
-------------------------------------------------------
-- CommunityItemsByAuthor view
-------------------------------------------------------
CREATE VIEW CommunityItemsByAuthor as
SELECT Communities2Item.community_id, ItemsByAuthor.*
FROM ItemsByAuthor, Communities2Item
WHERE ItemsByAuthor.item_id = Communities2Item.item_id
;
----------------------------------------
-- ItemsByTitle table
----------------------------------------
CREATE TABLE ItemsByTitle
(
items_by_title_id INTEGER PRIMARY KEY,
item_id INTEGER REFERENCES Item(item_id),
title VARCHAR2(2000),
sort_title VARCHAR2(2000)
);
-- index by the sort_title
CREATE INDEX sort_title_idx on ItemsByTitle(sort_title);
-------------------------------------------------------
-- CollectionItemsByTitle view
-------------------------------------------------------
CREATE VIEW CollectionItemsByTitle as
SELECT Collection2Item.collection_id, ItemsByTitle.*
FROM ItemsByTitle, Collection2Item
WHERE ItemsByTitle.item_id = Collection2Item.item_id
;
-------------------------------------------------------
-- CommunityItemsByTitle view
-------------------------------------------------------
CREATE VIEW CommunityItemsByTitle as
SELECT Communities2Item.community_id, ItemsByTitle.*
FROM ItemsByTitle, Communities2Item
WHERE ItemsByTitle.item_id = Communities2Item.item_id
;
-------------------------------------------------------
-- ItemsByDate table
-------------------------------------------------------
CREATE TABLE ItemsByDate
(
items_by_date_id INTEGER PRIMARY KEY,
item_id INTEGER REFERENCES Item(item_id),
date_issued VARCHAR2(2000)
);
-- sort by date
CREATE INDEX date_issued_idx on ItemsByDate(date_issued);
-------------------------------------------------------
-- CollectionItemsByDate view
-------------------------------------------------------
CREATE VIEW CollectionItemsByDate as
SELECT Collection2Item.collection_id, ItemsByDate.*
FROM ItemsByDate, Collection2Item
WHERE ItemsByDate.item_id = Collection2Item.item_id
;
-------------------------------------------------------
-- CommunityItemsByDate view
-------------------------------------------------------
CREATE VIEW CommunityItemsByDate as
SELECT Communities2Item.community_id, ItemsByDate.*
FROM ItemsByDate, Communities2Item
WHERE ItemsByDate.item_id = Communities2Item.item_id
;
-------------------------------------------------------
-- ItemsByDateAccessioned table
-------------------------------------------------------
CREATE TABLE ItemsByDateAccessioned
(
items_by_date_accessioned_id INTEGER PRIMARY KEY,
item_id INTEGER REFERENCES Item(item_id),
date_accessioned VARCHAR2(2000)
);
-------------------------------------------------------
-- CollectionItemsByDateAccession view
-------------------------------------------------------
CREATE VIEW CollectionItemsByDateAccession as
SELECT Collection2Item.collection_id, ItemsByDateAccessioned.*
FROM ItemsByDateAccessioned, Collection2Item
WHERE ItemsByDateAccessioned.item_id = Collection2Item.item_id
;
-------------------------------------------------------
-- CommunityItemsByDateAccession view
-------------------------------------------------------
CREATE VIEW CommunityItemsByDateAccession as
SELECT Communities2Item.community_id, ItemsByDateAccessioned.*
FROM ItemsByDateAccessioned, Communities2Item
WHERE ItemsByDateAccessioned.item_id = Communities2Item.item_id
;

View File

@@ -1,57 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-- ===============================================================
-- WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
--
-- DO NOT MANUALLY RUN THIS DATABASE MIGRATION. IT WILL BE EXECUTED
-- AUTOMATICALLY (IF NEEDED) BY "FLYWAY" WHEN YOU STARTUP DSPACE.
-- http://flywaydb.org/
-- ===============================================================
CREATE SEQUENCE epersongroup2workspaceitem_seq;
-------------------------------------------------------------------------------
-- create the new EPersonGroup2WorkspaceItem table
-------------------------------------------------------------------------------
CREATE TABLE EPersonGroup2WorkspaceItem
(
id INTEGER PRIMARY KEY,
eperson_group_id INTEGER REFERENCES EPersonGroup(eperson_group_id),
workspace_item_id INTEGER REFERENCES WorkspaceItem(workspace_item_id)
);
-------------------------------------------------------------------------------
-- modification to collection table to support being able to change the
-- submitter and collection admin group names
-------------------------------------------------------------------------------
ALTER TABLE collection ADD submitter INTEGER REFERENCES EPersonGroup(eperson_group_id);
ALTER TABLE collection ADD admin INTEGER REFERENCES EPersonGroup(eperson_group_id);
ALTER TABLE eperson ADD netid VARCHAR2(64) UNIQUE;
-------------------------------------------------------------------------------
-- Additional indices for performance
-------------------------------------------------------------------------------
-- index by resource id and resource type id
CREATE INDEX handle_resource_id_type_idx ON handle(resource_id, resource_type_id);
-- Indexing browse tables update/re-index performance
CREATE INDEX Communities2Item_item_id_idx ON Communities2Item( item_id );
CREATE INDEX ItemsByAuthor_item_id_idx ON ItemsByAuthor(item_id);
CREATE INDEX ItemsByTitle_item_id_idx ON ItemsByTitle(item_id);
CREATE INDEX ItemsByDate_item_id_idx ON ItemsByDate(item_id);
CREATE INDEX ItemsByDateAcc_item_id_idx ON ItemsByDateAccessioned(item_id);
-- Improve mapping tables
CREATE INDEX Com2Coll_community_id_idx ON Community2Collection(community_id);
CREATE INDEX Com2Coll_collection_id_idx ON Community2Collection(collection_id);
CREATE INDEX Coll2Item_item_id_idx ON Collection2Item( item_id );

View File

@@ -1,133 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-- ===============================================================
-- WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
--
-- DO NOT MANUALLY RUN THIS DATABASE MIGRATION. IT WILL BE EXECUTED
-- AUTOMATICALLY (IF NEEDED) BY "FLYWAY" WHEN YOU STARTUP DSPACE.
-- http://flywaydb.org/
-- ===============================================================
---------------------------------------
-- Update MetadataValue to include CLOB
---------------------------------------
CREATE TABLE MetadataValueTemp
(
metadata_value_id INTEGER PRIMARY KEY,
item_id INTEGER REFERENCES Item(item_id),
metadata_field_id INTEGER REFERENCES MetadataFieldRegistry(metadata_field_id),
text_value CLOB,
text_lang VARCHAR(64),
place INTEGER
);
INSERT INTO MetadataValueTemp
SELECT * FROM MetadataValue;
DROP VIEW dcvalue;
DROP TABLE MetadataValue;
ALTER TABLE MetadataValueTemp RENAME TO MetadataValue;
CREATE VIEW dcvalue AS
SELECT MetadataValue.metadata_value_id AS "dc_value_id", MetadataValue.item_id,
MetadataValue.metadata_field_id AS "dc_type_id", MetadataValue.text_value,
MetadataValue.text_lang, MetadataValue.place
FROM MetadataValue, MetadataFieldRegistry
WHERE MetadataValue.metadata_field_id = MetadataFieldRegistry.metadata_field_id
AND MetadataFieldRegistry.metadata_schema_id = 1;
CREATE INDEX metadatavalue_item_idx ON MetadataValue(item_id);
CREATE INDEX metadatavalue_item_idx2 ON MetadataValue(item_id,metadata_field_id);
------------------------------------
-- Update Community to include CLOBs
------------------------------------
CREATE TABLE CommunityTemp
(
community_id INTEGER PRIMARY KEY,
name VARCHAR2(128),
short_description VARCHAR2(512),
introductory_text CLOB,
logo_bitstream_id INTEGER REFERENCES Bitstream(bitstream_id),
copyright_text CLOB,
side_bar_text VARCHAR2(2000)
);
INSERT INTO CommunityTemp
SELECT * FROM Community;
DROP TABLE Community CASCADE CONSTRAINTS;
ALTER TABLE CommunityTemp RENAME TO Community;
ALTER TABLE Community2Community ADD CONSTRAINT fk_c2c_parent
FOREIGN KEY (parent_comm_id)
REFERENCES Community (community_id);
ALTER TABLE Community2Community ADD CONSTRAINT fk_c2c_child
FOREIGN KEY (child_comm_id)
REFERENCES Community (community_id);
ALTER TABLE Community2Collection ADD CONSTRAINT fk_c2c_community
FOREIGN KEY (community_id)
REFERENCES Community (community_id);
ALTER TABLE Communities2Item ADD CONSTRAINT fk_c2i_community
FOREIGN KEY (community_id)
REFERENCES Community (community_id);
-------------------------------------
-- Update Collection to include CLOBs
-------------------------------------
CREATE TABLE CollectionTemp
(
collection_id INTEGER PRIMARY KEY,
name VARCHAR2(128),
short_description VARCHAR2(512),
introductory_text CLOB,
logo_bitstream_id INTEGER REFERENCES Bitstream(bitstream_id),
template_item_id INTEGER REFERENCES Item(item_id),
provenance_description VARCHAR2(2000),
license CLOB,
copyright_text CLOB,
side_bar_text VARCHAR2(2000),
workflow_step_1 INTEGER REFERENCES EPersonGroup( eperson_group_id ),
workflow_step_2 INTEGER REFERENCES EPersonGroup( eperson_group_id ),
workflow_step_3 INTEGER REFERENCES EPersonGroup( eperson_group_id ),
submitter INTEGER REFERENCES EPersonGroup( eperson_group_id ),
admin INTEGER REFERENCES EPersonGroup( eperson_group_id )
);
INSERT INTO CollectionTemp
SELECT * FROM Collection;
DROP TABLE Collection CASCADE CONSTRAINTS;
ALTER TABLE CollectionTemp RENAME TO Collection;
ALTER TABLE Community2Collection ADD CONSTRAINT fk_c2c_collection
FOREIGN KEY (collection_id)
REFERENCES Collection (collection_id);
ALTER TABLE Collection2Item ADD CONSTRAINT fk_c2i_collection
FOREIGN KEY (collection_id)
REFERENCES Collection (collection_id);
ALTER TABLE WorkspaceItem ADD CONSTRAINT fk_wsi_collection
FOREIGN KEY (collection_id)
REFERENCES Collection (collection_id);
ALTER TABLE WorkflowItem ADD CONSTRAINT fk_wfi_collection
FOREIGN KEY (collection_id)
REFERENCES Collection (collection_id);
ALTER TABLE Subscription ADD CONSTRAINT fk_subs_collection
FOREIGN KEY (collection_id)
REFERENCES Collection (collection_id);

View File

@@ -1,371 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-- ===============================================================
-- WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
--
-- DO NOT MANUALLY RUN THIS DATABASE MIGRATION. IT WILL BE EXECUTED
-- AUTOMATICALLY (IF NEEDED) BY "FLYWAY" WHEN YOU STARTUP DSPACE.
-- http://flywaydb.org/
-- ===============================================================
-------------------------------------------------------------------------------
-- Sequences for Group within Group feature
-------------------------------------------------------------------------------
CREATE SEQUENCE group2group_seq;
CREATE SEQUENCE group2groupcache_seq;
------------------------------------------------------
-- Group2Group table, records group membership in other groups
------------------------------------------------------
CREATE TABLE Group2Group
(
id INTEGER PRIMARY KEY,
parent_id INTEGER REFERENCES EPersonGroup(eperson_group_id),
child_id INTEGER REFERENCES EPersonGroup(eperson_group_id)
);
------------------------------------------------------
-- Group2GroupCache table, is the 'unwound' hierarchy in
-- Group2Group. It explicitly names every parent child
-- relationship, even with nested groups. For example,
-- If Group2Group lists B is a child of A and C is a child of B,
-- this table will have entries for parent(A,B), and parent(B,C)
-- AND parent(A,C) so that all of the child groups of A can be
-- looked up in a single simple query
------------------------------------------------------
CREATE TABLE Group2GroupCache
(
id INTEGER PRIMARY KEY,
parent_id INTEGER REFERENCES EPersonGroup(eperson_group_id),
child_id INTEGER REFERENCES EPersonGroup(eperson_group_id)
);
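------------------------------------------------------
-- Editor's sketch (assumption, not part of the original migration): one way
-- the 'unwound' cache rows described above could be derived from Group2Group,
-- using Oracle's hierarchical queries. It assumes group2groupcache_seq
-- (created above) supplies the ids; for the example hierarchy, it yields
-- (A,B), (B,C) and (A,C).
------------------------------------------------------
INSERT INTO Group2GroupCache (id, parent_id, child_id)
SELECT group2groupcache_seq.nextval, ancestor_id, child_id
FROM (
  -- CONNECT_BY_ROOT keeps the ancestor at the top of each traversal path
  SELECT DISTINCT CONNECT_BY_ROOT parent_id AS ancestor_id, child_id
  FROM Group2Group
  CONNECT BY PRIOR child_id = parent_id
);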
-------------------------------------------------------
-- New Metadata Tables and Sequences
-------------------------------------------------------
CREATE SEQUENCE metadataschemaregistry_seq;
CREATE SEQUENCE metadatafieldregistry_seq;
CREATE SEQUENCE metadatavalue_seq;
-- MetadataSchemaRegistry table
CREATE TABLE MetadataSchemaRegistry
(
metadata_schema_id INTEGER PRIMARY KEY,
namespace VARCHAR(256) UNIQUE,
short_id VARCHAR(32)
);
-- MetadataFieldRegistry table
CREATE TABLE MetadataFieldRegistry
(
metadata_field_id INTEGER PRIMARY KEY,
metadata_schema_id INTEGER NOT NULL REFERENCES MetadataSchemaRegistry(metadata_schema_id),
element VARCHAR(64),
qualifier VARCHAR(64),
scope_note VARCHAR2(2000)
);
-- MetadataValue table
CREATE TABLE MetadataValue
(
metadata_value_id INTEGER PRIMARY KEY,
item_id INTEGER REFERENCES Item(item_id),
metadata_field_id INTEGER REFERENCES MetadataFieldRegistry(metadata_field_id),
text_value VARCHAR2(2000),
text_lang VARCHAR(24),
place INTEGER
);
-- Create the DC schema
INSERT INTO MetadataSchemaRegistry VALUES (1,'http://dublincore.org/documents/dcmi-terms/','dc');
-- Migrate the existing DCTypes into the new metadata field registry
INSERT INTO MetadataFieldRegistry
(metadata_schema_id, metadata_field_id, element, qualifier, scope_note)
SELECT '1' AS metadata_schema_id, dc_type_id, element,
qualifier, scope_note FROM dctyperegistry;
-- Copy the DCValues into the new MetadataValue table
INSERT INTO MetadataValue (item_id, metadata_field_id, text_value, text_lang, place)
SELECT item_id, dc_type_id, text_value, text_lang, place FROM dcvalue;
DROP TABLE dcvalue;
CREATE VIEW dcvalue AS
SELECT MetadataValue.metadata_value_id AS "dc_value_id", MetadataValue.item_id,
MetadataValue.metadata_field_id AS "dc_type_id", MetadataValue.text_value,
MetadataValue.text_lang, MetadataValue.place
FROM MetadataValue, MetadataFieldRegistry
WHERE MetadataValue.metadata_field_id = MetadataFieldRegistry.metadata_field_id
AND MetadataFieldRegistry.metadata_schema_id = 1;
-- After copying data from dctyperegistry into the new metadata registry tables, we need to reset our sequences
-- Update metadatafieldregistry_seq to new max value
DECLARE
curr NUMBER := 0;
BEGIN
SELECT max(metadata_field_id) INTO curr FROM metadatafieldregistry;
curr := curr + 1;
EXECUTE IMMEDIATE 'DROP SEQUENCE metadatafieldregistry_seq';
EXECUTE IMMEDIATE 'CREATE SEQUENCE metadatafieldregistry_seq START WITH ' || NVL(curr,1);
END;
/
-- Update metadatavalue_seq to new max value
DECLARE
curr NUMBER := 0;
BEGIN
SELECT max(metadata_value_id) INTO curr FROM metadatavalue;
curr := curr + 1;
EXECUTE IMMEDIATE 'DROP SEQUENCE metadatavalue_seq';
EXECUTE IMMEDIATE 'CREATE SEQUENCE metadatavalue_seq START WITH ' || NVL(curr,1);
END;
/
-- Update metadataschemaregistry_seq to new max value
DECLARE
curr NUMBER := 0;
BEGIN
SELECT max(metadata_schema_id) INTO curr FROM metadataschemaregistry;
curr := curr + 1;
EXECUTE IMMEDIATE 'DROP SEQUENCE metadataschemaregistry_seq';
EXECUTE IMMEDIATE 'CREATE SEQUENCE metadataschemaregistry_seq START WITH ' || NVL(curr,1);
END;
/
-- Drop the old dctyperegistry
DROP TABLE dctyperegistry;
-- create indexes for the metadata tables
CREATE INDEX metadatavalue_item_idx ON MetadataValue(item_id);
CREATE INDEX metadatavalue_item_idx2 ON MetadataValue(item_id,metadata_field_id);
CREATE INDEX metadatafield_schema_idx ON MetadataFieldRegistry(metadata_schema_id);
-------------------------------------------------------
-- Create the checksum checker tables
-------------------------------------------------------
-- list of the possible results as determined
-- by the system or an administrator
CREATE TABLE checksum_results
(
result_code VARCHAR(64) PRIMARY KEY,
result_description VARCHAR2(2000)
);
-- This table has a one-to-one relationship
-- with the bitstream table. A row will be inserted
-- every time a row is inserted into the bitstream table, and
-- that row will be updated every time the checksum is
-- re-calculated.
CREATE TABLE most_recent_checksum
(
bitstream_id INTEGER PRIMARY KEY,
to_be_processed NUMBER(1) NOT NULL,
expected_checksum VARCHAR(64) NOT NULL,
current_checksum VARCHAR(64) NOT NULL,
last_process_start_date TIMESTAMP NOT NULL,
last_process_end_date TIMESTAMP NOT NULL,
checksum_algorithm VARCHAR(64) NOT NULL,
matched_prev_checksum NUMBER(1) NOT NULL,
result VARCHAR(64) REFERENCES checksum_results(result_code)
);
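-- Editor's sketch (assumption): the per-recalculation update described above,
-- with :bind variables standing in for values computed by the checksum checker.
UPDATE most_recent_checksum
   SET current_checksum = :calculated,
       matched_prev_checksum = CASE WHEN expected_checksum = :calculated
                                    THEN 1 ELSE 0 END,
       last_process_start_date = :started,
       last_process_end_date = :ended
 WHERE bitstream_id = :bitstream_id;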
-- A row will be inserted into this table every
-- time a checksum is re-calculated.
CREATE SEQUENCE checksum_history_seq;
CREATE TABLE checksum_history
(
check_id INTEGER PRIMARY KEY,
bitstream_id INTEGER,
process_start_date TIMESTAMP,
process_end_date TIMESTAMP,
checksum_expected VARCHAR(64),
checksum_calculated VARCHAR(64),
result VARCHAR(64) REFERENCES checksum_results(result_code)
);
-- this will insert into the result code
-- the initial results
insert into checksum_results
values
(
'INVALID_HISTORY',
'Install of the checksum checking code; do not consider this history as valid'
);
insert into checksum_results
values
(
'BITSTREAM_NOT_FOUND',
'The bitstream could not be found'
);
insert into checksum_results
values
(
'CHECKSUM_MATCH',
'Current checksum matched previous checksum'
);
insert into checksum_results
values
(
'CHECKSUM_NO_MATCH',
'Current checksum does not match previous checksum'
);
insert into checksum_results
values
(
'CHECKSUM_PREV_NOT_FOUND',
'Previous checksum was not found: no comparison possible'
);
insert into checksum_results
values
(
'BITSTREAM_INFO_NOT_FOUND',
'Bitstream info not found'
);
insert into checksum_results
values
(
'CHECKSUM_ALGORITHM_INVALID',
'Invalid checksum algorithm'
);
insert into checksum_results
values
(
'BITSTREAM_NOT_PROCESSED',
'Bitstream marked to_be_processed=false'
);
insert into checksum_results
values
(
'BITSTREAM_MARKED_DELETED',
'Bitstream marked deleted in bitstream table'
);
-- this will insert into the most recent checksum
-- on install all existing bitstreams
-- setting all bitstreams already set as
-- deleted to not be processed
insert into most_recent_checksum
(
bitstream_id,
to_be_processed,
expected_checksum,
current_checksum,
last_process_start_date,
last_process_end_date,
checksum_algorithm,
matched_prev_checksum
)
select
bitstream.bitstream_id,
'1',
CASE WHEN bitstream.checksum IS NULL THEN '' ELSE bitstream.checksum END,
CASE WHEN bitstream.checksum IS NULL THEN '' ELSE bitstream.checksum END,
TO_TIMESTAMP(TO_CHAR(current_timestamp, 'DD-MM-RRRR HH24:MI:SS'), 'DD-MM-RRRR HH24:MI:SS'),
TO_TIMESTAMP(TO_CHAR(current_timestamp, 'DD-MM-RRRR HH24:MI:SS'), 'DD-MM-RRRR HH24:MI:SS'),
CASE WHEN bitstream.checksum_algorithm IS NULL THEN 'MD5' ELSE bitstream.checksum_algorithm END,
'1'
from bitstream;
-- Update all the deleted checksums
-- to not be checked
-- because they have since been
-- deleted from the system
update most_recent_checksum
set to_be_processed = 0
where most_recent_checksum.bitstream_id in (
select bitstream_id
from bitstream where deleted = '1' );
-- this will insert into history table
-- for the initial start
-- we want to tell the users to disregard the initial
-- inserts into the checksum history table
insert into checksum_history
(
bitstream_id,
process_start_date,
process_end_date,
checksum_expected,
checksum_calculated
)
select most_recent_checksum.bitstream_id,
most_recent_checksum.last_process_end_date,
TO_TIMESTAMP(TO_CHAR(current_timestamp, 'DD-MM-RRRR HH24:MI:SS'), 'DD-MM-RRRR HH24:MI:SS'),
most_recent_checksum.expected_checksum,
most_recent_checksum.expected_checksum
FROM most_recent_checksum;
-- update the history to indicate that this was
-- the first time the software was installed
update checksum_history
set result = 'INVALID_HISTORY';
-------------------------------------------------------
-- Table and views for 'browse by subject' functionality
-------------------------------------------------------
CREATE SEQUENCE itemsbysubject_seq;
-------------------------------------------------------
-- ItemsBySubject table
-------------------------------------------------------
CREATE TABLE ItemsBySubject
(
items_by_subject_id INTEGER PRIMARY KEY,
item_id INTEGER REFERENCES Item(item_id),
subject VARCHAR2(2000),
sort_subject VARCHAR2(2000)
);
-- index by sort_subject
CREATE INDEX sort_subject_idx on ItemsBySubject(sort_subject);
-------------------------------------------------------
-- CollectionItemsBySubject view
-------------------------------------------------------
CREATE VIEW CollectionItemsBySubject as
SELECT Collection2Item.collection_id, ItemsBySubject.*
FROM ItemsBySubject, Collection2Item
WHERE ItemsBySubject.item_id = Collection2Item.item_id
;
-------------------------------------------------------
-- CommunityItemsBySubject view
-------------------------------------------------------
CREATE VIEW CommunityItemsBySubject as
SELECT Communities2Item.community_id, ItemsBySubject.*
FROM ItemsBySubject, Communities2Item
WHERE ItemsBySubject.item_id = Communities2Item.item_id
;

View File

@@ -1,142 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-- ===============================================================
-- WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
--
-- DO NOT MANUALLY RUN THIS DATABASE MIGRATION. IT WILL BE EXECUTED
-- AUTOMATICALLY (IF NEEDED) BY "FLYWAY" WHEN YOU STARTUP DSPACE.
-- http://flywaydb.org/
-- ===============================================================
-- Remove NOT NULL restrictions from the checksum columns of most_recent_checksum
ALTER TABLE most_recent_checksum MODIFY expected_checksum null;
ALTER TABLE most_recent_checksum MODIFY current_checksum null;
------------------------------------------------------
-- New column 'language' in EPerson
------------------------------------------------------
alter table eperson ADD language VARCHAR2(64);
update eperson set language = 'en';
-- totally unused column
alter table bundle drop column mets_bitstream_id;
-------------------------------------------------------------------------------
-- Necessary for Configurable Submission functionality:
-- Modification to workspaceitem table to support keeping track
-- of the last page reached within a step in the Configurable Submission Process
-------------------------------------------------------------------------------
ALTER TABLE workspaceitem ADD page_reached INTEGER;
-------------------------------------------------------------------------
-- Increase the mimetype field size to support larger types, such as the
-- new Word 2007 mimetypes.
-------------------------------------------------------------------------
ALTER TABLE BitstreamFormatRegistry MODIFY (mimetype VARCHAR(256));
-------------------------------------------------------------------------
-- Tables to manage cache of item counts for communities and collections
-------------------------------------------------------------------------
CREATE TABLE collection_item_count (
collection_id INTEGER PRIMARY KEY REFERENCES collection(collection_id),
count INTEGER
);
CREATE TABLE community_item_count (
community_id INTEGER PRIMARY KEY REFERENCES community(community_id),
count INTEGER
);
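-- Editor's sketch (assumption, not part of the original migration): the cached
-- counts could be (re)filled from the Collection2Item mapping with a MERGE;
-- community_item_count can be refreshed the same way from Communities2Item.
MERGE INTO collection_item_count cic
USING (
  SELECT collection_id, COUNT(item_id) AS cnt
  FROM Collection2Item
  GROUP BY collection_id
) src
ON (cic.collection_id = src.collection_id)
WHEN MATCHED THEN UPDATE SET cic.count = src.cnt
WHEN NOT MATCHED THEN INSERT (collection_id, count)
  VALUES (src.collection_id, src.cnt);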
------------------------------------------------------------------
-- Remove sequences and tables of the old browse system
------------------------------------------------------------------
DROP SEQUENCE itemsbyauthor_seq;
DROP SEQUENCE itemsbytitle_seq;
DROP SEQUENCE itemsbydate_seq;
DROP SEQUENCE itemsbydateaccessioned_seq;
DROP SEQUENCE itemsbysubject_seq;
DROP TABLE ItemsByAuthor CASCADE CONSTRAINTS;
DROP TABLE ItemsByTitle CASCADE CONSTRAINTS;
DROP TABLE ItemsByDate CASCADE CONSTRAINTS;
DROP TABLE ItemsByDateAccessioned CASCADE CONSTRAINTS;
DROP TABLE ItemsBySubject CASCADE CONSTRAINTS;
DROP TABLE History CASCADE CONSTRAINTS;
DROP TABLE HistoryState CASCADE CONSTRAINTS;
----------------------------------------------------------------
-- Add indexes for foreign key columns
----------------------------------------------------------------
CREATE INDEX fe_bitstream_fk_idx ON FileExtension(bitstream_format_id);
CREATE INDEX bit_bitstream_fk_idx ON Bitstream(bitstream_format_id);
CREATE INDEX g2g_parent_fk_idx ON Group2Group(parent_id);
CREATE INDEX g2g_child_fk_idx ON Group2Group(child_id);
-- CREATE INDEX g2gc_parent_fk_idx ON Group2Group(parent_id);
-- CREATE INDEX g2gc_child_fk_idx ON Group2Group(child_id);
CREATE INDEX item_submitter_fk_idx ON Item(submitter_id);
CREATE INDEX bundle_primary_fk_idx ON Bundle(primary_bitstream_id);
CREATE INDEX item2bundle_bundle_fk_idx ON Item2Bundle(bundle_id);
CREATE INDEX bundle2bits_bitstream_fk_idx ON Bundle2Bitstream(bitstream_id);
CREATE INDEX metadatavalue_field_fk_idx ON MetadataValue(metadata_field_id);
CREATE INDEX community_logo_fk_idx ON Community(logo_bitstream_id);
CREATE INDEX collection_logo_fk_idx ON Collection(logo_bitstream_id);
CREATE INDEX collection_template_fk_idx ON Collection(template_item_id);
CREATE INDEX collection_workflow1_fk_idx ON Collection(workflow_step_1);
CREATE INDEX collection_workflow2_fk_idx ON Collection(workflow_step_2);
CREATE INDEX collection_workflow3_fk_idx ON Collection(workflow_step_3);
CREATE INDEX collection_submitter_fk_idx ON Collection(submitter);
CREATE INDEX collection_admin_fk_idx ON Collection(admin);
CREATE INDEX com2com_parent_fk_idx ON Community2Community(parent_comm_id);
CREATE INDEX com2com_child_fk_idx ON Community2Community(child_comm_id);
CREATE INDEX rp_eperson_fk_idx ON ResourcePolicy(eperson_id);
CREATE INDEX rp_epersongroup_fk_idx ON ResourcePolicy(epersongroup_id);
CREATE INDEX epg2ep_eperson_fk_idx ON EPersonGroup2EPerson(eperson_id);
CREATE INDEX workspace_item_fk_idx ON WorkspaceItem(item_id);
CREATE INDEX workspace_coll_fk_idx ON WorkspaceItem(collection_id);
-- CREATE INDEX workflow_item_fk_idx ON WorkflowItem(item_id);
CREATE INDEX workflow_coll_fk_idx ON WorkflowItem(collection_id);
CREATE INDEX workflow_owner_fk_idx ON WorkflowItem(owner);
CREATE INDEX tasklist_eperson_fk_idx ON TasklistItem(eperson_id);
CREATE INDEX tasklist_workflow_fk_idx ON TasklistItem(workflow_id);
CREATE INDEX subs_eperson_fk_idx ON Subscription(eperson_id);
CREATE INDEX subs_collection_fk_idx ON Subscription(collection_id);
CREATE INDEX epg2wi_group_fk_idx ON epersongroup2workspaceitem(eperson_group_id);
CREATE INDEX epg2wi_workspace_fk_idx ON epersongroup2workspaceitem(workspace_item_id);
CREATE INDEX Comm2Item_community_fk_idx ON Communities2Item( community_id );
CREATE INDEX mrc_result_fk_idx ON most_recent_checksum( result );
CREATE INDEX ch_result_fk_idx ON checksum_history( result );

View File

@@ -1,93 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-- ===============================================================
-- WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
--
-- DO NOT MANUALLY RUN THIS DATABASE MIGRATION. IT WILL BE EXECUTED
-- AUTOMATICALLY (IF NEEDED) BY "FLYWAY" WHEN YOU STARTUP DSPACE.
-- http://flywaydb.org/
-- ===============================================================
------------------------------------------------------------------
-- New Column for Community Admin - Delegated Admin patch (DS-228)
------------------------------------------------------------------
ALTER TABLE community ADD admin INTEGER REFERENCES epersongroup ( eperson_group_id );
CREATE INDEX community_admin_fk_idx ON Community(admin);
-------------------------------------------------------------------------
-- DS-236 schema changes for Authority Control of Metadata Values
-------------------------------------------------------------------------
ALTER TABLE MetadataValue
ADD ( authority VARCHAR(100),
confidence INTEGER DEFAULT -1);
--------------------------------------------------------------------------
-- DS-295 CC License being assigned incorrect Mime Type during submission.
--------------------------------------------------------------------------
UPDATE bitstream SET bitstream_format_id =
(SELECT bitstream_format_id FROM bitstreamformatregistry WHERE short_description = 'CC License')
WHERE name = 'license_text' AND source = 'org.dspace.license.CreativeCommons';
UPDATE bitstream SET bitstream_format_id =
(SELECT bitstream_format_id FROM bitstreamformatregistry WHERE short_description = 'RDF XML')
WHERE name = 'license_rdf' AND source = 'org.dspace.license.CreativeCommons';
-------------------------------------------------------------------------
-- DS-260 Cleanup of Owning collection column for template item created
-- with the JSPUI after the collection creation
-------------------------------------------------------------------------
UPDATE item SET owning_collection = null WHERE item_id IN
(SELECT template_item_id FROM collection WHERE template_item_id IS NOT null);
-- Recreate constraints with a known name and the DEFERRABLE option.
-- (The previous version of these constraints is dropped by org.dspace.storage.rdbms.migration.V1_5_9__Drop_constraint_for_DSpace_1_6_schema)
ALTER TABLE community2collection ADD CONSTRAINT comm2coll_collection_fk FOREIGN KEY (collection_id) REFERENCES collection DEFERRABLE;
ALTER TABLE community2community ADD CONSTRAINT com2com_child_fk FOREIGN KEY (child_comm_id) REFERENCES community DEFERRABLE;
ALTER TABLE collection2item ADD CONSTRAINT coll2item_item_fk FOREIGN KEY (item_id) REFERENCES item DEFERRABLE;
------------------------------------------------------------------
-- New tables /sequences for the harvester functionality (DS-289)
------------------------------------------------------------------
CREATE SEQUENCE harvested_collection_seq;
CREATE SEQUENCE harvested_item_seq;
-------------------------------------------------------
-- Create the harvest settings table
-------------------------------------------------------
-- Values used by the OAIHarvester to harvest a collection
-- HarvestInstance is the DAO class for this table
CREATE TABLE harvested_collection
(
collection_id INTEGER REFERENCES collection(collection_id) ON DELETE CASCADE,
harvest_type INTEGER,
oai_source VARCHAR(256),
oai_set_id VARCHAR(256),
harvest_message VARCHAR2(512),
metadata_config_id VARCHAR(256),
harvest_status INTEGER,
harvest_start_time TIMESTAMP,
last_harvested TIMESTAMP,
id INTEGER PRIMARY KEY
);
CREATE INDEX harvested_collection_fk_idx ON harvested_collection(collection_id);
CREATE TABLE harvested_item
(
item_id INTEGER REFERENCES item(item_id) ON DELETE CASCADE,
last_harvested TIMESTAMP,
oai_id VARCHAR(64),
id INTEGER PRIMARY KEY
);
CREATE INDEX harvested_item_fk_idx ON harvested_item(item_id);

View File

@@ -1,20 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-- ===============================================================
-- WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
--
-- DO NOT MANUALLY RUN THIS DATABASE MIGRATION. IT WILL BE EXECUTED
-- AUTOMATICALLY (IF NEEDED) BY "FLYWAY" WHEN YOU STARTUP DSPACE.
-- http://flywaydb.org/
-- ===============================================================
------------------------------------------------------------------
-- Remove unused / obsolete sequence 'dctyperegistry_seq' (DS-729)
------------------------------------------------------------------
DROP SEQUENCE dctyperegistry_seq;

View File

@@ -1,23 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-- ===============================================================
-- WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
--
-- DO NOT MANUALLY RUN THIS DATABASE MIGRATION. IT WILL BE EXECUTED
-- AUTOMATICALLY (IF NEEDED) BY "FLYWAY" WHEN YOU STARTUP DSPACE.
-- http://flywaydb.org/
-- ===============================================================
-------------------------------------------
-- New column for bitstream order DS-749 --
-------------------------------------------
ALTER TABLE bundle2bitstream ADD bitstream_order INTEGER;
--Place the sequence id's in the order
UPDATE bundle2bitstream SET bitstream_order=(SELECT sequence_id FROM bitstream WHERE bitstream.bitstream_id=bundle2bitstream.bitstream_id);

View File

@@ -1,52 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-- ===============================================================
-- WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
--
-- DO NOT MANUALLY RUN THIS DATABASE MIGRATION. IT WILL BE EXECUTED
-- AUTOMATICALLY (IF NEEDED) BY "FLYWAY" WHEN YOU STARTUP DSPACE.
-- http://flywaydb.org/
-- ===============================================================
ALTER TABLE resourcepolicy
ADD (
rpname VARCHAR2(30),
rptype VARCHAR2(30),
rpdescription VARCHAR2(100)
);
ALTER TABLE item ADD discoverable NUMBER(1);
CREATE TABLE versionhistory
(
versionhistory_id INTEGER NOT NULL PRIMARY KEY
);
CREATE TABLE versionitem
(
versionitem_id INTEGER NOT NULL PRIMARY KEY,
item_id INTEGER REFERENCES Item(item_id),
version_number INTEGER,
eperson_id INTEGER REFERENCES EPerson(eperson_id),
version_date TIMESTAMP,
version_summary VARCHAR2(255),
versionhistory_id INTEGER REFERENCES VersionHistory(versionhistory_id)
);
CREATE SEQUENCE versionitem_seq;
CREATE SEQUENCE versionhistory_seq;
-------------------------------------------
-- New columns and longer hash for salted password hashing DS-861 --
-------------------------------------------
ALTER TABLE EPerson MODIFY (password VARCHAR(128));
ALTER TABLE EPerson ADD salt VARCHAR(32);
ALTER TABLE EPerson ADD digest_algorithm VARCHAR(16);


@@ -1,88 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-- ===============================================================
-- WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
--
-- DO NOT MANUALLY RUN THIS DATABASE MIGRATION. IT WILL BE EXECUTED
-- AUTOMATICALLY (IF NEEDED) BY "FLYWAY" WHEN YOU STARTUP DSPACE.
-- http://flywaydb.org/
-- ===============================================================
-------------------------------------------
-- Ensure that discoverable has a sensible default
-------------------------------------------
update item set discoverable=1 WHERE discoverable IS NULL;
-------------------------------------------
-- Add support for DOIs (table and seq.) --
-------------------------------------------
CREATE TABLE Doi
(
doi_id INTEGER PRIMARY KEY,
doi VARCHAR2(256) UNIQUE,
resource_type_id INTEGER,
resource_id INTEGER,
status INTEGER
);
CREATE SEQUENCE doi_seq;
-- index by resource id and resource type id
CREATE INDEX doi_resource_id_type_idx ON doi(resource_id, resource_type_id);
-------------------------------------------
-- Table of running web applications for 'dspace version' --
-------------------------------------------
CREATE TABLE Webapp
(
webapp_id INTEGER NOT NULL PRIMARY KEY,
AppName VARCHAR2(32),
URL VARCHAR2(1000),
Started TIMESTAMP,
isUI NUMBER(1)
);
CREATE SEQUENCE webapp_seq;
-------------------------------------------------------
-- DS-824 RequestItem table
-------------------------------------------------------
CREATE TABLE requestitem
(
requestitem_id INTEGER NOT NULL,
token varchar(48),
item_id INTEGER,
bitstream_id INTEGER,
allfiles NUMBER(1),
request_email VARCHAR2(64),
request_name VARCHAR2(64),
request_date TIMESTAMP,
accept_request NUMBER(1),
decision_date TIMESTAMP,
expires TIMESTAMP,
CONSTRAINT requestitem_pkey PRIMARY KEY (requestitem_id),
CONSTRAINT requestitem_token_key UNIQUE (token)
);
CREATE SEQUENCE requestitem_seq;
-------------------------------------------------------
-- DS-1655 Disable "Initial Questions" page in Submission UI by default
-------------------------------------------------------
update workspaceitem set multiple_titles=1, published_before=1, multiple_files=1;
update workflowitem set multiple_titles=1, published_before=1, multiple_files=1;
-------------------------------------------------------
-- DS-1811 Removing a collection fails if non-Solr DAO has been used before for item count
-------------------------------------------------------
delete from collection_item_count;
delete from community_item_count;


@@ -1,64 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-- ===============================================================
-- WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
--
-- DO NOT MANUALLY RUN THIS DATABASE MIGRATION. IT WILL BE EXECUTED
-- AUTOMATICALLY (IF NEEDED) BY "FLYWAY" WHEN YOU STARTUP DSPACE.
-- http://flywaydb.org/
-- ===============================================================
-- Special case of migration: we need to update the EPerson schema in order for our metadata queries to work,
-- but we cannot get a DB connection until our database is up to date, so we need to create our registries manually in SQL
INSERT INTO metadataschemaregistry (metadata_schema_id, namespace, short_id) SELECT metadataschemaregistry_seq.nextval, 'http://dspace.org/eperson' as namespace, 'eperson' as short_id FROM dual
WHERE NOT EXISTS (SELECT metadata_schema_id,namespace,short_id FROM metadataschemaregistry WHERE namespace = 'http://dspace.org/eperson' AND short_id = 'eperson');
-- Insert eperson.firstname
INSERT INTO metadatafieldregistry (metadata_field_id, metadata_schema_id, element)
SELECT metadatafieldregistry_seq.nextval,
(SELECT metadata_schema_id FROM metadataschemaregistry WHERE short_id='eperson'), 'firstname' FROM dual
WHERE NOT EXISTS
(SELECT metadata_field_id,element FROM metadatafieldregistry WHERE element = 'firstname' AND qualifier IS NULL AND metadata_schema_id = (SELECT metadata_schema_id FROM metadataschemaregistry WHERE short_id='eperson'));
-- Insert eperson.lastname
INSERT INTO metadatafieldregistry (metadata_field_id, metadata_schema_id, element)
SELECT metadatafieldregistry_seq.nextval,
(SELECT metadata_schema_id FROM metadataschemaregistry WHERE short_id='eperson'), 'lastname' FROM dual
WHERE NOT EXISTS
(SELECT metadata_field_id,element FROM metadatafieldregistry WHERE element = 'lastname' AND qualifier IS NULL AND metadata_schema_id = (SELECT metadata_schema_id FROM metadataschemaregistry WHERE short_id='eperson'));
-- Insert eperson.phone
INSERT INTO metadatafieldregistry (metadata_field_id, metadata_schema_id, element)
SELECT metadatafieldregistry_seq.nextval,
(SELECT metadata_schema_id FROM metadataschemaregistry WHERE short_id='eperson'), 'phone' FROM dual
WHERE NOT EXISTS
(SELECT metadata_field_id,element FROM metadatafieldregistry WHERE element = 'phone' AND qualifier IS NULL AND metadata_schema_id = (SELECT metadata_schema_id FROM metadataschemaregistry WHERE short_id='eperson'));
-- Insert eperson.language
INSERT INTO metadatafieldregistry (metadata_field_id, metadata_schema_id, element)
SELECT metadatafieldregistry_seq.nextval,
(SELECT metadata_schema_id FROM metadataschemaregistry WHERE short_id='eperson'), 'language' FROM dual
WHERE NOT EXISTS
(SELECT metadata_field_id,element FROM metadatafieldregistry WHERE element = 'language' AND qualifier IS NULL AND metadata_schema_id = (SELECT metadata_schema_id FROM metadataschemaregistry WHERE short_id='eperson'));
-- Insert into dc.provenance
INSERT INTO metadatafieldregistry (metadata_field_id, metadata_schema_id, element)
SELECT metadatafieldregistry_seq.nextval,
(SELECT metadata_schema_id FROM metadataschemaregistry WHERE short_id='dc'), 'provenance' FROM dual
WHERE NOT EXISTS
(SELECT metadata_field_id,element FROM metadatafieldregistry WHERE element = 'provenance' AND qualifier IS NULL AND metadata_schema_id = (SELECT metadata_schema_id FROM metadataschemaregistry WHERE short_id='dc'));
-- Insert into dc.rights.license
INSERT INTO metadatafieldregistry (metadata_field_id, metadata_schema_id, element, qualifier)
SELECT metadatafieldregistry_seq.nextval,
(SELECT metadata_schema_id FROM metadataschemaregistry WHERE short_id='dc'), 'rights', 'license' FROM dual
WHERE NOT EXISTS
(SELECT metadata_field_id,element,qualifier FROM metadatafieldregistry WHERE element = 'rights' AND qualifier='license' AND metadata_schema_id = (SELECT metadata_schema_id FROM metadataschemaregistry WHERE short_id='dc'));
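-- All inserts above share the same idempotent idiom: pull the next sequence
-- value FROM dual, guarded by WHERE NOT EXISTS, so re-running the script
-- cannot duplicate registry rows. A minimal sketch of the pattern, with
-- hypothetical names (my_seq, my_table, my_col), for reference only:
-- INSERT INTO my_table (id, my_col)
--   SELECT my_seq.nextval, 'value' FROM dual
--   WHERE NOT EXISTS (SELECT 1 FROM my_table WHERE my_col = 'value');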


@@ -1,20 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-- ===============================================================
-- WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
--
-- DO NOT MANUALLY RUN THIS DATABASE MIGRATION. IT WILL BE EXECUTED
-- AUTOMATICALLY (IF NEEDED) BY "FLYWAY" WHEN YOU STARTUP DSPACE.
-- http://flywaydb.org/
-- ===============================================================
------------------------------------------------------
-- DS-1945 RequestItem Helpdesk, store request message
------------------------------------------------------
ALTER TABLE requestitem ADD request_message VARCHAR2(2000);


@@ -1,333 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
-- ===============================================================
-- WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
--
-- DO NOT MANUALLY RUN THIS DATABASE MIGRATION. IT WILL BE EXECUTED
-- AUTOMATICALLY (IF NEEDED) BY "FLYWAY" WHEN YOU STARTUP DSPACE.
-- http://flywaydb.org/
-- ===============================================================
------------------------------------------------------
-- DS-1582 Metadata on all DSpace Objects
-- NOTE: This script also has a complementary Flyway Java Migration
-- which drops the "item_id" constraint on metadatavalue
-- org.dspace.storage.rdbms.migration.V5_0_2014_09_25__DS_1582_Metadata_For_All_Objects_drop_constraint
------------------------------------------------------
alter table metadatavalue rename column item_id to resource_id;
alter table metadatavalue MODIFY(resource_id not null);
alter table metadatavalue add resource_type_id integer;
UPDATE metadatavalue SET resource_type_id = 2;
alter table metadatavalue MODIFY(resource_type_id not null);
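-- For reference, the resource_type_id constants written below follow
-- org.dspace.core.Constants, as used throughout this script:
-- 0 = bitstream, 1 = bundle, 2 = item, 3 = collection, 4 = community,
-- 6 = eperson group, 7 = eperson.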
-- ---------
-- community
-- ---------
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
community_id AS resource_id,
4 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'description' and qualifier is null) AS metadata_field_id,
introductory_text AS text_value,
null AS text_lang,
0 AS place
FROM community where not introductory_text is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
community_id AS resource_id,
4 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'description' and qualifier = 'abstract') AS metadata_field_id,
short_description AS text_value,
null AS text_lang,
0 AS place
FROM community where not short_description is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
community_id AS resource_id,
4 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'description' and qualifier = 'tableofcontents') AS metadata_field_id,
side_bar_text AS text_value,
null AS text_lang,
0 AS place
FROM community where not side_bar_text is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
community_id AS resource_id,
4 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'rights' and qualifier is null) AS metadata_field_id,
copyright_text AS text_value,
null AS text_lang,
0 AS place
FROM community where not copyright_text is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
community_id AS resource_id,
4 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'title' and qualifier is null) AS metadata_field_id,
name AS text_value,
null AS text_lang,
0 AS place
FROM community where not name is null;
alter table community drop (introductory_text, short_description, side_bar_text, copyright_text, name);
-- ----------
-- collection
-- ----------
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
collection_id AS resource_id,
3 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'description' and qualifier is null) AS metadata_field_id,
introductory_text AS text_value,
null AS text_lang,
0 AS place
FROM collection where not introductory_text is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
collection_id AS resource_id,
3 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'description' and qualifier = 'abstract') AS metadata_field_id,
short_description AS text_value,
null AS text_lang,
0 AS place
FROM collection where not short_description is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
collection_id AS resource_id,
3 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'description' and qualifier = 'tableofcontents') AS metadata_field_id,
side_bar_text AS text_value,
null AS text_lang,
0 AS place
FROM collection where not side_bar_text is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
collection_id AS resource_id,
3 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'rights' and qualifier is null) AS metadata_field_id,
copyright_text AS text_value,
null AS text_lang,
0 AS place
FROM collection where not copyright_text is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
collection_id AS resource_id,
3 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'title' and qualifier is null) AS metadata_field_id,
name AS text_value,
null AS text_lang,
0 AS place
FROM collection where not name is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
collection_id AS resource_id,
3 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'provenance' and qualifier is null) AS metadata_field_id,
provenance_description AS text_value,
null AS text_lang,
0 AS place
FROM collection where not provenance_description is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
collection_id AS resource_id,
3 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'rights' and qualifier = 'license') AS metadata_field_id,
license AS text_value,
null AS text_lang,
0 AS place
FROM collection where not license is null;
alter table collection drop (introductory_text, short_description, copyright_text, side_bar_text, name, license, provenance_description);
-- ---------
-- bundle
-- ---------
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
bundle_id AS resource_id,
1 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'title' and qualifier is null) AS metadata_field_id,
name AS text_value,
null AS text_lang,
0 AS place
FROM bundle where not name is null;
alter table bundle drop column name;
-- ---------
-- bitstream
-- ---------
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
bitstream_id AS resource_id,
0 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'title' and qualifier is null) AS metadata_field_id,
name AS text_value,
null AS text_lang,
0 AS place
FROM bitstream where not name is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
bitstream_id AS resource_id,
0 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'description' and qualifier is null) AS metadata_field_id,
description AS text_value,
null AS text_lang,
0 AS place
FROM bitstream where not description is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
bitstream_id AS resource_id,
0 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'format' and qualifier is null) AS metadata_field_id,
user_format_description AS text_value,
null AS text_lang,
0 AS place
FROM bitstream where not user_format_description is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
bitstream_id AS resource_id,
0 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'source' and qualifier is null) AS metadata_field_id,
source AS text_value,
null AS text_lang,
0 AS place
FROM bitstream where not source is null;
alter table bitstream drop (name, description, user_format_description, source);
-- ---------
-- epersongroup
-- ---------
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
eperson_group_id AS resource_id,
6 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='dc') and element = 'title' and qualifier is null) AS metadata_field_id,
name AS text_value,
null AS text_lang,
0 AS place
FROM epersongroup where not name is null;
alter table epersongroup drop column name;
-- ---------
-- eperson
-- ---------
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
eperson_id AS resource_id,
7 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='eperson') and element = 'firstname' and qualifier is null) AS metadata_field_id,
firstname AS text_value,
null AS text_lang,
0 AS place
FROM eperson where not firstname is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
eperson_id AS resource_id,
7 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='eperson') and element = 'lastname' and qualifier is null) AS metadata_field_id,
lastname AS text_value,
null AS text_lang,
0 AS place
FROM eperson where not lastname is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
eperson_id AS resource_id,
7 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='eperson') and element = 'phone' and qualifier is null) AS metadata_field_id,
phone AS text_value,
null AS text_lang,
0 AS place
FROM eperson where not phone is null;
INSERT INTO metadatavalue (metadata_value_id, resource_id, resource_type_id, metadata_field_id, text_value, text_lang, place)
SELECT
metadatavalue_seq.nextval as metadata_value_id,
eperson_id AS resource_id,
7 AS resource_type_id,
(select metadata_field_id from metadatafieldregistry where metadata_schema_id=(select metadata_schema_id from metadataschemaregistry where short_id='eperson') and element = 'language' and qualifier is null) AS metadata_field_id,
language AS text_value,
null AS text_lang,
0 AS place
FROM eperson where not language is null;
alter table eperson drop (firstname, lastname, phone, language);
-- ---------
-- dcvalue view
-- ---------
drop view dcvalue;
CREATE VIEW dcvalue AS
SELECT MetadataValue.metadata_value_id AS "dc_value_id", MetadataValue.resource_id,
MetadataValue.metadata_field_id AS "dc_type_id", MetadataValue.text_value,
MetadataValue.text_lang, MetadataValue.place
FROM MetadataValue, MetadataFieldRegistry
WHERE MetadataValue.metadata_field_id = MetadataFieldRegistry.metadata_field_id
AND MetadataFieldRegistry.metadata_schema_id = 1 AND MetadataValue.resource_type_id = 2;


@@ -1,24 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
------------------------------------------------------
-- DS-3097 introduced new action id for WITHDRAWN_READ
------------------------------------------------------
UPDATE resourcepolicy SET action_id = 12 where action_id = 0 and resource_type_id = 0 and resource_id in (
SELECT bundle2bitstream.bitstream_id FROM bundle2bitstream
LEFT JOIN item2bundle ON bundle2bitstream.bundle_id = item2bundle.bundle_id
LEFT JOIN item ON item2bundle.item_id = item.item_id
WHERE item.withdrawn = 1
);
UPDATE resourcepolicy SET action_id = 12 where action_id = 0 and resource_type_id = 1 and resource_id in (
SELECT item2bundle.bundle_id FROM item2bundle
LEFT JOIN item ON item2bundle.item_id = item.item_id
WHERE item.withdrawn = 1
);
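-- Illustrative follow-up check, not part of the original migration: after the
-- two updates, no bundle of a withdrawn item should still carry a plain READ
-- policy (action_id = 0). The join mirrors the one above; expected result: 0.
-- SELECT COUNT(*)
--   FROM resourcepolicy
--  WHERE action_id = 0 AND resource_type_id = 1
--    AND resource_id IN (SELECT item2bundle.bundle_id
--                          FROM item2bundle
--                          JOIN item ON item2bundle.item_id = item.item_id
--                         WHERE item.withdrawn = 1);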


@@ -1,23 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
------------------------------------------------------
-- DS-3563 Missing database index on metadatavalue.resource_type_id
------------------------------------------------------
-- Create an index on the metadata value resource_type_id column so that it can be searched efficiently.
declare
index_not_exists EXCEPTION;
PRAGMA EXCEPTION_INIT(index_not_exists, -1418);
begin
execute immediate 'DROP INDEX metadatavalue_type_id_idx';
exception
when index_not_exists then null;
end;
/
CREATE INDEX metadatavalue_type_id_idx ON metadatavalue (resource_type_id);
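-- The block above is the usual Oracle "drop if exists" idiom: ORA-01418
-- ("specified index does not exist") is bound to a named exception and
-- swallowed, which keeps the migration re-runnable. The same sketch works
-- for any index; my_idx below is a hypothetical name:
-- declare
--   index_not_exists EXCEPTION;
--   PRAGMA EXCEPTION_INIT(index_not_exists, -1418);
-- begin
--   execute immediate 'DROP INDEX my_idx';
-- exception
--   when index_not_exists then null;
-- end;
-- /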


@@ -1,469 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
------------------------------------------------------
-- DS-2701 Service based API / Hibernate integration
------------------------------------------------------
DROP VIEW community2item;
CREATE TABLE dspaceobject
(
uuid RAW(16) NOT NULL PRIMARY KEY
);
CREATE TABLE site
(
uuid RAW(16) NOT NULL PRIMARY KEY REFERENCES dspaceobject(uuid)
);
ALTER TABLE eperson ADD uuid RAW(16) DEFAULT SYS_GUID();
INSERT INTO dspaceobject (uuid) SELECT uuid FROM eperson;
ALTER TABLE eperson ADD FOREIGN KEY (uuid) REFERENCES dspaceobject;
ALTER TABLE eperson MODIFY uuid NOT NULL;
ALTER TABLE eperson ADD CONSTRAINT eperson_id_unique PRIMARY KEY (uuid);
UPDATE eperson SET require_certificate = '0' WHERE require_certificate IS NULL;
UPDATE eperson SET self_registered = '0' WHERE self_registered IS NULL;
UPDATE metadatavalue SET text_value='Administrator'
WHERE resource_type_id=6 AND resource_id=1;
UPDATE metadatavalue SET text_value='Anonymous'
WHERE resource_type_id=6 AND resource_id=0;
ALTER TABLE epersongroup ADD uuid RAW(16) DEFAULT SYS_GUID();
INSERT INTO dspaceobject (uuid) SELECT uuid FROM epersongroup;
ALTER TABLE epersongroup ADD FOREIGN KEY (uuid) REFERENCES dspaceobject;
ALTER TABLE epersongroup MODIFY uuid NOT NULL;
ALTER TABLE epersongroup ADD CONSTRAINT epersongroup_id_unique PRIMARY KEY (uuid);
ALTER TABLE item ADD uuid RAW(16) DEFAULT SYS_GUID();
INSERT INTO dspaceobject (uuid) SELECT uuid FROM item;
ALTER TABLE item ADD FOREIGN KEY (uuid) REFERENCES dspaceobject;
ALTER TABLE item MODIFY uuid NOT NULL;
ALTER TABLE item ADD CONSTRAINT item_id_unique PRIMARY KEY (uuid);
ALTER TABLE community ADD uuid RAW(16) DEFAULT SYS_GUID();
INSERT INTO dspaceobject (uuid) SELECT uuid FROM community;
ALTER TABLE community ADD FOREIGN KEY (uuid) REFERENCES dspaceobject;
ALTER TABLE community MODIFY uuid NOT NULL;
ALTER TABLE community ADD CONSTRAINT community_id_unique PRIMARY KEY (uuid);
ALTER TABLE collection ADD uuid RAW(16) DEFAULT SYS_GUID();
INSERT INTO dspaceobject (uuid) SELECT uuid FROM collection;
ALTER TABLE collection ADD FOREIGN KEY (uuid) REFERENCES dspaceobject;
ALTER TABLE collection MODIFY uuid NOT NULL;
ALTER TABLE collection ADD CONSTRAINT collection_id_unique PRIMARY KEY (uuid);
ALTER TABLE bundle ADD uuid RAW(16) DEFAULT SYS_GUID();
INSERT INTO dspaceobject (uuid) SELECT uuid FROM bundle;
ALTER TABLE bundle ADD FOREIGN KEY (uuid) REFERENCES dspaceobject;
ALTER TABLE bundle MODIFY uuid NOT NULL;
ALTER TABLE bundle ADD CONSTRAINT bundle_id_unique PRIMARY KEY (uuid);
ALTER TABLE bitstream ADD uuid RAW(16) DEFAULT SYS_GUID();
INSERT INTO dspaceobject (uuid) SELECT uuid FROM bitstream;
ALTER TABLE bitstream ADD FOREIGN KEY (uuid) REFERENCES dspaceobject;
ALTER TABLE bitstream MODIFY uuid NOT NULL;
ALTER TABLE bitstream ADD CONSTRAINT bitstream_id_unique PRIMARY KEY (uuid);
UPDATE bitstream SET sequence_id = -1 WHERE sequence_id IS NULL;
UPDATE bitstream SET size_bytes = -1 WHERE size_bytes IS NULL;
UPDATE bitstream SET deleted = '0' WHERE deleted IS NULL;
UPDATE bitstream SET store_number = -1 WHERE store_number IS NULL;
-- Migrate EPersonGroup2EPerson table
ALTER TABLE EPersonGroup2EPerson RENAME COLUMN eperson_group_id to eperson_group_legacy_id;
ALTER TABLE EPersonGroup2EPerson RENAME COLUMN eperson_id to eperson_legacy_id;
ALTER TABLE EPersonGroup2EPerson ADD eperson_group_id RAW(16) REFERENCES EpersonGroup(uuid);
ALTER TABLE EPersonGroup2EPerson ADD eperson_id RAW(16) REFERENCES Eperson(uuid);
CREATE INDEX EpersonGroup2Eperson_group on EpersonGroup2Eperson(eperson_group_id);
CREATE INDEX EpersonGroup2Eperson_person on EpersonGroup2Eperson(eperson_id);
UPDATE EPersonGroup2EPerson SET eperson_group_id = (SELECT EPersonGroup.uuid FROM EpersonGroup WHERE EPersonGroup2EPerson.eperson_group_legacy_id = EPersonGroup.eperson_group_id);
UPDATE EPersonGroup2EPerson SET eperson_id = (SELECT eperson.uuid FROM eperson WHERE EPersonGroup2EPerson.eperson_legacy_id = eperson.eperson_id);
ALTER TABLE EPersonGroup2EPerson MODIFY eperson_group_id NOT NULL;
ALTER TABLE EPersonGroup2EPerson MODIFY eperson_id NOT NULL;
ALTER TABLE EPersonGroup2EPerson DROP COLUMN eperson_group_legacy_id;
ALTER TABLE EPersonGroup2EPerson DROP COLUMN eperson_legacy_id;
ALTER TABLE epersongroup2eperson DROP COLUMN id;
ALTER TABLE EPersonGroup2EPerson add CONSTRAINT EPersonGroup2EPerson_unique primary key (eperson_group_id,eperson_id);
-- Migrate GROUP2GROUP table
ALTER TABLE Group2Group RENAME COLUMN parent_id to parent_legacy_id;
ALTER TABLE Group2Group RENAME COLUMN child_id to child_legacy_id;
ALTER TABLE Group2Group ADD parent_id RAW(16) REFERENCES EpersonGroup(uuid);
ALTER TABLE Group2Group ADD child_id RAW(16) REFERENCES EpersonGroup(uuid);
CREATE INDEX Group2Group_parent on Group2Group(parent_id);
CREATE INDEX Group2Group_child on Group2Group(child_id);
UPDATE Group2Group SET parent_id = (SELECT EPersonGroup.uuid FROM EpersonGroup WHERE Group2Group.parent_legacy_id = EPersonGroup.eperson_group_id);
UPDATE Group2Group SET child_id = (SELECT EpersonGroup.uuid FROM EpersonGroup WHERE Group2Group.child_legacy_id = EpersonGroup.eperson_group_id);
ALTER TABLE Group2Group MODIFY parent_id NOT NULL;
ALTER TABLE Group2Group MODIFY child_id NOT NULL;
ALTER TABLE Group2Group DROP COLUMN parent_legacy_id;
ALTER TABLE Group2Group DROP COLUMN child_legacy_id;
ALTER TABLE Group2Group DROP COLUMN id;
ALTER TABLE Group2Group add CONSTRAINT Group2Group_unique primary key (parent_id,child_id);
-- Migrate collection2item
ALTER TABLE Collection2Item RENAME COLUMN collection_id to collection_legacy_id;
ALTER TABLE Collection2Item RENAME COLUMN item_id to item_legacy_id;
ALTER TABLE Collection2Item ADD collection_id RAW(16) REFERENCES Collection(uuid);
ALTER TABLE Collection2Item ADD item_id RAW(16) REFERENCES Item(uuid);
CREATE INDEX Collecion2Item_collection on Collection2Item(collection_id);
CREATE INDEX Collecion2Item_item on Collection2Item(item_id);
UPDATE Collection2Item SET collection_id = (SELECT Collection.uuid FROM Collection WHERE Collection2Item.collection_legacy_id = Collection.collection_id);
UPDATE Collection2Item SET item_id = (SELECT Item.uuid FROM Item WHERE Collection2Item.item_legacy_id = Item.item_id);
ALTER TABLE Collection2Item MODIFY collection_id NOT NULL;
ALTER TABLE Collection2Item MODIFY item_id NOT NULL;
ALTER TABLE Collection2Item DROP COLUMN collection_legacy_id;
ALTER TABLE Collection2Item DROP COLUMN item_legacy_id;
ALTER TABLE Collection2Item DROP COLUMN id;
-- Delete duplicate (collection_id, item_id) rows, keeping one row per pair; without this the primary key creation below would fail
DELETE FROM collection2item WHERE rowid NOT IN (SELECT MIN(rowid) FROM collection2item GROUP BY collection_id,item_id);
ALTER TABLE Collection2Item add CONSTRAINT collection2item_unique primary key (collection_id,item_id);
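-- Illustrative preview, not part of the original migration: the duplicate
-- pairs removed by the DELETE above could be listed beforehand with:
-- SELECT collection_id, item_id, COUNT(*)
--   FROM collection2item
--  GROUP BY collection_id, item_id
-- HAVING COUNT(*) > 1;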
-- Migrate Community2Community
ALTER TABLE Community2Community RENAME COLUMN parent_comm_id to parent_legacy_id;
ALTER TABLE Community2Community RENAME COLUMN child_comm_id to child_legacy_id;
ALTER TABLE Community2Community ADD parent_comm_id RAW(16) REFERENCES Community(uuid);
ALTER TABLE Community2Community ADD child_comm_id RAW(16) REFERENCES Community(uuid);
CREATE INDEX Community2Community_parent on Community2Community(parent_comm_id);
CREATE INDEX Community2Community_child on Community2Community(child_comm_id);
UPDATE Community2Community SET parent_comm_id = (SELECT Community.uuid FROM Community WHERE Community2Community.parent_legacy_id = Community.community_id);
UPDATE Community2Community SET child_comm_id = (SELECT Community.uuid FROM Community WHERE Community2Community.child_legacy_id = Community.community_id);
ALTER TABLE Community2Community MODIFY parent_comm_id NOT NULL;
ALTER TABLE Community2Community MODIFY child_comm_id NOT NULL;
ALTER TABLE Community2Community DROP COLUMN parent_legacy_id;
ALTER TABLE Community2Community DROP COLUMN child_legacy_id;
ALTER TABLE Community2Community DROP COLUMN id;
ALTER TABLE Community2Community add CONSTRAINT Community2Community_unique primary key (parent_comm_id,child_comm_id);
-- Migrate community2collection
ALTER TABLE community2collection RENAME COLUMN collection_id to collection_legacy_id;
ALTER TABLE community2collection RENAME COLUMN community_id to community_legacy_id;
ALTER TABLE community2collection ADD collection_id RAW(16) REFERENCES Collection(uuid);
ALTER TABLE community2collection ADD community_id RAW(16) REFERENCES Community(uuid);
CREATE INDEX community2collection_collectio on community2collection(collection_id);
CREATE INDEX community2collection_community on community2collection(community_id);
UPDATE community2collection SET collection_id = (SELECT Collection.uuid FROM Collection WHERE community2collection.collection_legacy_id = Collection.collection_id);
UPDATE community2collection SET community_id = (SELECT Community.uuid FROM Community WHERE community2collection.community_legacy_id = Community.community_id);
ALTER TABLE community2collection MODIFY collection_id NOT NULL;
ALTER TABLE community2collection MODIFY community_id NOT NULL;
ALTER TABLE community2collection DROP COLUMN collection_legacy_id;
ALTER TABLE community2collection DROP COLUMN community_legacy_id;
ALTER TABLE community2collection DROP COLUMN id;
ALTER TABLE community2collection add CONSTRAINT community2collection_unique primary key (collection_id,community_id);
-- Migrate Group2GroupCache table
ALTER TABLE Group2GroupCache RENAME COLUMN parent_id to parent_legacy_id;
ALTER TABLE Group2GroupCache RENAME COLUMN child_id to child_legacy_id;
ALTER TABLE Group2GroupCache ADD parent_id RAW(16) REFERENCES EpersonGroup(uuid);
ALTER TABLE Group2GroupCache ADD child_id RAW(16) REFERENCES EpersonGroup(uuid);
CREATE INDEX Group2GroupCache_parent on Group2GroupCache(parent_id);
CREATE INDEX Group2GroupCache_child on Group2GroupCache(child_id);
UPDATE Group2GroupCache SET parent_id = (SELECT EPersonGroup.uuid FROM EpersonGroup WHERE Group2GroupCache.parent_legacy_id = EPersonGroup.eperson_group_id);
UPDATE Group2GroupCache SET child_id = (SELECT EpersonGroup.uuid FROM EpersonGroup WHERE Group2GroupCache.child_legacy_id = EpersonGroup.eperson_group_id);
ALTER TABLE Group2GroupCache MODIFY parent_id NOT NULL;
ALTER TABLE Group2GroupCache MODIFY child_id NOT NULL;
ALTER TABLE Group2GroupCache DROP COLUMN parent_legacy_id;
ALTER TABLE Group2GroupCache DROP COLUMN child_legacy_id;
ALTER TABLE Group2GroupCache DROP COLUMN id;
ALTER TABLE Group2GroupCache add CONSTRAINT Group2GroupCache_unique primary key (parent_id,child_id);
-- Migrate Item2Bundle
ALTER TABLE item2bundle RENAME COLUMN bundle_id to bundle_legacy_id;
ALTER TABLE item2bundle RENAME COLUMN item_id to item_legacy_id;
ALTER TABLE item2bundle ADD bundle_id RAW(16) REFERENCES Bundle(uuid);
ALTER TABLE item2bundle ADD item_id RAW(16) REFERENCES Item(uuid);
CREATE INDEX item2bundle_bundle on item2bundle(bundle_id);
CREATE INDEX item2bundle_item on item2bundle(item_id);
UPDATE item2bundle SET bundle_id = (SELECT Bundle.uuid FROM Bundle WHERE item2bundle.bundle_legacy_id = Bundle.bundle_id);
UPDATE item2bundle SET item_id = (SELECT Item.uuid FROM Item WHERE item2bundle.item_legacy_id = Item.item_id);
ALTER TABLE item2bundle MODIFY bundle_id NOT NULL;
ALTER TABLE item2bundle MODIFY item_id NOT NULL;
ALTER TABLE item2bundle DROP COLUMN bundle_legacy_id;
ALTER TABLE item2bundle DROP COLUMN item_legacy_id;
ALTER TABLE item2bundle DROP COLUMN id;
ALTER TABLE item2bundle add CONSTRAINT item2bundle_unique primary key (bundle_id,item_id);
-- Migrate Bundle2Bitstream
ALTER TABLE bundle2bitstream RENAME COLUMN bundle_id to bundle_legacy_id;
ALTER TABLE bundle2bitstream RENAME COLUMN bitstream_id to bitstream_legacy_id;
ALTER TABLE bundle2bitstream ADD bundle_id RAW(16) REFERENCES Bundle(uuid);
ALTER TABLE bundle2bitstream ADD bitstream_id RAW(16) REFERENCES Bitstream(uuid);
CREATE INDEX bundle2bitstream_bundle on bundle2bitstream(bundle_id);
CREATE INDEX bundle2bitstream_bitstream on bundle2bitstream(bitstream_id);
UPDATE bundle2bitstream SET bundle_id = (SELECT bundle.uuid FROM bundle WHERE bundle2bitstream.bundle_legacy_id = bundle.bundle_id);
UPDATE bundle2bitstream SET bitstream_id = (SELECT bitstream.uuid FROM bitstream WHERE bundle2bitstream.bitstream_legacy_id = bitstream.bitstream_id);
ALTER TABLE bundle2bitstream RENAME COLUMN bitstream_order to bitstream_order_legacy;
ALTER TABLE bundle2bitstream ADD bitstream_order INTEGER;
MERGE INTO bundle2bitstream dst
USING ( SELECT ROWID AS r_id
, ROW_NUMBER () OVER ( PARTITION BY bundle_id
ORDER BY bitstream_order_legacy, bitstream_id
) AS new_order
FROM bundle2bitstream
) src
ON (dst.ROWID = src.r_id)
WHEN MATCHED THEN UPDATE
SET dst.bitstream_order = (src.new_order-1)
;
ALTER TABLE bundle2bitstream MODIFY bundle_id NOT NULL;
ALTER TABLE bundle2bitstream MODIFY bitstream_id NOT NULL;
ALTER TABLE bundle2bitstream DROP COLUMN bundle_legacy_id;
ALTER TABLE bundle2bitstream DROP COLUMN bitstream_legacy_id;
ALTER TABLE bundle2bitstream DROP COLUMN id;
ALTER TABLE bundle2bitstream add CONSTRAINT bundle2bitstream_unique primary key (bitstream_id,bundle_id,bitstream_order);
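-- Illustrative sanity check, not part of the original migration: the MERGE
-- above renumbers each bundle's bitstreams to a gap-free 0..n-1 sequence,
-- so this query should return no rows.
-- SELECT bundle_id
--   FROM bundle2bitstream
--  GROUP BY bundle_id
-- HAVING MAX(bitstream_order) <> COUNT(*) - 1;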
-- Migrate item
ALTER TABLE item RENAME COLUMN submitter_id to submitter_id_legacy_id;
ALTER TABLE item ADD submitter_id RAW(16) REFERENCES EPerson(uuid);
CREATE INDEX item_submitter on item(submitter_id);
UPDATE item SET submitter_id = (SELECT eperson.uuid FROM eperson WHERE item.submitter_id_legacy_id = eperson.eperson_id);
ALTER TABLE item DROP COLUMN submitter_id_legacy_id;
ALTER TABLE item RENAME COLUMN owning_collection to owning_collection_legacy;
ALTER TABLE item ADD owning_collection RAW(16) REFERENCES Collection(uuid);
CREATE INDEX item_collection on item(owning_collection);
UPDATE item SET owning_collection = (SELECT Collection.uuid FROM Collection WHERE item.owning_collection_legacy = collection.collection_id);
ALTER TABLE item DROP COLUMN owning_collection_legacy;
UPDATE item SET in_archive = '0' WHERE in_archive IS NULL;
UPDATE item SET discoverable = '0' WHERE discoverable IS NULL;
UPDATE item SET withdrawn = '0' WHERE withdrawn IS NULL;
-- Migrate bundle
ALTER TABLE bundle RENAME COLUMN primary_bitstream_id to primary_bitstream_legacy_id;
ALTER TABLE bundle ADD primary_bitstream_id RAW(16) REFERENCES Bitstream(uuid);
CREATE INDEX bundle_primary on bundle(primary_bitstream_id);
UPDATE bundle SET primary_bitstream_id = (SELECT Bitstream.uuid FROM Bitstream WHERE bundle.primary_bitstream_legacy_id = Bitstream.bitstream_id);
ALTER TABLE bundle DROP COLUMN primary_bitstream_legacy_id;
-- Migrate community references
ALTER TABLE Community RENAME COLUMN admin to admin_legacy;
ALTER TABLE Community ADD admin RAW(16) REFERENCES EPersonGroup(uuid);
CREATE INDEX Community_admin on Community(admin);
UPDATE Community SET admin = (SELECT EPersonGroup.uuid FROM EPersonGroup WHERE Community.admin_legacy = EPersonGroup.eperson_group_id);
ALTER TABLE Community DROP COLUMN admin_legacy;
ALTER TABLE Community RENAME COLUMN logo_bitstream_id to logo_bitstream_legacy_id;
ALTER TABLE Community ADD logo_bitstream_id RAW(16) REFERENCES Bitstream(uuid);
CREATE INDEX Community_bitstream on Community(logo_bitstream_id);
UPDATE Community SET logo_bitstream_id = (SELECT Bitstream.uuid FROM Bitstream WHERE Community.logo_bitstream_legacy_id = Bitstream.bitstream_id);
ALTER TABLE Community DROP COLUMN logo_bitstream_legacy_id;
--Migrate Collection references
ALTER TABLE Collection RENAME COLUMN workflow_step_1 to workflow_step_1_legacy;
ALTER TABLE Collection RENAME COLUMN workflow_step_2 to workflow_step_2_legacy;
ALTER TABLE Collection RENAME COLUMN workflow_step_3 to workflow_step_3_legacy;
ALTER TABLE Collection RENAME COLUMN submitter to submitter_legacy;
ALTER TABLE Collection RENAME COLUMN template_item_id to template_item_legacy_id;
ALTER TABLE Collection RENAME COLUMN logo_bitstream_id to logo_bitstream_legacy_id;
ALTER TABLE Collection RENAME COLUMN admin to admin_legacy;
ALTER TABLE Collection ADD workflow_step_1 RAW(16) REFERENCES EPersonGroup(uuid);
ALTER TABLE Collection ADD workflow_step_2 RAW(16) REFERENCES EPersonGroup(uuid);
ALTER TABLE Collection ADD workflow_step_3 RAW(16) REFERENCES EPersonGroup(uuid);
ALTER TABLE Collection ADD submitter RAW(16) REFERENCES EPersonGroup(uuid);
ALTER TABLE Collection ADD template_item_id RAW(16);
ALTER TABLE Collection ADD logo_bitstream_id RAW(16);
ALTER TABLE Collection ADD admin RAW(16) REFERENCES EPersonGroup(uuid);
CREATE INDEX Collection_workflow1 on Collection(workflow_step_1);
CREATE INDEX Collection_workflow2 on Collection(workflow_step_2);
CREATE INDEX Collection_workflow3 on Collection(workflow_step_3);
CREATE INDEX Collection_submitter on Collection(submitter);
CREATE INDEX Collection_template on Collection(template_item_id);
CREATE INDEX Collection_bitstream on Collection(logo_bitstream_id);
UPDATE Collection SET workflow_step_1 = (SELECT EPersonGroup.uuid FROM EPersonGroup WHERE Collection.workflow_step_1_legacy = EPersonGroup.eperson_group_id);
UPDATE Collection SET workflow_step_2 = (SELECT EPersonGroup.uuid FROM EPersonGroup WHERE Collection.workflow_step_2_legacy = EPersonGroup.eperson_group_id);
UPDATE Collection SET workflow_step_3 = (SELECT EPersonGroup.uuid FROM EPersonGroup WHERE Collection.workflow_step_3_legacy = EPersonGroup.eperson_group_id);
UPDATE Collection SET submitter = (SELECT EPersonGroup.uuid FROM EPersonGroup WHERE Collection.submitter_legacy = EPersonGroup.eperson_group_id);
UPDATE Collection SET template_item_id = (SELECT Item.uuid FROM Item WHERE Collection.template_item_legacy_id = Item.item_id);
UPDATE Collection SET logo_bitstream_id = (SELECT Bitstream.uuid FROM Bitstream WHERE Collection.logo_bitstream_legacy_id = Bitstream.bitstream_id);
UPDATE Collection SET admin = (SELECT EPersonGroup.uuid FROM EPersonGroup WHERE Collection.admin_legacy = EPersonGroup.eperson_group_id);
ALTER TABLE Collection DROP COLUMN workflow_step_1_legacy;
ALTER TABLE Collection DROP COLUMN workflow_step_2_legacy;
ALTER TABLE Collection DROP COLUMN workflow_step_3_legacy;
ALTER TABLE Collection DROP COLUMN submitter_legacy;
ALTER TABLE Collection DROP COLUMN template_item_legacy_id;
ALTER TABLE Collection DROP COLUMN logo_bitstream_legacy_id;
ALTER TABLE Collection DROP COLUMN admin_legacy;
-- Migrate resource policy references
ALTER TABLE ResourcePolicy RENAME COLUMN eperson_id to eperson_id_legacy_id;
ALTER TABLE ResourcePolicy ADD eperson_id RAW(16) REFERENCES EPerson(uuid);
CREATE INDEX resourcepolicy_person on resourcepolicy(eperson_id);
UPDATE ResourcePolicy SET eperson_id = (SELECT eperson.uuid FROM eperson WHERE ResourcePolicy.eperson_id_legacy_id = eperson.eperson_id);
ALTER TABLE ResourcePolicy DROP COLUMN eperson_id_legacy_id;
ALTER TABLE ResourcePolicy RENAME COLUMN epersongroup_id to epersongroup_id_legacy_id;
ALTER TABLE ResourcePolicy ADD epersongroup_id RAW(16) REFERENCES EPersonGroup(uuid);
CREATE INDEX resourcepolicy_group on resourcepolicy(epersongroup_id);
UPDATE ResourcePolicy SET epersongroup_id = (SELECT epersongroup.uuid FROM epersongroup WHERE ResourcePolicy.epersongroup_id_legacy_id = epersongroup.eperson_group_id);
ALTER TABLE ResourcePolicy DROP COLUMN epersongroup_id_legacy_id;
ALTER TABLE ResourcePolicy ADD dspace_object RAW(16) REFERENCES dspaceobject(uuid);
CREATE INDEX resourcepolicy_object on resourcepolicy(dspace_object);
UPDATE ResourcePolicy SET dspace_object = (SELECT eperson.uuid FROM eperson WHERE ResourcePolicy.resource_id = eperson.eperson_id AND ResourcePolicy.resource_type_id = 7) WHERE ResourcePolicy.resource_type_id = 7;
UPDATE ResourcePolicy SET dspace_object = (SELECT epersongroup.uuid FROM epersongroup WHERE ResourcePolicy.resource_id = epersongroup.eperson_group_id AND ResourcePolicy.resource_type_id = 6) WHERE ResourcePolicy.resource_type_id = 6;
UPDATE ResourcePolicy SET dspace_object = (SELECT community.uuid FROM community WHERE ResourcePolicy.resource_id = community.community_id AND ResourcePolicy.resource_type_id = 4) WHERE ResourcePolicy.resource_type_id = 4;
UPDATE ResourcePolicy SET dspace_object = (SELECT collection.uuid FROM collection WHERE ResourcePolicy.resource_id = collection.collection_id AND ResourcePolicy.resource_type_id = 3) WHERE ResourcePolicy.resource_type_id = 3;
UPDATE ResourcePolicy SET dspace_object = (SELECT item.uuid FROM item WHERE ResourcePolicy.resource_id = item.item_id AND ResourcePolicy.resource_type_id = 2) WHERE ResourcePolicy.resource_type_id = 2;
UPDATE ResourcePolicy SET dspace_object = (SELECT bundle.uuid FROM bundle WHERE ResourcePolicy.resource_id = bundle.bundle_id AND ResourcePolicy.resource_type_id = 1) WHERE ResourcePolicy.resource_type_id = 1;
UPDATE ResourcePolicy SET dspace_object = (SELECT bitstream.uuid FROM bitstream WHERE ResourcePolicy.resource_id = bitstream.bitstream_id AND ResourcePolicy.resource_type_id = 0) WHERE ResourcePolicy.resource_type_id = 0;
UPDATE resourcepolicy SET resource_type_id = -1 WHERE resource_type_id IS NULL;
UPDATE resourcepolicy SET action_id = -1 WHERE action_id IS NULL;
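-- Illustrative orphan check, not part of the original migration: every policy
-- whose resource_type_id was mapped above should now reference a dspaceobject
-- row; expected result: 0.
-- SELECT COUNT(*)
--   FROM resourcepolicy
--  WHERE dspace_object IS NULL
--    AND resource_type_id IN (0, 1, 2, 3, 4, 6, 7);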
-- Migrate Subscription
ALTER TABLE Subscription RENAME COLUMN eperson_id to eperson_legacy_id;
ALTER TABLE Subscription ADD eperson_id RAW(16) REFERENCES EPerson(uuid);
CREATE INDEX Subscription_person on Subscription(eperson_id);
UPDATE Subscription SET eperson_id = (SELECT eperson.uuid FROM eperson WHERE Subscription.eperson_legacy_id = eperson.eperson_id);
ALTER TABLE Subscription DROP COLUMN eperson_legacy_id;
ALTER TABLE Subscription RENAME COLUMN collection_id to collection_legacy_id;
ALTER TABLE Subscription ADD collection_id RAW(16) REFERENCES Collection(uuid);
CREATE INDEX Subscription_collection on Subscription(collection_id);
UPDATE Subscription SET collection_id = (SELECT collection.uuid FROM collection WHERE Subscription.collection_legacy_id = collection.collection_id);
ALTER TABLE Subscription DROP COLUMN collection_legacy_id;
-- Migrate versionitem
ALTER TABLE versionitem RENAME COLUMN eperson_id to eperson_legacy_id;
ALTER TABLE versionitem ADD eperson_id RAW(16) REFERENCES EPerson(uuid);
CREATE INDEX versionitem_person on versionitem(eperson_id);
UPDATE versionitem SET eperson_id = (SELECT eperson.uuid FROM eperson WHERE versionitem.eperson_legacy_id = eperson.eperson_id);
ALTER TABLE versionitem DROP COLUMN eperson_legacy_id;
ALTER TABLE versionitem RENAME COLUMN item_id to item_legacy_id;
ALTER TABLE versionitem ADD item_id RAW(16) REFERENCES Item(uuid);
CREATE INDEX versionitem_item on versionitem(item_id);
UPDATE versionitem SET item_id = (SELECT item.uuid FROM item WHERE versionitem.item_legacy_id = item.item_id);
ALTER TABLE versionitem DROP COLUMN item_legacy_id;
UPDATE versionitem SET version_number = -1 WHERE version_number IS NULL;
-- Migrate handle table
ALTER TABLE handle RENAME COLUMN resource_id to resource_legacy_id;
ALTER TABLE handle ADD resource_id RAW(16) REFERENCES dspaceobject(uuid);
CREATE INDEX handle_object on handle(resource_id);
UPDATE handle SET resource_id = (SELECT community.uuid FROM community WHERE handle.resource_legacy_id = community.community_id AND handle.resource_type_id = 4);
UPDATE handle SET resource_id = (SELECT collection.uuid FROM collection WHERE handle.resource_legacy_id = collection.collection_id AND handle.resource_type_id = 3);
UPDATE handle SET resource_id = (SELECT item.uuid FROM item WHERE handle.resource_legacy_id = item.item_id AND handle.resource_type_id = 2);
-- Migrate metadata value table
DROP VIEW dcvalue;
ALTER TABLE metadatavalue ADD dspace_object_id RAW(16) REFERENCES dspaceobject(uuid);
-- CREATE INDEX metadatavalue_field on metadatavalue(metadata_field_id);
CREATE INDEX metadatavalue_object on metadatavalue(dspace_object_id);
CREATE INDEX metadatavalue_field_object on metadatavalue(metadata_field_id, dspace_object_id);
UPDATE metadatavalue SET dspace_object_id = (SELECT eperson.uuid FROM eperson WHERE metadatavalue.resource_id = eperson.eperson_id AND metadatavalue.resource_type_id = 7) WHERE metadatavalue.resource_type_id= 7;
UPDATE metadatavalue SET dspace_object_id = (SELECT epersongroup.uuid FROM epersongroup WHERE metadatavalue.resource_id = epersongroup.eperson_group_id AND metadatavalue.resource_type_id = 6) WHERE metadatavalue.resource_type_id= 6;
UPDATE metadatavalue SET dspace_object_id = (SELECT community.uuid FROM community WHERE metadatavalue.resource_id = community.community_id AND metadatavalue.resource_type_id = 4) WHERE metadatavalue.resource_type_id= 4;
UPDATE metadatavalue SET dspace_object_id = (SELECT collection.uuid FROM collection WHERE metadatavalue.resource_id = collection.collection_id AND metadatavalue.resource_type_id = 3) WHERE metadatavalue.resource_type_id= 3;
UPDATE metadatavalue SET dspace_object_id = (SELECT item.uuid FROM item WHERE metadatavalue.resource_id = item.item_id AND metadatavalue.resource_type_id = 2) WHERE metadatavalue.resource_type_id= 2;
UPDATE metadatavalue SET dspace_object_id = (SELECT bundle.uuid FROM bundle WHERE metadatavalue.resource_id = bundle.bundle_id AND metadatavalue.resource_type_id = 1) WHERE metadatavalue.resource_type_id= 1;
UPDATE metadatavalue SET dspace_object_id = (SELECT bitstream.uuid FROM bitstream WHERE metadatavalue.resource_id = bitstream.bitstream_id AND metadatavalue.resource_type_id = 0) WHERE metadatavalue.resource_type_id= 0;
DROP INDEX metadatavalue_item_idx;
DROP INDEX metadatavalue_item_idx2;
ALTER TABLE metadatavalue DROP COLUMN resource_id;
ALTER TABLE metadatavalue DROP COLUMN resource_type_id;
UPDATE MetadataValue SET confidence = -1 WHERE confidence IS NULL;
UPDATE metadatavalue SET place = -1 WHERE place IS NULL;
-- Alter harvested item
ALTER TABLE harvested_item RENAME COLUMN item_id to item_legacy_id;
ALTER TABLE harvested_item ADD item_id RAW(16) REFERENCES item(uuid);
CREATE INDEX harvested_item_item on harvested_item(item_id);
UPDATE harvested_item SET item_id = (SELECT item.uuid FROM item WHERE harvested_item.item_legacy_id = item.item_id);
ALTER TABLE harvested_item DROP COLUMN item_legacy_id;
-- Alter harvested collection
ALTER TABLE harvested_collection RENAME COLUMN collection_id to collection_legacy_id;
ALTER TABLE harvested_collection ADD collection_id RAW(16) REFERENCES Collection(uuid);
CREATE INDEX harvested_collection_collectio on harvested_collection(collection_id);
UPDATE harvested_collection SET collection_id = (SELECT collection.uuid FROM collection WHERE harvested_collection.collection_legacy_id = collection.collection_id);
ALTER TABLE harvested_collection DROP COLUMN collection_legacy_id;
UPDATE harvested_collection SET harvest_type = -1 WHERE harvest_type IS NULL;
UPDATE harvested_collection SET harvest_status = -1 WHERE harvest_status IS NULL;
--Alter workspaceitem
ALTER TABLE workspaceitem RENAME COLUMN item_id to item_legacy_id;
ALTER TABLE workspaceitem ADD item_id RAW(16) REFERENCES Item(uuid);
CREATE INDEX workspaceitem_item on workspaceitem(item_id);
UPDATE workspaceitem SET item_id = (SELECT item.uuid FROM item WHERE workspaceitem.item_legacy_id = item.item_id);
ALTER TABLE workspaceitem DROP COLUMN item_legacy_id;
ALTER TABLE workspaceitem RENAME COLUMN collection_id to collection_legacy_id;
ALTER TABLE workspaceitem ADD collection_id RAW(16) REFERENCES Collection(uuid);
CREATE INDEX workspaceitem_coll on workspaceitem(collection_id);
UPDATE workspaceitem SET collection_id = (SELECT collection.uuid FROM collection WHERE workspaceitem.collection_legacy_id = collection.collection_id);
ALTER TABLE workspaceitem DROP COLUMN collection_legacy_id;
UPDATE workspaceitem SET multiple_titles = '0' WHERE multiple_titles IS NULL;
UPDATE workspaceitem SET published_before = '0' WHERE published_before IS NULL;
UPDATE workspaceitem SET multiple_files = '0' WHERE multiple_files IS NULL;
UPDATE workspaceitem SET stage_reached = -1 WHERE stage_reached IS NULL;
UPDATE workspaceitem SET page_reached = -1 WHERE page_reached IS NULL;
--Alter epersongroup2workspaceitem
ALTER TABLE epersongroup2workspaceitem RENAME COLUMN eperson_group_id to eperson_group_legacy_id;
ALTER TABLE epersongroup2workspaceitem ADD eperson_group_id RAW(16) REFERENCES epersongroup(uuid);
CREATE INDEX epersongroup2workspaceitem_gro on epersongroup2workspaceitem(eperson_group_id);
UPDATE epersongroup2workspaceitem SET eperson_group_id = (SELECT epersongroup.uuid FROM epersongroup WHERE epersongroup2workspaceitem.eperson_group_legacy_id = epersongroup.eperson_group_id);
ALTER TABLE epersongroup2workspaceitem DROP COLUMN eperson_group_legacy_id;
ALTER TABLE epersongroup2workspaceitem DROP COLUMN id;
ALTER TABLE epersongroup2workspaceitem MODIFY workspace_item_id NOT NULL;
ALTER TABLE epersongroup2workspaceitem MODIFY eperson_group_id NOT NULL;
ALTER TABLE epersongroup2workspaceitem add CONSTRAINT epersongroup2wsitem_unqiue primary key (workspace_item_id,eperson_group_id);
--Alter most_recent_checksum
ALTER TABLE most_recent_checksum RENAME COLUMN bitstream_id to bitstream_legacy_id;
ALTER TABLE most_recent_checksum ADD bitstream_id RAW(16) REFERENCES Bitstream(uuid);
CREATE INDEX most_recent_checksum_bitstream on most_recent_checksum(bitstream_id);
UPDATE most_recent_checksum SET bitstream_id = (SELECT Bitstream.uuid FROM Bitstream WHERE most_recent_checksum.bitstream_legacy_id = Bitstream.bitstream_id);
ALTER TABLE most_recent_checksum DROP COLUMN bitstream_legacy_id;
UPDATE most_recent_checksum SET to_be_processed = '0' WHERE to_be_processed IS NULL;
UPDATE most_recent_checksum SET matched_prev_checksum = '0' WHERE matched_prev_checksum IS NULL;
--Alter checksum_history
ALTER TABLE checksum_history RENAME COLUMN bitstream_id to bitstream_legacy_id;
ALTER TABLE checksum_history ADD bitstream_id RAW(16) REFERENCES Bitstream(uuid);
CREATE INDEX checksum_history_bitstream on checksum_history(bitstream_id);
UPDATE checksum_history SET bitstream_id = (SELECT Bitstream.uuid FROM Bitstream WHERE checksum_history.bitstream_legacy_id = Bitstream.bitstream_id);
ALTER TABLE checksum_history DROP COLUMN bitstream_legacy_id;
RENAME checksum_history_seq TO checksum_history_check_id_seq;
--Alter table doi
ALTER TABLE doi ADD dspace_object RAW(16) REFERENCES dspaceobject(uuid);
CREATE INDEX doi_object on doi(dspace_object);
UPDATE doi SET dspace_object = (SELECT community.uuid FROM community WHERE doi.resource_id = community.community_id AND doi.resource_type_id = 4) WHERE doi.resource_type_id = 4;
UPDATE doi SET dspace_object = (SELECT collection.uuid FROM collection WHERE doi.resource_id = collection.collection_id AND doi.resource_type_id = 3) WHERE doi.resource_type_id = 3;
UPDATE doi SET dspace_object = (SELECT item.uuid FROM item WHERE doi.resource_id = item.item_id AND doi.resource_type_id = 2) WHERE doi.resource_type_id = 2;
UPDATE doi SET dspace_object = (SELECT bundle.uuid FROM bundle WHERE doi.resource_id = bundle.bundle_id AND doi.resource_type_id = 1) WHERE doi.resource_type_id = 1;
UPDATE doi SET dspace_object = (SELECT bitstream.uuid FROM bitstream WHERE doi.resource_id = bitstream.bitstream_id AND doi.resource_type_id = 0) WHERE doi.resource_type_id = 0;
--Update table bitstreamformatregistry
UPDATE bitstreamformatregistry SET support_level = -1 WHERE support_level IS NULL;
--Update table requestitem
UPDATE requestitem SET allfiles = '0' WHERE allfiles IS NULL;
UPDATE requestitem SET accept_request = '0' WHERE accept_request IS NULL;
--Update table webapp
UPDATE webapp SET isui = -1 WHERE isui IS NULL;


@@ -1,18 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
------------------------------------------------------
-- DS-3378 Lost Oracle indexes
------------------------------------------------------
CREATE UNIQUE INDEX eperson_eperson on eperson(eperson_id);
CREATE UNIQUE INDEX epersongroup_eperson_group on epersongroup(eperson_group_id);
CREATE UNIQUE INDEX community_community on community(community_id);
CREATE UNIQUE INDEX collection_collection on collection(collection_id);
CREATE UNIQUE INDEX item_item on item(item_id);
CREATE UNIQUE INDEX bundle_bundle on bundle(bundle_id);
CREATE UNIQUE INDEX bitstream_bitstream on bitstream(bitstream_id);


@@ -1,25 +0,0 @@
--
-- The contents of this file are subject to the license and copyright
-- detailed in the LICENSE and NOTICE files at the root of the source
-- tree and available online at
--
-- http://www.dspace.org/license/
--
------------------------------------------------------
-- DS-3024 Invent "permanent" groups
------------------------------------------------------
ALTER TABLE epersongroup
ADD (permanent NUMBER(1) DEFAULT 0);
UPDATE epersongroup SET permanent = 1
WHERE uuid IN (
SELECT dspace_object_id
FROM metadataschemaregistry s
JOIN metadatafieldregistry f USING (metadata_schema_id)
JOIN metadatavalue v USING (metadata_field_id)
WHERE s.short_id = 'dc'
AND f.element = 'title'
AND f.qualifier IS NULL
AND (dbms_lob.compare(v.text_value, 'Administrator') = 0 OR dbms_lob.compare(v.text_value, 'Anonymous') = 0)
);
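-- Illustrative follow-up, not part of the original migration: only the two
-- built-in groups (Administrator, Anonymous) match the predicate above, so
-- this count is expected to be 2.
-- SELECT COUNT(*) FROM epersongroup WHERE permanent = 1;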

Some files were not shown because too many files have changed in this diff.