Merge pull request #1076 from tdonohue/DS-2701-service-api-unit-tests

DS-2701: Fixes for Unit Tests
Tim Donohue
2015-09-25 13:03:43 -05:00
13 changed files with 197 additions and 142 deletions

View File

@@ -32,15 +32,14 @@ install: "echo 'Skipping install stage, dependencies will be downloaded during b
# 2. Assemble DSpace
script:
# 1. [Install & Unit Test] Check source code licenses and run source code Unit Tests
# (This explicitly skips building the 'dspace' assembly module, since we only want to do that ONCE.)
# license:check => Validate all source code license headers
# -Dmaven.test.skip=false => Enable DSpace Unit Tests
# -P !dspace => SKIP full DSpace assembly (will do below)
# -P !assembly => Skip normal assembly (as it can be memory intensive)
# -B => Maven batch/non-interactive mode (recommended for CI)
# -V => Display Maven version info before build
- "mvn clean install license:check -Dmaven.test.skip=false -P !dspace -B -V"
- "mvn clean install license:check -Dmaven.test.skip=false -P !assembly -B -V"
# 2. [Assemble DSpace] Ensure assembly process works (from [src]/dspace/), including Mirage 2
# -Dmirage2.on=true => Build Mirage2
# -Dmirage2.deps.included=false => Don't include Mirage2 build dependencies (We installed them in before_install)
# -P !assembly => SKIP the actual building of [src]/dspace/dspace-installer (as it can be memory intensive)
- "cd dspace && mvn package -Dmirage2.on=true -Dmirage2.deps.included=false -P !assembly -B -V"
- "cd dspace && mvn package -Dmirage2.on=true -Dmirage2.deps.included=false -P !assembly -B -V"

View File

@@ -259,7 +259,16 @@
<phase>process-test-resources</phase>
<configuration>
<target>
<!-- Ant task to copy dspace.cfg.woven to location of test dspace.cfg file -->
<copy file="${agnostic.build.dir}/testing/dspace.cfg.woven" tofile="${agnostic.build.dir}/testing/dspace/config/dspace.cfg" />
<!-- Now, do one final filter of our Test configs, replacing any remaining "${dspace.dir}"
placeholders, with the full path of our Unit Test directory -->
<!-- NOTE: This final filtering is necessary, because dspace.dir doesn't get filled out
in our test dspace.cfg until Fileweaver runs above. -->
<replace dir="${agnostic.build.dir}/testing/dspace/config/" value="${agnostic.build.dir}/testing/dspace">
<include name="**/*"/>
<replacetoken>${dspace.dir}</replacetoken>
</replace>
</target>
</configuration>
<goals>

View File

@@ -59,7 +59,7 @@ public class DatabaseUtils
// Our Flyway DB object (initialized by setupFlyway())
private static Flyway flywaydb;
// When this temp file exists, the "checkReindexDiscovery()" method will auto-reindex Discovery
// Reindex flag file is at [dspace]/solr/search/conf/reindex.flag
// See also setReindexDiscovery()/getReindexDiscovery()
private static final String reindexDiscoveryFilePath = ConfigurationManager.getProperty("dspace.dir") +
@@ -68,6 +68,7 @@ public class DatabaseUtils
File.separator + "conf" +
File.separator + "reindex.flag";
// Types of databases supported by DSpace. See getDbType()
public static final String DBMS_POSTGRES="postgres";
public static final String DBMS_ORACLE="oracle";
public static final String DBMS_H2="h2";
@@ -117,7 +118,7 @@ public class DatabaseUtils
System.out.println(" - Driver: " + meta.getDriverName() + " version " + meta.getDriverVersion());
System.out.println(" - Username: " + meta.getUserName());
System.out.println(" - Password: [hidden]");
System.out.println(" - Schema: " + connection.getSchema());
System.out.println(" - Schema: " + getSchemaName(connection));
connection.close();
}
catch (SQLException sqle)
@@ -134,7 +135,7 @@ public class DatabaseUtils
Connection connection = dataSource.getConnection();
DatabaseMetaData meta = connection.getMetaData();
System.out.println("\nDatabase URL: " + meta.getURL());
System.out.println("Database Schema: " + connection.getSchema());
System.out.println("Database Schema: " + getSchemaName(connection));
System.out.println("Database Software: " + meta.getDatabaseProductName() + " version " + meta.getDatabaseProductVersion());
System.out.println("Database Driver: " + meta.getDriverName() + " version " + meta.getDriverVersion());
@@ -275,8 +276,7 @@ public class DatabaseUtils
flywaydb.setEncoding("UTF-8");
// Migration scripts are based on DBMS Keyword (see full path below)
DatabaseMetaData meta = connection.getMetaData();
String dbType = findDbKeyword(meta);
String dbType = getDbType(connection);
connection.close();
// Determine location(s) where Flyway will load all DB migrations
@@ -468,7 +468,7 @@ public class DatabaseUtils
// Get info about which database type we are using
connection = dataSource.getConnection();
DatabaseMetaData meta = connection.getMetaData();
String dbKeyword = findDbKeyword(meta);
String dbKeyword = getDbType(connection);
// If this is Oracle, the only way to entirely clean the database
// is to also purge the "Recyclebin". See:
@@ -641,7 +641,7 @@ public class DatabaseUtils
{
// Get the name of the Schema that the DSpace Database is using
// (That way we can search the right schema)
String schema = connection.getSchema();
String schema = getSchemaName(connection);
// Get information about our database.
DatabaseMetaData meta = connection.getMetaData();
@@ -702,7 +702,7 @@ public class DatabaseUtils
{
// Get the name of the Schema that the DSpace Database is using
// (That way we can search the right schema)
String schema = connection.getSchema();
String schema = getSchemaName(connection);
// Canonicalize everything to the proper case based on DB type
schema = canonicalize(connection, schema);
@@ -761,14 +761,14 @@ public class DatabaseUtils
{
// Get the name of the Schema that the DSpace Database is using
// (That way we can search the right schema)
String schema = connection.getSchema();
String schema = getSchemaName(connection);
// Canonicalize everything to the proper case based on DB type
schema = canonicalize(connection, schema);
sequenceName = canonicalize(connection, sequenceName);
// Different database types store sequence information in different tables
String dbtype = findDbKeyword(connection.getMetaData());
String dbtype = getDbType(connection);
String sequenceSQL = null;
switch(dbtype)
{
@@ -877,6 +877,61 @@ public class DatabaseUtils
}
}
/**
* Get the Database Schema Name in use by this Connection, so that it can
* be used to limit queries in other methods (e.g. tableExists()).
*
* @param connection
* Current Database Connection
* @return Schema name as a string, or null if it cannot be determined or is unspecified
*/
public static String getSchemaName(Connection connection)
throws SQLException
{
String schema = null;
// Try to get the schema from the DB connection itself.
// As long as the database driver supports JDBC 4.1, there should be a getSchema() method
// If this method is unimplemented or doesn't exist, it will throw an exception (likely an AbstractMethodError)
try
{
schema = connection.getSchema();
}
catch (Exception|AbstractMethodError e)
{
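// Ignored: drivers that predate JDBC 4.1 may throw here (often an AbstractMethodError);
// we fall back to the db.schema configuration and the DB-type defaults below.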
}
// If we don't know our schema, let's try the schema in the DSpace configuration
if(StringUtils.isBlank(schema))
{
schema = canonicalize(connection, ConfigurationManager.getProperty("db.schema"));
}
// Still blank? Ok, we'll find a "sane" default based on the DB type
if(StringUtils.isBlank(schema))
{
String dbType = getDbType(connection);
if(dbType.equals(DBMS_POSTGRES))
{
// For PostgreSQL, the default schema is named "public"
// See: http://www.postgresql.org/docs/9.0/static/ddl-schemas.html
schema = "public";
}
else if (dbType.equals(DBMS_ORACLE))
{
// For Oracle, the default schema is actually the user account
// See: http://stackoverflow.com/a/13341390
DatabaseMetaData meta = connection.getMetaData();
schema = meta.getUserName();
}
else // For H2 (in memory), there is no such thing as a schema
schema = null;
}
return schema;
}
/**
* Return the canonical name for a database identifier based on whether this
* database defaults to storing identifiers in uppercase or lowercase.
@@ -1068,14 +1123,16 @@ public class DatabaseUtils
}
/**
* Determine the type of Database, based on the DB connection's metadata info
* @param meta DatabaseMetaData from DB Connection
* Determine the type of Database, based on the DB connection.
*
* @param connection current DB Connection
* @return a DB keyword/type (see DatabaseUtils.DBMS_* constants)
* @throws SQLException
*/
protected static String findDbKeyword(DatabaseMetaData meta)
public static String getDbType(Connection connection)
throws SQLException
{
DatabaseMetaData meta = connection.getMetaData();
String prodName = meta.getDatabaseProductName();
String dbms_lc = prodName.toLowerCase(Locale.ROOT);
if (dbms_lc.contains("postgresql"))

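For context only (not part of the diff): a minimal sketch of how the two helpers made public in this commit might be called together. The DatabaseUtils method names and returned keywords match the additions above; the DataSource handling and the wrapper class are illustrative assumptions.

import java.sql.Connection;
import javax.sql.DataSource;
import org.dspace.storage.rdbms.DatabaseUtils;

public class SchemaInfoSketch
{
    public static void printSchemaInfo(DataSource dataSource) throws Exception
    {
        Connection connection = dataSource.getConnection();
        try
        {
            // Helpers added/made public in this commit
            String dbType = DatabaseUtils.getDbType(connection);     // "postgres", "oracle" or "h2"
            String schema = DatabaseUtils.getSchemaName(connection); // e.g. "public" on PostgreSQL, may be null on H2
            System.out.println("Database type: " + dbType + ", schema: " + schema);
        }
        finally
        {
            connection.close();
        }
    }
}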
View File

@@ -30,18 +30,11 @@ public class V6_0_2015_08_31__DS_2701_Hibernate_Workflow_Migration implements Jd
@Override
public void migrate(Connection connection) throws Exception
{
String dbtype = connection.getMetaData().getDatabaseProductName();
String dbFileLocation = null;
if(dbtype.toLowerCase().contains("postgres"))
{
dbFileLocation = "postgres";
}else
if(dbtype.toLowerCase().contains("oracle")){
dbFileLocation = "oracle";
}
// Based on type of DB, get path to SQL migration script
String dbtype = DatabaseUtils.getDbType(connection);
String dataMigrateSQL;
String sqlMigrationPath = "org/dspace/storage/rdbms/sqlmigration/workflow/" + dbFileLocation +"/";
String sqlMigrationPath = "org/dspace/storage/rdbms/sqlmigration/workflow/" + dbtype +"/";
// Now, check if the XMLWorkflow table (cwf_workflowitem) already exists in this database
// If XMLWorkflow Table does NOT exist in this database, then lets do the migration!
// If XMLWorkflow Table ALREADY exists, then this migration is a noop, we assume you manually ran the sql scripts
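
As an illustration (not part of this changeset): with getDbType() public, the migration can build its SQL script path and guard itself in a few lines. The sketch below assumes a DatabaseUtils.tableExists(Connection, String) helper; only the method name is confirmed by the javadoc earlier in this diff.

// Sketch of the migration body after this change (loading/executing the SQL is elided)
String dbtype = DatabaseUtils.getDbType(connection);   // "postgres", "oracle" or "h2"
String sqlMigrationPath = "org/dspace/storage/rdbms/sqlmigration/workflow/" + dbtype + "/";

// Only perform the data migration if the XMLWorkflow table is not yet present
// (tableExists(Connection, String) signature is an assumption)
if (!DatabaseUtils.tableExists(connection, "cwf_workflowitem"))
{
    // ... read the SQL under sqlMigrationPath and execute it against 'connection' ...
}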

View File

@@ -1,18 +1,30 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE hibernate-configuration PUBLIC "-//Hibernate/Hibernate Configuration DTD 3.0//EN"
"http://www.hibernate.org/dtd/hibernate-configuration-3.0.dtd">
<!--
A slightly customized version of hibernate.cfg.xml which is used for Unit Testing with the H2 database.
This OVERRIDES the default [dspace]/config/hibernate.cfg.xml, so it should be kept in sync with that file!
-->
<hibernate-configuration>
<session-factory>
<!--
NOTE: If you are looking for the Hibernate database connection info
(driver, url, username, pwd), it is initialized in the 'dataSource'
bean in [dspace.dir]/config/spring/api/core-hibernate.xml
-->
<property name="hibernate.dialect">org.hibernate.dialect.H2Dialect</property>
<property name="hibernate.hbm2ddl.auto">update</property>
<property name="hibernate.dialect">${db.dialect}</property>
<!--
Schema name - if your database contains multiple schemas, you can avoid
problems with retrieving the definitions of duplicate object names by
specifying the schema name that is used for DSpace.
ORACLE USAGE NOTE: In Oracle, schema is equivalent to "username". This means
specifying a "db.schema" is often unnecessary (i.e. you can leave it blank),
UNLESS your Oracle DB Account (in db.username) has access to multiple schemas.
-->
<!-- H2 doesn't use schemas -->
<!--<property name="hibernate.default_schema"></property>-->
<property name="hibernate.hbm2ddl.auto">update</property> <!-- custom for H2 -->
<property name="hibernate.hbm2ddl.import_files_sql_extractor">org.hibernate.tool.hbm2ddl.SingleLineSqlCommandExtractor</property>
<property name="hibernate.connection.autocommit">false</property>
<property name="hibernate.current_session_context_class">org.hibernate.context.internal.ThreadLocalSessionContext</property>
@@ -21,9 +33,9 @@
<!--Connection pool parameters -->
<!-- Maximum number of DB connections in pool -->
<property name="hibernate.c3p0.max_size">30</property>
<property name="hibernate.c3p0.max_size">${db.maxconnections}</property>
<!-- Determine the number of statements to be cached. -->
<property name="hibernate.c3p0.max_statements">50</property>
<property name="hibernate.c3p0.max_statements">${db.statementpool.cache}</property>
@@ -63,7 +75,6 @@
<mapping class="org.dspace.eperson.EPerson"/>
<mapping class="org.dspace.eperson.Group"/>
<mapping class="org.dspace.eperson.Group2GroupCache"/>
<!--<mapping class="org.dspace.eperson.Group2Group"/>-->
<mapping class="org.dspace.eperson.RegistrationData"/>
<mapping class="org.dspace.eperson.Subscription"/>

View File

@@ -1,20 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd" default-lazy-init="true">
<bean id="sessionFactory" class="org.springframework.orm.hibernate4.LocalSessionFactoryBean" lazy-init="true">
<property name="configLocation" value="classpath:hibernate.cfg.xml"/>
<property name="dataSource" ref="dataSource" />
</bean>
<bean id="dataSource" class="org.apache.commons.dbcp2.BasicDataSource" lazy-init="true" destroy-method="close">
<property name="driverClassName" value="org.h2.Driver"/>
<property name="url" value="jdbc:h2:mem:test;LOCK_TIMEOUT=10000;MVCC=true"/>
<property name="username" value="sa"/>
<property name="password" value=""/>
</bean>
<bean name="org.dspace.core.DBConnection" class="org.dspace.core.HibernateDBConnection" lazy-init="true"/>
</beans>

View File

@@ -91,7 +91,7 @@ public class CommunityCollectionIntegrationTest extends AbstractIntegrationTest
context.restoreAuthSystemState();
//verify it works as expected
assertThat("testCreateTree 0", parent.getParentCommunities().size(), not(0));
assertThat("testCreateTree 0", parent.getParentCommunities().size(), is(0));
assertThat("testCreateTree 1", child1.getParentCommunities().get(0), equalTo(parent));
assertThat("testCreateTree 2", (Community) collectionService.getParentObject(context, col1), equalTo(child1));
assertThat("testCreateTree 3", (Community) collectionService.getParentObject(context, col2), equalTo(child1));

View File

@@ -7,7 +7,6 @@
*/
package org.dspace.eperson;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import org.dspace.AbstractUnitTest;
import org.dspace.authorize.AuthorizeException;
@@ -89,16 +88,16 @@ public class GroupTest extends AbstractUnitTest {
{
try {
context.turnOffAuthorisationSystem();
if(level1Group != null)
{
groupService.delete(context, level1Group);
level1Group = null;
}
if(level2Group != null)
{
groupService.delete(context,level2Group);
level2Group = null;
}
if(level1Group != null)
{
groupService.delete(context, level1Group);
level1Group = null;
}
if(topGroup != null)
{
groupService.delete(context,topGroup);
@@ -184,18 +183,25 @@ public class GroupTest extends AbstractUnitTest {
@Test
public void findAllNameSort() throws SQLException {
// Retrieve groups sorted by name
List<Group> groups = groupService.findAll(context, GroupService.NAME);
assertThat("findAllNameSort 1", groups, notNullValue());
//Check our sorting order by adding to a treeSet & check against arraylist values
List<String> listNames = new ArrayList<String>();
Set<String> setNames = new TreeSet<String>();
// Add all group names to two arraylists (arraylists are unsorted)
// NOTE: we use lists here because we don't want duplicate names removed
List<String> names = new ArrayList<String>();
List<String> sortedNames = new ArrayList<String>();
for (Group group : groups) {
listNames.add(group.getName());
setNames.add(group.getName());
names.add(group.getName());
sortedNames.add(group.getName());
}
assertTrue("findAllNameSort 2 ", ArrayUtils.isEquals(setNames.toArray(new String[setNames.size()]), listNames.toArray(new String[listNames.size()])));
// Now, sort the "sortedNames" Arraylist
Collections.sort(sortedNames);
// Verify the sorted arraylist is still equal to the original (unsorted) one
assertEquals("findAllNameSort compareLists", sortedNames, names);
}
@Test

View File

@@ -55,5 +55,7 @@ log4j.rootLogger=INFO, stdout
# Hibernate logging options (INFO only shows startup messages)
log4j.logger.org.hibernate=INFO
# Log JDBC bind parameter runtime arguments
log4j.logger.org.hibernate.type=trace
# For detailed Hibernate logging in Unit Tests, you can enable the following
# setting which logs all JDBC bind parameter runtime arguments.
# This will drastically increase the size of Unit Test logs though.
#log4j.logger.org.hibernate.type=TRACE

View File

@@ -71,7 +71,6 @@
<mapping class="org.dspace.eperson.EPerson"/>
<mapping class="org.dspace.eperson.Group"/>
<mapping class="org.dspace.eperson.Group2GroupCache"/>
<!--<mapping class="org.dspace.eperson.Group2Group"/>-->
<mapping class="org.dspace.eperson.RegistrationData"/>
<mapping class="org.dspace.eperson.Subscription"/>

View File

@@ -89,7 +89,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.17</version>
<version>2.18.1</version>
<configuration>
<!-- Allow for the ability to pass JVM memory flags for Unit Tests. Since
maven-surefire-plugin forks a new JVM, it ignores MAVEN_OPTS.-->
@@ -130,7 +130,7 @@
</plugin>
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<version>2.4.1</version>
<version>2.5.5</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>

View File

@@ -24,50 +24,69 @@
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<!-- First, copy the following from our 'dspace' subfolder assembly project into the
same subdirectory in the final ZIP file. NOTE: We do this in a <fileSet> INSTEAD
of a <moduleSet> so that we don't have to first build the "dspace" module. -->
<fileSets>
<fileSet>
<!-- Copy necessary DSpace subdirectories into Test environment -->
<includes>
<include>dspace/bin/**</include>
<include>dspace/config/**</include>
<include>dspace/etc/**</include>
<include>dspace/solr/**</include>
</includes>
<!-- But, exclude specific configs (which require filtering) -->
<excludes>
<exclude>dspace/config/dspace.cfg</exclude>
<exclude>dspace/config/log4j.properties</exclude>
<exclude>dspace/config/modules/**</exclude>
</excludes>
</fileSet>
<fileSet>
<!-- Copy specific configs (filtering their content) also into Test environment -->
<includes>
<include>dspace/config/modules/**</include>
<include>dspace/config/dspace.cfg</include>
<include>dspace/config/log4j.properties</include>
</includes>
<filtered>true</filtered>
</fileSet>
</fileSets>
<!-- NOTE: first file in wins when using maven-assembly-plugin. So, overridden
files have to be included first. -->
<moduleSets>
<!-- Next, search for a 'src/test/data/dspaceFolder' data directory in
<!-- FIRST, search for a 'src/test/data/dspaceFolder' data directory in
ANY of our modules. If found, copy its contents into the same "dspace"
subdirectory in the final ZIP file, as this is data to be used in testing.
NOTE: This *might* overwrite/overlay default files copied from above. -->
NOTE: This *might* overwrite/overlay default files copied below. -->
<moduleSet>
<!-- Enable access to all projects in the current multimodule build! -->
<useAllReactorProjects>true</useAllReactorProjects>
<includes>
<include>org.dspace:*</include>
</includes>
<sources>
<outputDirectoryMapping>dspace</outputDirectoryMapping>
<fileSets>
<!-- First, copy over custom/overridden configs, filtering each of them -->
<fileSet>
<directory>src/test/data/dspaceFolder/config</directory>
<outputDirectory>config</outputDirectory>
<filtered>true</filtered>
</fileSet>
<!-- Then copy over everything else (EXCEPT configs) -->
<fileSet>
<directory>src/test/data/dspaceFolder</directory>
<excludes>
<exclude>config/**</exclude>
</excludes>
</fileSet>
</fileSets>
</sources>
</moduleSet>
<!-- NEXT, we add in the default configuration/settings from the
DSpace Assembly module. -->
<moduleSet>
<includes>
<include>org.dspace:dspace</include>
</includes>
<sources>
<outputDirectoryMapping>dspace</outputDirectoryMapping>
<fileSets>
<!-- First add in the config directory, filtering all config files -->
<fileSet>
<!-- Include the config directory and all subdirectories
(without this explicit include, the "modules"
subdirectory is strangely excluded) -->
<includes>
<include>config/**</include>
</includes>
<filtered>true</filtered>
</fileSet>
<!-- Then add in other necessary directories -->
<fileSet>
<directory>bin</directory>
<outputDirectory>bin</outputDirectory>
</fileSet>
<fileSet>
<directory>etc</directory>
<outputDirectory>etc</outputDirectory>
</fileSet>
<fileSet>
<directory>solr</directory>
<outputDirectory>solr</outputDirectory>
</fileSet>
</fileSets>
</sources>

View File

@@ -1,18 +1,7 @@
# DSpace build.properties
# This file should be customised to suit your build environment.
# Note that not all configuration is handled here, only the most common
# properties that tend to differ between build environments.
# For adjusting global settings or more complex settings, edit the relevant config file.
#
# IMPORTANT: Do not remove or comment out settings in build.properties
# When you edit the "build.properties" file (or a custom *.properties file),
# take care not to remove or comment out any settings. Doing so may cause
# your final "dspace.cfg" file to be misconfigured with regard to that
# particular setting. Instead, if you wish to remove/disable a particular
# setting, just clear out its value. For example, if you don't want to be
# notified of new user registrations, ensure the "mail.registration.notify"
# setting has no value, e.g. "mail.registration.notify="
#
# This file is a custom version of "build.properties" which is used
# specifically for Unit Testing.
##########################
# SERVER CONFIGURATION #
@@ -53,33 +42,24 @@ default.language = en_US
##########################
# DATABASE CONFIGURATION #
##########################
# For Unit Testing we use H2 running in "Oracle mode"
db.driver = org.h2.Driver
# Use a 10 second database lock timeout to avoid occasional JDBC lock timeout errors
db.url = jdbc:h2:mem:test;MODE=Oracle;LOCK_TIMEOUT=10000
db.username = sa
db.password = sa
# Schema name - if your database contains multiple schemas, you can avoid problems with
# retrieving the definitions of duplicate object names by specifying
# the schema name here that is used for DSpace by uncommenting the following entry
# For Unit Testing we use the H2 (in memory) database
db.driver = org.h2.Driver
db.dialect=org.hibernate.dialect.H2Dialect
# Use H2 running in "Oracle mode"
# Use a 10 second database lock timeout to avoid occasional JDBC lock timeout errors
db.url = jdbc:h2:mem:test;LOCK_TIMEOUT=10000;MVCC=true
#db.url = jdbc:h2:mem:test;MODE=Oracle;LOCK_TIMEOUT=10000
db.username = sa
db.password =
# H2 doesn't use schemas
db.schema =
# Maximum number of DB connections in pool
db.maxconnections = 30
# Maximum time to wait before giving up if all connections in pool are busy (milliseconds)
db.maxwait = 5000
# Maximum number of idle connections in pool (-1 = unlimited)
db.maxidle = -1
# Determine if prepared statement should be cached. (default is true)
db.statementpool = true
# Specify a name for the connection pool (useful if you have multiple applications sharing Tomcat's dbcp)
# If not specified, defaults to 'dspacepool'
db.poolname = dspacepool
# Determine the number of statements that can be cached (set to 0 to disable caching)
db.statementpool.cache = 50
#######################
# EMAIL CONFIGURATION #