From b46b781f83df3ebc69a1cb57ae9cbf9218907dd8 Mon Sep 17 00:00:00 2001
From: jakubs <jakubs>
Date: Tue, 4 Nov 2014 16:49:42 +0000
Subject: [PATCH] SSDM-1010 refactor multi data set archiver. Improve transaction
 handling, use share.freeSpace for unarchiving. Bring back the
 transaction.close() method.

SVN: 32726
---
 .../plugins/SegmentedStoreShufflingTask.java  |   7 +-
 .../postregistration/EagerShufflingTask.java  |   3 +-
 .../server/api/v1/DssServiceRpcGeneric.java   |   3 +-
 .../AbstractArchiverProcessingPlugin.java     |  22 +++-
 .../plugins/standard/RsyncArchiver.java       |   2 +-
 .../archiver/MultiDataSetArchiver.java        | 121 +++++++++++++++---
 .../IMultiDataSetArchiverDBTransaction.java   |   2 +
 .../MultiDataSetArchiverDBTransaction.java    |   8 ++
 .../generic/shared/ArchiverTaskContext.java   |  12 +-
 .../shared/IUnarchivingPreparation.java       |  11 +-
 .../shared/utils/SegmentedStoreUtils.java     |  53 ++++++--
 .../shared/utils/SegmentedStoreUtilsTest.java |  41 +++---
 12 files changed, 213 insertions(+), 72 deletions(-)

diff --git a/datastore_server/source/java/ch/systemsx/cisd/etlserver/plugins/SegmentedStoreShufflingTask.java b/datastore_server/source/java/ch/systemsx/cisd/etlserver/plugins/SegmentedStoreShufflingTask.java
index d638cd3d7dd..8a9a365d124 100644
--- a/datastore_server/source/java/ch/systemsx/cisd/etlserver/plugins/SegmentedStoreShufflingTask.java
+++ b/datastore_server/source/java/ch/systemsx/cisd/etlserver/plugins/SegmentedStoreShufflingTask.java
@@ -49,12 +49,13 @@ import ch.systemsx.cisd.openbis.dss.generic.shared.IncomingShareIdProvider;
 import ch.systemsx.cisd.openbis.dss.generic.shared.ServiceProvider;
 import ch.systemsx.cisd.openbis.dss.generic.shared.utils.DssPropertyParametersUtil;
 import ch.systemsx.cisd.openbis.dss.generic.shared.utils.SegmentedStoreUtils;
+import ch.systemsx.cisd.openbis.dss.generic.shared.utils.SegmentedStoreUtils.FilterOptions;
 import ch.systemsx.cisd.openbis.dss.generic.shared.utils.Share;
 import ch.systemsx.cisd.openbis.generic.shared.dto.SimpleDataSetInformationDTO;
 
 /**
- * Maintenance task which shuffles data sets between shares of a segmented store. This task is
- * supposed to prevent incoming shares from having not enough space.
+ * Maintenance task which shuffles data sets between shares of a segmented store. This task is supposed to prevent incoming shares from having not
+ * enough space.
  * 
  * @author Franz-Josef Elmer
  */
@@ -241,7 +242,7 @@ public class SegmentedStoreShufflingTask implements IDataStoreLockingMaintenance
 
     private List<Share> listShares()
     {
-        return SegmentedStoreUtils.getSharesWithDataSets(storeRoot, dataStoreCode, true,
+        return SegmentedStoreUtils.getSharesWithDataSets(storeRoot, dataStoreCode, FilterOptions.AVAILABLE_FOR_SHUFFLING,
                 Collections.<String> emptySet(), freeSpaceProvider, service, operationLogger);
     }
 
diff --git a/datastore_server/source/java/ch/systemsx/cisd/etlserver/postregistration/EagerShufflingTask.java b/datastore_server/source/java/ch/systemsx/cisd/etlserver/postregistration/EagerShufflingTask.java
index f9ce5bcd9c8..261d9b76e01 100644
--- a/datastore_server/source/java/ch/systemsx/cisd/etlserver/postregistration/EagerShufflingTask.java
+++ b/datastore_server/source/java/ch/systemsx/cisd/etlserver/postregistration/EagerShufflingTask.java
@@ -49,6 +49,7 @@ import ch.systemsx.cisd.openbis.dss.generic.shared.IShareIdManager;
 import ch.systemsx.cisd.openbis.dss.generic.shared.IncomingShareIdProvider;
 import ch.systemsx.cisd.openbis.dss.generic.shared.ServiceProvider;
 import ch.systemsx.cisd.openbis.dss.generic.shared.utils.SegmentedStoreUtils;
+import ch.systemsx.cisd.openbis.dss.generic.shared.utils.SegmentedStoreUtils.FilterOptions;
 import ch.systemsx.cisd.openbis.dss.generic.shared.utils.Share;
 import ch.systemsx.cisd.openbis.generic.shared.dto.SimpleDataSetInformationDTO;
 
@@ -199,7 +200,7 @@ public class EagerShufflingTask extends AbstractPostRegistrationTaskForPhysicalD
         public ICleanupTask createCleanupTask()
         {
             List<Share> shares =
-                    SegmentedStoreUtils.getSharesWithDataSets(storeRoot, dataStoreCode, true,
+                    SegmentedStoreUtils.getSharesWithDataSets(storeRoot, dataStoreCode, FilterOptions.AVAILABLE_FOR_SHUFFLING,
                             incomingShares, freeSpaceProvider, service, logger);
             dataSet = findDataSet(shares, dataSetCode);
             shareWithMostFreeOrNull = finder.tryToFindShare(dataSet, shares);
diff --git a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/api/v1/DssServiceRpcGeneric.java b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/api/v1/DssServiceRpcGeneric.java
index e7833e54ce0..dc4f13c1da2 100644
--- a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/api/v1/DssServiceRpcGeneric.java
+++ b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/api/v1/DssServiceRpcGeneric.java
@@ -65,6 +65,7 @@ import ch.systemsx.cisd.openbis.dss.generic.shared.api.v1.NewDataSetDTO;
 import ch.systemsx.cisd.openbis.dss.generic.shared.api.v1.ShareInfo;
 import ch.systemsx.cisd.openbis.dss.generic.shared.utils.DatasetLocationUtil;
 import ch.systemsx.cisd.openbis.dss.generic.shared.utils.SegmentedStoreUtils;
+import ch.systemsx.cisd.openbis.dss.generic.shared.utils.SegmentedStoreUtils.FilterOptions;
 import ch.systemsx.cisd.openbis.dss.generic.shared.utils.Share;
 import ch.systemsx.cisd.openbis.generic.shared.basic.dto.AbstractExternalData;
 import ch.systemsx.cisd.openbis.generic.shared.basic.dto.PhysicalDataSet;
@@ -453,7 +454,7 @@ public class DssServiceRpcGeneric extends AbstractDssServiceRpc<IDssServiceRpcGe
         getOpenBISService().checkSession(sessionToken);
         List<Share> shares =
                 SegmentedStoreUtils.getSharesWithDataSets(getStoreDirectory(), dataStoreCode,
-                        false, Collections.<String> emptySet(), freeSpaceProvider,
+                        FilterOptions.ALL, Collections.<String> emptySet(), freeSpaceProvider,
                         getOpenBISService(), new Log4jSimpleLogger(operationLog));
         List<ShareInfo> result = new ArrayList<ShareInfo>();
         for (Share share : shares)
diff --git a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/AbstractArchiverProcessingPlugin.java b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/AbstractArchiverProcessingPlugin.java
index 73d10113f36..c21f7ba0f6d 100644
--- a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/AbstractArchiverProcessingPlugin.java
+++ b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/AbstractArchiverProcessingPlugin.java
@@ -59,6 +59,7 @@ import ch.systemsx.cisd.openbis.dss.generic.shared.ServiceProvider;
 import ch.systemsx.cisd.openbis.dss.generic.shared.StandardShareFinder;
 import ch.systemsx.cisd.openbis.dss.generic.shared.dto.DataSetCodesWithStatus;
 import ch.systemsx.cisd.openbis.dss.generic.shared.utils.SegmentedStoreUtils;
+import ch.systemsx.cisd.openbis.dss.generic.shared.utils.SegmentedStoreUtils.FilterOptions;
 import ch.systemsx.cisd.openbis.dss.generic.shared.utils.Share;
 import ch.systemsx.cisd.openbis.generic.shared.basic.dto.DataSetArchivingStatus;
 import ch.systemsx.cisd.openbis.generic.shared.basic.dto.DatasetLocation;
@@ -358,15 +359,20 @@ public abstract class AbstractArchiverProcessingPlugin extends AbstractDatastore
     }
 
     private void setUpUnarchivingPreparation(ArchiverTaskContext context)
+    {
+        context.setUnarchivingPreparation(getUnarchivingPreparation());
+    }
+
+    protected IUnarchivingPreparation getUnarchivingPreparation()
     {
         String dataStoreCode = ServiceProvider.getConfigProvider().getDataStoreCode();
         Set<String> incomingShares = IncomingShareIdProvider.getIdsOfIncomingShares();
         IFreeSpaceProvider freeSpaceProvider = new SimpleFreeSpaceProvider();
         List<Share> shares =
-                SegmentedStoreUtils.getSharesWithDataSets(storeRoot, dataStoreCode, true, incomingShares,
+                SegmentedStoreUtils.getSharesWithDataSets(storeRoot, dataStoreCode, FilterOptions.ALL, incomingShares,
                         freeSpaceProvider, getService(), new Log4jSimpleLogger(operationLog));
-        context.setUnarchivingPreparation(new UnarchivingPreparation(getShareFinder(),
-                getShareIdManager(), getService(), shares));
+        return new UnarchivingPreparation(getShareFinder(),
+                getShareIdManager(), getService(), shares);
     }
 
     /**
@@ -717,7 +723,15 @@ public abstract class AbstractArchiverProcessingPlugin extends AbstractDatastore
         }
 
         @Override
-        public void prepareForUnarchiving(DatasetDescription dataSet)
+        public void prepareForUnarchiving(List<DatasetDescription> dataSets)
+        {
+            for (DatasetDescription datasetDescription : dataSets)
+            {
+                findAndUpdateShareForDataset(datasetDescription);
+            }
+        }
+
+        protected void findAndUpdateShareForDataset(DatasetDescription dataSet)
         {
             SimpleDataSetInformationDTO translatedDataSet = SimpleDataSetHelper.translate(dataSet);
             String dataSetCode = dataSet.getDataSetCode();
diff --git a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/RsyncArchiver.java b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/RsyncArchiver.java
index 82da6056cc3..8a853188e7b 100644
--- a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/RsyncArchiver.java
+++ b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/RsyncArchiver.java
@@ -373,7 +373,7 @@ public class RsyncArchiver extends AbstractArchiverProcessingPlugin
         DatasetProcessingStatuses statuses = new DatasetProcessingStatuses();
         for (DatasetDescription dataset : datasets)
         {
-            context.getUnarchivingPreparation().prepareForUnarchiving(dataset);
+            context.getUnarchivingPreparation().prepareForUnarchiving(Collections.singletonList(dataset));
             File originalData = getDatasetDirectory(context, dataset);
             Status status = doUnarchive(dataset, originalData);
             statuses.addResult(dataset.getDataSetCode(), status, Operation.UNARCHIVE);
diff --git a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/archiver/MultiDataSetArchiver.java b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/archiver/MultiDataSetArchiver.java
index 936cd70082d..b936269b607 100644
--- a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/archiver/MultiDataSetArchiver.java
+++ b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/archiver/MultiDataSetArchiver.java
@@ -25,12 +25,17 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.Set;
 
 import ch.rinn.restrictions.Private;
+import ch.systemsx.cisd.common.exceptions.ConfigurationFailureException;
 import ch.systemsx.cisd.common.exceptions.NotImplementedException;
 import ch.systemsx.cisd.common.exceptions.Status;
 import ch.systemsx.cisd.common.filesystem.BooleanStatus;
 import ch.systemsx.cisd.common.filesystem.FileUtilities;
+import ch.systemsx.cisd.common.filesystem.IFreeSpaceProvider;
+import ch.systemsx.cisd.common.filesystem.SimpleFreeSpaceProvider;
+import ch.systemsx.cisd.common.logging.Log4jSimpleLogger;
 import ch.systemsx.cisd.common.properties.PropertyUtils;
 import ch.systemsx.cisd.openbis.common.io.hierarchical_content.api.IHierarchicalContent;
 import ch.systemsx.cisd.openbis.common.io.hierarchical_content.api.IHierarchicalContentNode;
@@ -45,11 +50,22 @@ import ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.archiver.dat
 import ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.archiver.dataaccess.MultiDataSetArchiverDataSetDTO;
 import ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.archiver.dataaccess.MultiDataSetArchiverDataSourceUtil;
 import ch.systemsx.cisd.openbis.dss.generic.shared.ArchiverTaskContext;
+import ch.systemsx.cisd.openbis.dss.generic.shared.IDataSetDirectoryProvider;
+import ch.systemsx.cisd.openbis.dss.generic.shared.IEncapsulatedOpenBISService;
 import ch.systemsx.cisd.openbis.dss.generic.shared.IShareFinder;
+import ch.systemsx.cisd.openbis.dss.generic.shared.IShareIdManager;
+import ch.systemsx.cisd.openbis.dss.generic.shared.IUnarchivingPreparation;
+import ch.systemsx.cisd.openbis.dss.generic.shared.IncomingShareIdProvider;
+import ch.systemsx.cisd.openbis.dss.generic.shared.ServiceProvider;
+import ch.systemsx.cisd.openbis.dss.generic.shared.utils.SegmentedStoreUtils;
+import ch.systemsx.cisd.openbis.dss.generic.shared.utils.SegmentedStoreUtils.FilterOptions;
+import ch.systemsx.cisd.openbis.dss.generic.shared.utils.Share;
 import ch.systemsx.cisd.openbis.generic.shared.basic.dto.AbstractExternalData;
 import ch.systemsx.cisd.openbis.generic.shared.basic.dto.IDatasetLocation;
 import ch.systemsx.cisd.openbis.generic.shared.basic.dto.PhysicalDataSet;
 import ch.systemsx.cisd.openbis.generic.shared.dto.DatasetDescription;
+import ch.systemsx.cisd.openbis.generic.shared.dto.SimpleDataSetInformationDTO;
+import ch.systemsx.cisd.openbis.generic.shared.translator.SimpleDataSetHelper;
 
 /**
  * @author Jakub Straszewski
@@ -91,8 +107,6 @@ public class MultiDataSetArchiver extends AbstractArchiverProcessingPlugin
 
     public static final Long DEFAULT_MAXIMUM_CONTAINER_SIZE_IN_BYTES = 80L * 1024 * 1024 * 1024;
 
-    private transient IMultiDataSetArchiverDBTransaction transaction;
-
     private transient IMultiDataSetArchiverReadonlyQueryDAO readonlyQuery;
 
     public MultiDataSetArchiver(Properties properties, File storeRoot)
@@ -116,21 +130,25 @@ public class MultiDataSetArchiver extends AbstractArchiverProcessingPlugin
             return result;
         }
 
+        IMultiDataSetArchiverDBTransaction transaction = getTransaction();
+
         try
         {
             verifyDataSetsSize(dataSets);
 
-            DatasetProcessingStatuses archiveResult = archiveDataSets(dataSets, context);
+            DatasetProcessingStatuses archiveResult = archiveDataSets(dataSets, context, transaction);
 
             result.addResults(archiveResult);
 
-            getTransaction().commit();
+            transaction.commit();
+            transaction.close();
         } catch (Exception e)
         {
             operationLog.warn("Archiving of " + dataSets.size() + " data sets failed", e);
             try
             {
-                getTransaction().rollback();
+                transaction.rollback();
+                transaction.close();
             } catch (Exception ex)
             {
                 operationLog.warn("Rollback of multi dataset db transaction failed", ex);
@@ -151,7 +169,7 @@ public class MultiDataSetArchiver extends AbstractArchiverProcessingPlugin
      * <code>dataSets</code> and removes those which are present in the archive already (or not present, depending on the <code>filterOption</code>).
      * For those removed data sets it adds entry with <code>status</code> for <code>operation</code> in <code>result</code>
      */
-    private void filterBasedOnArchiveStatus(LinkedList<? extends IDatasetLocation> dataSets, 
+    private void filterBasedOnArchiveStatus(LinkedList<? extends IDatasetLocation> dataSets,
             DatasetProcessingStatuses result, FilterOption filterOption, Status status, Operation operation)
     {
         for (Iterator<? extends IDatasetLocation> iterator = dataSets.iterator(); iterator.hasNext();)
@@ -214,7 +232,8 @@ public class MultiDataSetArchiver extends AbstractArchiverProcessingPlugin
         }
     }
 
-    private DatasetProcessingStatuses archiveDataSets(List<DatasetDescription> dataSets, ArchiverTaskContext context) throws Exception
+    private DatasetProcessingStatuses archiveDataSets(List<DatasetDescription> dataSets, ArchiverTaskContext context,
+            IMultiDataSetArchiverDBTransaction transaction) throws Exception
     {
         DatasetProcessingStatuses statuses = new DatasetProcessingStatuses();
 
@@ -222,11 +241,11 @@ public class MultiDataSetArchiver extends AbstractArchiverProcessingPlugin
 
         String containerPath = getFileOperations().generateContainerPath(dataSets);
 
-        MultiDataSetArchiverContainerDTO container = getTransaction().createContainer(containerPath);
+        MultiDataSetArchiverContainerDTO container = transaction.createContainer(containerPath);
 
         for (DatasetDescription dataSet : dataSets)
         {
-            getTransaction().insertDataset(dataSet, container);
+            transaction.insertDataset(dataSet, container);
         }
 
         IHierarchicalContent archivedContent = null;
@@ -315,26 +334,90 @@ public class MultiDataSetArchiver extends AbstractArchiverProcessingPlugin
     }
 
     @Override
-    public List<String> getDataSetCodesForUnarchiving(List<String> dataSetCodes)
+    public java.util.List<String> getDataSetCodesForUnarchiving(List<String> dataSetCodes)
     {
         assertAllDataSetsInTheSameContainer(dataSetCodes);
         return getCodesOfAllDataSetsInContainer(dataSetCodes);
     }
 
+    @Override
+    protected IUnarchivingPreparation getUnarchivingPreparation()
+    {
+        Share scratchShare = findScratchShare();
+
+        IDataSetDirectoryProvider directoryProvider = ServiceProvider.getDataStoreService().getDataSetDirectoryProvider();
+
+        return new MultiDataSetUnarchivingPreparations(scratchShare, getShareIdManager(), getService(), directoryProvider);
+    }
+
+    private Share findScratchShare()
+    {
+        String dataStoreCode = ServiceProvider.getConfigProvider().getDataStoreCode();
+        Set<String> incomingShares = IncomingShareIdProvider.getIdsOfIncomingShares();
+        IFreeSpaceProvider freeSpaceProvider = new SimpleFreeSpaceProvider();
+        List<Share> shares =
+                SegmentedStoreUtils.getSharesWithDataSets(storeRoot, dataStoreCode, FilterOptions.ARCHIVING_SCRATCH, incomingShares,
+                        freeSpaceProvider, getService(), new Log4jSimpleLogger(operationLog));
+        if (shares.size() != 1)
+        {
+            throw new ConfigurationFailureException("There should be exactly one unarchiving scratch share configured!");
+        }
+        Share scratchShare = shares.get(0);
+        return scratchShare;
+    }
+
     @Override
     protected IShareFinder getShareFinder()
     {
         return new MultiDataSetArchiverShareFinder();
     }
 
-    @Override
-    protected DatasetProcessingStatuses doUnarchive(List<DatasetDescription> parameterDataSets, ArchiverTaskContext context)
+    public static class MultiDataSetUnarchivingPreparations implements IUnarchivingPreparation
     {
-        for (DatasetDescription dataSet : parameterDataSets)
+        private final Share scratchShare;
+
+        private final IEncapsulatedOpenBISService service;
+
+        private final IShareIdManager shareIdManager;
+
+        private final IDataSetDirectoryProvider directoryProvider;
+
+        MultiDataSetUnarchivingPreparations(Share scratchShare, IShareIdManager shareIdManager, IEncapsulatedOpenBISService service,
+                IDataSetDirectoryProvider directoryProvider)
         {
-            context.getUnarchivingPreparation().prepareForUnarchiving(dataSet);
+            this.shareIdManager = shareIdManager;
+            this.service = service;
+            this.scratchShare = scratchShare;
+            this.directoryProvider = directoryProvider;
         }
 
+        @Override
+        public void prepareForUnarchiving(List<DatasetDescription> dataSets)
+        {
+            for (DatasetDescription dataSet : dataSets)
+            {
+                SimpleDataSetInformationDTO translatedDataSet = SimpleDataSetHelper.translate(dataSet);
+                String dataSetCode = dataSet.getDataSetCode();
+                translatedDataSet.setDataSetShareId(null);
+                String oldShareId = shareIdManager.getShareId(dataSetCode);
+                String newShareId = scratchShare.getShareId();
+                if (newShareId.equals(oldShareId) == false)
+                {
+                    service.updateShareIdAndSize(dataSetCode, newShareId, dataSet.getDataSetSize());
+                    shareIdManager.setShareId(dataSetCode, newShareId);
+                }
+            }
+
+            SegmentedStoreUtils.freeSpace(scratchShare, service, dataSets, directoryProvider, shareIdManager, new Log4jSimpleLogger(operationLog));
+
+        }
+    }
+
+    @Override
+    protected DatasetProcessingStatuses doUnarchive(List<DatasetDescription> parameterDataSets, ArchiverTaskContext context)
+    {
+        context.getUnarchivingPreparation().prepareForUnarchiving(parameterDataSets);
+
         List<String> dataSetCodes = translateToDataSetCodes(parameterDataSets);
         long containerId = assertAllDataSetsInTheSameContainer(dataSetCodes);
         assertNoAvailableDatasets(dataSetCodes);
@@ -413,7 +496,7 @@ public class MultiDataSetArchiver extends AbstractArchiverProcessingPlugin
         long containerId = -1;
         for (String code : dataSetCodes)
         {
-            MultiDataSetArchiverDataSetDTO dataSet = getTransaction().getDataSetForCode(code);
+            MultiDataSetArchiverDataSetDTO dataSet = getReadonlyQuery().getDataSetForCode(code);
             if (dataSet == null)
             {
                 throw new IllegalArgumentException("Dataset " + code
@@ -466,7 +549,7 @@ public class MultiDataSetArchiver extends AbstractArchiverProcessingPlugin
 
     protected boolean isDataSetPresentInArchive(String dataSetCode)
     {
-        MultiDataSetArchiverDataSetDTO dataSetInArchiveDB = getTransaction().getDataSetForCode(dataSetCode);
+        MultiDataSetArchiverDataSetDTO dataSetInArchiveDB = getReadonlyQuery().getDataSetForCode(dataSetCode);
         return dataSetInArchiveDB != null;
     }
 
@@ -483,11 +566,7 @@ public class MultiDataSetArchiver extends AbstractArchiverProcessingPlugin
     @Private
     IMultiDataSetArchiverDBTransaction getTransaction()
     {
-        if (transaction == null)
-        {
-            transaction = new MultiDataSetArchiverDBTransaction();
-        }
-        return transaction;
+        return new MultiDataSetArchiverDBTransaction();
     }
 
     IMultiDataSetArchiverReadonlyQueryDAO getReadonlyQuery()
diff --git a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/archiver/dataaccess/IMultiDataSetArchiverDBTransaction.java b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/archiver/dataaccess/IMultiDataSetArchiverDBTransaction.java
index d879833bd9f..c03205d059c 100644
--- a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/archiver/dataaccess/IMultiDataSetArchiverDBTransaction.java
+++ b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/archiver/dataaccess/IMultiDataSetArchiverDBTransaction.java
@@ -42,4 +42,6 @@ public interface IMultiDataSetArchiverDBTransaction
 
     public void rollback();
 
+    public void close();
+
 }
\ No newline at end of file
diff --git a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/archiver/dataaccess/MultiDataSetArchiverDBTransaction.java b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/archiver/dataaccess/MultiDataSetArchiverDBTransaction.java
index dd3ec72f0d6..50c2ce64f27 100644
--- a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/archiver/dataaccess/MultiDataSetArchiverDBTransaction.java
+++ b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/server/plugins/standard/archiver/dataaccess/MultiDataSetArchiverDBTransaction.java
@@ -100,4 +100,12 @@ public class MultiDataSetArchiverDBTransaction implements IMultiDataSetArchiverD
         transaction.rollback();
     }
 
+    /**
+     * @see net.lemnik.eodsql.TransactionQuery#close()
+     */
+    @Override
+    public void close()
+    {
+        transaction.close();
+    }
 }
diff --git a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/shared/ArchiverTaskContext.java b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/shared/ArchiverTaskContext.java
index f8022b16f7d..64d046e55ba 100644
--- a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/shared/ArchiverTaskContext.java
+++ b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/shared/ArchiverTaskContext.java
@@ -16,24 +16,27 @@
 
 package ch.systemsx.cisd.openbis.dss.generic.shared;
 
-import ch.systemsx.cisd.openbis.generic.shared.dto.DatasetDescription;
+import java.util.List;
 
+import ch.systemsx.cisd.openbis.generic.shared.dto.DatasetDescription;
 
 /**
  * Context for perfoming archiving/unarchiving.
- *
+ * 
  * @author Franz-Josef Elmer
  */
 public class ArchiverTaskContext
 {
     private final IDataSetDirectoryProvider directoryProvider;
+
     private IUnarchivingPreparation unarchivingPreparation = new IUnarchivingPreparation()
         {
             @Override
-            public void prepareForUnarchiving(DatasetDescription dataSet)
+            public void prepareForUnarchiving(List<DatasetDescription> dataSet)
             {
             }
         };
+
     private final IHierarchicalContentProvider hierarchicalContentProvider;
 
     public ArchiverTaskContext(IDataSetDirectoryProvider directoryProvider,
@@ -66,6 +69,5 @@ public class ArchiverTaskContext
     {
         return unarchivingPreparation;
     }
-    
-    
+
 }
diff --git a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/shared/IUnarchivingPreparation.java b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/shared/IUnarchivingPreparation.java
index d6689d2a382..e5a87768244 100644
--- a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/shared/IUnarchivingPreparation.java
+++ b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/shared/IUnarchivingPreparation.java
@@ -16,17 +16,22 @@
 
 package ch.systemsx.cisd.openbis.dss.generic.shared;
 
+import java.util.List;
+
 import ch.systemsx.cisd.openbis.generic.shared.dto.DatasetDescription;
 
 /**
  * Interface of classes which prepare a data set before it will be unarchived.
- *
+ * 
  * @author Franz-Josef Elmer
  */
 public interface IUnarchivingPreparation
 {
     /**
-     * Prepares unarchiving of specified data set.
+     * Prepares unarchiving of the specified data sets. This method will be called with all data sets in a batch as an argument before the unarchiving
+     * starts. It can be used for operations like finding the right share according to a strategy which depends on all data sets, or freeing the
+     * required amount of space on the target share. For regular archivers it will be called with a singleton list before each data set is unarchived.
      */
-    public void prepareForUnarchiving(DatasetDescription dataSet);
+    public void prepareForUnarchiving(List<DatasetDescription> dataSets);
+
 }
diff --git a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/shared/utils/SegmentedStoreUtils.java b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/shared/utils/SegmentedStoreUtils.java
index 97c94d792d8..058d2f6f7e3 100644
--- a/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/shared/utils/SegmentedStoreUtils.java
+++ b/datastore_server/source/java/ch/systemsx/cisd/openbis/dss/generic/shared/utils/SegmentedStoreUtils.java
@@ -80,6 +80,8 @@ public class SegmentedStoreUtils
 
     private static final Pattern SHARE_ID_PATTERN = Pattern.compile("[0-9]+");
 
+    private static final Long MINIMUM_FREE_SCRATCH_SPACE = 1024L * 1024 * 1024;
+
     private static final Comparator<Share> SHARE_COMPARATOR = new Comparator<Share>()
         {
             @Override
@@ -112,6 +114,11 @@ public class SegmentedStoreUtils
             }
         };
 
+    public static enum FilterOptions
+    {
+        ALL, AVAILABLE_FOR_SHUFFLING, ARCHIVING_SCRATCH
+    }
+
     /**
      * Lists all folders in specified store root directory which match share pattern.
      */
@@ -190,21 +197,20 @@ public class SegmentedStoreUtils
      * Gets a list of all shares of specified store root directory. As a side effect it calculates and updates the size of all data sets if necessary.
      * 
      * @param dataStoreCode Code of the data store to which the root belongs.
-     * @param filterOutToBeIgnoredForShuffling If <code>true</code> no share will be returned which has property
-     *            <code>ignored-for-shuffling == true</code>
+     * @param filterOptions Specifies what kind of shares should be filtered out from the result
      * @param incomingShares Set of IDs of incoming shares. Will be used to mark {@link Share} object in the returned list.
      * @param freeSpaceProvider Provider of free space used for all shares.
      * @param service Access to openBIS API in order to get all data sets and to update data set size.
      * @param log Logger for logging size calculations.
      */
     public static List<Share> getSharesWithDataSets(File storeRoot, String dataStoreCode,
-            boolean filterOutToBeIgnoredForShuffling, Set<String> incomingShares,
+            FilterOptions filterOptions, Set<String> incomingShares,
             IFreeSpaceProvider freeSpaceProvider, IEncapsulatedOpenBISService service,
             ISimpleLogger log)
     {
         final long start = System.currentTimeMillis();
         List<Share> shares =
-                getSharesWithDataSets(storeRoot, dataStoreCode, filterOutToBeIgnoredForShuffling,
+                getSharesWithDataSets(storeRoot, dataStoreCode, filterOptions,
                         freeSpaceProvider, service, log, SystemTimeProvider.SYSTEM_TIME_PROVIDER);
         for (Share share : shares)
         {
@@ -242,7 +248,7 @@ public class SegmentedStoreUtils
         removeCommonDataSets(filteredDataSets, filteredDataSetsInShare);
         long requestedSpace = calculateTotalSizeOfDataSetsToKeep(filteredDataSets);
         long actualFreeSpace = unarchivingScratchShare.calculateFreeSpace();
-        if (actualFreeSpace < requestedSpace)
+        if (isNotEnoughFreeSpace(requestedSpace, actualFreeSpace))
         {
             Collections.sort(filteredDataSetsInShare, MODIFICATION_TIMESTAMP_COMPARATOR);
             List<SimpleDataSetInformationDTO> dataSetsToRemoveFromShare =
@@ -305,7 +311,7 @@ public class SegmentedStoreUtils
         }
         return size;
     }
-    
+
     private static List<String> extractCodes(List<SimpleDataSetInformationDTO> dataSets)
     {
         List<String> codes = new ArrayList<String>();
@@ -322,27 +328,32 @@ public class SegmentedStoreUtils
     {
         long freeSpace = actualFreeSpace;
         List<SimpleDataSetInformationDTO> dataSetsToRemoveFromShare = new ArrayList<SimpleDataSetInformationDTO>();
-        for (int i = 0, n = dataSetsInShare.size(); i < n && requestedSpace >= freeSpace; i++)
+        for (int i = 0, n = dataSetsInShare.size(); i < n && isNotEnoughFreeSpace(requestedSpace, freeSpace); i++)
         {
             SimpleDataSetInformationDTO dataSetInShare = dataSetsInShare.get(i);
             freeSpace += dataSetInShare.getDataSetSize();
             dataSetsToRemoveFromShare.add(dataSetInShare);
         }
-        if (requestedSpace >= freeSpace)
+        if (isNotEnoughFreeSpace(requestedSpace, freeSpace))
         {
             throw new EnvironmentFailureException("Even after removing all removable data sets from share '"
                     + share.getShareId() + "' there would be still only " + FileUtilities.byteCountToDisplaySize(freeSpace)
-                    + " free space which is less than the requested " + FileUtilities.byteCountToDisplaySize(requestedSpace) + ".");
+                    + " free space which is not enough as " + FileUtilities.byteCountToDisplaySize(requestedSpace) + " is requested.");
         }
         return dataSetsToRemoveFromShare;
     }
 
+    private static boolean isNotEnoughFreeSpace(long requestedSpace, long freeSpace)
+    {
+        return requestedSpace + MINIMUM_FREE_SCRATCH_SPACE >= freeSpace;
+    }
+
     static List<Share> getSharesWithDataSets(File storeRoot, String dataStoreCode,
-            boolean filterOutToBeIgnoredForShuffling, IFreeSpaceProvider freeSpaceProvider,
+            FilterOptions filterOptions, IFreeSpaceProvider freeSpaceProvider,
             IEncapsulatedOpenBISService service, ISimpleLogger log, ITimeProvider timeProvider)
     {
         final Map<String, Share> shares =
-                getShares(storeRoot, dataStoreCode, filterOutToBeIgnoredForShuffling,
+                getShares(storeRoot, dataStoreCode, filterOptions,
                         freeSpaceProvider, service, log, timeProvider);
         final List<Share> list = new ArrayList<Share>(shares.values());
         Collections.sort(list, SHARE_COMPARATOR);
@@ -350,7 +361,7 @@ public class SegmentedStoreUtils
     }
 
     private static Map<String, Share> getShares(File storeRoot, String dataStoreCode,
-            boolean filterOutToBeIgnoredForShuffling, IFreeSpaceProvider freeSpaceProvider,
+            FilterOptions filterOptions, IFreeSpaceProvider freeSpaceProvider,
             IEncapsulatedOpenBISService service, ISimpleLogger log, ITimeProvider timeProvider)
     {
         final Map<String, Share> shares = new HashMap<String, Share>();
@@ -360,7 +371,8 @@ public class SegmentedStoreUtils
         {
             final Share share =
                     new ShareFactory().createShare(sharesHolder, file, freeSpaceProvider, log);
-            if (filterOutToBeIgnoredForShuffling == false || share.isIgnoredForShuffling() == false)
+
+            if (isShareSelected(share, filterOptions))
             {
                 shares.put(share.getShareId(), share);
             }
@@ -368,6 +380,21 @@ public class SegmentedStoreUtils
         return shares;
     }
 
+    private static boolean isShareSelected(Share share, FilterOptions filterOptions)
+    {
+        switch (filterOptions)
+        {
+            case ALL:
+                return true;
+            case ARCHIVING_SCRATCH:
+                return share.isUnarchivingScratchShare();
+            case AVAILABLE_FOR_SHUFFLING:
+                return false == (share.isIgnoredForShuffling() || share.isUnarchivingScratchShare());
+            default:
+                throw new IllegalStateException("Unknown filter option: " + filterOptions);
+        }
+    }
+
     /**
      * Moves the specified data set to the specified share. The data set is folder in the store its name is the data set code. The destination folder
      * is <code>share</code>. Its name is the share id.
diff --git a/datastore_server/sourceTest/java/ch/systemsx/cisd/openbis/dss/generic/shared/utils/SegmentedStoreUtilsTest.java b/datastore_server/sourceTest/java/ch/systemsx/cisd/openbis/dss/generic/shared/utils/SegmentedStoreUtilsTest.java
index 7f6bb76a670..9f08f3b8dee 100644
--- a/datastore_server/sourceTest/java/ch/systemsx/cisd/openbis/dss/generic/shared/utils/SegmentedStoreUtilsTest.java
+++ b/datastore_server/sourceTest/java/ch/systemsx/cisd/openbis/dss/generic/shared/utils/SegmentedStoreUtilsTest.java
@@ -47,6 +47,7 @@ import ch.systemsx.cisd.openbis.dss.generic.shared.IDataSetDirectoryProvider;
 import ch.systemsx.cisd.openbis.dss.generic.shared.IEncapsulatedOpenBISService;
 import ch.systemsx.cisd.openbis.dss.generic.shared.IShareIdManager;
 import ch.systemsx.cisd.openbis.dss.generic.shared.ProxyShareIdManager;
+import ch.systemsx.cisd.openbis.dss.generic.shared.utils.SegmentedStoreUtils.FilterOptions;
 import ch.systemsx.cisd.openbis.generic.shared.Constants;
 import ch.systemsx.cisd.openbis.generic.shared.basic.dto.DataSetArchivingStatus;
 import ch.systemsx.cisd.openbis.generic.shared.basic.dto.IDatasetLocation;
@@ -151,7 +152,7 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
 
         assertEquals("", log.toString());
     }
-    
+
     @Test
     public void testFreeSpaceNothingToDo()
     {
@@ -159,10 +160,10 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
         Share share = new Share(shareFolder, 0, freeSpaceProvider);
         share.setUnarchivingScratchShare(true);
         RecordingMatcher<HostAwareFile> recordingFileMatcher = prepareFreeSpace(12L);
-        
+
         SegmentedStoreUtils.freeSpace(share, service, asDatasetDescriptions(ds1), dataSetDirectoryProvider,
                 shareIdManager, log);
-        
+
         assertEquals(shareFolder.getPath(), recordingFileMatcher.recordedObject().getPath());
         assertEquals("INFO: Free space on unarchiving scratch share '1': 12.00 KB, "
                 + "requested space for unarchiving 1 data sets: 11.00 KB\n", log.toString());
@@ -185,10 +186,10 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
         prepareSetArchingStatus(ds1);
         File file = prepareDeleteFromShare(ds1);
         assertEquals(true, file.exists());
-        
+
         SegmentedStoreUtils.freeSpace(share, service, asDatasetDescriptions(ds2, ds4), dataSetDirectoryProvider,
                 shareIdManager, log);
-        
+
         assertEquals(false, file.exists());
         assertEquals(shareFolder.getPath(), recordingFileMatcher.getRecordedObjects().get(0).getPath());
         assertEquals(shareFolder.getPath(), recordingFileMatcher.getRecordedObjects().get(1).getPath());
@@ -203,7 +204,7 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
                 + "INFO: Free space on unarchiving scratch share '1': 22.00 KB, requested space for "
                 + "unarchiving 2 data sets: 21.00 KB\n", log.toString());
     }
-    
+
     @Test
     public void testFreeSpaceForThreeDataSetsOneAlreadyInShare()
     {
@@ -221,10 +222,10 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
         prepareSetArchingStatus(ds3);
         File file = prepareDeleteFromShare(ds3);
         assertEquals(true, file.exists());
-        
+
         SegmentedStoreUtils.freeSpace(share, service, asDatasetDescriptions(ds1, ds2, ds4), dataSetDirectoryProvider,
                 shareIdManager, log);
-        
+
         assertEquals(false, file.exists());
         assertEquals(shareFolder.getPath(), recordingFileMatcher.getRecordedObjects().get(0).getPath());
         assertEquals(shareFolder.getPath(), recordingFileMatcher.getRecordedObjects().get(1).getPath());
@@ -239,7 +240,7 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
                 + "INFO: Free space on unarchiving scratch share '1': 24.00 KB, requested space for "
                 + "unarchiving 2 data sets: 21.00 KB\n", log.toString());
     }
-    
+
     @Test
     public void testFreeSpaceRemovingDataSetsButStillNotEnoughFreeSpace()
     {
@@ -254,7 +255,7 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
         share.addDataSet(ds2);
         share.addDataSet(ds1);
         RecordingMatcher<HostAwareFile> recordingFileMatcher = prepareFreeSpace(2L);
-        
+
         try
         {
             SegmentedStoreUtils.freeSpace(share, service, asDatasetDescriptions(ds4, ds5, ds1), dataSetDirectoryProvider,
@@ -265,11 +266,11 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
-            assertEquals("Even after removing all removable data sets from share '1' there would be "
-                    + "still only 24.00 KB free space which is less than the requested 25.00 KB.", ex.getMessage());
+            assertEquals("Even after removing all removable data sets from share '1' there would be "
+                    + "still only 24.00 KB free space which is not enough as 25.00 KB is requested.", ex.getMessage());
         }
-        
+
         assertEquals(shareFolder.getPath(), recordingFileMatcher.recordedObject().getPath());
         assertEquals("", log.toString());
     }
-    
+
     @Test
     public void testGetDataSetsPerShare()
     {
@@ -318,7 +319,7 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
             });
 
         List<Share> shares =
-                SegmentedStoreUtils.getSharesWithDataSets(store, DATA_STORE_CODE, true,
+                SegmentedStoreUtils.getSharesWithDataSets(store, DATA_STORE_CODE, FilterOptions.AVAILABLE_FOR_SHUFFLING,
                         freeSpaceProvider, service, log, timeProvider);
         Share share1 = shares.get(0);
         long freeSpace = share1.calculateFreeSpace();
@@ -548,7 +549,7 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
 
         fail();
     }
-    
+
     @Test
     public void testMoveDataSetToAnotherShareWhichIsAnUnarchivingScratchShare()
     {
@@ -561,7 +562,7 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
         FileUtilities.writeToFile(helloFile, "hello world");
         File share2 = new File(workingDirectory, "store/2");
         share2.mkdirs();
-        FileUtilities.writeToFile(new File(share2, ShareFactory.SHARE_PROPS_FILE), 
+        FileUtilities.writeToFile(new File(share2, ShareFactory.SHARE_PROPS_FILE),
                 ShareFactory.UNARCHIVING_SCRATCH_SHARE_PROP + "=true");
         context.checking(new Expectations()
             {
@@ -573,7 +574,7 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
                     one(shareIdManager).releaseLock("ds-1");
                 }
             });
-        
+
         try
         {
             SegmentedStoreUtils.moveDataSetToAnotherShare(dataSetDirInStore, share2, service,
@@ -585,7 +586,7 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
                     + "No data sets can be moved from/to such a share.", ex.getMessage());
         }
     }
-    
+
     @Test
     public void testMoveDataSetFromAnUnarchivingScratchShareToAnotherShare()
     {
@@ -594,7 +595,7 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
         File dataSetDirInStore = new File(share1uuid01, "02/03/ds-1");
         File original = new File(dataSetDirInStore, "original");
         original.mkdirs();
-        FileUtilities.writeToFile(new File(share1, ShareFactory.SHARE_PROPS_FILE), 
+        FileUtilities.writeToFile(new File(share1, ShareFactory.SHARE_PROPS_FILE),
                 ShareFactory.UNARCHIVING_SCRATCH_SHARE_PROP + "=true");
         final File helloFile = new File(original, "hello.txt");
         FileUtilities.writeToFile(helloFile, "hello world");
@@ -720,7 +721,7 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
         share2.mkdirs();
 
         List<Share> shares =
-                SegmentedStoreUtils.getSharesWithDataSets(store, DATA_STORE_CODE, true,
+                SegmentedStoreUtils.getSharesWithDataSets(store, DATA_STORE_CODE, FilterOptions.AVAILABLE_FOR_SHUFFLING,
                         freeSpaceProvider, service, log, timeProvider);
 
         assertEquals("2", shares.get(0).getShareId());
@@ -738,7 +739,7 @@ public class SegmentedStoreUtilsTest extends AbstractFileSystemTestCase
         share2.mkdirs();
 
         List<Share> shares =
-                SegmentedStoreUtils.getSharesWithDataSets(store, DATA_STORE_CODE, false,
+                SegmentedStoreUtils.getSharesWithDataSets(store, DATA_STORE_CODE, FilterOptions.ALL,
                         freeSpaceProvider, service, log, timeProvider);
 
         assertEquals("1", shares.get(0).getShareId());
-- 
GitLab