backup tar s3 fix

Anton Suhorukov 2023-07-20 11:39:17 +03:00
parent df8228ff01
commit c756261d3b
25 changed files with 304 additions and 189 deletions

View File

@ -36,20 +36,18 @@ global using System.Text;
global using System.Text.RegularExpressions;
global using System.Xml;
global using System.Xml.Linq;
global using System.Xml.XPath;
global using ASC.Api.Utils;
global using ASC.Common;
global using ASC.Common.Caching;
global using ASC.Common.Log;
global using ASC.Common.Threading;
global using ASC.Common.Utils;
global using ASC.Core;
global using ASC.Core.Billing;
global using ASC.Core.ChunkedUploader;
global using ASC.Core.Common.Configuration;
global using ASC.Core.Common.EF;
global using ASC.Core.Common.EF.Context;
global using ASC.Core.Common.EF.Model;
global using ASC.Core.Tenants;
global using ASC.Core.Users;
@ -70,6 +68,7 @@ global using ASC.Data.Backup.Utils;
global using ASC.Data.Storage;
global using ASC.Data.Storage.Configuration;
global using ASC.Data.Storage.DiscStorage;
global using ASC.Data.Storage.S3;
global using ASC.Data.Storage.ZipOperators;
global using ASC.EventBus.Events;
global using ASC.Files.Core;

View File

@ -221,7 +221,7 @@ public class BackupWorker
}
}
internal static string GetBackupHash(string path)
internal static string GetBackupHashSHA(string path)
{
using (var sha256 = SHA256.Create())
using (var fileStream = File.OpenRead(path))
@ -230,6 +230,43 @@ public class BackupWorker
var hash = sha256.ComputeHash(fileStream);
return BitConverter.ToString(hash).Replace("-", string.Empty);
}
}
internal static string GetBackupHashMD5(string path, long chunkSize)
{
using (var md5 = MD5.Create())
using (var fileStream = File.OpenRead(path))
{
var multipartSplitCount = 0;
var splitCount = fileStream.Length / chunkSize;
var mod = (int)(fileStream.Length - chunkSize * splitCount);
IEnumerable<byte> concatHash = new byte[] { };
for (var i = 0; i < splitCount; i++)
{
var offset = i == 0 ? 0 : chunkSize * i;
var chunk = GetChunk(fileStream, offset, (int)chunkSize);
var hash = md5.ComputeHash(chunk);
concatHash = concatHash.Concat(hash);
multipartSplitCount++;
}
if (mod != 0)
{
var chunk = GetChunk(fileStream, chunkSize * splitCount, mod);
var hash = md5.ComputeHash(chunk);
concatHash = concatHash.Concat(hash);
multipartSplitCount++;
}
var multipartHash = BitConverter.ToString(md5.ComputeHash(concatHash.ToArray())).Replace("-", string.Empty);
return multipartHash + "-" + multipartSplitCount;
}
}
private static byte[] GetChunk(Stream sourceStream, long offset, int count)
{
var buffer = new byte[count];
sourceStream.Position = offset;
sourceStream.Read(buffer, 0, count);
return buffer;
}
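
GetBackupHashMD5 above reproduces the ETag format Amazon S3 reports for multipart uploads (for objects without SSE-KMS encryption): the MD5 of the concatenated per-part MD5 digests, suffixed with "-" and the part count. A minimal verification sketch, not part of the commit (the sample ETag in the comment is illustrative):

internal static bool MatchesMultipartETag(string path, long chunkSize, string s3ETag)
{
    // GetBackupHashMD5 yields e.g. "9b2cf535f27731c974343645a3985328-3"
    // for a file uploaded in three parts of chunkSize bytes.
    var computed = GetBackupHashMD5(path, chunkSize);
    return string.Equals(computed, s3ETag.Trim('"'), StringComparison.OrdinalIgnoreCase);
}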
private BackupProgress ToBackupProgress(BaseBackupProgressItem progressItem)

View File

@ -31,9 +31,7 @@ namespace ASC.Data.Backup.Services;
public class BackupProgressItem : BaseBackupProgressItem
{
public Dictionary<string, string> StorageParams { get; set; }
public string TempFolder { get; set; }
private const string ArchiveFormat = "tar";
private bool _isScheduled;
private Guid _userId;
@ -97,16 +95,21 @@ public class BackupProgressItem : BaseBackupProgressItem
_tempStream = scope.ServiceProvider.GetService<TempStream>();
var dateTime = _coreBaseSettings.Standalone ? DateTime.Now : DateTime.UtcNow;
var backupName = string.Format("{0}_{1:yyyy-MM-dd_HH-mm-ss}.{2}", (await _tenantManager.GetTenantAsync(TenantId)).Alias, dateTime, ArchiveFormat);
var tempFile = CrossPlatform.PathCombine(TempFolder, backupName);
var storagePath = tempFile;
string hash;
var tempFile = "";
var storagePath = "";
try
{
var backupStorage = await _backupStorageFactory.GetBackupStorageAsync(_storageType, TenantId, StorageParams);
var writer = await ZipWriteOperatorFactory.GetWriteOperatorAsync(_tempStream, _storageBasePath, backupName, TempFolder, _userId, backupStorage as IGetterWriteOperator);
var getter = backupStorage as IGetterWriteOperator;
var backupName = string.Format("{0}_{1:yyyy-MM-dd_HH-mm-ss}.{2}", (await _tenantManager.GetTenantAsync(TenantId)).Alias, dateTime, await getter.GetBackupExtensionAsync(_storageBasePath));
tempFile = CrossPlatform.PathCombine(TempFolder, backupName);
storagePath = tempFile;
var writer = await DataOperatorFactory.GetWriteOperatorAsync(_tempStream, _storageBasePath, backupName, TempFolder, _userId, getter);
_backupPortalTask.Init(TenantId, tempFile, _limit, writer);
@ -121,7 +124,7 @@ public class BackupProgressItem : BaseBackupProgressItem
if (writer.NeedUpload)
{
storagePath = await backupStorage.UploadAsync(_storageBasePath, tempFile, _userId);
hash = BackupWorker.GetBackupHash(tempFile);
hash = BackupWorker.GetBackupHashSHA(tempFile);
}
else
{

View File

@ -47,8 +47,8 @@
* in every copy of the program you distribute.
* Pursuant to Section 7 § 3(e) we decline to grant you any rights under trademark law for use of our trademarks.
*
*/
namespace ASC.Data.Backup.Services;
[Transient(Additional = typeof(RestoreProgressItemExtention))]
@ -62,7 +62,8 @@ public class RestoreProgressItem : BaseBackupProgressItem
private readonly NotifyHelper _notifyHelper;
private BackupRepository _backupRepository;
private RestorePortalTask _restorePortalTask;
private readonly CoreBaseSettings _coreBaseSettings;
private readonly SetupInfo _setupInfo;
private string _region;
private string _upgradesPath;
@ -73,7 +74,8 @@ public class RestoreProgressItem : BaseBackupProgressItem
ICache cache,
IServiceScopeFactory serviceScopeFactory,
NotifyHelper notifyHelper,
CoreBaseSettings coreBaseSettings)
CoreBaseSettings coreBaseSettings,
SetupInfo setupInfo)
: base(logger, serviceScopeFactory)
{
_configuration = configuration;
@ -82,7 +84,8 @@ public class RestoreProgressItem : BaseBackupProgressItem
_notifyHelper = notifyHelper;
_coreBaseSettings = coreBaseSettings;
BackupProgressItemEnum = BackupProgressItemEnum.Restore;
_setupInfo = setupInfo;
}
public BackupStorageType StorageType { get; set; }
@ -131,12 +134,17 @@ public class RestoreProgressItem : BaseBackupProgressItem
if (!_coreBaseSettings.Standalone)
{
var backupHash = BackupWorker.GetBackupHash(tempFile);
var record = await _backupRepository.GetBackupRecordAsync(backupHash, TenantId);
var shaHash = BackupWorker.GetBackupHashSHA(tempFile);
var record = await _backupRepository.GetBackupRecordAsync(shaHash, TenantId);
if (record == null)
{
throw new Exception(BackupResource.BackupNotFound);
{
var md5Hash = BackupWorker.GetBackupHashMD5(tempFile, S3Storage.ChunkSize);
record = await _backupRepository.GetBackupRecordAsync(md5Hash, TenantId);
if (record == null)
{
throw new Exception(BackupResource.BackupNotFound);
}
}
}
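
The restore lookup above now tries the SHA-256 hash first and falls back to the multipart-MD5 hash, evidently to match records whose stored hash is the S3 multipart ETag rather than a SHA-256 of the file. Condensed sketch of the same logic (repo stands in for _backupRepository):

var record = await repo.GetBackupRecordAsync(BackupWorker.GetBackupHashSHA(tempFile), TenantId)
          ?? await repo.GetBackupRecordAsync(BackupWorker.GetBackupHashMD5(tempFile, S3Storage.ChunkSize), TenantId);
if (record == null)
{
    throw new Exception(BackupResource.BackupNotFound);
}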

View File

@ -119,6 +119,11 @@ public class ConsumerBackupStorage : IBackupStorage, IGetterWriteOperator
TempPath = title,
UploadId = await _store.InitiateChunkedUploadAsync(Domain, title)
};
return _store.CreateDataWriteOperator(session, _sessionHolder);
return _store.CreateDataWriteOperator(session, _sessionHolder, true);
}
public Task<string> GetBackupExtensionAsync(string storageBasePath)
{
return Task.FromResult(_store.GetBackupExtension(true));
}
}

View File

@ -192,7 +192,6 @@ public class DocumentsBackupStorage : IBackupStorage, IGetterWriteOperator
var fileDao = await GetFileDaoAsync<T>();
try
{
var file = await fileDao.GetFileAsync(fileId);
return file != null && file.RootFolderType != FolderType.TRASH;
@ -229,6 +228,21 @@ public class DocumentsBackupStorage : IBackupStorage, IGetterWriteOperator
}
}
public async Task<string> GetBackupExtensionAsync(string storageBasePath)
{
await _tenantManager.SetCurrentTenantAsync(_tenantId);
if (int.TryParse(storageBasePath, out var fId))
{
var folderDao = GetFolderDao<int>();
return await folderDao.GetBackupExtensionAsync(fId);
}
else
{
var folderDao = GetFolderDao<string>();
return await folderDao.GetBackupExtensionAsync(storageBasePath);
}
}
private async Task<CommonChunkedUploadSession> InitUploadChunkAsync<T>(T folderId, string title)
{
var folderDao = GetFolderDao<T>();

View File

@ -71,4 +71,9 @@ public class LocalBackupStorage : IBackupStorage, IGetterWriteOperator
{
return Task.FromResult<IDataWriteOperator>(null);
}
public Task<string> GetBackupExtensionAsync(string storageBasePath)
{
return Task.FromResult("tar.gz");
}
}

View File

@ -90,7 +90,7 @@ public class RestorePortalTask : PortalTaskBase
_options.DebugBeginRestoreData();
using (var dataReader = new ZipReadOperator(BackupFilePath))
using (var dataReader = DataOperatorFactory.GetReadOperator(BackupFilePath))
{
await using (var entry = dataReader.GetEntry(KeyHelper.GetDumpKey()))
{

View File

@ -96,7 +96,7 @@ public class TransferPortalTask : PortalTaskBase
//save db data to temporary file
var backupTask = _serviceProvider.GetService<BackupPortalTask>();
backupTask.Init(TenantId, backupFilePath, Limit, ZipWriteOperatorFactory.GetDefaultWriteOperator(_tempStream, backupFilePath));
backupTask.Init(TenantId, backupFilePath, Limit, DataOperatorFactory.GetDefaultWriteOperator(_tempStream, backupFilePath));
backupTask.ProcessStorage = false;
backupTask.ProgressChanged += (sender, args) => SetCurrentStepProgress(args.Progress);
foreach (var moduleName in _ignoredModules)

View File

@ -201,11 +201,16 @@ public abstract class BaseStorage : IDataStore
public virtual IDataWriteOperator CreateDataWriteOperator(
CommonChunkedUploadSession chunkedUploadSession,
CommonChunkedUploadSessionHolder sessionHolder)
CommonChunkedUploadSessionHolder sessionHolder, bool isConsumerStorage = false)
{
return new ChunkZipWriteOperator(_tempStream, chunkedUploadSession, sessionHolder);
}
public virtual string GetBackupExtension(bool isConsumerStorage = false)
{
return "tar.gz";
}
#endregion
public abstract Task DeleteAsync(string domain, string path);

View File

@ -30,7 +30,7 @@ global using System.Net;
global using System.Net.Http.Headers;
global using System.Runtime.Serialization;
global using System.Security.Cryptography;
global using System.ServiceModel;
global using System.Text;
global using System.Text.Json;
global using System.Text.Json.Serialization;
@ -39,7 +39,10 @@ global using System.Web;
global using Amazon;
global using Amazon.CloudFront;
global using Amazon.CloudFront.Model;
global using Amazon.Extensions.S3.Encryption;
global using Amazon.Extensions.S3.Encryption.Primitives;
global using Amazon.S3;
global using Amazon.S3.Internal;
global using Amazon.S3.Model;
global using Amazon.S3.Transfer;
global using Amazon.Util;
@ -66,6 +69,7 @@ global using ASC.Data.Storage.GoogleCloud;
global using ASC.Data.Storage.Log;
global using ASC.Data.Storage.RackspaceCloud;
global using ASC.Data.Storage.S3;
global using ASC.Data.Storage.Tar;
global using ASC.Data.Storage.ZipOperators;
global using ASC.EventBus.Events;
global using ASC.Notify.Messages;

View File

@ -33,7 +33,9 @@ public interface IDataStore
{
IDataWriteOperator CreateDataWriteOperator(
CommonChunkedUploadSession chunkedUploadSession,
CommonChunkedUploadSessionHolder sessionHolder);
CommonChunkedUploadSessionHolder sessionHolder,
bool isConsumerStorage = false);
string GetBackupExtension(bool isConsumerStorage = false);
IQuotaController QuotaController { get; set; }

View File

@ -24,17 +24,12 @@
// content are licensed under the terms of the Creative Commons Attribution-ShareAlike 4.0
// International. See the License terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode
using Amazon.Extensions.S3.Encryption;
using Amazon.Extensions.S3.Encryption.Primitives;
using Amazon.S3.Internal;
using ASC.Data.Storage.Tar;
namespace ASC.Data.Storage.S3;
[Scope]
public class S3Storage : BaseStorage
{
public static long ChunkSize { get; } = 50 * 1024 * 1024;
public override bool IsSupportChunking => true;
private readonly List<string> _domains = new List<string>();
@ -58,7 +53,6 @@ public class S3Storage : BaseStorage
private EncryptionMethod _encryptionMethod = EncryptionMethod.None;
private string _encryptionKey;
private readonly IConfiguration _configuration;
private readonly CoreBaseSettings _coreBaseSettings;
public S3Storage(
@ -70,13 +64,11 @@ public class S3Storage : BaseStorage
ILoggerProvider factory,
ILogger<S3Storage> options,
IHttpClientFactory clientFactory,
IConfiguration configuration,
TenantQuotaFeatureStatHelper tenantQuotaFeatureStatHelper,
QuotaSocketManager quotaSocketManager,
CoreBaseSettings coreBaseSettings)
: base(tempStream, tenantManager, pathUtils, emailValidationKeyProvider, httpContextAccessor, factory, options, clientFactory, tenantQuotaFeatureStatHelper, quotaSocketManager)
{
_configuration = configuration;
_coreBaseSettings = coreBaseSettings;
}
@ -396,9 +388,9 @@ public class S3Storage : BaseStorage
}
public override IDataWriteOperator CreateDataWriteOperator(CommonChunkedUploadSession chunkedUploadSession,
CommonChunkedUploadSessionHolder sessionHolder)
CommonChunkedUploadSessionHolder sessionHolder, bool isConsumerStorage = false)
{
if (_coreBaseSettings.Standalone)
if (_coreBaseSettings.Standalone || isConsumerStorage)
{
return new S3ZipWriteOperator(_tempStream, chunkedUploadSession, sessionHolder);
}
@ -408,6 +400,18 @@ public class S3Storage : BaseStorage
}
}
public override string GetBackupExtension(bool isConsumerStorage = false)
{
if (_coreBaseSettings.Standalone || isConsumerStorage)
{
return "tar.gz";
}
else
{
return "tar";
}
}
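
The extension now depends on where the archive is assembled: standalone and consumer-storage backups are written locally through gzip ("tar.gz"), while SaaS backups hosted on S3 are stitched together server-side with multipart copy, which copies bytes verbatim, so they stay uncompressed ("tar"). Usage sketch; store and tenantAlias are illustrative stand-ins:

var ext = store.GetBackupExtension(isConsumerStorage: false); // "tar" on S3 in SaaS mode, otherwise "tar.gz"
var backupName = $"{tenantAlias}_{DateTime.UtcNow:yyyy-MM-dd_HH-mm-ss}.{ext}";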
#endregion
public override async Task DeleteAsync(string domain, string path)
@ -1229,7 +1233,7 @@ public class S3Storage : BaseStorage
var uploadId = initResponse.UploadId;
var partSize = GetChunkSize();
var partSize = ChunkSize;
long bytePosition = 0;
for (var i = 1; bytePosition < objectSize; i++)
@ -1341,14 +1345,7 @@ public class S3Storage : BaseStorage
UploadId = uploadId,
PartETags = eTags
};
try
{
await s3.CompleteMultipartUploadAsync(completeRequest);
}
catch(Exception e)
{
}
await s3.CompleteMultipartUploadAsync(completeRequest);
}
public async Task ConcatFileAsync(string pathFile, string tarKey, string destinationDomain, string destinationKey, string uploadId, List<PartETag> eTags, int partNumber)
@ -1459,7 +1456,24 @@ public class S3Storage : BaseStorage
await s3.CompleteMultipartUploadAsync(completeRequest);
}
public async Task<(string uploadId, List<PartETag> eTags, int partNumber)> InitiateConcatAsync(string domain, string key, bool removeEmptyHeader = false)
public async Task RemoveFirstBlockAsync(string domain, string key)
{
using var s3 = GetClient();
var path = MakePath(domain, key);
(var uploadId, var eTags, var partNumber) = await InitiateConcatAsync(domain, key, true, true);
var completeRequest = new CompleteMultipartUploadRequest
{
BucketName = _bucket,
Key = path,
UploadId = uploadId,
PartETags = eTags
};
await s3.CompleteMultipartUploadAsync(completeRequest);
}
public async Task<(string uploadId, List<PartETag> eTags, int partNumber)> InitiateConcatAsync(string domain, string key, bool removeFirstBlock = false, bool lastInit = false)
{
using var s3 = GetClient();
@ -1472,13 +1486,18 @@ public class S3Storage : BaseStorage
};
var initResponse = await s3.InitiateMultipartUploadAsync(initiateRequest);
var eTags = new List<PartETag>();
try
{
long bytePosition = removeEmptyHeader ? 5 * 1024 * 1024 : 0;
var mb5 = 5 * 1024 * 1024;
long bytePosition = removeFirstBlock ? mb5 : 0;
var obj = await s3.GetObjectMetadataAsync(_bucket, key);
var eTags = new List<PartETag>();
var partSize = 5 * 1024 * 1024;
if (obj.ContentLength < partSize)
var objectSize = obj.ContentLength;
var partSize = ChunkSize;
var partNumber = 1;
for (var i = 1; bytePosition < objectSize; i++)
{
var copyRequest = new CopyPartRequest
{
@ -1487,46 +1506,42 @@ public class S3Storage : BaseStorage
SourceBucket = _bucket,
SourceKey = key,
UploadId = initResponse.UploadId,
PartNumber = 1,
FirstByte = bytePosition,
LastByte = obj.ContentLength - 1
LastByte = bytePosition + partSize - 1 >= objectSize ? objectSize - 1 : bytePosition + partSize - 1,
PartNumber = i
};
eTags.Add(new PartETag(1, (await s3.CopyPartAsync(copyRequest)).ETag));
return (initResponse.UploadId, eTags, 2);
}
else
{
var objectSize = obj.ContentLength;
var partNumber = 1;
for (var i = 1; bytePosition < objectSize; i++)
{
var copyRequest = new CopyPartRequest
{
DestinationBucket = _bucket,
DestinationKey = key,
SourceBucket = _bucket,
SourceKey = key,
UploadId = initResponse.UploadId,
FirstByte = bytePosition,
LastByte = bytePosition + partSize - 1 >= objectSize ? objectSize - 1 : bytePosition + partSize - 1,
PartNumber = i
};
partNumber = i + 1;
bytePosition += partSize;
if (objectSize - bytePosition < 5 * 1024 * 1024)
{
copyRequest.LastByte = objectSize - 1;
bytePosition += partSize;
}
eTags.Add(new PartETag(i, (await s3.CopyPartAsync(copyRequest)).ETag));
partNumber = i + 1;
bytePosition += partSize;
var x = objectSize - bytePosition;
if (!lastInit && x < mb5 && x > 0)
{
copyRequest.LastByte = objectSize - 1;
bytePosition += partSize;
}
return (initResponse.UploadId, eTags, partNumber);
eTags.Add(new PartETag(i, (await s3.CopyPartAsync(copyRequest)).ETag));
}
return (initResponse.UploadId, eTags, partNumber);
}
catch
{
return (initResponse.UploadId, new List<PartETag>(), 1);
using var stream = new MemoryStream();
var buffer = new byte[5 * 1024 * 1024];
stream.Write(buffer);
stream.Position = 0;
var uploadRequest = new UploadPartRequest
{
BucketName = _bucket,
Key = key,
UploadId = initResponse.UploadId,
PartNumber = 1,
InputStream = stream
};
eTags.Add(new PartETag(1, (await s3.UploadPartAsync(uploadRequest)).ETag));
return (initResponse.UploadId, eTags, 2);
}
}
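
S3 multipart uploads require every part except the last to be at least 5 MB, which explains the shape of InitiateConcatAsync: a short tail is absorbed into the preceding part unless this is the final pass (lastInit), and a brand-new upload is seeded with a 5 MB zero block so later concatenations always have a valid first part; RemoveFirstBlockAsync above then drops that seed block by re-copying the object from the 5 MB offset. A sketch of the part-splitting rule under those assumptions, not part of the commit:

static IEnumerable<(long First, long Last)> SplitParts(long objectSize, long partSize, bool lastInit)
{
    const long Min = 5 * 1024 * 1024; // S3 minimum for any non-final part
    for (long pos = 0; pos < objectSize; pos += partSize)
    {
        var last = Math.Min(pos + partSize - 1, objectSize - 1);
        var tail = objectSize - (pos + partSize);
        if (!lastInit && tail > 0 && tail < Min)
        {
            yield return (pos, objectSize - 1); // absorb the short tail into this part
            yield break;
        }
        yield return (pos, last);
        if (last == objectSize - 1)
        {
            yield break;
        }
    }
}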
@ -1708,18 +1723,6 @@ public class S3Storage : BaseStorage
return el.ETag;
}
private long GetChunkSize()
{
var configSetting = _configuration["files:uploader:chunk-size"];
if (!string.IsNullOrEmpty(configSetting))
{
configSetting = configSetting.Trim();
return long.Parse(configSetting);
}
long defaultValue = 10 * 1024 * 1024;
return defaultValue;
}
private enum EncryptionMethod
{
None,

View File

@ -26,7 +26,7 @@
namespace ASC.Data.Storage.ZipOperators;
public static class ZipWriteOperatorFactory
public static class DataOperatorFactory
{
public static async Task<IDataWriteOperator> GetWriteOperatorAsync(TempStream tempStream, string storageBasePath, string title, string tempFolder, Guid userId, IGetterWriteOperator getter)
{
@ -39,5 +39,17 @@ public static class ZipWriteOperatorFactory
{
return new ZipWriteOperator(tempStream, backupFilePath);
}
public static IDataReadOperator GetReadOperator(string targetFile)
{
try
{
return new ZipReadOperator(targetFile);
}
catch
{
return new TarReadOperator(targetFile);
}
}
}
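
GetReadOperator probes the archive by trying the gzip reader first and falling back to plain tar when ZipReadOperator throws. An alternative sketch, not in the commit, that sniffs the gzip magic bytes (0x1F 0x8B) instead of relying on the exception:

public static IDataReadOperator GetReadOperatorByMagic(string targetFile)
{
    Span<byte> magic = stackalloc byte[2];
    int read;
    using (var fs = File.OpenRead(targetFile))
    {
        read = fs.Read(magic);
    }
    return read == 2 && magic[0] == 0x1F && magic[1] == 0x8B
        ? (IDataReadOperator)new ZipReadOperator(targetFile) // gzipped tar ("tar.gz")
        : new TarReadOperator(targetFile);                   // plain tar from the S3 concat path
}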

View File

@ -29,4 +29,5 @@ namespace ASC.Data.Storage.ZipOperators;
public interface IGetterWriteOperator
{
Task<IDataWriteOperator> GetWriteOperatorAsync(string storageBasePath, string title, Guid userId);
Task<string> GetBackupExtensionAsync(string storageBasePath);
}

View File

@ -24,17 +24,12 @@
// content are licensed under the terms of the Creative Commons Attribution-ShareAlike 4.0
// International. See the License terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode
using System.IO;
using Google.Apis.Storage.v1.Data;
namespace ASC.Data.Storage.ZipOperators;
internal class S3TarWriteOperator : IDataWriteOperator
public class S3TarWriteOperator : IDataWriteOperator
{
private readonly CommonChunkedUploadSession _chunkedUploadSession;
private readonly CommonChunkedUploadSessionHolder _sessionHolder;
private readonly S3Storage _store;
private bool _first = true;
private readonly string _domain;
private readonly string _key;
@ -50,12 +45,6 @@ internal class S3TarWriteOperator : IDataWriteOperator
_key = _chunkedUploadSession.TempPath;
_domain = string.IsNullOrEmpty(_sessionHolder.TempDomain) ? _sessionHolder.Domain : _sessionHolder.TempDomain;
using var stream = new MemoryStream();
var buffer = new byte[5 * 1024 * 1024];
stream.Write(buffer);
stream.Position = 0;
_sessionHolder.UploadChunkAsync(_chunkedUploadSession, stream, stream.Length).Wait();
}
public async Task WriteEntryAsync(string tarKey, string domain, string path, IDataStore store)
@ -65,7 +54,7 @@ internal class S3TarWriteOperator : IDataWriteOperator
var s3Store = store as S3Storage;
var fullPath = s3Store.MakePath(domain, path);
(var uploadId, var eTags, var chunkNumber) = await GetDataAsync();
(var uploadId, var eTags, var chunkNumber) = await _store.InitiateConcatAsync(_domain, _key);
await _store.ConcatFileAsync(fullPath, tarKey, _domain, _key, uploadId, eTags, chunkNumber);
}
else
@ -85,45 +74,26 @@ internal class S3TarWriteOperator : IDataWriteOperator
public async Task WriteEntryAsync(string tarKey, Stream stream)
{
(var uploadId, var eTags, var chunkNumber) = await GetDataAsync();
(var uploadId, var eTags, var chunkNumber) = await _store.InitiateConcatAsync(_domain, _key);
await _store.ConcatFileStreamAsync(stream, tarKey, _domain, _key, uploadId, eTags, chunkNumber);
}
private async Task<(string uploadId, List<PartETag> eTags, int partNumber)> GetDataAsync()
{
List<PartETag> eTags = null;
var chunkNumber = 0;
string uploadId = null;
if (_first)
{
eTags = _chunkedUploadSession.GetItemOrDefault<Dictionary<int, string>>("ETag").Select(x => new PartETag(x.Key, x.Value)).ToList();
int.TryParse(_chunkedUploadSession.GetItemOrDefault<string>("ChunksUploaded"), out chunkNumber);
chunkNumber++;
uploadId = _chunkedUploadSession.UploadId;
_first = false;
}
else
{
(uploadId, eTags, chunkNumber) = await _store.InitiateConcatAsync(_domain, _key);
}
return (uploadId, eTags, chunkNumber);
}
public async ValueTask DisposeAsync()
{
await _store.AddEndAsync(_domain, _key);
await _store.RemoveFirstBlockAsync(_domain, _key);
var contentLength = await _store.GetFileSizeAsync(_domain, _key);
Hash = (await _store.GetFileEtagAsync(_domain, _key)).Trim('\"');
(var uploadId, var eTags, var partNumber) = await _store.InitiateConcatAsync(_domain, _key, removeEmptyHeader: true);
(var uploadId, var eTags, var partNumber) = await _store.InitiateConcatAsync(_domain, _key, lastInit: true);
_chunkedUploadSession.BytesUploaded = contentLength;
_chunkedUploadSession.BytesTotal = contentLength;
_chunkedUploadSession.UploadId = uploadId;
_chunkedUploadSession.Items["ETag"] = eTags.ToDictionary(e => e.PartNumber, e => e.ETag);
_chunkedUploadSession.Items["ChunksUploaded"] = partNumber.ToString();
StoragePath = await _sessionHolder.FinalizeAsync(_chunkedUploadSession);
_chunkedUploadSession.Items["ChunksUploaded"] = (partNumber - 1).ToString();
Hash = "";
StoragePath = await _sessionHolder.FinalizeAsync(_chunkedUploadSession);
}
}

View File

@ -0,0 +1,72 @@
// (c) Copyright Ascensio System SIA 2010-2022
//
// This program is a free software product.
// You can redistribute it and/or modify it under the terms
// of the GNU Affero General Public License (AGPL) version 3 as published by the Free Software
// Foundation. In accordance with Section 7(a) of the GNU AGPL its Section 15 shall be amended
// to the effect that Ascensio System SIA expressly excludes the warranty of non-infringement of
// any third-party rights.
//
// This program is distributed WITHOUT ANY WARRANTY, without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. For details, see
// the GNU AGPL at: http://www.gnu.org/licenses/agpl-3.0.html
//
// You can contact Ascensio System SIA at Lubanas st. 125a-25, Riga, Latvia, EU, LV-1021.
//
// The interactive user interfaces in modified source and object code versions of the Program must
// display Appropriate Legal Notices, as required under Section 5 of the GNU AGPL version 3.
//
// Pursuant to Section 7(b) of the License you must retain the original Product logo when
// distributing the program. Pursuant to Section 7(e) we decline to grant you any rights under
// trademark law for use of our trademarks.
//
// All the Product's GUI elements, including illustrations and icon sets, as well as technical writing
// content are licensed under the terms of the Creative Commons Attribution-ShareAlike 4.0
// International. See the License terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode
namespace ASC.Data.Storage.ZipOperators;
public class TarReadOperator: IDataReadOperator
{
private readonly string tmpdir;
public TarReadOperator(string targetFile)
{
tmpdir = Path.Combine(Path.GetDirectoryName(targetFile), Path.GetFileNameWithoutExtension(targetFile).Replace('>', '_').Replace(':', '_').Replace('?', '_'));
using (var stream = File.OpenRead(targetFile))
using (var tarOutputStream = TarArchive.CreateInputTarArchive(stream, Encoding.UTF8))
{
tarOutputStream.ExtractContents(tmpdir);
}
File.Delete(targetFile);
}
public Stream GetEntry(string key)
{
var filePath = Path.Combine(tmpdir, key);
return File.Exists(filePath) ? File.Open(filePath, FileMode.Open, FileAccess.ReadWrite, FileShare.Read) : null;
}
public IEnumerable<string> GetEntries(string key)
{
var path = Path.Combine(tmpdir, key);
var files = Directory.EnumerateFiles(path);
return files;
}
public IEnumerable<string> GetDirectories(string key)
{
var path = Path.Combine(tmpdir, key);
var files = Directory.EnumerateDirectories(path);
return files;
}
public void Dispose()
{
if (Directory.Exists(tmpdir))
{
Directory.Delete(tmpdir, true);
}
}
}
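
TarReadOperator extracts the whole archive into a sibling temp directory up front and serves entries from disk; note that its constructor deletes the source file once extraction succeeds, so callers must not expect the archive to survive. Usage sketch; the path and entry key are illustrative:

using (IDataReadOperator reader = DataOperatorFactory.GetReadOperator("/tmp/backup.tar"))
using (var entry = reader.GetEntry("dump/databases"))
{
    if (entry != null)
    {
        // consume the extracted entry stream; Dispose() removes the temp directory
    }
}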

View File

@ -378,7 +378,7 @@ public class MigrationCreator
{
var storage = await _storageFactory.GetStorageAsync(_fromTenantId, group.Key);
var file1 = file;
await ActionInvoker.Try(async state =>
await ActionInvoker.TryAsync(async state =>
{
var f = (BackupFileInfo)state;
using var fileStream = await storage.GetReadStreamAsync(f.Domain, f.Path);

View File

@ -216,6 +216,8 @@ public interface IFolderDao<T>
CommonChunkedUploadSession chunkedUploadSession,
CommonChunkedUploadSessionHolder sessionHolder);
Task<string> GetBackupExtensionAsync(T folderId);
#region Only for TMFolderDao
/// <summary>

View File

@ -1543,57 +1543,9 @@ internal class FolderDao : AbstractDao, IFolderDao<int>
return (await _globalStore.GetStoreAsync()).CreateDataWriteOperator(chunkedUploadSession, sessionHolder);
}
private string GetProjectTitle(object folderID)
public async Task<string> GetBackupExtensionAsync(int folderId)
{
return "";
//if (!ApiServer.Available)
//{
// return string.Empty;
//}
//var cacheKey = "documents/folders/" + folderID.ToString();
//var projectTitle = Convert.ToString(cache.Get<string>(cacheKey));
//if (!string.IsNullOrEmpty(projectTitle)) return projectTitle;
//var bunchObjectID = GetBunchObjectID(folderID);
//if (string.IsNullOrEmpty(bunchObjectID))
// throw new Exception("Bunch Object id is null for " + folderID);
//if (!bunchObjectID.StartsWith("projects/project/"))
// return string.Empty;
//var bunchObjectIDParts = bunchObjectID.Split('/');
//if (bunchObjectIDParts.Length < 3)
// throw new Exception("Bunch object id is not supported format");
//var projectID = Convert.ToInt32(bunchObjectIDParts[bunchObjectIDParts.Length - 1]);
//if (HttpContext.Current == null || !SecurityContext.IsAuthenticated)
// return string.Empty;
//var apiServer = new ApiServer();
//var apiUrl = string.Format("{0}project/{1}.json?fields=id,title", SetupInfo.WebApiBaseUrl, projectID);
//var responseApi = JObject.Parse(Encoding.UTF8.GetString(Convert.FromBase64String(apiServer.GetApiResponse(apiUrl, "GET"))))["response"];
//if (responseApi != null && responseApi.HasValues)
//{
// projectTitle = Global.ReplaceInvalidCharsAndTruncate(responseApi["title"].Value<string>());
//}
//else
//{
// return string.Empty;
//}
//if (!string.IsNullOrEmpty(projectTitle))
//{
// cache.Insert(cacheKey, projectTitle, TimeSpan.FromMinutes(15));
//}
//return projectTitle;
return (await _globalStore.GetStoreAsync()).GetBackupExtension();
}
}

View File

@ -67,7 +67,6 @@ public class ChunkedUploadSession<T> : CommonChunkedUploadSession
chunkedUploadSession.TransformItems();
return chunkedUploadSession;
}
}

View File

@ -466,6 +466,13 @@ internal class ProviderFolderDao : ProviderDaoBase, IFolderDao<string>
return await folderDao.CreateDataWriteOperatorAsync(folderId, chunkedUploadSession, sessionHolder);
}
public async Task<string> GetBackupExtensionAsync(string folderId)
{
var selector = _selectorFactory.GetSelector(folderId);
var folderDao = selector.GetFolderDao(folderId);
return await folderDao.GetBackupExtensionAsync(folderId);
}
private IAsyncEnumerable<Folder<string>> FilterByProvider(IAsyncEnumerable<Folder<string>> folders, ProviderFilter provider)
{
if (provider != ProviderFilter.kDrive && provider != ProviderFilter.WebDav && provider != ProviderFilter.Yandex)

View File

@ -439,6 +439,11 @@ internal class SharePointFolderDao : SharePointDaoBase, IFolderDao<string>
{
return Task.FromResult<IDataWriteOperator>(null);
}
public Task<string> GetBackupExtensionAsync(string folderId)
{
return Task.FromResult("tar.gz");
}
}
static file class Queries

View File

@ -504,6 +504,11 @@ internal class SharpBoxFolderDao : SharpBoxDaoBase, IFolderDao<string>
{
return Task.FromResult<IDataWriteOperator>(null);
}
public Task<string> GetBackupExtensionAsync(string folderId)
{
return Task.FromResult("tar.gz");
}
}
static file class Queries

View File

@ -510,6 +510,11 @@ internal class ThirdPartyFolderDao<TFile, TFolder, TItem> : BaseFolderDao, IFold
return Task.FromResult<IDataWriteOperator>(new ChunkZipWriteOperator(_tempStream, chunkedUploadSession, sessionHolder));
}
public Task<string> GetBackupExtensionAsync(string folderId)
{
return Task.FromResult("tar.gz");
}
public Task ReassignFoldersAsync(Guid oldOwnerId, Guid newOwnerId)
{
return Task.CompletedTask;