Merge branch 'lzo-pull-request' of github.com:BBBSnowball/zbackup into BBBSnowball-lzo-pull-request

Conflicts:
	.gitignore
	CMakeLists.txt
	bundle.cc
	chunk_storage.cc
	zbackup.cc
	zbackup.hh
	zbackup.proto
Am1GO 2014-12-09 15:21:51 +03:00
commit 9e7ebcb03c
14 changed files with 1247 additions and 60 deletions

CMakeLists.txt

@ -1,13 +1,11 @@
# Copyright (c) 2012-2014 Konstantin Isakov <ikm@zbackup.org>
# Part of ZBackup. Licensed under GNU GPLv2 or later + OpenSSL, see LICENSE
# modified by Benjamin Koch <bbbsnowball@gmail.com>
cmake_minimum_required( VERSION 2.8.2 )
project( zbackup )
if( ${CMAKE_VERSION} VERSION_LESS "2.8.9" )
# Use the included FindLibLZMA then
set( CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" )
endif()
set( CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" )
set( CMAKE_BUILD_TYPE Release )
@ -34,6 +32,14 @@ find_package( Threads REQUIRED )
find_package( LibLZMA REQUIRED )
include_directories( ${LIBLZMA_INCLUDE_DIRS} )
find_package( LibLZO COMPONENTS LIBLZO_HAS_LZO1X_DECOMPRESS_SAFE LIBLZO_HAS_LZO1X_1_COMPRESS )
if (LIBLZO_FOUND)
ADD_DEFINITIONS(-DHAVE_LIBLZO)
include_directories( ${LIBLZO_INCLUDE_DIRS} )
else (LIBLZO_FOUND)
set(LIBLZO_LIBRARIES)
endif (LIBLZO_FOUND)
file( GLOB sourceFiles "*.cc" )
add_executable( zbackup ${sourceFiles} ${protoSrcs} ${protoHdrs} )
@ -43,6 +49,7 @@ target_link_libraries( zbackup
${CMAKE_THREAD_LIBS_INIT}
${ZLIB_LIBRARIES}
${LIBLZMA_LIBRARIES}
${LIBLZO_LIBRARIES}
)
install( TARGETS zbackup DESTINATION bin )

README.md

@ -10,7 +10,7 @@ This is achieved by sliding a window with a rolling hash over the input at a byt
The program has the following features:
* Parallel LZMA compression of the stored data
* Parallel LZMA or LZO compression of the stored data
* Built-in AES encryption of the stored data
* Possibility to delete old backup data
* Use of a 64-bit rolling hash, keeping the amount of soft collisions to zero
@ -25,6 +25,7 @@ The program has the following features:
* `libssl-dev` for all encryption, hashing and random numbers
* `libprotobuf-dev` and `protobuf-compiler` for data serialization
* `liblzma-dev` for compression
* `liblzo2-dev` for compression (optional)
* `zlib1g-dev` for adler32 calculation
# Quickstart
@ -139,6 +140,26 @@ All in all, as long as the amount of RAM permits, one can go up to several terab
* `AES-128` in `CBC` mode with `PKCS#7` padding is used for encryption. This seems to be a reasonably safe classic solution. Each encrypted file has a random IV as its first 16 bytes.
* We use Google's [protocol buffers](https://developers.google.com/protocol-buffers/) to represent data structures in binary form. They are very efficient and relatively simple to use.
# Compression
zbackup uses LZMA to compress stored data by default. It compresses very well, but it will slow down your backup unless you have a very fast CPU.

LZO is much faster, but the resulting files are bigger. If you don't want your backup process to be CPU-bound, you should consider using LZO. However, there are some caveats:

1. LZO is so fast that the other parts of zbackup become the bottleneck and consume a significant share of the CPU. In fact, on my machine zbackup then only uses one core, because compression is the only part that runs in parallel.
2. I hacked the LZO support together in a day. You shouldn't trust it. Please make sure that a restore works before you assume your data is safe. That may still be faster than a backup with LZMA ;-)
3. LZMA is still the default, so make sure you pass the `--compression lzo` argument when you init the repo or whenever you do a backup.

You can mix LZMA and LZO in one repository. Each bundle file has a field that records how it was compressed, so zbackup will pick the right method to decompress it. You can take an old zbackup repository that contains only LZMA bundles and start using LZO with it. However, think twice before you do that, because old versions of zbackup won't be able to read the LZO bundles.
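For the curious, here is a rough sketch of how that per-bundle dispatch works. It is simplified from `bundle.cc`; the helper function itself is illustrative and not part of zbackup:

```cpp
// Sketch only, not actual zbackup code: decompress a bundle payload using the
// compression method recorded in its header.
#include <string>
#include "compression.hh"
#include "zbackup.pb.h"   // generated from zbackup.proto, declares BundleFileHeader

std::string decompressPayload( BundleFileHeader const & header,
                               std::string const & compressed,
                               size_t uncompressedSize )
{
  using namespace Compression;

  // Look up the codec by the name stored in the bundle ("lzma" or "lzo1x_1").
  const_sptr< CompressionMethod > method =
    CompressionMethod::findCompression( header.compression_method() );

  sptr< EnDecoder > decoder = method->createDecoder();

  std::string payload( uncompressedSize, '\0' );
  decoder->setInput( compressed.data(), compressed.size() );
  decoder->setOutput( &payload[ 0 ], payload.size() );

  // With all input available and a big enough output buffer this finishes in
  // one call; the real reader loops and refills the buffers piece by piece.
  decoder->process( true );
  return payload;
}
```

Bundles written by older versions have no `compression_method` field, so the protobuf default of `"lzma"` applies and they keep decoding as before.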
# Improvements
There's a lot to be improved in the program. It was released with the minimum amount of functionality to be useful. It is also stable. This should hopefully stimulate people to join the development and add all those other fancy features. Here's a list of ideas:

bundle.cc

@ -1,7 +1,7 @@
// Copyright (c) 2012-2014 Konstantin Isakov <ikm@zbackup.org>
// Part of ZBackup. Licensed under GNU GPLv2 or later + OpenSSL, see LICENSE
// modified by Benjamin Koch <bbbsnowball@gmail.com>
#include <lzma.h>
#include <stdint.h>
#include "bundle.hh"
@ -11,12 +11,21 @@
#include "hex.hh"
#include "message.hh"
#include "adler32.hh"
#include "compression.hh"
namespace Bundle {
enum
{
FileFormatVersion = 1
FileFormatVersion = 1,
// This means we don't use LZMA in this file.
FileFormatVersionNotLZMA,
// <- add more versions here
// This is the first version we do not support.
FileFormatVersionFirstUnsupported
};
void Creator::addChunk( string const & id, void const * data, size_t size )
@ -90,8 +99,18 @@ void Creator::write( std::string const & fileName, EncryptionKey const & key )
os.writeRandomIv();
FileHeader header;
header.set_version( FileFormatVersion );
BundleFileHeader header;
const_sptr<Compression::CompressionMethod> compression = Compression::CompressionMethod::defaultCompression;
header.set_compression_method( compression->getName() );
// The old code only supports LZMA, so we bump the version whenever we're
// using anything else. This makes old versions fail cleanly on such bundles.
if ( compression->getName() == "lzma" )
header.set_version( FileFormatVersion );
else
header.set_version( FileFormatVersionNotLZMA );
Message::serialize( header, os );
Message::serialize( info, os );
@ -99,16 +118,9 @@ void Creator::write( std::string const & fileName, EncryptionKey const & key )
// Compress
uint32_t preset = 6; // TODO: make this customizable, although 6 seems to be
// the best option
lzma_stream strm = LZMA_STREAM_INIT;
lzma_ret ret;
sptr<Compression::EnDecoder> encoder = compression->createEncoder();
ret = lzma_easy_encoder( &strm, preset, LZMA_CHECK_CRC64 );
CHECK( ret == LZMA_OK, "lzma_easy_encoder error: %d", (int) ret );
strm.next_in = ( uint8_t const * ) payload.data();
strm.avail_in = payload.size();
encoder->setInput( payload.data(), payload.size() );
for ( ; ; )
{
@ -117,30 +129,22 @@ void Creator::write( std::string const & fileName, EncryptionKey const & key )
int size;
if ( !os.Next( &data, &size ) )
{
lzma_end( &strm );
throw exBundleWriteFailed();
}
if ( !size )
continue;
strm.next_out = ( uint8_t * ) data;
strm.avail_out = size;
encoder->setOutput( data, size );
}
// Perform the compression
ret = lzma_code( &strm, LZMA_FINISH );
if ( ret == LZMA_STREAM_END )
if ( encoder->process(true) )
{
if ( strm.avail_out )
os.BackUp( strm.avail_out );
if ( encoder->getAvailableOutput() )
os.BackUp( encoder->getAvailableOutput() );
break;
}
CHECK( ret == LZMA_OK, "lzma_code error: %d", (int) ret );
}
lzma_end( &strm );
os.writeAdler32();
}
@ -149,10 +153,10 @@ Reader::Reader( string const & fileName, EncryptionKey const & key, bool prohibi
{
is.consumeRandomIv();
FileHeader header;
BundleFileHeader header;
Message::parse( header, is );
if ( header.version() != FileFormatVersion )
if ( header.version() >= FileFormatVersionFirstUnsupported )
throw exUnsupportedVersion();
Message::parse( info, is );
@ -167,15 +171,10 @@ Reader::Reader( string const & fileName, EncryptionKey const & key, bool prohibi
if ( prohibitProcessing )
return;
lzma_stream strm = LZMA_STREAM_INIT;
sptr<Compression::EnDecoder> decoder = Compression::CompressionMethod::findCompression(
header.compression_method() )->createDecoder();
lzma_ret ret;
ret = lzma_stream_decoder( &strm, UINT64_MAX, 0 );
CHECK( ret == LZMA_OK,"lzma_stream_decoder error: %d", (int) ret );
strm.next_out = ( uint8_t * ) &payload[ 0 ];
strm.avail_out = payload.size();
decoder->setOutput( &payload[ 0 ], payload.size() );
for ( ; ; )
{
@ -184,36 +183,26 @@ Reader::Reader( string const & fileName, EncryptionKey const & key, bool prohibi
int size;
if ( !is.Next( &data, &size ) )
{
lzma_end( &strm );
throw exBundleReadFailed();
}
if ( !size )
continue;
strm.next_in = ( uint8_t const * ) data;
strm.avail_in = size;
decoder->setInput( data, size );
}
ret = lzma_code( &strm, LZMA_RUN );
if ( ret == LZMA_STREAM_END )
{
if ( strm.avail_in )
is.BackUp( strm.avail_in );
if ( decoder->process(false) ) {
if ( decoder->getAvailableInput() )
is.BackUp( decoder->getAvailableInput() );
break;
}
CHECK( ret == LZMA_OK, "lzma_code error: %d", (int) ret );
if ( !strm.avail_out && strm.avail_in )
if ( !decoder->getAvailableOutput() && decoder->getAvailableInput() )
{
// Apparently we have more data than we were expecting
lzma_end( &strm );
throw exTooMuchData();
}
}
lzma_end( &strm );
is.checkAdler32();
// Populate the map

chunk_storage.cc

@ -1,5 +1,6 @@
// Copyright (c) 2012-2014 Konstantin Isakov <ikm@zbackup.org>
// Part of ZBackup. Licensed under GNU GPLv2 or later + OpenSSL, see LICENSE
// modified by Benjamin Koch <bbbsnowball@gmail.com>
#include "check.hh"
#include "chunk_storage.hh"

cmake/FindLibLZO.cmake (new file, 111 lines)

@ -0,0 +1,111 @@
# - Find LibLZO
# Find LibLZO headers and library
#
# LIBLZO_FOUND - True if liblzo is found.
# LIBLZO_INCLUDE_DIRS - Directory where liblzo headers are located.
# LIBLZO_LIBRARIES - LZO libraries to link against.
# LIBLZO_HAS_LZO1X_DECOMPRESS_SAFE - True if lzo1x_decompress_safe() is found (required).
# LIBLZO_HAS_LZO1X_1_COMPRESS - True if lzo1x_1_compress() is found (required).
# LIBLZO_VERSION_MAJOR - The major version of lzo
# LIBLZO_VERSION_MINOR - The minor version of lzo
# LIBLZO_VERSION_PATCH - The patch version of lzo
# LIBLZO_VERSION_STRING - version number as a string (ex: "5.0.3")
#=============================================================================
# Copyright 2008 Per Øyvind Karlsen <peroyvind@mandriva.org>
# Copyright 2009 Alexander Neundorf <neundorf@kde.org>
# Copyright 2009 Helio Chissini de Castro <helio@kde.org>
# Copyright 2012 Mario Bensi <mbensi@ipsquad.net>
# Adapted for liblzo (instead of liblzma) by Benjamin Koch <bbbsnowball@gmail.com>
#
# Distributed under the OSI-approved BSD License (the "License"):
#
# CMake - Cross Platform Makefile Generator
# Copyright 2000-2011 Kitware, Inc., Insight Software Consortium
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the names of Kitware, Inc., the Insight Software Consortium,
# nor the names of their contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
#
# The above copyright and license notice applies to distributions of
# CMake in source and binary form. Some source files contain additional
# notices of original copyright by their contributors; see each source
# for details. Third-party software packages supplied with CMake under
# compatible licenses provide their own copyright notices documented in
# corresponding subdirectories.
#
# ------------------------------------------------------------------------------
#
# CMake was initially developed by Kitware with the following sponsorship:
#
# * National Library of Medicine at the National Institutes of Health
# as part of the Insight Segmentation and Registration Toolkit (ITK).
#
# * US National Labs (Los Alamos, Livermore, Sandia) ASC Parallel
# Visualization Initiative.
#
# * National Alliance for Medical Image Computing (NAMIC) is funded by the
# National Institutes of Health through the NIH Roadmap for Medical Research,
# Grant U54 EB005149.
#
# * Kitware, Inc.
#=============================================================================
find_path(LIBLZO_INCLUDE_DIR lzo/lzo1x.h )
find_library(LIBLZO_LIBRARY lzo2)
if(LIBLZO_INCLUDE_DIR AND EXISTS "${LIBLZO_INCLUDE_DIR}/lzo/lzoconf.h")
file(STRINGS "${LIBLZO_INCLUDE_DIR}/lzo/lzoconf.h" LIBLZO_HEADER_CONTENTS REGEX "#define LZO_VERSION_STRING.+\"[^\"]+\"")
string(REGEX REPLACE ".*#define LZO_VERSION_STRING.+\"([^\"]+)\".*" "\\1" LIBLZO_VERSION_STRING "${LIBLZO_HEADER_CONTENTS}")
unset(LIBLZO_HEADER_CONTENTS)
endif()
# We're just using two functions.
if (LIBLZO_LIBRARY)
include(CheckLibraryExists)
CHECK_LIBRARY_EXISTS(${LIBLZO_LIBRARY} lzo1x_decompress_safe "" LIBLZO_HAS_LZO1X_DECOMPRESS_SAFE)
CHECK_LIBRARY_EXISTS(${LIBLZO_LIBRARY} lzo1x_1_compress "" LIBLZO_HAS_LZO1X_1_COMPRESS)
endif ()
include(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(LibLZO DEFAULT_MSG LIBLZO_INCLUDE_DIR
LIBLZO_LIBRARY
LIBLZO_HAS_LZO1X_DECOMPRESS_SAFE
LIBLZO_HAS_LZO1X_1_COMPRESS
)
if (LIBLZO_FOUND)
set(LIBLZO_LIBRARIES ${LIBLZO_LIBRARY})
set(LIBLZO_INCLUDE_DIRS ${LIBLZO_INCLUDE_DIR})
endif ()
mark_as_advanced( LIBLZO_INCLUDE_DIR LIBLZO_LIBRARY )

compression.cc (new file, 665 lines)

@ -0,0 +1,665 @@
// Copyright (c) 2013 Benjamin Koch <bbbsnowball@gmail.com>
// Part of ZBackup. Licensed under GNU GPLv2 or later + OpenSSL, see LICENSE
#include <string>
#include "compression.hh"
#include "check.hh"
namespace Compression {
EnDecoder::EnDecoder() { }
EnDecoder::~EnDecoder() { }
CompressionMethod::~CompressionMethod() { }
// LZMA
#include <lzma.h>
class LZMAEnDecoder : public EnDecoder
{
protected:
static lzma_stream initValue;
lzma_stream strm;
public:
LZMAEnDecoder()
{
strm = initValue;
}
void setInput( const void* data, size_t size )
{
strm.next_in = (const uint8_t *) data;
strm.avail_in = size;
}
void setOutput( void* data, size_t size )
{
strm.next_out = (uint8_t *) data;
strm.avail_out = size;
}
size_t getAvailableInput()
{
return strm.avail_in;
}
size_t getAvailableOutput()
{
return strm.avail_out;
}
bool process( bool finish )
{
lzma_ret ret = lzma_code( &strm, ( finish ? LZMA_FINISH : LZMA_RUN ) );
CHECK( ret == LZMA_OK || ret == LZMA_STREAM_END, "lzma_code error: %d", (int) ret );
return ( ret == LZMA_STREAM_END );
}
};
lzma_stream LZMAEnDecoder::initValue = LZMA_STREAM_INIT;
class LZMAEncoder : public LZMAEnDecoder
{
public:
LZMAEncoder()
{
uint32_t preset = 6; // TODO: make this customizable, although 6 seems to be
// the best option
lzma_ret ret = lzma_easy_encoder( &strm, preset, LZMA_CHECK_CRC64 );
CHECK( ret == LZMA_OK, "lzma_easy_encoder error: %d", (int) ret );
}
};
class LZMADecoder : public LZMAEnDecoder
{
public:
LZMADecoder()
{
lzma_ret ret = lzma_stream_decoder( &strm, UINT64_MAX, 0 );
CHECK( ret == LZMA_OK,"lzma_stream_decoder error: %d", (int) ret );
}
};
class LZMACompression : public CompressionMethod
{
public:
sptr<EnDecoder> createEncoder() const
{
return new LZMAEncoder();
}
sptr<EnDecoder> createDecoder() const
{
return new LZMADecoder();
}
std::string getName() const { return "lzma"; }
};
// LZO
// liblzo implements a lot of algorithms "for unlimited backward compatibility"
// The web site says:
// "My experiments have shown that LZO1B is good with a large blocksize
// or with very redundant data, LZO1F is good with a small blocksize or
// with binary data and that LZO1X is often the best choice of all.
// LZO1Y and LZO1Z are almost identical to LZO1X - they can achieve a
// better compression ratio on some files.
// Beware, your mileage may vary."
// => I'm using LZO1X, as suggested
#include <string.h>
// Unfortunately, liblzo always works with the whole data, so it doesn't support
// the streaming approach that most other libraries use. This means that we have
// to use a big buffer for the data. The class NoStreamEnDecoder implements this
// so we can use it, if there is another library like liblzo.
// Collect all data and process it in one pass
class NoStreamEnDecoder : public EnDecoder
{
std::string accDataIn, accDataOut;
const char* dataIn;
char* dataOut;
size_t availIn, availOut;
bool processed;
size_t posInAccDataOut;
protected:
// you must implement these:
// Should we try with the existing output buffer which has availOut
// bytes of free space? If you know that this will fail, return false.
// You may peek into dataIn which contains the complete compressed data.
virtual bool shouldTryWith( const char* dataIn, size_t availIn, size_t availOut ) =0;
// We will allocate a buffer for the output data. How big should it be?
// You may peek into dataIn which contains the complete compressed data.
virtual size_t suggestOutputSize( const char* dataIn, size_t availIn ) =0;
// Is this input complete?
// An encoder should return false.
virtual bool isCompleteInput( const char* dataIn, size_t availIn ) =0;
// Process the data in dataIn and put the result into dataOut. You musn't
// write more than availOut bytes! If the output buffer is big enough,
// process the data and store the output size in outputSize. If the output
// buffer is too small, return false and we will give you a bigger one. If
// any other error occurrs, abort the program. We don't have any better
// error handling. Sorry. Do NOT return false for errors that won't be
// remedied by a bigger buffer!
virtual bool doProcess( const char* dataIn, size_t availIn,
char* dataOut, size_t availOut, size_t& outputSize ) =0;
void setUnusedInput( size_t unused )
{
this->dataIn += availIn - unused;
this->availIn = unused;
}
public:
NoStreamEnDecoder()
{
dataIn = dataOut = NULL;
availIn = availOut = posInAccDataOut = 0;
processed = false;
}
void setInput( const void* data, size_t size )
{
dataIn = (const char *) data;
availIn = size;
}
void setOutput( void* data, size_t size )
{
dataOut = (char *) data;
availOut = size;
}
size_t getAvailableInput()
{
return availIn;
}
size_t getAvailableOutput()
{
return availOut;
}
bool process( bool finish )
{
// try to process the input, if we haven't done it, yet
if ( !processed )
{
// data has not been encoded
if ( accDataIn.empty() )
{
// this is the first piece of data
if ( finish || isCompleteInput( dataIn, availIn ) )
{
// special case: all the data has been passed at once
// -> process it without using accDataIn
processFinish( dataIn, availIn );
}
}
// if we didn't process the data, put it into accumulator
if ( !processed )
{
// accumulate data in accDataIn
accDataIn.append( dataIn, availIn );
// If this was the last bit of data, we process it, now.
if ( finish || isCompleteInput( accDataIn.data(), accDataIn.size() ) )
{
processFinish( accDataIn.data(), accDataIn.size() );
}
}
}
// If the input has been processed, try to copy some of it to the output buffer.
if ( processed )
{
// data has been encoded or decoded, remaining output is in accDataOut
// -> copy to output
if (availOut > 0 && accDataOut.size() - posInAccDataOut > 0)
{
size_t sz = availOut;
if ( sz > accDataOut.size() - posInAccDataOut )
sz = accDataOut.size() - posInAccDataOut;
memcpy( dataOut, accDataOut.data() + posInAccDataOut, sz );
dataOut += sz;
availOut -= sz;
posInAccDataOut += sz;
}
// no more data left? -> return true
return ( accDataOut.size() - posInAccDataOut == 0 );
}
else
{
// not yet processed, so we cannot be done
return false;
}
}
private:
void processFinish( const char* dataIn, size_t availIn )
{
// should we try with the existing output buffer?
if ( shouldTryWith( dataIn, availIn, availOut ) )
{
size_t outputSize;
if ( doProcess( dataIn, availIn, dataOut, availOut, outputSize ) )
{
// it worked :-)
processed = true;
availOut -= outputSize;
return ;
}
}
// we use our own buffer
size_t bufferSize = suggestOutputSize( dataIn, availIn );
do {
accDataOut.resize(bufferSize);
size_t outputSize;
//TODO doc says we mustn't modify the pointer returned by data()...
if ( doProcess( dataIn, availIn,
(char*) accDataOut.data(), bufferSize, outputSize ) )
{
// buffer is big enough
accDataOut.resize( outputSize );
processed = true;
return ;
}
// try a bigger one
bufferSize *= 2;
} while (true);
}
};
#include <endian.h>
// like NoStreamEnDecoder, but also adds the uncompressed size before the stream
//NOTE You should make sure that the compression function doesn't overwrite any
// memory, if this information is corrupted! This could be exploited by a
// malicious person and there is nothing I can do about it. I could check for
// an overflow, but when control gets back to this class, it is already too
// late, as one 'ret' instruction is enough to do harm.
class NoStreamAndUnknownSizeDecoder : public NoStreamEnDecoder
{
protected:
// You implement this one:
// If you don't know the real decoded size, don't change outputSize.
virtual bool doProcessNoSize( const char* dataIn, size_t availIn,
char* dataOut, size_t availOut, size_t& outputSize ) =0;
bool shouldTryWith( const char* dataIn, size_t availIn, size_t availOut )
{
return suggestOutputSize( dataIn, availIn ) <= availOut;
}
// Is this input complete?
bool isCompleteInput( const char* dataIn, size_t availIn )
{
if ( availIn < 2*sizeof(uint64_t) )
return false;
dataIn += sizeof(uint64_t);
size_t inputSize = le32toh( *(uint32_t*) dataIn );
return ( availIn >= inputSize + 2*sizeof(uint64_t) );
}
size_t suggestOutputSize( const char* dataIn, size_t availIn )
{
CHECK( availIn >= sizeof(uint64_t), "not enough input data" );
// We're not using size_t because we need a type that has the same size on all
// architectures. A 32-bit host won't be able to open files with more than
// 4GB (actually much less), so 4 byte are enough. Even a 64-bit host would
// have some trouble with allocating 8GB of RAM just for our buffers ;-)
//NOTE If your compiler doesn't accept this cast, your size_t is smaller than
// uint32_t. In that case, you are in trouble...
size_t outputSize = le32toh( *(uint32_t*) dataIn );
return outputSize;
}
bool doProcess( const char* dataIn, size_t availIn,
char* dataOut, size_t availOut, size_t& outputSize )
{
if ( availIn < 2*sizeof( uint64_t ) )
return false;
//NOTE We skip 8 bytes. If we later decide to drop compatibility with 32-bit
// hosts, we can save a 64-bit size. Well, that will be much later, when
// we can easily hold two copies of a 4GB file in main memory :-D
size_t neededOutputSize = le32toh( *(uint32_t*) dataIn );
dataIn += sizeof(uint64_t);
size_t inputSize = le32toh( *(uint32_t*) dataIn );
dataIn += sizeof(uint64_t);
if ( availOut < neededOutputSize )
return false;
availIn -= 2*sizeof( uint64_t );
// We might not need all of our input data.
setUnusedInput( availIn - inputSize );
availIn = inputSize;
size_t reportedOutputSize = neededOutputSize;
if ( !doProcessNoSize( dataIn, availIn, dataOut, availOut, reportedOutputSize ) )
return false;
CHECK( reportedOutputSize == neededOutputSize,
"Size of decoded data is different than expected" );
outputSize = neededOutputSize;
return true;
}
};
// encoder for NoStreamAndUnknownSizeDecoder
class NoStreamAndUnknownSizeEncoder : public NoStreamEnDecoder
{
protected:
// You implement this one:
virtual bool doProcessNoSize( const char* dataIn, size_t availIn,
char* dataOut, size_t availOut, size_t& outputSize ) =0;
bool shouldTryWith( const char*, size_t, size_t availOut )
{
// Only worth trying if there is at least some room beyond the size header.
return availOut > sizeof( uint64_t );
}
bool isCompleteInput( const char* dataIn, size_t availIn )
{
// We cannot know whether the user wants to send more data.
// -> return false; user must use finish=true to signal end of data
return false;
}
size_t getOverhead()
{
return 2*sizeof( uint64_t );
}
size_t suggestOutputSize( const char*, size_t availIn )
{
// We assume that the compression won't make the data any bigger.
return availIn + getOverhead();
}
bool doProcess( const char* dataIn, size_t availIn,
char* dataOut, size_t availOut, size_t& outputSize )
{
CHECK( availIn <= UINT32_MAX,
"You want to compress more than 4GB of data?! Sorry, we don't support that, yet." );
memcpy(dataOut, "ABCDEFGHIJKLMNOP", 16);
// store size
*(uint32_t*)dataOut = htole32( availIn );
uint32_t* compressedSize = (uint32_t*) ( dataOut + sizeof( uint64_t ) );
// compressed data goes after the size
// We skip more than we actually use; see NoStreamAndUnknownSizeDecoder::doProcess(...).
dataOut += getOverhead();
availOut -= getOverhead();
if ( !doProcessNoSize( dataIn, availIn, dataOut, availOut, outputSize ) )
return false;
CHECK( outputSize <= UINT32_MAX,
"The compressed data is more than 4GB?! Sorry, we don't support that, yet." );
*compressedSize = htole32( (uint32_t) outputSize );
outputSize += getOverhead();
return true;
}
};
#ifdef HAVE_LIBLZO
#include <lzo/lzo1x.h>
// finally, we can implement lzo
class LZO1X_1_Decoder : public NoStreamAndUnknownSizeDecoder
{
protected:
bool doProcessNoSize( const char* dataIn, size_t availIn,
char* dataOut, size_t availOut, size_t& outputSize )
{
// same argument is used for available output size and size of decompressed data
outputSize = availOut;
int ret = lzo1x_decompress_safe( (const lzo_bytep) dataIn, availIn,
(lzo_bytep) dataOut, (lzo_uintp) &outputSize, NULL );
if ( ret == LZO_E_OUTPUT_OVERRUN )
return false;
CHECK( ret >= LZO_E_OK, "lzo1x_decompress_safe failed (code %d)", ret );
return true;
}
};
class LZO1X_1_Compression;
class LZO1X_1_Encoder : public NoStreamAndUnknownSizeEncoder
{
const LZO1X_1_Compression* compression;
static size_t calcMaxCompressedSize(size_t availIn);
public:
LZO1X_1_Encoder(const LZO1X_1_Compression* compression)
{
this->compression = compression;
}
protected:
bool doProcessNoSize( const char* dataIn, size_t availIn,
char* dataOut, size_t availOut, size_t& outputSize );
bool shouldTryWith( const char*, size_t, size_t availOut );
size_t suggestOutputSize( const char*, size_t availIn );
};
class LZO1X_1_Compression : public CompressionMethod
{
static bool initialized;
static void init()
{
//TODO This is not thread-safe. Does it have to be?
if (!initialized)
{
int ret = lzo_init();
CHECK( ret == LZO_E_OK, "lzo_init failed (%d)", ret );
initialized = true;
}
}
public:
sptr<EnDecoder> createEncoder() const
{
init();
return new LZO1X_1_Encoder(this);
}
sptr<EnDecoder> createDecoder() const
{
init();
return new LZO1X_1_Decoder();
}
std::string getName() const { return "lzo1x_1"; }
lzo_voidp getWorkmem( size_t size ) const
{
return new char[size];
}
void giveBackWorkmem( lzo_voidp wrkmem ) const
{
//TODO I think we should keep the memory around and reuse it. After all
// it is only a few kilobytes and we will need it a lot. However, I
// won't risk anything here because I don't know whether this will be
// called by more than one thread.
delete[] (char*)wrkmem;
}
};
bool LZO1X_1_Compression::initialized = false;
size_t LZO1X_1_Encoder::calcMaxCompressedSize( size_t availIn )
{
// It seems that lzo1x_1_compress does NOT check whether the buffer is big enough.
// The documentation refers to example/simple.c which says:
// "Because the input block may be incompressible, we must provide a little more
// output space in case that compression is not possible."
// -> We use the same formula.
return (availIn + availIn / 16 + 64 + 3);
}
bool LZO1X_1_Encoder::shouldTryWith( const char* dataIn, size_t availIn, size_t availOut )
{
return availOut >= suggestOutputSize( dataIn, availIn );
}
size_t LZO1X_1_Encoder::suggestOutputSize( const char*, size_t availIn )
{
// It seems that lzo1x_1_compress does NOT check whether the buffer is big enough.
// The documentation refers to example/simple.c which says:
// "Because the input block may be incompressible, we must provide a little more
// output space in case that compression is not possible."
// -> We use the same formula.
return calcMaxCompressedSize( availIn ) + getOverhead();
}
bool LZO1X_1_Encoder::doProcessNoSize( const char* dataIn, size_t availIn,
char* dataOut, size_t availOut, size_t& outputSize )
{
// It seems that lzo1x_1_compress does NOT check whether the buffer is big enough.
// Therefore, we won't try it unless we are sure that the buffer is big enough.
if ( availOut < calcMaxCompressedSize( availIn ) )
return false;
// same argument is used for available output size (haha, see above)
// and size of decompressed data
outputSize = availOut;
lzo_voidp wrkmem = compression->getWorkmem(LZO1X_1_MEM_COMPRESS);
int ret = lzo1x_1_compress( (const lzo_bytep) dataIn, availIn,
(lzo_bytep) dataOut, (lzo_uintp) &outputSize, wrkmem );
compression->giveBackWorkmem(wrkmem);
if ( ret == LZO_E_OUTPUT_OVERRUN )
return false;
CHECK( ret >= LZO_E_OK, "lzo1x_1_compress failed (code %d)", ret );
return true;
}
#endif // HAVE_LIBLZO
// register them
static const_sptr<CompressionMethod> const compressions[] = {
new LZMACompression(),
# ifdef HAVE_LIBLZO
new LZO1X_1_Compression(),
# endif
// NULL entry marks end of list. Don't remove it!
NULL
};
const_sptr<CompressionMethod> CompressionMethod::defaultCompression = compressions[0];
const_sptr<CompressionMethod> CompressionMethod::findCompression( const std::string& name, bool optional )
{
for ( const const_sptr<CompressionMethod>* c = compressions+0; *c; ++c )
{
if ( (*c)->getName() == name )
{
return (*c);
}
}
if ( !optional )
{
throw exUnsupportedCompressionMethod( name );
}
return NULL;
}
// iterator over compressions
CompressionMethod::iterator::iterator( const const_sptr<CompressionMethod>* ptr ) : ptr( ptr) { }
CompressionMethod::iterator::iterator( const iterator& it ) : ptr(it.ptr) { }
CompressionMethod::iterator& CompressionMethod::iterator::operator =( const iterator& it )
{
this->ptr = it.ptr;
return *this;
}
bool CompressionMethod::iterator::operator ==( const iterator& other ) const
{
// special case: one has ptr==NULL (end iterator returned by end()) and the
// other has *ptr==NULL (end iterator obtained by calling ++)
if ( !ptr && ( !other.ptr || !*other.ptr ) )
return true;
else if ( !other.ptr && ( !ptr || !*ptr ) )
return true;
else
return (ptr == other.ptr);
}
bool CompressionMethod::iterator::operator !=( const iterator& other ) const
{
return !( *this == other );
}
bool CompressionMethod::iterator::atEnd() const
{
return !ptr || !*ptr;
}
CompressionMethod::iterator& CompressionMethod::iterator::operator ++()
{
CHECK( ptr && *ptr, "Cannot increment the end iterator" );
++ptr;
return *this;
}
const_sptr<CompressionMethod> CompressionMethod::iterator::operator *()
{
CHECK( ptr && *ptr, "Cannot dereference the end iterator" );
return *ptr;
}
CompressionMethod::iterator CompressionMethod::begin()
{
return iterator(compressions);
}
CompressionMethod::iterator CompressionMethod::end()
{
return iterator(NULL);
}
}
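For reference, the `NoStreamAndUnknownSizeEncoder` above frames each compressed block as two little-endian `uint32` sizes (uncompressed, then compressed), each stored in its own 8-byte slot, followed by the compressed bytes. Below is a hedged sketch of parsing that frame; the struct and function names are illustrative and do not exist in `compression.cc`:

```cpp
// Sketch only: parse the 16-byte size header written by
// NoStreamAndUnknownSizeEncoder::doProcess() above.
#include <endian.h>
#include <stdint.h>
#include <string.h>
#include <stddef.h>

struct NoStreamFrame            // hypothetical helper, for illustration
{
  uint32_t uncompressedSize;    // bytes 0..3, little endian (4..7 unused)
  uint32_t compressedSize;      // bytes 8..11, little endian (12..15 unused)
  const char * payload;         // compressed bytes start at offset 16
};

bool parseNoStreamFrame( const char * data, size_t size, NoStreamFrame & out )
{
  if ( size < 2 * sizeof( uint64_t ) )
    return false;               // header not complete yet

  uint32_t tmp;
  memcpy( &tmp, data, sizeof tmp );
  out.uncompressedSize = le32toh( tmp );
  memcpy( &tmp, data + sizeof( uint64_t ), sizeof tmp );
  out.compressedSize = le32toh( tmp );
  out.payload = data + 2 * sizeof( uint64_t );

  // Mirrors isCompleteInput(): the frame is complete once all compressed
  // bytes are present.
  return size >= out.compressedSize + 2 * sizeof( uint64_t );
}
```

The unused four bytes after each size leave room to switch to full 64-bit lengths later, as the comments in `doProcess` note.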

compression.hh (new file, 90 lines)

@ -0,0 +1,90 @@
// Copyright (c) 2013 Benjamin Koch <bbbsnowball@gmail.com>
// Part of ZBackup. Licensed under GNU GPLv2 or later + OpenSSL, see LICENSE
#ifndef COMPRESSION_HH_INCLUDED__
#define COMPRESSION_HH_INCLUDED__
#include "sptr.hh"
#include "ex.hh"
#include "nocopy.hh"
namespace Compression {
DEF_EX( Ex, "Compression exception", std::exception )
DEF_EX_STR( exUnsupportedCompressionMethod, "Unsupported compression method: ", Ex )
// used for encoding or decoding
class EnDecoder: NoCopy
{
protected:
EnDecoder();
public:
virtual ~EnDecoder();
// encoder can read up to size bytes from data
virtual void setInput ( const void* data, size_t size ) =0;
// how many bytes of the last input haven't been used, yet?
virtual size_t getAvailableInput() =0;
// encoder can write up to size bytes to output
virtual void setOutput( void* data, size_t size ) =0;
// how many bytes of free space are remaining in the output buffer
virtual size_t getAvailableOutput() =0;
// process some bytes
// finish: set this to true when no more input will be provided via setInput()
// NOTE You must eventually call process() with finish set to true.
// returns true once all output bytes have been written
virtual bool process( bool finish ) =0;
};
// compression method
class CompressionMethod
{
public:
virtual ~CompressionMethod();
// returns name of compression method
// This name is saved in the file header of the compressed file.
virtual std::string getName() const =0;
virtual sptr<EnDecoder> createEncoder() const =0;
virtual sptr<EnDecoder> createDecoder() const =0;
// find a compression by name
// If optional is false, it will either return a valid CompressionMethod
// object or abort the program. If optional is true, it will return
// NULL, if it cannot find a compression with that name.
static const_sptr<CompressionMethod> findCompression(
const std::string& name, bool optional = false );
static const_sptr<CompressionMethod> defaultCompression;
class iterator
{
friend class CompressionMethod;
const const_sptr<CompressionMethod>* ptr;
iterator( const const_sptr<CompressionMethod>* ptr );
public:
iterator( const iterator& it );
iterator& operator =( const iterator& it );
bool operator ==( const iterator& other ) const;
bool operator !=( const iterator& other ) const;
bool atEnd() const;
iterator& operator ++();
const_sptr<CompressionMethod> operator *();
};
static iterator begin();
static iterator end();
};
}
#endif
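As a quick orientation, here is a minimal, hedged usage sketch of the `EnDecoder` interface declared above: it compresses one in-memory buffer with the current default method. The helper function is made up for illustration; `Bundle::Creator::write` drives the same loop against an output stream instead of a string:

```cpp
// Sketch only: one-shot compression through the EnDecoder interface.
#include <string>
#include "compression.hh"

std::string compressOneShot( std::string const & input )
{
  using namespace Compression;

  sptr< EnDecoder > encoder =
    CompressionMethod::defaultCompression->createEncoder();

  // All input is handed over up front; with streaming codecs it could also
  // be fed in pieces, calling setInput() again between process() calls.
  encoder->setInput( input.data(), input.size() );

  std::string output;
  char buf[ 65536 ];
  for ( ; ; )
  {
    encoder->setOutput( buf, sizeof buf );
    // finish = true: no more input will follow. process() returns true once
    // every output byte has been written.
    bool done = encoder->process( true );
    output.append( buf, sizeof buf - encoder->getAvailableOutput() );
    if ( done )
      break;
  }
  return output;
}
```

Decompression mirrors this with `createDecoder()`; the bundle reader can size its output buffer up front because the payload size is known from the bundle metadata.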

sptr.hh

@ -74,6 +74,9 @@ public:
{ if ( &other != this ) { reset(); p = other.p; count = other.count; increment(); }
return * this; }
operator bool( void ) const
{ return !!p; }
bool operator ! ( void ) const
{ return !p; }

tests/bundle/bundle.pro (new file, 50 lines)

@ -0,0 +1,50 @@
######################################################################
# Automatically generated by qmake (2.01a) Sun Jul 14 20:54:52 2013
######################################################################
TEMPLATE = app
TARGET =
DEPENDPATH += .
INCLUDEPATH += .
CONFIG = debug
LIBS += -lcrypto -lprotobuf -lz -lprotobuf -llzma -llzo2
DEFINES += __STDC_FORMAT_MACROS
DEFINES += HAVE_LIBLZO
# Input
SOURCES += test_bundle.cc \
../../unbuffered_file.cc \
../../tmp_mgr.cc \
../../page_size.cc \
../../random.cc \
../../encryption_key.cc \
../../encryption.cc \
../../encrypted_file.cc \
../../file.cc \
../../dir.cc \
../../bundle.cc \
../../message.cc \
../../hex.cc \
../../compression.cc \
../../zbackup.pb.cc
HEADERS += \
../../unbuffered_file.hh \
../../tmp_mgr.hh \
../../adler32.hh \
../../page_size.hh \
../../random.hh \
../../encryption_key.hh \
../../encrypted_file.hh \
../../encryption.hh \
../../ex.hh \
../../file.hh \
../../dir.hh \
../../bundle.hh \
../../message.hh \
../../hex.hh \
../../compression.hh \
../../message.hh \
../../zbackup.pb.h

tests/bundle/test_bundle.cc (new file, 172 lines)

@ -0,0 +1,172 @@
// Copyright (c) 2013 Benjamin Koch <bbbsnowball@gmail.com>
// Part of ZBackup. Licensed under GNU GPLv2 or later + OpenSSL, see LICENSE
// Based on other tests written by Konstantin Isakov <ikm@zbackup.org>
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include "../../encrypted_file.hh"
#include "../../encryption_key.hh"
#include "../../random.hh"
#include "../../tmp_mgr.hh"
#include "../../check.hh"
#include "../../adler32.hh"
#include "../../bundle.hh"
#include "../../compression.hh"
#include "../../message.hh"
using namespace Compression;
char tmpbuf[100];
void testCompatibility()
{
// The LZO code uses a different file header than the previous code
// because it adds the compression_method field. Nevertheless, it
// must be compatible with previous code.
TmpMgr tmpMgr( "/dev/shm" );
sptr< TemporaryFile > tempFile = tmpMgr.makeTemporaryFile();
std::string fileName = tempFile->getFileName();
EncryptionKey noKey( std::string(), NULL );
// Write old header, read as new header
{
{
EncryptedFile::OutputStream os( fileName.c_str(), noKey, Encryption::ZeroIv );
FileHeader header;
header.set_version( 42 );
Message::serialize( header, os );
}
{
EncryptedFile::InputStream is( fileName.c_str(), noKey, Encryption::ZeroIv );
BundleFileHeader header;
Message::parse( header, is );
CHECK( header.version() == 42, "version is wrong when reading old header with new program" );
CHECK( header.compression_method() == "lzma", "compression_method is wrong when reading old header with new program" );
}
}
// Write new header, read as old header
//NOTE In the real code, this will only work, if the file uses LZMA. If it doesn't, the version
// field is increased and the old code will refuse to read the file.
{
{
EncryptedFile::OutputStream os( fileName.c_str(), noKey, Encryption::ZeroIv );
BundleFileHeader header;
header.set_version( 42 );
Message::serialize( header, os );
}
{
EncryptedFile::InputStream is( fileName.c_str(), noKey, Encryption::ZeroIv );
FileHeader header;
Message::parse( header, is );
CHECK( header.version() == 42, "version is wrong when reading new header with old program" );
// cannot check compression_method because the field doesn't exist
}
}
printf("compatibility test successful.\n");
}
void readAndWrite( EncryptionKey const & key,
const_sptr<CompressionMethod> compression1, const_sptr<CompressionMethod> compression2 )
{
// temporary file for the bundle
TmpMgr tmpMgr( "/dev/shm" );
sptr< TemporaryFile > tempFile = tmpMgr.makeTemporaryFile();
// some chunk data
int chunkCount = rand() % 30;
size_t chunkSize = rand() % 20 ? 64*1024 : 10;
char** chunks = new char*[chunkCount];
string* chunkIds = new string[chunkCount];
CompressionMethod::defaultCompression = compression1;
// write bundle
{
Bundle::Creator bundle;
for (int i=0;i<chunkCount;i++) {
chunks[i] = new char[chunkSize];
Random::genaratePseudo( chunks[i], chunkSize );
//TODO make it look like a real Id (or even let it match the data)
//TODO make sure we don't have any duplicate Ids
sprintf(tmpbuf, "0x%08x", rand());
chunkIds[i] = string(tmpbuf);
bundle.addChunk( chunkIds[i], chunks[i], chunkSize );
}
bundle.write( tempFile->getFileName().c_str(), key );
}
CompressionMethod::defaultCompression = compression2;
// read it and compare
{
Bundle::Reader bundle( tempFile->getFileName().c_str(), key );
for (int i=0;i<chunkCount;i++) {
string data;
size_t size;
bool ret = bundle.get( chunkIds[i], data, size );
CHECK( ret, "bundle.get returned false for chunk %d (%s)", i, chunkIds[i].c_str() );
CHECK( size == chunkSize, "wrong chunk size for chunk %d (%s)", i, chunkIds[i].c_str() );
CHECK( memcmp(data.c_str(), chunks[i], chunkSize) == 0, "wrong chunk data for chunk %d (%s)", i, chunkIds[i].c_str() );
}
}
// clean up
for (int i=0;i<chunkCount;i++)
delete[] chunks[i];
delete[] chunks;
//TODO does that call the destructors?
delete[] chunkIds;
printf(".");
fflush(stdout);
}
int main()
{
EncryptionKeyInfo keyInfo;
EncryptionKey::generate( "blah", keyInfo );
EncryptionKey key( "blah", &keyInfo );
EncryptionKey noKey( std::string(), NULL );
testCompatibility();
std::vector< const_sptr<CompressionMethod> > compressions;
for ( CompressionMethod::iterator it = CompressionMethod::begin(); it!=CompressionMethod::end(); ++it ) {
printf( "supported compression: %s\n", (*it)->getName().c_str() );
compressions.push_back( *it );
}
for ( size_t iteration = 100; iteration--; ) {
// default compression while writing the file
const_sptr<CompressionMethod> compression1 = compressions[ rand() % compressions.size() ];
// default compression while reading the file
// The reader should ignore it and always use the compression that was used for the file.
const_sptr<CompressionMethod> compression2 = compressions[ rand() % compressions.size() ];
readAndWrite( ( rand() & 1 ) ? key : noKey, compression1, compression2 );
}
printf("\n");
return 0;
}

zbackup.cc

@ -1,5 +1,6 @@
// Copyright (c) 2012-2014 Konstantin Isakov <ikm@zbackup.org>
// Part of ZBackup. Licensed under GNU GPLv2 or later + OpenSSL, see LICENSE
// modified by Benjamin Koch <bbbsnowball@gmail.com>
#include <ctype.h>
#include <stdint.h>
@ -14,6 +15,7 @@
#include "backup_creator.hh"
#include "backup_file.hh"
#include "backup_restorer.hh"
#include "compression.hh"
#include "debug.hh"
#include "dir.hh"
#include "encryption_key.hh"
@ -334,6 +336,8 @@ int main( int argc, char *argv[] )
size_t threads = defaultThreads;
size_t const defaultCacheSizeMb = 40;
size_t cacheSizeMb = defaultCacheSizeMb;
bool printHelp = false;
bool forcedCompressionMethod = false;
vector< char const * > args;
vector< string > passwords;
bitset< BackupExchanger::Flags > exchange;
@ -429,15 +433,55 @@ int main( int argc, char *argv[] )
return EXIT_FAILURE;
}
}
else
if ( strcmp( argv[ x ], "--compression" ) == 0 && x + 1 < argc )
{
forcedCompressionMethod = true;
// next argument names the compression method
++x;
if ( strcmp( argv[ x ], "lzma" ) == 0 )
{
const_sptr<Compression::CompressionMethod> lzma = Compression::CompressionMethod::findCompression( "lzma" );
if ( !lzma )
{
fprintf( stderr, "zbackup is compiled without LZMA support, but the code "
"would support it. If you install liblzma (including development files) "
"and recompile zbackup, you can use LZMA.\n" );
return EXIT_FAILURE;
}
Compression::CompressionMethod::defaultCompression = lzma;
}
else
if ( strcmp( argv[ x ], "lzo" ) == 0 )
{
const_sptr<Compression::CompressionMethod> lzo = Compression::CompressionMethod::findCompression( "lzo1x_1" );
if ( !lzo )
{
fprintf( stderr, "zbackup is compiled without LZO support, but the code "
"would support it. If you install liblzo2 (including development files) "
"and recompile zbackup, you can use LZO.\n" );
return EXIT_FAILURE;
}
Compression::CompressionMethod::defaultCompression = lzo;
}
else
{
fprintf( stderr, "zbackup doesn't support compression method '%s'. You may need a newer version.\n",
argv[ x ] );
return EXIT_FAILURE;
}
}
else
if ( strcmp( argv[ x ], "--help" ) == 0 || strcmp( argv[ x ], "-h" ) == 0 )
{
printHelp = true;
}
else
args.push_back( argv[ x ] );
}
if ( args.size() < 1 ||
( args.size() == 1 &&
( strcmp( args[ 0 ], "-h" ) == 0 || strcmp( args[ 0 ], "--help" ) == 0 )
)
)
if ( args.size() < 1 || printHelp )
{
fprintf( stderr,
"ZBackup, a versatile deduplicating backup tool, version 1.3\n"
@ -454,6 +498,7 @@ int main( int argc, char *argv[] )
" --cache-size <number> MB (default is %zu)\n"
" --exchange [backups|bundles|index] (can be\n"
" specified multiple times)\n"
" --compression <compression> <lzma|lzo> (default is lzma)\n"
" --help|-h show this message\n"
" Commands:\n"
" init <storage path> - initializes new storage;\n"
@ -506,6 +551,8 @@ int main( int argc, char *argv[] )
}
ZBackup zb( ZBackup::deriveStorageDirFromBackupsFile( args[ 1 ] ),
passwords[ 0 ], threads );
if ( !forcedCompressionMethod )
zb.useDefaultCompressionMethod();
zb.backupFromStdin( args[ 1 ] );
}
else
@ -520,6 +567,8 @@ int main( int argc, char *argv[] )
}
ZRestore zr( ZRestore::deriveStorageDirFromBackupsFile( args[ 1 ] ),
passwords[ 0 ], cacheSizeMb * 1048576 );
if ( !forcedCompressionMethod )
zr.useDefaultCompressionMethod();
zr.restoreToStdin( args[ 1 ] );
}
else

zbackup.proto

@ -1,5 +1,6 @@
// Copyright (c) 2012-2014 Konstantin Isakov <ikm@zbackup.org>
// Part of ZBackup. Licensed under GNU GPLv2 or later + OpenSSL, see LICENSE
// modified by Benjamin Koch <bbbsnowball@gmail.com>
// Protobuffers used in zbackup
@ -38,6 +39,8 @@ message StorageInfo
required uint32 bundle_max_payload_size = 2;
// If present, used for encryption/decryption of all data
optional EncryptionKeyInfo encryption_key = 3;
// Default compression for new bundles
optional string default_compression_method = 4 [default = "lzma"];
}
message BundleInfo
@ -61,6 +64,18 @@ message FileHeader
required uint32 version = 1;
}
message BundleFileHeader
{
// File format version
required uint32 version = 1;
// Compression method that is used for this file
// If the program doesn't support that field, it will try LZMA. If it is
// LZMA, that will work. If it isn't, it will have aborted before because
// the version in FileHeader is higher than it can support.
optional string compression_method = 2 [default = "lzma"];
}
message IndexBundleHeader
{
// Id of the bundle following in the stream. If not present, indicates the

zbackup_base.cc

@ -4,6 +4,7 @@
#include "zbackup_base.hh"
#include "storage_info_file.hh"
#include "compression.hh"
using std::string;
@ -77,6 +78,8 @@ void ZBackupBase::initStorage( string const & storageDir,
EncryptionKey::generate( password,
*storageInfo.mutable_encryption_key() );
storageInfo.set_default_compression_method( Compression::CompressionMethod::defaultCompression->getName() );
Paths paths( storageDir );
if ( !Dir::exists( storageDir ) )
@ -119,3 +122,12 @@ string ZBackupBase::deriveStorageDirFromBackupsFile( string const &
else
return realPath.substr( 0, pos );
}
void ZBackupBase::useDefaultCompressionMethod()
{
std::string compression_method_name = storageInfo.default_compression_method();
const_sptr<Compression::CompressionMethod> compression
= Compression::CompressionMethod::findCompression( compression_method_name );
Compression::CompressionMethod::defaultCompression = compression;
}

zbackup_base.hh

@ -51,6 +51,8 @@ public:
/// storage dir or throws an exception
static std::string deriveStorageDirFromBackupsFile( std::string const & backupsFile, bool allowOutside = false );
void useDefaultCompressionMethod();
StorageInfo storageInfo;
EncryptionKey encryptionkey;
TmpMgr tmpMgr;