Best throughput tracker with chunksize

This commit is contained in:
2026-04-28 21:54:49 +02:00
parent 157e769268
commit 50e88c8e84
4 changed files with 395 additions and 52 deletions
+235 -28
View File
@@ -17,27 +17,184 @@ extern "C"
}
#endif
const static char *randomsrc = (char *)"/dev/urandom";
const static char* randomsrc = (char*)"/dev/urandom";
Shred::Shred()
{
#ifdef ADAPTIVE_CHUNK_SIZE
	// Page-aligned (4 KiB) working buffers, sized for the largest chunk the
	// adaptive algorithm may ever select. On allocation failure the pointer is
	// nulled so shredDrive() can detect the condition before writing.
	const int iTfngAllocResult = posix_memalign((void**)&caTfngData, 4096, CHUNK_SIZE_MAX);
	if (iTfngAllocResult != 0)
	{
		Logger::logThis()->error("Failed to allocate aligned buffer for tfng data");
		caTfngData = nullptr;
	}
	const int iReadAllocResult = posix_memalign((void**)&caReadBuffer, 4096, CHUNK_SIZE_MAX);
	if (iReadAllocResult != 0)
	{
		Logger::logThis()->error("Failed to allocate aligned buffer for read buffer");
		caReadBuffer = nullptr;
	}

	// Seed the measurement state: start at the initial chunk size with an empty
	// throughput history and an optimistic "still improving" trend.
	bestChunkSize = CHUNK_SIZE_START;
	currentChunkSize = CHUNK_SIZE_START;
	bestThroughputMBps = 0.0;
	lastThroughputMBps = 0.0;
	bytesWrittenInMeasurement = 0;
	chunkCounter = 0;
	throughputIncreasing = true;

	Logger::logThis()->info("Adaptive chunk size optimization ENABLED - Starting with " +
	                        to_string(currentChunkSize / (1024 * 1024)) + " MB chunks");
#endif
}
Shred::~Shred()
{
#ifdef ADAPTIVE_CHUNK_SIZE
	// free() is a defined no-op on a null pointer, so release unconditionally;
	// clearing the members afterwards rules out any double-free or stale use.
	free(caTfngData);
	caTfngData = nullptr;
	free(caReadBuffer);
	caReadBuffer = nullptr;
#endif
}
#ifdef ADAPTIVE_CHUNK_SIZE
/**
* \brief Start performance measurement interval
* \return void
*/
void Shred::startMeasurement()
{
	// Zero the per-interval counters first, then timestamp the window start so
	// the next evaluateThroughput() call measures a clean interval.
	bytesWrittenInMeasurement = 0;
	chunkCounter = 0;
	measurementStartTime = std::chrono::high_resolution_clock::now();
}
/**
 * \brief Evaluate throughput after measurement interval and adjust chunk size
 * \param pointer to Drive instance
 * \return void
 */
int Shred::shredDrive(Drive *drive, int *ipSignalFd)
void Shred::evaluateThroughput(Drive* drive)
{
	// Close the current measurement window.
	const auto windowEnd = std::chrono::high_resolution_clock::now();
	const double elapsedSeconds =
	    std::chrono::duration<double>(windowEnd - measurementStartTime).count();

	// Only rate the window if any wall-clock time actually elapsed.
	if (elapsedSeconds > 0.0)
	{
		const double throughputMBps = (bytesWrittenInMeasurement / (1024.0 * 1024.0)) / elapsedSeconds;
		lastThroughputMBps = throughputMBps;

		Logger::logThis()->info("Throughput measurement - ChunkSize: " +
		                        to_string(currentChunkSize / (1024 * 1024)) + " MB, " +
		                        "Throughput: " + to_string((int)throughputMBps) + " MB/s, " +
		                        "Best: " + to_string((int)bestThroughputMBps) + " MB/s" +
		                        " - Drive: " + drive->getSerial());

		// The trend flag is simply "did this window beat the record?".
		throughputIncreasing = (throughputMBps > bestThroughputMBps);
		if (throughputIncreasing)
		{
			bestThroughputMBps = throughputMBps;
			bestChunkSize = currentChunkSize;
			Logger::logThis()->info("NEW BEST throughput: " + to_string((int)bestThroughputMBps) +
			                        " MB/s with " + to_string(currentChunkSize / (1024 * 1024)) +
			                        " MB chunks - Drive: " + drive->getSerial());
		}
	}

	// Pick the chunk size for the next window and immediately reopen it.
	adjustChunkSize(drive);
	startMeasurement();
}
/**
* \brief Adjust chunk size based on throughput trend
* \param pointer to Drive instance
* \return void
*/
void Shred::adjustChunkSize(Drive* drive)
{
	// Remember where we started so the change can be logged once at the end.
	const size_t previousChunkSize = currentChunkSize;

	if (throughputIncreasing)
	{
		// Trend is positive: probe a larger chunk, clamped to the ceiling.
		currentChunkSize += CHUNK_SIZE_STEP_UP;
		if (currentChunkSize > CHUNK_SIZE_MAX)
		{
			currentChunkSize = CHUNK_SIZE_MAX;
			Logger::logThis()->info("Reached maximum chunk size: " +
			                        to_string(currentChunkSize / (1024 * 1024)) + " MB" +
			                        " - Drive: " + drive->getSerial());
		}
	}
	else
	{
		// Trend is negative: back off one step (the guard avoids size_t
		// underflow), then clamp to the floor.
		if (currentChunkSize > CHUNK_SIZE_STEP_DOWN)
		{
			currentChunkSize -= CHUNK_SIZE_STEP_DOWN;
		}
		if (currentChunkSize < CHUNK_SIZE_MIN)
		{
			currentChunkSize = CHUNK_SIZE_MIN;
			Logger::logThis()->info("Reached minimum chunk size: " +
			                        to_string(currentChunkSize / (1024 * 1024)) + " MB" +
			                        " - Drive: " + drive->getSerial());
		}
	}

	if (previousChunkSize != currentChunkSize)
	{
		Logger::logThis()->info("Adjusted chunk size: " +
		                        to_string(previousChunkSize / (1024 * 1024)) + " MB -> " +
		                        to_string(currentChunkSize / (1024 * 1024)) + " MB" +
		                        " - Drive: " + drive->getSerial());
	}
}
/**
* \brief Get current chunk size for adaptive mode
* \return current chunk size in bytes
*/
size_t Shred::getCurrentChunkSize() const
{
return currentChunkSize;
}
#endif
/**
* \brief shred drive with shred
* \param pointer of Drive instance
* \param file descriptor for signaling
* \return 0 on success, -1 on error
*/
int Shred::shredDrive(Drive* drive, int* ipSignalFd)
{
ostringstream address;
address << (void const *)&(*drive);
address << (void const*)&(*drive);
Logger::logThis()->info("Shred-Task started - Drive: " + drive->getModelName() + "-" + drive->getSerial() + " @" + address.str());
drive->bWasShredStarted = true; // Mark drive as partly shredded
drive->bWasShredStarted = true;
drive->bWasShredded = false;
drive->setTaskPercentage(0.0);
drive->u32DriveChecksumAfterShredding = UINT32_MAX;
@@ -54,9 +211,18 @@ int Shred::shredDrive(Drive *drive, int *ipSignalFd)
#endif
#ifndef DRYRUN
const char *cpDrivePath = drive->getPath().c_str();
const char* cpDrivePath = drive->getPath().c_str();
unsigned char ucKey[TFNG_KEY_SIZE];
#ifdef ADAPTIVE_CHUNK_SIZE
// Validate buffers were allocated
if (caTfngData == nullptr || caReadBuffer == nullptr)
{
Logger::logThis()->error("Shred-Task: Aligned buffers not allocated! - Drive: " + drive->getSerial());
return -1;
}
#endif
// open random source
randomSrcFileDiscr = open(randomsrc, O_RDONLY | O_LARGEFILE);
if (randomSrcFileDiscr == -1)
@@ -94,41 +260,59 @@ int Shred::shredDrive(Drive *drive, int *ipSignalFd)
this->ulDriveByteSize = getDriveSizeInBytes(driveFileDiscr);
Drive::ShredSpeed shredSpeed = drive->sShredSpeed.load();
shredSpeed.chronoShredTimestamp = std::chrono::system_clock::now(); // set initial timestamp for speed metric
shredSpeed.ulSpeedMetricBytesWritten = 0U; // uses to calculate speed metric
shredSpeed.chronoShredTimestamp = std::chrono::system_clock::now();
shredSpeed.ulSpeedMetricBytesWritten = 0U;
drive->sShredSpeed.store(shredSpeed);
#ifdef LOG_LEVEL_HIGH
Logger::logThis()->info("Shred-Task: Bytes-Size of Drive: " + to_string(this->ulDriveByteSize) + " - Drive: " + drive->getSerial());
#endif
#ifdef ADAPTIVE_CHUNK_SIZE
// Start first measurement interval
startMeasurement();
#endif
for (unsigned int uiShredIterationCounter = 0U; uiShredIterationCounter < SHRED_ITERATIONS; uiShredIterationCounter++)
{
unsigned long ulDriveByteCounter = 0U; // used for one shred-iteration to keep track of the current drive position
unsigned long ulDriveByteCounter = 0U;
if (uiShredIterationCounter == (SHRED_ITERATIONS - 1))
{
// last shred iteration --> overwrite (just the write chunk) bytes with zeros instead with random data
#ifdef ADAPTIVE_CHUNK_SIZE
memset(caTfngData, 0U, CHUNK_SIZE_MAX);
#else
memset(caTfngData, 0U, CHUNK_SIZE);
#endif
}
while (ulDriveByteCounter < ulDriveByteSize)
{
int iBytesToShred = 0; // Bytes that will be overwritten in this chunk-iteration
#ifdef ADAPTIVE_CHUNK_SIZE
size_t activeChunkSize = getCurrentChunkSize();
#else
size_t activeChunkSize = CHUNK_SIZE;
#endif
int iBytesToShred = 0;
if (uiShredIterationCounter != (SHRED_ITERATIONS - 1))
{
// NOT last shred iteration --> generate new random data
#ifdef ADAPTIVE_CHUNK_SIZE
tfng_prng_genrandom(caTfngData, activeChunkSize);
#else
tfng_prng_genrandom(caTfngData, TFNG_DATA_SIZE);
#endif
}
if ((ulDriveByteSize - ulDriveByteCounter) < CHUNK_SIZE)
if ((ulDriveByteSize - ulDriveByteCounter) < activeChunkSize)
{
iBytesToShred = (ulDriveByteSize - ulDriveByteCounter);
}
else
{
iBytesToShred = CHUNK_SIZE;
iBytesToShred = activeChunkSize;
}
int iByteShredded = write(driveFileDiscr, caTfngData, iBytesToShred);
@@ -148,17 +332,28 @@ int Shred::shredDrive(Drive *drive, int *ipSignalFd)
ulDriveByteCounter += iByteShredded;
ulDriveByteOverallCount += iByteShredded;
#ifdef ADAPTIVE_CHUNK_SIZE
bytesWrittenInMeasurement += iByteShredded;
chunkCounter++;
// Evaluate throughput after measurement interval
if (chunkCounter >= CHUNK_MEASURE_INTERVAL)
{
evaluateThroughput(drive);
}
#endif
d32Percent = this->calcProgress();
#ifdef LOG_LEVEL_HIGH
Logger::logThis()->info("Shred-Task: ByteCount: " + to_string(ulDriveByteCounter) + " - iteration: " + to_string((uiShredIterationCounter + 1)) + " - progress: " + to_string(d32Percent) + " - Drive: " + drive->getSerial());
#endif
if ((d32Percent - d32TmpPercent) >= 0.01)
{
// set shred percentage
drive->setTaskPercentage(d32TmpPercent);
d32TmpPercent = d32Percent;
// signal progress in shredding
write(*ipSignalFd, "A", 1);
}
@@ -168,26 +363,32 @@ int Shred::shredDrive(Drive *drive, int *ipSignalFd)
d32Percent = 0.00;
d32TmpPercent = 0.00;
ulDriveByteCounter = 0U;
Logger::logThis()->info("Aborted shred for: " + drive->getModelName() + "-" + drive->getSerial());
Logger::logThis()->info("Aborted shred for: " + drive->getModelName() + "-" + drive->getSerial());
cleanup();
return -1;
}
// end one chunk write
}
if (0 != iRewindDrive(driveFileDiscr))
{
Logger::logThis()->error("Shred-Task: Unable to rewind drive! - Drive: " + drive->getSerial());
cleanup();
return -1;
}
// end one shred iteration
}
// end of all shred iterations
tfng_prng_seedkey(NULL); // reset random generator
#ifdef ADAPTIVE_CHUNK_SIZE
Logger::logThis()->info("Shred completed - Optimal chunk size: " +
to_string(bestChunkSize / (1024 * 1024)) + " MB, " +
"Best throughput: " + to_string((int)bestThroughputMBps) + " MB/s" +
" - Drive: " + drive->getSerial());
#endif
tfng_prng_seedkey(NULL);
drive->bWasShredded = true;
Logger::logThis()->info("Shred-Task finished - Drive: " + drive->getModelName() + "-" + drive->getSerial() + " @" + address.str());
#ifdef ZERO_CHECK
drive->state = Drive::TaskState::CHECK_ACTIVE;
Logger::logThis()->info("Check-Task started - Drive: " + drive->getModelName() + "-" + drive->getSerial() + " @" + address.str());
@@ -219,10 +420,9 @@ int Shred::shredDrive(Drive *drive, int *ipSignalFd)
}
return 0;
}
/**
* \brief calc shredding progress in %
* \param current byte index of the drive
* \param current shred iteration
* \return double percentage
*/
double Shred::calcProgress()
@@ -230,7 +430,7 @@ double Shred::calcProgress()
unsigned int uiMaxShredIteration = SHRED_ITERATIONS;
#ifdef ZERO_CHECK
uiMaxShredIteration++; // increment because we will check after SHRED_ITERATIONS the drive for non-zero bytes
uiMaxShredIteration++;
#endif
if (this->ulDriveByteSize == 0)
return 0.0;
@@ -273,20 +473,27 @@ long Shred::getDriveSizeInBytes(fileDescriptor file)
return liDriveSizeTmp;
}
unsigned int Shred::uiCalcChecksum(fileDescriptor file, Drive *drive, int *ipSignalFd)
unsigned int Shred::uiCalcChecksum(fileDescriptor file, Drive* drive, int* ipSignalFd)
{
unsigned int uiChecksum = 0;
unsigned long ulDriveByteCounter = 0U;
#ifdef ADAPTIVE_CHUNK_SIZE
size_t checkChunkSize = CHUNK_SIZE_MAX;
#else
size_t checkChunkSize = CHUNK_SIZE;
#endif
while (ulDriveByteCounter < ulDriveByteSize)
{
int iBytesToCheck = 0;
if ((ulDriveByteSize - ulDriveByteCounter) < CHUNK_SIZE)
if ((ulDriveByteSize - ulDriveByteCounter) < checkChunkSize)
{
iBytesToCheck = (ulDriveByteSize - ulDriveByteCounter);
}
else
{
iBytesToCheck = CHUNK_SIZE;
iBytesToCheck = checkChunkSize;
}
int iReadBytes = read(file, caReadBuffer, iBytesToCheck);
for (int iReadBytesCounter = 0U; iReadBytesCounter < iReadBytes; iReadBytesCounter++)
@@ -322,4 +529,4 @@ void Shred::cleanup()
{
close(driveFileDiscr);
close(randomSrcFileDiscr);
}
}