Commit

Merge pull request #336 from zhujun98/dev
Add option to add padding to projections
zhujun98 authored Aug 28, 2024
2 parents 3aab919 + 90d56b9 commit c90c946
Showing 10 changed files with 297 additions and 179 deletions.
2 changes: 1 addition & 1 deletion common/include/common/utils.hpp
@@ -20,7 +20,7 @@ inline size_t sliceIdFromTimestamp(size_t ts) {
return ts % MAX_NUM_SLICES;
}

inline size_t expandDataSizeForGpu(size_t s, size_t chunk_size) {
inline size_t expandDataSize(size_t s, size_t chunk_size) {
return s % chunk_size == 0 ? s : (s / chunk_size + 1 ) * chunk_size;
}

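The renamed helper `expandDataSize` (formerly `expandDataSizeForGpu`) rounds a buffer length up to a whole number of chunks. A minimal standalone sketch of its behaviour, with the function body copied from the hunk above:

```cpp
#include <cassert>
#include <cstddef>

// Same helper as in common/utils.hpp: round `s` up to a whole number of chunks.
inline size_t expandDataSize(size_t s, size_t chunk_size) {
    return s % chunk_size == 0 ? s : (s / chunk_size + 1) * chunk_size;
}

int main() {
    assert(expandDataSize(100, 32) == 128);  // padded up to 4 chunks of 32
    assert(expandDataSize(128, 32) == 128);  // already a whole number of chunks
    return 0;
}
```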
26 changes: 8 additions & 18 deletions recon/include/recon/application.hpp
@@ -155,19 +155,9 @@ class Application {

void maybeResetDarkAndFlatAcquisition();

void pushDark(Projection<>&& proj) {
darks_.emplace_back(std::move(proj.data));
if (darks_.size() > k_MAX_NUM_DARKS) {
spdlog::warn("Maximum number of dark images received. Data ignored!");
}
}
void pushDark(Projection<>&& proj);

void pushFlat(Projection<>&& proj) {
flats_.emplace_back(std::move(proj.data));
if (flats_.size() > k_MAX_NUM_FLATS) {
spdlog::warn("Maximum number of flat images received. Data ignored!");
}
}
void pushFlat(Projection<>&& proj);

void pushProjection(const Projection<>& proj);

@@ -242,11 +232,11 @@ class Application {

void stopProcessing();

rpc::ServerState_State getServerState() const { return server_state_; }
[[nodiscard]] rpc::ServerState_State getServerState() const { return server_state_; }

std::optional<rpc::ProjectionData> getProjectionData(int timeout);

bool hasVolume() const { return volume_required_; }
[[nodiscard]] bool hasVolume() const { return volume_required_; }

std::vector<rpc::ReconData> getVolumeData(int timeout);

@@ -256,10 +246,10 @@

// for unittest

const std::vector<RawImageData>& darks() const { return darks_; }
const std::vector<RawImageData>& flats() const { return flats_; }
const MemoryBuffer<float, 3>& rawBuffer() const { return raw_buffer_; }
const Reconstructor* reconstructor() const { return recon_.get(); }
[[nodiscard]] const std::vector<RawImageData>& darks() const { return darks_; }
[[nodiscard]] const std::vector<RawImageData>& flats() const { return flats_; }
[[nodiscard]] const MemoryBuffer<ProDtype, 3>& rawBuffer() const { return raw_buffer_; }
[[nodiscard]] const Reconstructor* reconstructor() const { return recon_.get(); }
};

} // namespace recastx::recon
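`pushDark()` and `pushFlat()` lose their inline bodies and keep only declarations here; their definitions presumably move into the application source file, which is not part of this excerpt. A hypothetical out-of-line definition mirroring the removed inline body — the real one may differ, e.g. by applying the new downsampling/padding before storing the frame:

```cpp
// Hypothetical out-of-line definition (e.g. in application.cpp), mirroring the
// inline body removed from the header above; the actual implementation is not
// shown in this excerpt and may do more with the incoming projection.
void Application::pushDark(Projection<>&& proj) {
    darks_.emplace_back(std::move(proj.data));
    if (darks_.size() > k_MAX_NUM_DARKS) {
        spdlog::warn("Maximum number of dark images received. Data ignored!");
    }
}
```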
82 changes: 46 additions & 36 deletions recon/include/recon/buffer.hpp
@@ -230,10 +230,11 @@ void SliceBuffer<T, OD>::resize(const ShapeType& shape) {
shape_ = shape;
}


namespace details {

template<typename T, typename D>
inline void copyBuffer(T* dst, const char* src, const std::array<size_t, 2>& shape) {
inline void copyToBuffer(T *dst, const char *src, const std::array<size_t, 2> &shape) {
D v;
for (size_t size = sizeof(D), i = 0; i < shape[0] * shape[1]; ++i) {
memcpy(&v, src, size);
@@ -243,30 +244,41 @@ inline void copyBuffer(T* dst, const char* src, const std::array<size_t, 2>& sha
}

template<typename T, typename D>
inline void copyBuffer(T* dst,
const std::array<size_t, 2>& dst_shape,
const char* src,
const std::array<size_t, 2>& src_shape) {
if (src_shape == dst_shape) copyBuffer<T, D>(dst, src, dst_shape);

size_t ds_r = src_shape[0] / dst_shape[0];
size_t ds_c = src_shape[1] / dst_shape[1];
inline void copyToBuffer(T *dst,
const std::array<size_t, 2> &dst_shape,
const char *src,
const std::array<size_t, 2> &src_shape,
const std::array<size_t, 2> &downsampling) {
if (src_shape == dst_shape) {
assert(downsampling == (std::array < size_t, 2 > {1, 1}));
copyToBuffer<T, D>(dst, src, dst_shape);
return;
}

auto [ds_r, ds_c] = downsampling;

auto rows_ds = src_shape[0] / ds_r;
auto cols_ds = src_shape[1] / ds_c;

assert(dst_shape[0] >= rows_ds);
assert(dst_shape[1] >= cols_ds);

D v;
for (size_t size = sizeof(D),
rstep = ds_r * size * src_shape[1],
cstep = ds_c * size, i = 0; i < dst_shape[0]; ++i) {
char* ptr = const_cast<char*>(src) + i * rstep;
for (size_t j = 0; j < dst_shape[1]; ++j) {
memcpy(&v, ptr, size);
ptr += cstep;
*(dst++) = static_cast<T>(v);
size_t padding_r = (dst_shape[0] - rows_ds) / 2;
size_t padding_c = (dst_shape[1] - cols_ds) / 2;
for (size_t size = sizeof(D), i = 0; i < rows_ds; ++i) {
char* ptr_src = const_cast<char *>(src) + i * ds_r * size * src_shape[1];
T *ptr_dst = dst + dst_shape[1] * (i + padding_r) + padding_c;
for (size_t j = 0; j < cols_ds; ++j) {
memcpy(&v, ptr_src, size);
ptr_src += ds_c * size;
*(ptr_dst++) = static_cast<T>(v);
}
}
}

} // details


template<typename T, size_t N>
class MemoryBuffer {

@@ -301,7 +313,8 @@ class MemoryBuffer {
void fillImp(size_t buffer_idx,
size_t data_idx,
const char* src,
const std::array<size_t, N-1>& src_shape);
const std::array<size_t, N-1>& src_shape,
const std::array<size_t, N-1>& downsampling);

void registerChunk(size_t idx);

@@ -316,10 +329,10 @@
void reset();

template<typename D>
void fill(size_t index, const char* src);

template<typename D>
void fill(size_t index, const char* src, const std::array<size_t, N-1>& shape);
void fill(size_t index,
const char* src,
const std::array<size_t, N-1>& shape,
const std::array<size_t, N-1>& downsampling);

bool fetch(int timeout);

@@ -379,8 +392,8 @@ void MemoryBuffer<T, N>::resize(const std::array<size_t, N>& shape) {
reset();

std::lock_guard lck(data_mtx_);
for (size_t i = 0; i < capacity_; ++i) buffer_[i].resize(shape);
front_.resize(shape);
for (size_t i = 0; i < capacity_; ++i) buffer_[i].resize(shape, 0);
front_.resize(shape, 0);
}

template<typename T, size_t N>
@@ -407,14 +420,10 @@ void MemoryBuffer<T, N>::reset() {

template<typename T, size_t N>
template<typename D>
void MemoryBuffer<T, N>::fill(size_t index, const char* src) {
auto& shape = front_.shape();
fill<D>(index, src, {shape[1], shape[2]});
}

template<typename T, size_t N>
template<typename D>
void MemoryBuffer<T, N>::fill(size_t index, const char* src, const std::array<size_t, N-1>& shape) {
void MemoryBuffer<T, N>::fill(size_t index,
const char* src,
const std::array<size_t, N-1>& shape,
const std::array<size_t, N-1>& downsampling) {
size_t chunk_size = front_.shape()[0];
size_t chunk_idx = index / chunk_size;
size_t data_idx = index % chunk_size;
@@ -450,7 +459,7 @@ void MemoryBuffer<T, N>::fill(size_t index, const char* src, const std::array<si

{
std::shared_lock data_lk(data_mtx_);
fillImp<D>(buffer_idx, data_idx, src, shape);
fillImp<D>(buffer_idx, data_idx, src, shape, downsampling);
}

lk.lock();
@@ -493,13 +502,14 @@ template<typename D>
void MemoryBuffer<T, N>::fillImp(size_t buffer_idx,
size_t data_idx,
const char* src,
const std::array<size_t, N-1>& src_shape) {
const std::array<size_t, N-1>& src_shape,
const std::array<size_t, N-1>& downsampling) {
float* data = buffer_[buffer_idx].data();
std::array<size_t, N-1> dst_shape;
auto& buffer_shape = front_.shape();
std::copy(buffer_shape.begin() + 1, buffer_shape.end(), dst_shape.begin());
size_t data_size = std::accumulate(dst_shape.begin(), dst_shape.end(), 1, std::multiplies<>());
details::copyBuffer<T, D>(&data[data_idx * data_size], dst_shape, src, src_shape);
details::copyToBuffer<T, D>(&data[data_idx * data_size], dst_shape, src, src_shape, downsampling);
}

template<typename T, size_t N>
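The reworked `details::copyToBuffer` now takes an explicit downsampling factor and, when the destination is larger than the downsampled source, centres the image with a symmetric border — this is the padding the PR title refers to. A self-contained sketch of the same index arithmetic, using plain `std::vector` storage instead of the project's buffer types:

```cpp
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Sketch of the padding/downsampling copy added to recon/include/recon/buffer.hpp:
// downsample `src` by (ds_r, ds_c), then centre the result in a zero-filled `dst`.
template <typename T, typename D>
void copyToBuffer(T* dst, const std::array<size_t, 2>& dst_shape,
                  const char* src, const std::array<size_t, 2>& src_shape,
                  const std::array<size_t, 2>& downsampling) {
    auto [ds_r, ds_c] = downsampling;
    size_t rows_ds = src_shape[0] / ds_r;
    size_t cols_ds = src_shape[1] / ds_c;
    size_t padding_r = (dst_shape[0] - rows_ds) / 2;
    size_t padding_c = (dst_shape[1] - cols_ds) / 2;
    D v;
    for (size_t size = sizeof(D), i = 0; i < rows_ds; ++i) {
        const char* ptr_src = src + i * ds_r * size * src_shape[1];
        T* ptr_dst = dst + dst_shape[1] * (i + padding_r) + padding_c;
        for (size_t j = 0; j < cols_ds; ++j) {
            std::memcpy(&v, ptr_src, size);
            ptr_src += ds_c * size;
            *(ptr_dst++) = static_cast<T>(v);
        }
    }
}

int main() {
    // 4 x 4 uint16 source, downsampled by 2 x 2 into the centre of a 4 x 4 float destination.
    std::vector<uint16_t> src(16);
    for (size_t i = 0; i < src.size(); ++i) src[i] = static_cast<uint16_t>(i);
    std::vector<float> dst(16, 0.f);
    copyToBuffer<float, uint16_t>(dst.data(), {4, 4},
                                  reinterpret_cast<const char*>(src.data()), {4, 4}, {2, 2});
    // The downsampled 2 x 2 image {0, 2, 8, 10} lands at rows 1-2, columns 1-2 of dst,
    // surrounded by a one-pixel zero border.
    for (size_t i = 0; i < 4; ++i) {
        for (size_t j = 0; j < 4; ++j) std::cout << dst[i * 4 + j] << ' ';
        std::cout << '\n';
    }
    return 0;
}
```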
106 changes: 64 additions & 42 deletions recon/include/recon/preprocessing.hpp
@@ -23,60 +23,83 @@

namespace recastx::recon {

inline void downsample(const ProImageData &src, ProImageData &dst) {
auto &src_shape = src.shape();
auto &dst_shape = dst.shape();
size_t ds_row = src_shape[0] / dst_shape[0];
size_t ds_col = src_shape[1] / dst_shape[1];

if (ds_row == 0 || ds_col == 0) {
spdlog::critical("Shape of the destination is large than shape of the source: {} x {}, dst: {} x {}",
src_shape[0], src_shape[1], dst_shape[0], dst_shape[1]);
throw std::runtime_error("Downsampling with illegal shapes");
namespace details {

template<typename T>
inline void copyToBuffer(Tensor<T, 2> &dst, const Tensor<T, 2> &src) {
assert(dst.shape() == src.shape());
memcpy(dst.data(), src.data(), src.size() * sizeof(T));
}

template<typename T>
inline void copyToBuffer(Tensor<T, 2> &dst, const Tensor<T, 2> &src, const std::array<size_t, 2> &downsampling) {
const auto &src_shape = src.shape();
const auto &dst_shape = dst.shape();
if (dst_shape == src_shape) {
assert(downsampling == (std::array < size_t, 2 > {1, 1}));
copyToBuffer(dst, src);
return;
}

for (size_t i = 0; i < dst_shape[0]; ++i) {
for (size_t j = 0; j < dst_shape[1]; ++j) {
dst[i * dst_shape[1] + j] = src[i * src_shape[1] * ds_row + ds_col * j];
auto [ds_r, ds_c] = downsampling;

auto rows_ds = src_shape[0] / ds_r;
auto cols_ds = src_shape[1] / ds_c;

assert(dst_shape[0] >= rows_ds);
assert(dst_shape[1] >= cols_ds);

size_t padding_r = (dst_shape[0] - rows_ds) / 2;
size_t padding_c = (dst_shape[1] - cols_ds) / 2;
T* ptr_src = const_cast<T*>(src.data());
T* ptr_dst = dst.data() + dst_shape[1] * padding_r;
for (size_t i = 0; i < rows_ds; ++i) {
for (size_t j = 0; j < cols_ds; ++j) {
ptr_dst[j + padding_c] = ptr_src[ds_c * j];
}
ptr_src += ds_r * src_shape[1];
ptr_dst += dst_shape[1];
}
}

namespace details {
inline ProImageData computeReciprocal(const ProImageData &dark_avg, const ProImageData &flat_avg) {
const auto &shape = dark_avg.shape();
ProImageData reciprocal{shape};
for (size_t i = 0; i < shape[0] * shape[1]; ++i) {
if (dark_avg[i] == flat_avg[i]) {
reciprocal[i] = 1.0f;
} else {
reciprocal[i] = 1.0f / (flat_avg[i] - dark_avg[i]);
}
inline ProImageData computeReciprocal(const ProImageData &dark_avg, const ProImageData &flat_avg) {
const auto &shape = dark_avg.shape();
ProImageData reciprocal{shape};
for (size_t i = 0; i < shape[0] * shape[1]; ++i) {
if (dark_avg[i] == flat_avg[i]) {
reciprocal[i] = 1.0f;
} else {
reciprocal[i] = 1.0f / (flat_avg[i] - dark_avg[i]);
}
return reciprocal;
}
return reciprocal;
}

} // namespace details

inline std::pair<ProImageData, ProImageData> computeReciprocal(
const std::vector<RawImageData> &darks, const std::vector<RawImageData> &flats) {
inline void computeReciprocal(const std::vector<RawImageData> &darks,
const std::vector<RawImageData> &flats,
ProImageData& dark_avg,
ProImageData& reciprocal,
const std::array<size_t, 2>& downsampling) {
assert(!darks.empty() || !flats.empty());

Tensor<float, 2> flat_averaged;
Tensor<float, 2> dark_averaged;
if (darks.empty()) {
auto flat_avg = math::average<ProDtype>(flats);
auto dark_avg = flat_avg * 0;
return {dark_avg, details::computeReciprocal(dark_avg, flat_avg)};
}

if (flats.empty()) {
auto dark_avg = math::average<ProDtype>(darks);
auto flat_avg = dark_avg + 1;
return {dark_avg, details::computeReciprocal(dark_avg, flat_avg)};
flat_averaged = math::average<ProDtype>(flats);
dark_averaged = flat_averaged * 0;
} else if (flats.empty()) {
dark_averaged = math::average<ProDtype>(darks);
flat_averaged = dark_averaged + 1;
} else {
dark_averaged = math::average<ProDtype>(darks);
flat_averaged = math::average<ProDtype>(flats);
}

auto dark_avg = math::average<ProDtype>(darks);
auto flat_avg = math::average<ProDtype>(flats);
return {dark_avg, details::computeReciprocal(dark_avg, flat_avg)};

auto reciprocal_orig = details::computeReciprocal(dark_averaged, flat_averaged);
details::copyToBuffer(dark_avg, dark_averaged, downsampling);
details::copyToBuffer(reciprocal, reciprocal_orig, downsampling);
}

inline void flatField(float *data,
@@ -102,10 +125,9 @@ inline std::vector<float> defaultAngles(uint32_t n, float angle_range) {
return angles;
}


template<typename T1, typename T2>
inline void copyToSinogram(T1* dst,
const T2& src,
inline void copyToSinogram(T1 *dst,
const T2 &src,
size_t chunk_idx,
size_t chunk_size,
size_t row_count,
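`computeReciprocal` now fills caller-provided `dark_avg` and `reciprocal` buffers at the (possibly downsampled and padded) projection shape instead of returning a pair. The per-pixel maths is unchanged: `reciprocal = 1 / (flat_avg - dark_avg)`, guarded against identical pixels. A standalone sketch of that inner computation, using flat `std::vector`s in place of `ProImageData`:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Sketch of details::computeReciprocal above: per pixel,
// reciprocal = 1 / (flat - dark), falling back to 1 when flat == dark.
std::vector<float> computeReciprocal(const std::vector<float>& dark_avg,
                                     const std::vector<float>& flat_avg) {
    std::vector<float> reciprocal(dark_avg.size());
    for (size_t i = 0; i < dark_avg.size(); ++i) {
        reciprocal[i] = (dark_avg[i] == flat_avg[i])
                            ? 1.0f
                            : 1.0f / (flat_avg[i] - dark_avg[i]);
    }
    return reciprocal;
}

int main() {
    std::vector<float> dark = {100.f, 100.f, 100.f};
    std::vector<float> flat = {600.f, 100.f, 350.f};
    for (float r : computeReciprocal(dark, flat)) std::cout << r << '\n';
    // Prints 0.002, 1 (degenerate pixel), 0.004
    return 0;
}
```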
28 changes: 1 addition & 27 deletions recon/include/recon/tensor.hpp
@@ -101,6 +101,7 @@ class Tensor {

void resize(const ShapeType& shape, T value) {
data_.resize(size(shape), value);
shape_ = shape;
}

T& operator[](size_t pos) { return data_[pos]; }
@@ -234,33 +235,6 @@ inline auto average(const std::vector<Tensor<T, N>>& src) {

} // namespace math

namespace details {

template<typename T>
inline void copyBuffer(T* dst, const char* src, size_t count) {
std::memcpy(dst, src, count * sizeof(T));
}

template<typename T>
inline void copyBuffer(T* dst,
const std::array<size_t, 2>& dst_shape,
const char* src,
const std::array<size_t, 2>& src_shape) {
size_t ds_r = src_shape[0] / dst_shape[0];
size_t ds_c = src_shape[1] / dst_shape[1];
for (size_t size = sizeof(T),
rstep = ds_r * size * src_shape[1],
cstep = ds_c * size, i = 0; i < dst_shape[0]; ++i) {
char* ptr = const_cast<char*>(src) + i * rstep;
for (size_t j = 0; j < dst_shape[1]; ++j) {
memcpy(dst++, ptr, size);
ptr += cstep;
}
}
}

} // details

} // namespace recastx::recon

#endif // RECON_TENSOR_H
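The one-line change to `Tensor::resize(shape, value)` makes `shape()` track the new extent instead of the stale one. A stripped-down 2-D stand-in illustrating why the added `shape_ = shape;` matters (the real `Tensor` in this header is N-dimensional):

```cpp
#include <array>
#include <cassert>
#include <cstddef>
#include <vector>

// Simplified stand-in for recon::Tensor, showing the effect of the fix:
// without `shape_ = shape;`, shape() would keep reporting the old extent.
template <typename T>
class Tensor2 {
    std::array<size_t, 2> shape_{0, 0};
    std::vector<T> data_;

  public:
    void resize(const std::array<size_t, 2>& shape, T value) {
        data_.resize(shape[0] * shape[1], value);
        shape_ = shape;  // the line added in this commit
    }
    const std::array<size_t, 2>& shape() const { return shape_; }
    size_t size() const { return data_.size(); }
};

int main() {
    Tensor2<float> t;
    t.resize({4, 6}, 0.f);
    assert(t.size() == 24);
    assert((t.shape() == std::array<size_t, 2>{4, 6}));  // holds only with the fix
    return 0;
}
```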
