Undo / Redo memory conservation strategy: Release recoverable data

starting from the objects with the lowest ObjectID.
(Convex hulls are recoverable, as are the indexed triangle sets and the
triangle connectivity information inside the TriangleMeshes.)

The topmost snapshot (the temporary one taken just before an Undo jump)
will now never be released.
bubnikv, 2019-07-18 11:51:06 +02:00
commit cd95b52dcd (parent cc1338ce6a)
5 changed files with 162 additions and 13 deletions
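
A minimal standalone sketch of the eviction policy described above (hypothetical,
simplified types, not the real Slic3r interfaces): each history entry can drop
data that is recomputable from the rest of the model, and the stack walks the
entries in ascending ObjectID order until it fits under the memory limit again;
only afterwards does it start dropping whole snapshots, as the diffs below show.

#include <algorithm>
#include <cstddef>
#include <map>
#include <memory>

struct HistoryEntry {
    virtual ~HistoryEntry() = default;
    // Drop data that can be recomputed later (convex hull, shared vertices,
    // facet neighbors); return the number of bytes released.
    virtual size_t release_optional() = 0;
};

using ObjectID = size_t;

// Release recoverable data first, starting from the lowest ObjectID
// (std::map iterates its keys in ascending order), stopping as soon as
// the stack fits under the memory limit again.
void release_recoverable(std::map<ObjectID, std::unique_ptr<HistoryEntry>> &objects,
                         size_t &current_memsize, size_t memory_limit)
{
    for (auto it = objects.begin();
         current_memsize > memory_limit && it != objects.end(); ++ it)
        current_memsize -= std::min(current_memsize, it->second->release_optional());
}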

connect.cpp (admesh):

@@ -430,6 +430,8 @@ private:
// floats of the first edge matches all six floats of the second edge.
void stl_check_facets_exact(stl_file *stl)
{
assert(stl->facet_start.size() == stl->neighbors_start.size());
stl->stats.connected_edges = 0;
stl->stats.connected_facets_1_edge = 0;
stl->stats.connected_facets_2_edge = 0;

Model.hpp:

@@ -526,8 +526,23 @@ private:
ModelVolume() : ObjectBase(-1), config(-1), object(nullptr) {
assert(this->id().invalid()); assert(this->config.id().invalid());
}
template<class Archive> void serialize(Archive &ar) {
ar(name, config, m_mesh, m_type, m_material_id, m_convex_hull, m_transformation, m_is_splittable);
template<class Archive> void load(Archive &ar) {
bool has_convex_hull;
ar(name, config, m_mesh, m_type, m_material_id, m_transformation, m_is_splittable, has_convex_hull);
assert(m_mesh);
if (has_convex_hull) {
cereal::load_optional(ar, m_convex_hull);
if (! m_convex_hull && ! m_mesh->empty())
// The convex hull was released from the Undo / Redo stack to conserve memory. Recalculate it.
this->calculate_convex_hull();
} else
m_convex_hull.reset();
}
template<class Archive> void save(Archive &ar) const {
bool has_convex_hull = m_convex_hull.get() != nullptr;
ar(name, config, m_mesh, m_type, m_material_id, m_transformation, m_is_splittable, has_convex_hull);
if (has_convex_hull)
cereal::save_optional(ar, m_convex_hull);
}
};
@@ -747,4 +762,9 @@ void check_model_ids_equal(const Model &model1, const Model &model2);
} // namespace Slic3r
namespace cereal
{
template <class Archive> struct specialize<Archive, Slic3r::ModelVolume, cereal::specialization::member_load_save> {};
}
#endif /* slic3r_Model_hpp_ */
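
The serialize() member above is split into load()/save() so that a presence flag
can be written first and a missing convex hull recalculated on load instead of
failing. The same presence-flag pattern in a standalone sketch (hypothetical Blob
payload and raw iostreams standing in for the cereal archives; in the real code
the flagged payload is additionally stored by ObjectID and may come back null
after eviction, which the caller treats the same way):

#include <cstddef>
#include <istream>
#include <memory>
#include <ostream>
#include <string>

struct Blob { std::string bytes; };   // hypothetical stand-in for the convex hull

// Write a presence flag first, then the payload only if it exists.
void save_blob(std::ostream &os, const std::shared_ptr<const Blob> &p)
{
    bool has = (p != nullptr);
    os.write(reinterpret_cast<const char*>(&has), sizeof(has));
    if (has) {
        size_t n = p->bytes.size();
        os.write(reinterpret_cast<const char*>(&n), sizeof(n));
        os.write(p->bytes.data(), n);
    }
}

// Read the flag back; a null result tells the caller to recalculate the data,
// just as ModelVolume::load() recalculates a released convex hull.
std::shared_ptr<const Blob> load_blob(std::istream &is)
{
    bool has = false;
    is.read(reinterpret_cast<char*>(&has), sizeof(has));
    if (! has)
        return {};
    size_t n = 0;
    is.read(reinterpret_cast<char*>(&n), sizeof(n));
    std::string bytes(n, '\0');
    is.read(&bytes[0], n);
    return std::make_shared<const Blob>(Blob{std::move(bytes)});
}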

TriangleMesh.cpp:

@@ -613,6 +613,34 @@ size_t TriangleMesh::memsize() const
return memsize;
}
// Release optional data from the mesh if the object is on the Undo / Redo stack only. Returns the amount of memory released.
size_t TriangleMesh::release_optional()
{
size_t memsize_released = sizeof(stl_neighbors) * this->stl.neighbors_start.size() + this->its.memsize();
// The indexed triangle set may be recalculated using the stl_generate_shared_vertices() function.
this->its.clear();
// The neighbors structure may be recalculated using the stl_check_facets_exact() function.
this->stl.neighbors_start.clear();
return memsize_released;
}
// Restore optional data possibly released by release_optional().
void TriangleMesh::restore_optional()
{
if (! this->stl.facet_start.empty()) {
// Save the old stats before calling stl_check_facets_exact(), as it may modify the statistics.
stl_stats stats = this->stl.stats;
if (this->stl.neighbors_start.empty()) {
stl_reallocate(&this->stl);
stl_check_facets_exact(&this->stl);
}
if (this->its.vertices.empty())
stl_generate_shared_vertices(&this->stl, this->its);
// Restore the old statistics.
this->stl.stats = stats;
}
}
void TriangleMeshSlicer::init(const TriangleMesh *_mesh, throw_on_cancel_callback_type throw_on_cancel)
{
mesh = _mesh;
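
A usage sketch of the round trip these two functions provide (assuming
PrusaSlicer's libslic3r headers are available and the mesh is fully loaded and
held only by the Undo / Redo stack; this snippet is not part of the commit):

#include "libslic3r/TriangleMesh.hpp"

void conserve_and_recover(Slic3r::TriangleMesh &mesh)
{
    // Drop the recomputable data (indexed triangle set + facet neighbors).
    size_t freed = mesh.release_optional();
    (void)freed; // the stack subtracts this from its memory accounting
    // ... later, when the snapshot is activated again ...
    // Rebuilds the neighbors via stl_check_facets_exact() and the shared
    // vertices via stl_generate_shared_vertices(), preserving the stats.
    mesh.restore_optional();
}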

TriangleMesh.hpp:

@@ -67,8 +67,12 @@ public:
size_t facets_count() const { return this->stl.stats.number_of_facets; }
bool empty() const { return this->facets_count() == 0; }
bool is_splittable() const;
// Estimate of the memory occupied by this structure.
// Estimate of the memory occupied by this structure, important for keeping an eye on the Undo / Redo stack allocation.
size_t memsize() const;
// Release optional data from the mesh if the object is on the Undo / Redo stack only. Returns the amount of memory released.
size_t release_optional();
// Restore optional data possibly released by release_optional().
void restore_optional();
stl_file stl;
indexed_triangle_set its;

UndoRedo.cpp:

@@ -25,6 +25,12 @@
#ifndef NDEBUG
// #define SLIC3R_UNDOREDO_DEBUG
#endif /* NDEBUG */
#if 0
// Stop at a fraction of the normal Undo / Redo stack size.
#define UNDO_REDO_DEBUG_LOW_MEM_FACTOR 10000
#else
#define UNDO_REDO_DEBUG_LOW_MEM_FACTOR 1
#endif
namespace Slic3r {
namespace UndoRedo {
@@ -74,6 +80,9 @@ public:
// Is the object captured by this history mutable or immutable?
virtual bool is_mutable() const = 0;
virtual bool is_immutable() const = 0;
// The object is optional: it may be released if the Undo / Redo stack memory grows over the limit.
virtual bool is_optional() const { return false; }
// If it is an immutable object, return its pointer. There is a map assigning a temporary ObjectID to the immutable object pointer.
virtual const void* immutable_object_ptr() const { return nullptr; }
// If the history is empty, the ObjectHistory object could be released.
@@ -85,6 +94,10 @@ public:
// Release all data after the given timestamp. For the ImmutableObjectHistory, the shared pointer is NOT released.
// Return the amount of memory released.
virtual size_t release_after_timestamp(size_t timestamp) = 0;
// Release all optional data of this history.
virtual size_t release_optional() = 0;
// Restore optional data possibly released by release_optional.
virtual void restore_optional() = 0;
// Estimated size in memory, to be used to drop least recently used snapshots.
virtual size_t memsize() const = 0;
@@ -175,11 +188,13 @@ template<typename T>
class ImmutableObjectHistory : public ObjectHistory<Interval>
{
public:
ImmutableObjectHistory(std::shared_ptr<const T> shared_object) : m_shared_object(shared_object) {}
ImmutableObjectHistory(std::shared_ptr<const T> shared_object, bool optional) : m_shared_object(shared_object), m_optional(optional) {}
~ImmutableObjectHistory() override {}
bool is_mutable() const override { return false; }
bool is_immutable() const override { return true; }
bool is_optional() const override { return m_optional; }
// If it is an immutable object, return its pointer. There is a map assigning a temporary ObjectID to the immutable object pointer.
const void* immutable_object_ptr() const { return (const void*)m_shared_object.get(); }
// Estimated size in memory, to be used to drop least recently used snapshots.
@@ -216,6 +231,37 @@ public:
return timestamp >= it->begin() && timestamp < it->end();
}
// Release all optional data of this history.
size_t release_optional() override {
size_t mem_released = 0;
if (m_optional) {
bool released = false;
if (this->is_serialized()) {
mem_released += m_serialized.size();
m_serialized.clear();
released = true;
} else if (m_shared_object.use_count() == 1) {
mem_released += m_shared_object->memsize();
m_shared_object.reset();
released = true;
}
if (released) {
mem_released += m_history.size() * sizeof(Interval);
m_history.clear();
}
} else if (m_shared_object.use_count() == 1) {
// The object is in memory, but it is not shared with the scene. Let the object decide whether there is any optional data to release.
const_cast<T*>(m_shared_object.get())->release_optional();
}
return mem_released;
}
// Restore optional data possibly released by this->release_optional().
void restore_optional() override {
if (m_shared_object.use_count() == 1)
const_cast<T*>(m_shared_object.get())->restore_optional();
}
bool is_serialized() const { return m_shared_object.get() == nullptr; }
const std::string& serialized_data() const { return m_serialized; }
std::shared_ptr<const T>& shared_ptr(StackImpl &stack);
@@ -240,6 +286,8 @@ private:
// Either the source object is held by a shared pointer and the m_serialized field is empty,
// or the shared pointer is null and the object is being serialized into m_serialized.
std::shared_ptr<const T> m_shared_object;
// If this object is optional, then it may be deleted from the Undo / Redo stack and recalculated from other data (for example mesh convex hull).
bool m_optional;
std::string m_serialized;
};
@@ -375,6 +423,11 @@ public:
return std::string(it->data(), it->data() + it->size());
}
// Currently all mutable snapshots are mandatory.
size_t release_optional() override { return 0; }
// Currently there is no way to release optional data from the mutable objects.
void restore_optional() override {}
#ifdef SLIC3R_UNDOREDO_DEBUG
std::string format() override {
std::string out = typeid(T).name();
@@ -430,7 +483,7 @@ class StackImpl
public:
// The stack needs to be initialized; an empty stack is not valid, as a "New Project" status must be stored at its beginning.
// Initially the Undo / Redo stack is allowed to occupy at most 10% of the total system physical memory, capped at 1GiB (16384 * 65536 bytes).
StackImpl() : m_memory_limit(std::min(Slic3r::total_physical_memory() / 10, size_t(1 * 16384 * 65536))), m_active_snapshot_time(0), m_current_time(0) {}
StackImpl() : m_memory_limit(std::min(Slic3r::total_physical_memory() / 10, size_t(1 * 16384 * 65536 / UNDO_REDO_DEBUG_LOW_MEM_FACTOR))), m_active_snapshot_time(0), m_current_time(0) {}
void set_memory_limit(size_t memsize) { m_memory_limit = memsize; }
@@ -461,9 +514,9 @@ public:
//protected:
template<typename T, typename T_AS> ObjectID save_mutable_object(const T &object);
template<typename T> ObjectID save_immutable_object(std::shared_ptr<const T> &object);
template<typename T> ObjectID save_immutable_object(std::shared_ptr<const T> &object, bool optional);
template<typename T> T* load_mutable_object(const Slic3r::ObjectID id);
template<typename T> std::shared_ptr<const T> load_immutable_object(const Slic3r::ObjectID id);
template<typename T> std::shared_ptr<const T> load_immutable_object(const Slic3r::ObjectID id, bool optional);
template<typename T, typename T_AS> void load_mutable_object(const Slic3r::ObjectID id, T &target);
#ifdef SLIC3R_UNDOREDO_DEBUG
@@ -620,7 +673,11 @@ namespace cereal
// store just the ObjectID to this stream.
template <class T> void save(BinaryOutputArchive &ar, const std::shared_ptr<const T> &ptr)
{
ar(cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar).save_immutable_object<T>(const_cast<std::shared_ptr<const T>&>(ptr)));
ar(cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar).save_immutable_object<T>(const_cast<std::shared_ptr<const T>&>(ptr), false));
}
template <class T> void save_optional(BinaryOutputArchive &ar, const std::shared_ptr<const T> &ptr)
{
ar(cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar).save_immutable_object<T>(const_cast<std::shared_ptr<const T>&>(ptr), true));
}
// Load ObjectBase derived class from the Undo / Redo stack as a separate object
@@ -630,7 +687,14 @@ namespace cereal
Slic3r::UndoRedo::StackImpl &stack = cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar);
size_t id;
ar(id);
ptr = stack.load_immutable_object<T>(Slic3r::ObjectID(id));
ptr = stack.load_immutable_object<T>(Slic3r::ObjectID(id), false);
}
template <class T> void load_optional(BinaryInputArchive &ar, std::shared_ptr<const T> &ptr)
{
Slic3r::UndoRedo::StackImpl &stack = cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar);
size_t id;
ar(id);
ptr = stack.load_immutable_object<T>(Slic3r::ObjectID(id), true);
}
}
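
These overloads never write the mesh payload into the snapshot stream; they
write only an integer ObjectID and keep the shared object in the stack's
history map, which is what makes eviction of optional objects possible at all.
A toy standalone sketch of that ID indirection (hypothetical Payload/SideTable
types, not the real cereal hooks):

#include <cassert>
#include <cstdint>
#include <map>
#include <memory>
#include <vector>

struct Payload { std::vector<std::uint8_t> bytes; };

struct SideTable {
    std::map<std::uint64_t, std::shared_ptr<const Payload>> table;
    std::uint64_t next_id = 1;

    // "Serialize": register the shared object once, emit only its ID.
    std::uint64_t save(const std::shared_ptr<const Payload> &p) {
        for (const auto &kv : table)
            if (kv.second == p)
                return kv.first;          // already registered, reuse its ID
        table[next_id] = p;
        return next_id ++;
    }

    // "Deserialize": look the ID up. An optional object may have been evicted
    // meanwhile; the caller then recalculates it from other data.
    std::shared_ptr<const Payload> load(std::uint64_t id, bool optional) const {
        auto it = table.find(id);
        if (it == table.end()) {
            assert(optional);             // mandatory objects must still exist
            return {};
        }
        return it->second;
    }
};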
@@ -675,14 +739,16 @@ template<typename T, typename T_AS> ObjectID StackImpl::save_mutable_object(const T &object)
return object.id();
}
template<typename T> ObjectID StackImpl::save_immutable_object(std::shared_ptr<const T> &object)
template<typename T> ObjectID StackImpl::save_immutable_object(std::shared_ptr<const T> &object, bool optional)
{
// First allocate a temporary ObjectID for this pointer.
ObjectID object_id = this->immutable_object_id(object);
// and find or allocate a history stack for the ObjectID associated to this shared_ptr.
auto it_object_history = m_objects.find(object_id);
if (it_object_history == m_objects.end())
it_object_history = m_objects.emplace_hint(it_object_history, object_id, std::unique_ptr<ImmutableObjectHistory<T>>(new ImmutableObjectHistory<T>(object)));
it_object_history = m_objects.emplace_hint(it_object_history, object_id, std::unique_ptr<ImmutableObjectHistory<T>>(new ImmutableObjectHistory<T>(object, optional)));
else
assert(it_object_history->second.get()->is_optional() == optional);
// Then save the interval.
static_cast<ImmutableObjectHistory<T>*>(it_object_history->second.get())->save(m_active_snapshot_time, m_current_time);
return object_id;
@@ -695,13 +761,16 @@ template<typename T> T* StackImpl::load_mutable_object(const Slic3r::ObjectID id)
return target;
}
template<typename T> std::shared_ptr<const T> StackImpl::load_immutable_object(const Slic3r::ObjectID id)
template<typename T> std::shared_ptr<const T> StackImpl::load_immutable_object(const Slic3r::ObjectID id, bool optional)
{
// First find a history stack for the ObjectID of this object instance.
auto it_object_history = m_objects.find(id);
assert(it_object_history != m_objects.end());
assert(optional || it_object_history != m_objects.end());
if (it_object_history == m_objects.end())
return std::shared_ptr<const T>();
auto *object_history = static_cast<ImmutableObjectHistory<T>*>(it_object_history->second.get());
assert(object_history->has_snapshot(m_active_snapshot_time));
object_history->restore_optional();
return object_history->shared_ptr(*this);
}
@@ -869,12 +938,32 @@ void StackImpl::release_least_recently_used()
#ifdef SLIC3R_UNDOREDO_DEBUG
bool released = false;
#endif
// First try to release the optional immutable data (for example the convex hulls)
// and the recomputable parts of the triangle meshes (shared vertices, facet neighbors),
// iterating the object histories in ascending ObjectID order.
for (auto it = m_objects.begin(); current_memsize > m_memory_limit && it != m_objects.end();) {
const void *ptr = it->second->immutable_object_ptr();
size_t mem_released = it->second->release_optional();
if (it->second->empty()) {
if (ptr != nullptr)
// Release the immutable object from the ptr to ObjectID map.
m_shared_ptr_to_object_id.erase(ptr);
mem_released += it->second->memsize();
it = m_objects.erase(it);
} else
++ it;
assert(current_memsize >= mem_released);
if (current_memsize >= mem_released)
current_memsize -= mem_released;
else
current_memsize = 0;
}
while (current_memsize > m_memory_limit && m_snapshots.size() >= 3) {
// From which side to remove a snapshot?
assert(m_snapshots.front().timestamp < m_active_snapshot_time);
size_t mem_released = 0;
if (m_snapshots[1].timestamp == m_active_snapshot_time) {
// Remove the last snapshot.
#if 0
for (auto it = m_objects.begin(); it != m_objects.end();) {
mem_released += it->second->release_after_timestamp(m_snapshots.back().timestamp);
if (it->second->empty()) {
@@ -888,6 +977,12 @@ }
}
m_snapshots.pop_back();
m_snapshots.back().name = topmost_snapshot_name;
#else
// Rather don't release the last snapshot, as it would be very confusing to the user
// why they cannot jump to the topmost state. The Undo / Redo stack maximum size
// should be set low enough to accommodate the topmost snapshot.
break;
#endif
} else {
// Remove the first snapshot.
for (auto it = m_objects.begin(); it != m_objects.end();) {