Initial port of the new ensure vertical thickness algorithm from PrusaSlicer (#2382)

* Initial port of the new ensure vertical thickness algorithm from PrusaSlicer. Based on prusa3d/PrusaSlicer@1a0d8f5130
* Remove code related to "Detect narrow internal solid infill", as it is handled by the new ensuring code
* Support different internal solid infill patterns
* Ignore removed options

---------

Co-authored-by: Pavel Mikuš <pavel.mikus.mail@seznam.cz>
Co-authored-by: SoftFever <softfeverever@gmail.com>
Parent: ef831ab8b1
Commit: 075a08bca8
37 changed files with 3792 additions and 489 deletions
src/ankerl/README.txt (new file, 7 lines)

@@ -0,0 +1,7 @@
THIS DIRECTORY CONTAINS PIECES OF THE
ankerl::unordered_dense::{map, set}
https://github.com/martinus/unordered_dense
unordered_dense 3.1.1 10782bfc651c2bb75b11bf90491f50da122e5432
SOURCE DISTRIBUTION.

THIS IS NOT THE COMPLETE unordered_dense DISTRIBUTION. ONLY FILES NEEDED FOR COMPILING PRUSASLICER WERE PUT INTO THE PRUSASLICER SOURCE DISTRIBUTION.
src/ankerl/unordered_dense.h (new file, 1584 lines)

File diff suppressed because it is too large.
@@ -4103,19 +4103,40 @@ void AddPolyNodeToPaths(const PolyNode& polynode, NodeType nodetype, Paths& path
  for (int i = 0; i < polynode.ChildCount(); ++i)
    AddPolyNodeToPaths(*polynode.Childs[i], nodetype, paths);
}

void AddPolyNodeToPaths(PolyNode&& polynode, NodeType nodetype, Paths& paths)
{
  bool match = true;
  if (nodetype == ntClosed) match = !polynode.IsOpen();
  else if (nodetype == ntOpen) return;

  if (!polynode.Contour.empty() && match)
    paths.emplace_back(std::move(polynode.Contour));
  for (int i = 0; i < polynode.ChildCount(); ++i)
    AddPolyNodeToPaths(std::move(*polynode.Childs[i]), nodetype, paths);
}

//------------------------------------------------------------------------------

void PolyTreeToPaths(const PolyTree& polytree, Paths& paths)
{
  paths.resize(0);
  paths.clear();
  paths.reserve(polytree.Total());
  AddPolyNodeToPaths(polytree, ntAny, paths);
}

void PolyTreeToPaths(PolyTree&& polytree, Paths& paths)
{
  paths.clear();
  paths.reserve(polytree.Total());
  AddPolyNodeToPaths(std::move(polytree), ntAny, paths);
}

//------------------------------------------------------------------------------

void ClosedPathsFromPolyTree(const PolyTree& polytree, Paths& paths)
{
  paths.resize(0);
  paths.clear();
  paths.reserve(polytree.Total());
  AddPolyNodeToPaths(polytree, ntClosed, paths);
}

@@ -4123,7 +4144,7 @@ void ClosedPathsFromPolyTree(const PolyTree& polytree, Paths& paths)

void OpenPathsFromPolyTree(PolyTree& polytree, Paths& paths)
{
  paths.resize(0);
  paths.clear();
  paths.reserve(polytree.Total());
  //Open paths are top level only, so ...
  for (int i = 0; i < polytree.ChildCount(); ++i)

@@ -204,6 +204,7 @@ void MinkowskiSum(const Path& pattern, const Paths& paths, Paths& solution, bool
void MinkowskiDiff(const Path& poly1, const Path& poly2, Paths& solution);

void PolyTreeToPaths(const PolyTree& polytree, Paths& paths);
void PolyTreeToPaths(PolyTree&& polytree, Paths& paths);
void ClosedPathsFromPolyTree(const PolyTree& polytree, Paths& paths);
void OpenPathsFromPolyTree(PolyTree& polytree, Paths& paths);
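The hunks above add rvalue-reference overloads of AddPolyNodeToPaths() and PolyTreeToPaths() so that contours can be moved out of a PolyTree instead of copied, and replace paths.resize(0) with the equivalent paths.clear(). A minimal usage sketch (not part of this diff) of the new move overload, following the same call pattern this commit uses later in make_brim(); the helper name is illustrative only:

    #include <utility>
    #include "clipper/clipper_z.hpp"

    // Hypothetical helper: flatten a clipping result without copying contours.
    static ClipperLib_Z::Paths flatten_clip_result(ClipperLib_Z::Clipper &clipper)
    {
        ClipperLib_Z::PolyTree tree;
        clipper.Execute(ClipperLib_Z::ctDifference, tree, ClipperLib_Z::pftNonZero, ClipperLib_Z::pftNonZero);
        ClipperLib_Z::Paths out;
        // New overload: contours are moved out of the PolyTree; `tree` must not be reused afterwards.
        ClipperLib_Z::PolyTreeToPaths(std::move(tree), out);
        return out;
    }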
src/libslic3r/Algorithm/PathSorting.hpp (new file, 132 lines)

@@ -0,0 +1,132 @@
///|/ Copyright (c) Prusa Research 2023 Pavel Mikuš @Godrak
///|/
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
///|/
#ifndef SRC_LIBSLIC3R_PATH_SORTING_HPP_
#define SRC_LIBSLIC3R_PATH_SORTING_HPP_

#include "AABBTreeLines.hpp"
#include "BoundingBox.hpp"
#include "Line.hpp"
#include "ankerl/unordered_dense.h"
#include <algorithm>
#include <iterator>
#include <libslic3r/Point.hpp>
#include <libslic3r/Polygon.hpp>
#include <libslic3r/ExPolygon.hpp>
#include <limits>
#include <type_traits>
#include <unordered_set>

namespace Slic3r {
namespace Algorithm {

//Sorts the paths such that all paths between begin and last_seed are printed first, in some order. The rest of the paths is sorted
// such that the paths that are touching some of the already printed are printed first, sorted secondary by the distance to the last point of the last
// printed path.
// begin, end, and last_seed are random access iterators. touch_limit_distance is used to check if the paths are touching - if any part of the path gets this close
// to the second, then they touch.
// convert_to_lines is a lambda that should accept the path as argument and return it as Lines vector, in correct order.
template<typename RandomAccessIterator, typename ToLines>
void sort_paths(RandomAccessIterator begin, RandomAccessIterator end, Point start, double touch_limit_distance, ToLines convert_to_lines)
{
    size_t paths_count = std::distance(begin, end);
    if (paths_count <= 1)
        return;

    auto paths_touch = [touch_limit_distance](const AABBTreeLines::LinesDistancer<Line> &left,
                                              const AABBTreeLines::LinesDistancer<Line> &right) {
        for (const Line &l : left.get_lines()) {
            if (right.distance_from_lines<false>(l.a) < touch_limit_distance) {
                return true;
            }
        }
        if (right.distance_from_lines<false>(left.get_lines().back().b) < touch_limit_distance) {
            return true;
        }

        for (const Line &l : right.get_lines()) {
            if (left.distance_from_lines<false>(l.a) < touch_limit_distance) {
                return true;
            }
        }
        if (left.distance_from_lines<false>(right.get_lines().back().b) < touch_limit_distance) {
            return true;
        }
        return false;
    };

    std::vector<AABBTreeLines::LinesDistancer<Line>> distancers(paths_count);
    for (size_t path_idx = 0; path_idx < paths_count; path_idx++) {
        distancers[path_idx] = AABBTreeLines::LinesDistancer<Line>{convert_to_lines(*std::next(begin, path_idx))};
    }

    std::vector<std::unordered_set<size_t>> dependencies(paths_count);
    for (size_t path_idx = 0; path_idx < paths_count; path_idx++) {
        for (size_t next_path_idx = path_idx + 1; next_path_idx < paths_count; next_path_idx++) {
            if (paths_touch(distancers[path_idx], distancers[next_path_idx])) {
                dependencies[next_path_idx].insert(path_idx);
            }
        }
    }

    Point current_point = start;

    std::vector<std::pair<size_t, bool>> correct_order_and_direction(paths_count);
    size_t unsorted_idx = 0;
    size_t null_idx = size_t(-1);
    size_t next_idx = null_idx;
    bool   reverse  = false;
    while (unsorted_idx < paths_count) {
        next_idx = null_idx;
        double lines_dist = std::numeric_limits<double>::max();
        for (size_t path_idx = 0; path_idx < paths_count; path_idx++) {
            if (!dependencies[path_idx].empty())
                continue;

            double ldist = distancers[path_idx].distance_from_lines<false>(current_point);
            if (ldist < lines_dist) {
                const auto &lines = distancers[path_idx].get_lines();
                double dist_a = (lines.front().a - current_point).cast<double>().squaredNorm();
                double dist_b = (lines.back().b - current_point).cast<double>().squaredNorm();
                next_idx   = path_idx;
                reverse    = dist_b < dist_a;
                lines_dist = ldist;
            }
        }

        // we have valid next_idx, sort it, update dependencies, update current point
        correct_order_and_direction[next_idx] = {unsorted_idx, reverse};
        unsorted_idx++;
        current_point = reverse ? distancers[next_idx].get_lines().front().a : distancers[next_idx].get_lines().back().b;

        dependencies[next_idx].insert(null_idx); // prevent it from being selected again
        for (size_t path_idx = 0; path_idx < paths_count; path_idx++) {
            dependencies[path_idx].erase(next_idx);
        }
    }

    for (size_t path_idx = 0; path_idx < paths_count; path_idx++) {
        if (correct_order_and_direction[path_idx].second) {
            std::next(begin, path_idx)->reverse();
        }
    }

    for (size_t i = 0; i < correct_order_and_direction.size() - 1; i++) {
        bool swapped = false;
        for (size_t j = 0; j < correct_order_and_direction.size() - i - 1; j++) {
            if (correct_order_and_direction[j].first > correct_order_and_direction[j + 1].first) {
                std::swap(correct_order_and_direction[j], correct_order_and_direction[j + 1]);
                std::iter_swap(std::next(begin, j), std::next(begin, j + 1));
                swapped = true;
            }
        }
        if (swapped == false) {
            break;
        }
    }
}

}} // namespace Slic3r::Algorithm

#endif /*SRC_LIBSLIC3R_PATH_SORTING_HPP_*/
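A minimal usage sketch (not part of this diff) of sort_paths() on a set of Polylines. It assumes the usual libslic3r Polyline API (lines(), reverse()); the function name and the 1 mm touch threshold are illustrative only:

    #include <libslic3r/Polyline.hpp>
    #include "Algorithm/PathSorting.hpp"

    namespace Slic3r {

    // Order `paths` in place so that mutually touching paths are printed consecutively,
    // starting near `seam`; individual paths may be reversed to shorten travel moves.
    void order_paths_for_printing(Polylines &paths, const Point &seam)
    {
        Algorithm::sort_paths(paths.begin(), paths.end(), seam,
                              /*touch_limit_distance=*/scaled<double>(1.0),
                              [](const Polyline &pl) { return pl.lines(); });
    }

    } // namespace Slic3r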
src/libslic3r/Algorithm/RegionExpansion.cpp (new file, 548 lines)

@@ -0,0 +1,548 @@
///|/ Copyright (c) Prusa Research 2022 - 2023 Vojtěch Bubník @bubnikv
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#include "RegionExpansion.hpp"
|
||||
|
||||
#include <libslic3r/AABBTreeIndirect.hpp>
|
||||
#include <libslic3r/ClipperZUtils.hpp>
|
||||
#include <libslic3r/ClipperUtils.hpp>
|
||||
#include <libslic3r/Utils.hpp>
|
||||
|
||||
#include <numeric>
|
||||
|
||||
namespace Slic3r {
|
||||
namespace Algorithm {
|
||||
|
||||
// Calculating radius discretization according to ClipperLib offsetter code, see void ClipperOffset::DoOffset(double delta)
|
||||
inline double clipper_round_offset_error(double offset, double arc_tolerance)
|
||||
{
|
||||
static constexpr const double def_arc_tolerance = 0.25;
|
||||
const double y =
|
||||
arc_tolerance <= 0 ?
|
||||
def_arc_tolerance :
|
||||
arc_tolerance > offset * def_arc_tolerance ?
|
||||
offset * def_arc_tolerance :
|
||||
arc_tolerance;
|
||||
double steps = std::min(M_PI / std::acos(1. - y / offset), offset * M_PI);
|
||||
return offset * (1. - cos(M_PI / steps));
|
||||
}
|
||||
|
||||
RegionExpansionParameters RegionExpansionParameters::build(
|
||||
// Scaled expansion value
|
||||
float full_expansion,
|
||||
// Expand by waves of expansion_step size (expansion_step is scaled).
|
||||
float expansion_step,
|
||||
// Don't take more than max_nr_steps for small expansion_step.
|
||||
size_t max_nr_expansion_steps)
|
||||
{
|
||||
assert(full_expansion > 0);
|
||||
assert(expansion_step > 0);
|
||||
assert(max_nr_expansion_steps > 0);
|
||||
|
||||
RegionExpansionParameters out;
|
||||
// Initial expansion of src to make the source regions intersect with boundary regions just a bit.
|
||||
// The expansion should not be too tiny, but also small enough, so the following expansion will
|
||||
// compensate for tiny_expansion and bring the wave back to the boundary without producing
|
||||
// ugly cusps where it touches the boundary.
|
||||
out.tiny_expansion = std::min(0.25f * full_expansion, scaled<float>(0.05f));
|
||||
size_t nsteps = size_t(ceil((full_expansion - out.tiny_expansion) / expansion_step));
|
||||
if (max_nr_expansion_steps > 0)
|
||||
nsteps = std::min(nsteps, max_nr_expansion_steps);
|
||||
assert(nsteps > 0);
|
||||
out.initial_step = (full_expansion - out.tiny_expansion) / nsteps;
|
||||
if (nsteps > 1 && 0.25 * out.initial_step < out.tiny_expansion) {
|
||||
// Decrease the step size by lowering number of steps.
|
||||
nsteps = std::max<size_t>(1, (floor((full_expansion - out.tiny_expansion) / (4. * out.tiny_expansion))));
|
||||
out.initial_step = (full_expansion - out.tiny_expansion) / nsteps;
|
||||
}
|
||||
if (0.25 * out.initial_step < out.tiny_expansion || nsteps == 1) {
|
||||
out.tiny_expansion = 0.2f * full_expansion;
|
||||
out.initial_step = 0.8f * full_expansion;
|
||||
}
|
||||
out.other_step = out.initial_step;
|
||||
out.num_other_steps = nsteps - 1;
|
||||
|
||||
// Accuracy of the offsetter for wave propagation.
|
||||
out.arc_tolerance = scaled<double>(0.1);
|
||||
out.shortest_edge_length = out.initial_step * ClipperOffsetShortestEdgeFactor;
|
||||
|
||||
// Maximum inflation of seed contours over the boundary. Used to trim boundary to speed up
|
||||
// clipping during wave propagation. Needs to be in sync with the offsetter accuracy.
|
||||
// Clipper positive round offset should rather offset less than more.
|
||||
// Still a little bit of additional offset was added.
|
||||
out.max_inflation = (out.tiny_expansion + nsteps * out.initial_step) * 1.1;
|
||||
// (clipper_round_offset_error(out.tiny_expansion, co.ArcTolerance) + nsteps * clipper_round_offset_error(out.initial_step, co.ArcTolerance) * 1.5; // Account for uncertainty
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
// similar to expolygons_to_zpaths(), but each contour is expanded before converted to zpath.
|
||||
// The expanded contours are then opened (the first point is repeated at the end).
|
||||
static ClipperLib_Z::Paths expolygons_to_zpaths_expanded_opened(
|
||||
const ExPolygons &src, const float expansion, coord_t &base_idx)
|
||||
{
|
||||
ClipperLib_Z::Paths out;
|
||||
out.reserve(2 * std::accumulate(src.begin(), src.end(), size_t(0),
|
||||
[](const size_t acc, const ExPolygon &expoly) { return acc + expoly.num_contours(); }));
|
||||
ClipperLib::ClipperOffset offsetter;
|
||||
offsetter.ShortestEdgeLength = expansion * ClipperOffsetShortestEdgeFactor;
|
||||
ClipperLib::Paths expansion_cache;
|
||||
for (const ExPolygon &expoly : src) {
|
||||
for (size_t icontour = 0; icontour < expoly.num_contours(); ++ icontour) {
|
||||
// Execute reorients the contours so that the outer most contour has a positive area. Thus the output
|
||||
// contours will be CCW oriented even though the input paths are CW oriented.
|
||||
// Offset is applied after contour reorientation, thus the signum of the offset value is reversed.
|
||||
offsetter.Clear();
|
||||
offsetter.AddPath(expoly.contour_or_hole(icontour).points, ClipperLib::jtSquare, ClipperLib::etClosedPolygon);
|
||||
expansion_cache.clear();
|
||||
offsetter.Execute(expansion_cache, icontour == 0 ? expansion : -expansion);
|
||||
append(out, ClipperZUtils::to_zpaths<true>(expansion_cache, base_idx));
|
||||
}
|
||||
++ base_idx;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
// Paths were created by splitting closed polygons into open paths and then by clipping them.
|
||||
// Thus some pieces of the clipped polygons may now become split at the ends of the source polygons.
|
||||
// Those ends are sorted lexicographically in "splits".
|
||||
// Reconnect those split pieces.
|
||||
static inline void merge_splits(ClipperLib_Z::Paths &paths, std::vector<std::pair<ClipperLib_Z::IntPoint, int>> &splits)
|
||||
{
|
||||
for (auto it_path = paths.begin(); it_path != paths.end(); ) {
|
||||
ClipperLib_Z::Path &path = *it_path;
|
||||
assert(path.size() >= 2);
|
||||
bool merged = false;
|
||||
if (path.size() >= 2) {
|
||||
const ClipperLib_Z::IntPoint &front = path.front();
|
||||
const ClipperLib_Z::IntPoint &back = path.back();
|
||||
// The path before clipping was supposed to cross the clipping boundary or be fully out of it.
|
||||
// Thus the clipped contour is supposed to become open, with one exception: The anchor expands into a closed hole.
|
||||
if (front.x() != back.x() || front.y() != back.y()) {
|
||||
// Look up the ends in "splits", possibly join the contours.
|
||||
// "splits" maps into the other piece connected to the same end point.
|
||||
auto find_end = [&splits](const ClipperLib_Z::IntPoint &pt) -> std::pair<ClipperLib_Z::IntPoint, int>* {
|
||||
auto it = std::lower_bound(splits.begin(), splits.end(), pt,
|
||||
[](const auto &l, const auto &r){ return ClipperZUtils::zpoint_lower(l.first, r); });
|
||||
return it != splits.end() && it->first == pt ? &(*it) : nullptr;
|
||||
};
|
||||
auto *end = find_end(front);
|
||||
bool end_front = true;
|
||||
if (! end) {
|
||||
end_front = false;
|
||||
end = find_end(back);
|
||||
}
|
||||
if (end) {
|
||||
// This segment ends at a split point of the source closed contour before clipping.
|
||||
if (end->second == -1) {
|
||||
// Open end was found, not matched yet.
|
||||
end->second = int(it_path - paths.begin());
|
||||
} else {
|
||||
// Open end was found and matched with end->second
|
||||
ClipperLib_Z::Path &other_path = paths[end->second];
|
||||
polylines_merge(other_path, other_path.front() == end->first, std::move(path), end_front);
|
||||
if (std::next(it_path) == paths.end()) {
|
||||
paths.pop_back();
|
||||
break;
|
||||
}
|
||||
path = std::move(paths.back());
|
||||
paths.pop_back();
|
||||
merged = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (! merged)
|
||||
++ it_path;
|
||||
}
|
||||
}
|
||||
|
||||
using AABBTreeBBoxes = AABBTreeIndirect::Tree<2, coord_t>;
|
||||
|
||||
static AABBTreeBBoxes build_aabb_tree_over_expolygons(const ExPolygons &expolygons)
|
||||
{
|
||||
// Calculate bounding boxes of internal slices.
|
||||
std::vector<AABBTreeIndirect::BoundingBoxWrapper> bboxes;
|
||||
bboxes.reserve(expolygons.size());
|
||||
for (size_t i = 0; i < expolygons.size(); ++ i)
|
||||
bboxes.emplace_back(i, get_extents(expolygons[i].contour));
|
||||
// Build AABB tree over bounding boxes of boundary expolygons.
|
||||
AABBTreeBBoxes out;
|
||||
out.build_modify_input(bboxes);
|
||||
return out;
|
||||
}
|
||||
|
||||
static int sample_in_expolygons(
|
||||
// AABB tree over boundary expolygons
|
||||
const AABBTreeBBoxes &aabb_tree,
|
||||
const ExPolygons &expolygons,
|
||||
const Point &sample)
|
||||
{
|
||||
int out = -1;
|
||||
AABBTreeIndirect::traverse(aabb_tree,
|
||||
[&sample](const AABBTreeBBoxes::Node &node) {
|
||||
return node.bbox.contains(sample);
|
||||
},
|
||||
[&expolygons, &sample, &out](const AABBTreeBBoxes::Node &node) {
|
||||
assert(node.is_leaf());
|
||||
assert(node.is_valid());
|
||||
if (expolygons[node.idx].contains(sample)) {
|
||||
out = int(node.idx);
|
||||
// Stop traversal.
|
||||
return false;
|
||||
}
|
||||
// Continue traversal.
|
||||
return true;
|
||||
});
|
||||
return out;
|
||||
}
|
||||
|
||||
std::vector<WaveSeed> wave_seeds(
|
||||
// Source regions that are supposed to touch the boundary.
|
||||
const ExPolygons &src,
|
||||
// Boundaries of source regions touching the "boundary" regions will be expanded into the "boundary" region.
|
||||
const ExPolygons &boundary,
|
||||
// Initial expansion of src to make the source regions intersect with boundary regions just a bit.
|
||||
float tiny_expansion,
|
||||
// Sort output by boundary ID and source ID.
|
||||
bool sorted)
|
||||
{
|
||||
assert(tiny_expansion > 0);
|
||||
|
||||
if (src.empty() || boundary.empty())
|
||||
return {};
|
||||
|
||||
using Intersection = ClipperZUtils::ClipperZIntersectionVisitor::Intersection;
|
||||
using Intersections = ClipperZUtils::ClipperZIntersectionVisitor::Intersections;
|
||||
|
||||
ClipperLib_Z::Paths segments;
|
||||
Intersections intersections;
|
||||
|
||||
coord_t idx_boundary_begin = 1;
|
||||
coord_t idx_boundary_end = idx_boundary_begin;
|
||||
coord_t idx_src_end;
|
||||
|
||||
{
|
||||
ClipperLib_Z::Clipper zclipper;
|
||||
ClipperZUtils::ClipperZIntersectionVisitor visitor(intersections);
|
||||
zclipper.ZFillFunction(visitor.clipper_callback());
|
||||
// as closed contours
|
||||
zclipper.AddPaths(ClipperZUtils::expolygons_to_zpaths(boundary, idx_boundary_end), ClipperLib_Z::ptClip, true);
|
||||
// as open contours
|
||||
std::vector<std::pair<ClipperLib_Z::IntPoint, int>> zsrc_splits;
|
||||
{
|
||||
idx_src_end = idx_boundary_end;
|
||||
ClipperLib_Z::Paths zsrc = expolygons_to_zpaths_expanded_opened(src, tiny_expansion, idx_src_end);
|
||||
zclipper.AddPaths(zsrc, ClipperLib_Z::ptSubject, false);
|
||||
zsrc_splits.reserve(zsrc.size());
|
||||
for (const ClipperLib_Z::Path &path : zsrc) {
|
||||
assert(path.size() >= 2);
|
||||
assert(path.front() == path.back());
|
||||
zsrc_splits.emplace_back(path.front(), -1);
|
||||
}
|
||||
std::sort(zsrc_splits.begin(), zsrc_splits.end(), [](const auto &l, const auto &r){ return ClipperZUtils::zpoint_lower(l.first, r.first); });
|
||||
}
|
||||
ClipperLib_Z::PolyTree polytree;
|
||||
zclipper.Execute(ClipperLib_Z::ctIntersection, polytree, ClipperLib_Z::pftNonZero, ClipperLib_Z::pftNonZero);
|
||||
ClipperLib_Z::PolyTreeToPaths(std::move(polytree), segments);
|
||||
merge_splits(segments, zsrc_splits);
|
||||
}
|
||||
|
||||
// AABBTree over bounding boxes of boundaries.
|
||||
// Only built if necessary, that is if any of the seed contours is closed, thus there is no intersection point
|
||||
// with the boundary and all Z coordinates of the closed contour point to the source contour.
|
||||
AABBTreeBBoxes aabb_tree;
|
||||
|
||||
// Sort paths into their respective islands.
|
||||
// Each src x boundary will be processed (wave expanded) independently.
|
||||
// Multiple pieces of a single src may intersect the same boundary.
|
||||
WaveSeeds out;
|
||||
out.reserve(segments.size());
|
||||
int iseed = 0;
|
||||
for (const ClipperLib_Z::Path &path : segments) {
|
||||
assert(path.size() >= 2);
|
||||
const ClipperLib_Z::IntPoint &front = path.front();
|
||||
const ClipperLib_Z::IntPoint &back = path.back();
|
||||
// Both ends of a seed segment are supposed to be inside a single boundary expolygon.
|
||||
// Thus as long as the seed contour is not closed, it should be open at a boundary point.
|
||||
assert((front == back && front.z() >= idx_boundary_end && front.z() < idx_src_end) ||
|
||||
//(front.z() < 0 && back.z() < 0));
|
||||
// Hope that at least one end of an open polyline is clipped by the boundary, thus an intersection point is created.
|
||||
(front.z() < 0 || back.z() < 0));
|
||||
const Intersection *intersection = nullptr;
|
||||
auto intersection_point_valid = [idx_boundary_end, idx_src_end](const Intersection &is) {
|
||||
return is.first >= 1 && is.first < idx_boundary_end &&
|
||||
is.second >= idx_boundary_end && is.second < idx_src_end;
|
||||
};
|
||||
if (front.z() < 0) {
|
||||
const Intersection &is = intersections[- front.z() - 1];
|
||||
assert(intersection_point_valid(is));
|
||||
if (intersection_point_valid(is))
|
||||
intersection = &is;
|
||||
}
|
||||
if (! intersection && back.z() < 0) {
|
||||
const Intersection &is = intersections[- back.z() - 1];
|
||||
assert(intersection_point_valid(is));
|
||||
if (intersection_point_valid(is))
|
||||
intersection = &is;
|
||||
}
|
||||
if (intersection) {
|
||||
// The path intersects the boundary contour at least at one side.
|
||||
out.push_back({ uint32_t(intersection->second - idx_boundary_end), uint32_t(intersection->first - 1), ClipperZUtils::from_zpath(path) });
|
||||
} else {
|
||||
// This should be a closed contour.
|
||||
assert(front == back && front.z() >= idx_boundary_end && front.z() < idx_src_end);
|
||||
// Find a source boundary expolygon of one sample of this closed path.
|
||||
if (aabb_tree.empty())
|
||||
aabb_tree = build_aabb_tree_over_expolygons(boundary);
|
||||
int boundary_id = sample_in_expolygons(aabb_tree, boundary, Point(front.x(), front.y()));
|
||||
// Boundary that contains the sample point was found.
|
||||
assert(boundary_id >= 0);
|
||||
if (boundary_id >= 0)
|
||||
out.push_back({ uint32_t(front.z() - idx_boundary_end), uint32_t(boundary_id), ClipperZUtils::from_zpath(path) });
|
||||
}
|
||||
++ iseed;
|
||||
}
|
||||
|
||||
if (sorted)
|
||||
// Sort the seeds by their intersection boundary and source contour.
|
||||
std::sort(out.begin(), out.end(), lower_by_boundary_and_src);
|
||||
return out;
|
||||
}
|
||||
|
||||
static ClipperLib::Paths wavefront_initial(ClipperLib::ClipperOffset &co, const ClipperLib::Paths &polylines, float offset)
|
||||
{
|
||||
ClipperLib::Paths out;
|
||||
out.reserve(polylines.size());
|
||||
ClipperLib::Paths out_this;
|
||||
for (const ClipperLib::Path &path : polylines) {
|
||||
assert(path.size() >= 2);
|
||||
co.Clear();
|
||||
co.AddPath(path, jtRound, path.front() == path.back() ? ClipperLib::etClosedLine : ClipperLib::etOpenRound);
|
||||
co.Execute(out_this, offset);
|
||||
append(out, std::move(out_this));
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
// Input polygons may consist of multiple expolygons, even nested expolygons.
|
||||
// After inflation some polygons may thus overlap, however the overlap is being resolved during the successive
|
||||
// clipping operation, thus it is not being done here.
|
||||
static ClipperLib::Paths wavefront_step(ClipperLib::ClipperOffset &co, const ClipperLib::Paths &polygons, float offset)
|
||||
{
|
||||
ClipperLib::Paths out;
|
||||
out.reserve(polygons.size());
|
||||
ClipperLib::Paths out_this;
|
||||
for (const ClipperLib::Path &polygon : polygons) {
|
||||
co.Clear();
|
||||
// Execute reorients the contours so that the outer most contour has a positive area. Thus the output
|
||||
// contours will be CCW oriented even though the input paths are CW oriented.
|
||||
// Offset is applied after contour reorientation, thus the signum of the offset value is reversed.
|
||||
co.AddPath(polygon, jtRound, ClipperLib::etClosedPolygon);
|
||||
bool ccw = ClipperLib::Orientation(polygon);
|
||||
co.Execute(out_this, ccw ? offset : - offset);
|
||||
if (! ccw) {
|
||||
// Reverse the resulting contours.
|
||||
for (ClipperLib::Path &path : out_this)
|
||||
std::reverse(path.begin(), path.end());
|
||||
}
|
||||
append(out, std::move(out_this));
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
static ClipperLib::Paths wavefront_clip(const ClipperLib::Paths &wavefront, const Polygons &clipping)
|
||||
{
|
||||
ClipperLib::Clipper clipper;
|
||||
clipper.AddPaths(wavefront, ClipperLib::ptSubject, true);
|
||||
clipper.AddPaths(ClipperUtils::PolygonsProvider(clipping), ClipperLib::ptClip, true);
|
||||
ClipperLib::Paths out;
|
||||
clipper.Execute(ClipperLib::ctIntersection, out, ClipperLib::pftPositive, ClipperLib::pftPositive);
|
||||
return out;
|
||||
}
|
||||
|
||||
static Polygons propagate_wave_from_boundary(
|
||||
ClipperLib::ClipperOffset &co,
|
||||
// Seed of the wave: Open polylines very close to the boundary.
|
||||
const ClipperLib::Paths &seed,
|
||||
// Boundary inside which the waveform will propagate.
|
||||
const ExPolygon &boundary,
|
||||
// How much to inflate the seed lines to produce the first wave area.
|
||||
const float initial_step,
|
||||
// How much to inflate the first wave area and the successive wave areas in each step.
|
||||
const float other_step,
|
||||
// Number of inflate steps after the initial step.
|
||||
const size_t num_other_steps,
|
||||
// Maximum inflation of seed contours over the boundary. Used to trim boundary to speed up
|
||||
// clipping during wave propagation.
|
||||
const float max_inflation)
|
||||
{
|
||||
assert(! seed.empty() && seed.front().size() >= 2);
|
||||
Polygons clipping = ClipperUtils::clip_clipper_polygons_with_subject_bbox(boundary, get_extents<true>(seed).inflated(max_inflation));
|
||||
ClipperLib::Paths polygons = wavefront_clip(wavefront_initial(co, seed, initial_step), clipping);
|
||||
// Now offset the remaining
|
||||
for (size_t ioffset = 0; ioffset < num_other_steps; ++ ioffset)
|
||||
polygons = wavefront_clip(wavefront_step(co, polygons, other_step), clipping);
|
||||
return to_polygons(polygons);
|
||||
}
|
||||
|
||||
// Resulting regions are sorted by boundary id and source id.
|
||||
std::vector<RegionExpansion> propagate_waves(const WaveSeeds &seeds, const ExPolygons &boundary, const RegionExpansionParameters ¶ms)
|
||||
{
|
||||
std::vector<RegionExpansion> out;
|
||||
ClipperLib::Paths paths;
|
||||
ClipperLib::ClipperOffset co;
|
||||
co.ArcTolerance = params.arc_tolerance;
|
||||
co.ShortestEdgeLength = params.shortest_edge_length;
|
||||
for (auto it_seed = seeds.begin(); it_seed != seeds.end();) {
|
||||
auto it = it_seed;
|
||||
paths.clear();
|
||||
for (; it != seeds.end() && it->boundary == it_seed->boundary && it->src == it_seed->src; ++ it)
|
||||
paths.emplace_back(it->path);
|
||||
// Propagate the wavefront while clipping it with the trimmed boundary.
|
||||
// Collect the expanded polygons, merge them with the source polygons.
|
||||
RegionExpansion re;
|
||||
for (Polygon &polygon : propagate_wave_from_boundary(co, paths, boundary[it_seed->boundary], params.initial_step, params.other_step, params.num_other_steps, params.max_inflation))
|
||||
out.push_back({ std::move(polygon), it_seed->src, it_seed->boundary });
|
||||
it_seed = it;
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
std::vector<RegionExpansion> propagate_waves(const ExPolygons &src, const ExPolygons &boundary, const RegionExpansionParameters ¶ms)
|
||||
{
|
||||
return propagate_waves(wave_seeds(src, boundary, params.tiny_expansion, true), boundary, params);
|
||||
}
|
||||
|
||||
std::vector<RegionExpansion> propagate_waves(const ExPolygons &src, const ExPolygons &boundary,
|
||||
// Scaled expansion value
|
||||
float expansion,
|
||||
// Expand by waves of expansion_step size (expansion_step is scaled).
|
||||
float expansion_step,
|
||||
// Don't take more than max_nr_steps for small expansion_step.
|
||||
size_t max_nr_steps)
|
||||
{
|
||||
return propagate_waves(src, boundary, RegionExpansionParameters::build(expansion, expansion_step, max_nr_steps));
|
||||
}
|
||||
|
||||
// Returns regions per source ExPolygon expanded into boundary.
|
||||
std::vector<RegionExpansionEx> propagate_waves_ex(const WaveSeeds &seeds, const ExPolygons &boundary, const RegionExpansionParameters ¶ms)
|
||||
{
|
||||
std::vector<RegionExpansion> expanded = propagate_waves(seeds, boundary, params);
|
||||
assert(std::is_sorted(seeds.begin(), seeds.end(), [](const auto &l, const auto &r){ return l.boundary < r.boundary || (l.boundary == r.boundary && l.src < r.src); }));
|
||||
Polygons acc;
|
||||
std::vector<RegionExpansionEx> out;
|
||||
for (auto it = expanded.begin(); it != expanded.end(); ) {
|
||||
auto it2 = it;
|
||||
acc.clear();
|
||||
for (; it2 != expanded.end() && it2->boundary_id == it->boundary_id && it2->src_id == it->src_id; ++ it2)
|
||||
acc.emplace_back(std::move(it2->polygon));
|
||||
size_t size = it2 - it;
|
||||
if (size == 1)
|
||||
out.push_back({ ExPolygon{std::move(acc.front())}, it->src_id, it->boundary_id });
|
||||
else {
|
||||
ExPolygons expolys = union_ex(acc);
|
||||
reserve_more_power_of_2(out, expolys.size());
|
||||
for (ExPolygon &ex : expolys)
|
||||
out.push_back({ std::move(ex), it->src_id, it->boundary_id });
|
||||
}
|
||||
it = it2;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
// Returns regions per source ExPolygon expanded into boundary.
|
||||
std::vector<RegionExpansionEx> propagate_waves_ex(
|
||||
// Source regions that are supposed to touch the boundary.
|
||||
// Boundaries of source regions touching the "boundary" regions will be expanded into the "boundary" region.
|
||||
const ExPolygons &src,
|
||||
const ExPolygons &boundary,
|
||||
// Scaled expansion value
|
||||
float full_expansion,
|
||||
// Expand by waves of expansion_step size (expansion_step is scaled).
|
||||
float expansion_step,
|
||||
// Don't take more than max_nr_steps for small expansion_step.
|
||||
size_t max_nr_expansion_steps)
|
||||
{
|
||||
auto params = RegionExpansionParameters::build(full_expansion, expansion_step, max_nr_expansion_steps);
|
||||
return propagate_waves_ex(wave_seeds(src, boundary, params.tiny_expansion, true), boundary, params);
|
||||
}
|
||||
|
||||
std::vector<Polygons> expand_expolygons(const ExPolygons &src, const ExPolygons &boundary,
|
||||
// Scaled expansion value
|
||||
float expansion,
|
||||
// Expand by waves of expansion_step size (expansion_step is scaled).
|
||||
float expansion_step,
|
||||
// Don't take more than max_nr_steps for small expansion_step.
|
||||
size_t max_nr_steps)
|
||||
{
|
||||
std::vector<Polygons> out(src.size(), Polygons{});
|
||||
for (RegionExpansion &r : propagate_waves(src, boundary, expansion, expansion_step, max_nr_steps))
|
||||
out[r.src_id].emplace_back(std::move(r.polygon));
|
||||
return out;
|
||||
}
|
||||
|
||||
std::vector<ExPolygon> merge_expansions_into_expolygons(ExPolygons &&src, std::vector<RegionExpansion> &&expanded)
|
||||
{
|
||||
// expanded regions will be merged into source regions, thus they will be re-sorted by source id.
|
||||
std::sort(expanded.begin(), expanded.end(), [](const auto &l, const auto &r) { return l.src_id < r.src_id; });
|
||||
uint32_t last = 0;
|
||||
Polygons acc;
|
||||
ExPolygons out;
|
||||
out.reserve(src.size());
|
||||
for (auto it = expanded.begin(); it != expanded.end();) {
|
||||
for (; last < it->src_id; ++ last)
|
||||
out.emplace_back(std::move(src[last]));
|
||||
acc.clear();
|
||||
assert(it->src_id == last);
|
||||
for (; it != expanded.end() && it->src_id == last; ++ it)
|
||||
acc.emplace_back(std::move(it->polygon));
|
||||
//FIXME offset & merging could be more efficient, for example one does not need to copy the source expolygon
|
||||
ExPolygon &src_ex = src[last ++];
|
||||
assert(! src_ex.contour.empty());
|
||||
#if 0
|
||||
{
|
||||
static int iRun = 0;
|
||||
BoundingBox bbox = get_extents(acc);
|
||||
bbox.merge(get_extents(src_ex));
|
||||
SVG svg(debug_out_path("expand_merge_expolygons-failed-union=%d.svg", iRun ++).c_str(), bbox);
|
||||
svg.draw(acc);
|
||||
svg.draw_outline(acc, "black", scale_(0.05));
|
||||
svg.draw(src_ex, "red");
|
||||
svg.Close();
|
||||
}
|
||||
#endif
|
||||
Point sample = src_ex.contour.front();
|
||||
append(acc, to_polygons(std::move(src_ex)));
|
||||
ExPolygons merged = union_safety_offset_ex(acc);
|
||||
// Expanding one expolygon by waves should not change connectivity of the source expolygon:
|
||||
// Single expolygon should be produced possibly with increased number of holes.
|
||||
if (merged.size() > 1) {
|
||||
// assert(merged.size() == 1);
|
||||
// There is something wrong with the initial waves. Most likely the bridge was not valid at all
|
||||
// or the boundary region was very close to some bridge edge, but not really touching.
|
||||
// Pick only a single merged expolygon, which contains one sample point of the source expolygon.
|
||||
auto aabb_tree = build_aabb_tree_over_expolygons(merged);
|
||||
int id = sample_in_expolygons(aabb_tree, merged, sample);
|
||||
assert(id != -1);
|
||||
if (id != -1)
|
||||
out.emplace_back(std::move(merged[id]));
|
||||
} else if (merged.size() == 1)
|
||||
out.emplace_back(std::move(merged.front()));
|
||||
}
|
||||
for (; last < uint32_t(src.size()); ++ last)
|
||||
out.emplace_back(std::move(src[last]));
|
||||
return out;
|
||||
}
|
||||
|
||||
std::vector<ExPolygon> expand_merge_expolygons(ExPolygons &&src, const ExPolygons &boundary, const RegionExpansionParameters ¶ms)
|
||||
{
|
||||
// expanded regions are sorted by boundary id and source id
|
||||
std::vector<RegionExpansion> expanded = propagate_waves(src, boundary, params);
|
||||
return merge_expansions_into_expolygons(std::move(src), std::move(expanded));
|
||||
}
|
||||
|
||||
} // Algorithm
|
||||
} // Slic3r
|
src/libslic3r/Algorithm/RegionExpansion.hpp (new file, 122 lines)

@@ -0,0 +1,122 @@
///|/ Copyright (c) Prusa Research 2022 - 2023 Vojtěch Bubník @bubnikv
///|/
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
///|/
#ifndef SRC_LIBSLIC3R_ALGORITHM_REGION_EXPANSION_HPP_
#define SRC_LIBSLIC3R_ALGORITHM_REGION_EXPANSION_HPP_

#include <cstdint>
#include <libslic3r/Point.hpp>
#include <libslic3r/Polygon.hpp>
#include <libslic3r/ExPolygon.hpp>

namespace Slic3r {
namespace Algorithm {

struct RegionExpansionParameters
{
    // Initial expansion of src to make the source regions intersect with boundary regions just a bit.
    float tiny_expansion;
    // How much to inflate the seed lines to produce the first wave area.
    float initial_step;
    // How much to inflate the first wave area and the successive wave areas in each step.
    float other_step;
    // Number of inflate steps after the initial step.
    size_t num_other_steps;
    // Maximum inflation of seed contours over the boundary. Used to trim boundary to speed up
    // clipping during wave propagation.
    float max_inflation;

    // Accuracy of the offsetter for wave propagation.
    double arc_tolerance;
    double shortest_edge_length;

    static RegionExpansionParameters build(
        // Scaled expansion value
        float full_expansion,
        // Expand by waves of expansion_step size (expansion_step is scaled).
        float expansion_step,
        // Don't take more than max_nr_steps for small expansion_step.
        size_t max_nr_expansion_steps);
};

struct WaveSeed {
    uint32_t src;
    uint32_t boundary;
    Points   path;
};
using WaveSeeds = std::vector<WaveSeed>;

inline bool lower_by_boundary_and_src(const WaveSeed &l, const WaveSeed &r)
{
    return l.boundary < r.boundary || (l.boundary == r.boundary && l.src < r.src);
}

inline bool lower_by_src_and_boundary(const WaveSeed &l, const WaveSeed &r)
{
    return l.src < r.src || (l.src == r.src && l.boundary < r.boundary);
}

// Expand src slightly outwards to intersect boundaries, trim the offsetted src polylines by the boundaries.
// Return the trimmed paths annotated with their origin (source of the path, index of the boundary region).
WaveSeeds wave_seeds(
    // Source regions that are supposed to touch the boundary.
    const ExPolygons &src,
    // Boundaries of source regions touching the "boundary" regions will be expanded into the "boundary" region.
    const ExPolygons &boundary,
    // Initial expansion of src to make the source regions intersect with boundary regions just a bit.
    float tiny_expansion,
    bool sorted);

struct RegionExpansion
{
    Polygon  polygon;
    uint32_t src_id;
    uint32_t boundary_id;
};

std::vector<RegionExpansion> propagate_waves(const WaveSeeds &seeds, const ExPolygons &boundary, const RegionExpansionParameters &params);
std::vector<RegionExpansion> propagate_waves(const ExPolygons &src, const ExPolygons &boundary, const RegionExpansionParameters &params);

std::vector<RegionExpansion> propagate_waves(const ExPolygons &src, const ExPolygons &boundary,
    // Scaled expansion value
    float expansion,
    // Expand by waves of expansion_step size (expansion_step is scaled).
    float expansion_step,
    // Don't take more than max_nr_steps for small expansion_step.
    size_t max_nr_steps);

struct RegionExpansionEx
{
    ExPolygon expolygon;
    uint32_t  src_id;
    uint32_t  boundary_id;
};

std::vector<RegionExpansionEx> propagate_waves_ex(const WaveSeeds &seeds, const ExPolygons &boundary, const RegionExpansionParameters &params);

std::vector<RegionExpansionEx> propagate_waves_ex(const ExPolygons &src, const ExPolygons &boundary,
    // Scaled expansion value
    float expansion,
    // Expand by waves of expansion_step size (expansion_step is scaled).
    float expansion_step,
    // Don't take more than max_nr_steps for small expansion_step.
    size_t max_nr_steps);

std::vector<Polygons> expand_expolygons(const ExPolygons &src, const ExPolygons &boundary,
    // Scaled expansion value
    float expansion,
    // Expand by waves of expansion_step size (expansion_step is scaled).
    float expansion_step,
    // Don't take more than max_nr_steps for small expansion_step.
    size_t max_nr_steps);

// Merge src with expansions, return the merged expolygons.
std::vector<ExPolygon> merge_expansions_into_expolygons(ExPolygons &&src, std::vector<RegionExpansion> &&expanded);

std::vector<ExPolygon> expand_merge_expolygons(ExPolygons &&src, const ExPolygons &boundary, const RegionExpansionParameters &params);

} // Algorithm
} // Slic3r

#endif /* SRC_LIBSLIC3R_ALGORITHM_REGION_EXPANSION_HPP_ */
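A minimal usage sketch (not part of this diff) of the public entry point declared above: expanding bridge-like source regions into a neighbouring boundary layer. The numeric values (2 mm total expansion, 0.1 mm waves, at most 20 steps) and the wrapper name are illustrative only:

    #include <libslic3r/ExPolygon.hpp>
    #include "Algorithm/RegionExpansion.hpp"

    namespace Slic3r {

    // Grow each source region into `boundary`; result[i] holds the pieces grown from src[i].
    std::vector<Polygons> grow_into_boundary(const ExPolygons &src, const ExPolygons &boundary)
    {
        return Algorithm::expand_expolygons(src, boundary,
            /*expansion=*/scaled<float>(2.0), /*expansion_step=*/scaled<float>(0.1), /*max_nr_steps=*/20);
    }

    } // namespace Slic3r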
@@ -23,6 +23,35 @@
namespace Slic3r::Arachne
{

WallToolPathsParams make_paths_params(const int layer_id, const PrintObjectConfig &print_object_config, const PrintConfig &print_config)
{
    WallToolPathsParams input_params;
    {
        const double min_nozzle_diameter = *std::min_element(print_config.nozzle_diameter.values.begin(), print_config.nozzle_diameter.values.end());
        if (const auto &min_feature_size_opt = print_object_config.min_feature_size)
            input_params.min_feature_size = min_feature_size_opt.value * 0.01 * min_nozzle_diameter;

        if (layer_id == 0) {
            if (const auto &initial_layer_min_bead_width_opt = print_object_config.initial_layer_min_bead_width)
                input_params.min_bead_width = initial_layer_min_bead_width_opt.value * 0.01 * min_nozzle_diameter;
        } else {
            if (const auto &min_bead_width_opt = print_object_config.min_bead_width)
                input_params.min_bead_width = min_bead_width_opt.value * 0.01 * min_nozzle_diameter;
        }

        if (const auto &wall_transition_filter_deviation_opt = print_object_config.wall_transition_filter_deviation)
            input_params.wall_transition_filter_deviation = wall_transition_filter_deviation_opt.value * 0.01 * min_nozzle_diameter;

        if (const auto &wall_transition_length_opt = print_object_config.wall_transition_length)
            input_params.wall_transition_length = wall_transition_length_opt.value * 0.01 * min_nozzle_diameter;

        input_params.wall_transition_angle   = print_object_config.wall_transition_angle.value;
        input_params.wall_distribution_count = print_object_config.wall_distribution_count.value;
    }

    return input_params;
}

WallToolPaths::WallToolPaths(const Polygons& outline, const coord_t bead_width_0, const coord_t bead_width_x,
                             const size_t inset_count, const coord_t wall_0_inset, const coordf_t layer_height, const WallToolPathsParams &params)
    : outline(outline)
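The new make_paths_params() helper above interprets these Arachne options as percentages of the smallest configured nozzle diameter. A worked example with illustrative numbers (0.4 mm nozzle, min_feature_size = 25 %), mirroring the value * 0.01 * min_nozzle_diameter conversion:

    // Illustrative values only.
    const double min_nozzle_diameter      = 0.4;  // mm, smallest nozzle in the printer config
    const double min_feature_size_percent = 25.;  // option value, in percent
    const double min_feature_size = min_feature_size_percent * 0.01 * min_nozzle_diameter; // = 0.1 mm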
@@ -31,6 +31,8 @@ public:
    int wall_distribution_count;
};

WallToolPathsParams make_paths_params(const int layer_id, const PrintObjectConfig &print_object_config, const PrintConfig &print_config);

class WallToolPaths
{
public:
@@ -23,24 +23,9 @@ public:
    BoundingBoxBase(const PointClass &p1, const PointClass &p2, const PointClass &p3) :
        min(p1), max(p1), defined(false) { merge(p2); merge(p3); }

    template<class It, class = IteratorOnly<It> >
    BoundingBoxBase(It from, It to) : min(PointClass::Zero()), max(PointClass::Zero())
    {
        if (from == to) {
            this->defined = false;
            // throw Slic3r::InvalidArgument("Empty point set supplied to BoundingBoxBase constructor");
        } else {
            auto it = from;
            this->min = it->template cast<typename PointClass::Scalar>();
            this->max = this->min;
            for (++ it; it != to; ++ it) {
                auto vec = it->template cast<typename PointClass::Scalar>();
                this->min = this->min.cwiseMin(vec);
                this->max = this->max.cwiseMax(vec);
            }
            this->defined = (this->min(0) < this->max(0)) && (this->min(1) < this->max(1));
        }
    }
    template<class It, class = IteratorOnly<It>>
    BoundingBoxBase(It from, It to)
        { construct(*this, from, to); }

    BoundingBoxBase(const std::vector<PointClass> &points)
        : BoundingBoxBase(points.begin(), points.end())

@@ -97,6 +82,30 @@ public:
        os << "[" << bbox.max(0) - bbox.min(0) << " x " << bbox.max(1) - bbox.min(1) << "] from (" << bbox.min(0) << ", " << bbox.min(1) << ")";
        return os;
    }

private:
    // to access construct()
    friend BoundingBox get_extents<false>(const Points &pts);
    friend BoundingBox get_extents<true>(const Points &pts);

    // if IncludeBoundary, then a bounding box is defined even for a single point.
    // otherwise a bounding box is only defined if it has a positive area.
    // The output bounding box is expected to be set to "undefined" initially.
    template<bool IncludeBoundary = false, class BoundingBoxType, class It, class = IteratorOnly<It>>
    static void construct(BoundingBoxType &out, It from, It to)
    {
        if (from != to) {
            auto it = from;
            out.min = it->template cast<typename PointClass::Scalar>();
            out.max = out.min;
            for (++ it; it != to; ++ it) {
                auto vec = it->template cast<typename PointClass::Scalar>();
                out.min = out.min.cwiseMin(vec);
                out.max = out.max.cwiseMax(vec);
            }
            out.defined = IncludeBoundary || (out.min.x() < out.max.x() && out.min.y() < out.max.y());
        }
    }
};

template <class PointClass>
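The refactor above routes the iterator constructor through construct<IncludeBoundary>(), whose flag decides whether a degenerate (zero-area) point set still yields a defined box. A small sketch of the difference, assuming the get_extents<>() overloads declared as friends above:

    #include <libslic3r/BoundingBox.hpp>
    #include <libslic3r/Point.hpp>

    using namespace Slic3r;

    void bounding_box_of_single_point()
    {
        Points pts { Point(100, 100) };                      // a single point, zero area
        BoundingBox strict        = get_extents<false>(pts); // strict.defined == false
        BoundingBox with_boundary = get_extents<true>(pts);  // with_boundary.defined == true
    }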
@@ -1,3 +1,8 @@
///|/ Copyright (c) Prusa Research 2016 - 2023 Pavel Mikuš @Godrak, Vojtěch Bubník @bubnikv
///|/ Copyright (c) Slic3r 2014 - 2015 Alessandro Ranellucci @alranel
///|/
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
///|/
#ifndef slic3r_BridgeDetector_hpp_
#define slic3r_BridgeDetector_hpp_

@@ -73,12 +78,9 @@ private:

//return ideal bridge direction and unsupported bridge endpoints distance.
inline std::tuple<Vec2d, double> detect_bridging_direction(const Polygons &to_cover, const Polygons &anchors_area)
inline std::tuple<Vec2d, double> detect_bridging_direction(const Lines &floating_edges, const Polygons &overhang_area)
{
    Polygons overhang_area = diff(to_cover, anchors_area);
    Polylines floating_polylines = diff_pl(to_polylines(overhang_area), expand(anchors_area, float(SCALED_EPSILON)));

    if (floating_polylines.empty()) {
    if (floating_edges.empty()) {
        // consider this area anchored from all sides, pick bridging direction that will likely yield shortest bridges
        auto [pc1, pc2] = compute_principal_components(overhang_area);
        if (pc2 == Vec2f::Zero()) { // overhang may be smaller than resolution. In this case, any direction is ok

@@ -89,7 +91,6 @@ inline std::tuple<Vec2d, double> detect_bridging_direction(const Polygons &to_co
    }

    // Overhang is not fully surrounded by anchors, in that case, find such direction that will minimize the number of bridge ends/180turns in the air
    Lines floating_edges = to_lines(floating_polylines);
    std::unordered_map<double, Vec2d> directions{};
    for (const Line &l : floating_edges) {
        Vec2d normal = l.normal().cast<double>().normalized();

@@ -123,6 +124,13 @@ inline std::tuple<Vec2d, double> detect_bridging_direction(const Polygons &to_co
    return {result_dir, min_cost};
};

//return ideal bridge direction and unsupported bridge endpoints distance.
inline std::tuple<Vec2d, double> detect_bridging_direction(const Polygons &to_cover, const Polygons &anchors_area)
{
    Polygons overhang_area = diff(to_cover, anchors_area);
    Lines floating_edges = to_lines(diff_pl(to_polylines(overhang_area), expand(anchors_area, float(SCALED_EPSILON))));
    return detect_bridging_direction(floating_edges, overhang_area);
}

}
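The refactor above splits detect_bridging_direction() so callers can pass precomputed floating edges, while the polygon-based overload remains a thin wrapper. A minimal call sketch (not part of this diff; include path and function name are illustrative) using the wrapper:

    #include <libslic3r/BridgeDetector.hpp>

    namespace Slic3r {

    // Pick a bridging direction: `to_cover` is the area the bridge must span,
    // `anchors` the regions it can rest on (both in scaled coordinates).
    Vec2d bridging_direction(const Polygons &to_cover, const Polygons &anchors)
    {
        auto [dir, unsupported_distance] = detect_bridging_direction(to_cover, anchors);
        (void)unsupported_distance; // length of bridge endpoints left hanging in the air
        return dir;
    }

    } // namespace Slic3r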
@@ -1,3 +1,8 @@
///|/ Copyright (c) Prusa Research 2021 - 2023 Vojtěch Bubník @bubnikv, Lukáš Matěna @lukasmatena, Lukáš Hejl @hejllukas
///|/ Copyright (c) SuperSlicer 2023 Remi Durand @supermerill
///|/
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
///|/
#include "clipper/clipper_z.hpp"

#include "ClipperUtils.hpp"

@@ -1856,7 +1861,7 @@ ExtrusionEntityCollection make_brim(const Print &print, PrintTryCancel try_cance
        // perform operation
        ClipperLib_Z::PolyTree loops_trimmed_tree;
        clipper.Execute(ClipperLib_Z::ctDifference, loops_trimmed_tree, ClipperLib_Z::pftNonZero, ClipperLib_Z::pftNonZero);
        ClipperLib_Z::PolyTreeToPaths(loops_trimmed_tree, loops_trimmed);
        ClipperLib_Z::PolyTreeToPaths(std::move(loops_trimmed_tree), loops_trimmed);
    }

    // Third, produce the extrusions, sorted by the source loop indices.
@@ -1,3 +1,10 @@
#/|/ Copyright (c) Prusa Research 2018 - 2023 Tomáš Mészáros @tamasmeszaros, Oleksandra Iushchenko @YuSanka, Enrico Turri @enricoturri1966, Vojtěch Bubník @bubnikv, Pavel Mikuš @Godrak, Lukáš Hejl @hejllukas, Lukáš Matěna @lukasmatena, Filip Sykala @Jony01, David Kocík @kocikdav, Vojtěch Král @vojtechkral
#/|/ Copyright (c) BambuStudio 2023 manch1n @manch1n
#/|/ Copyright (c) 2023 Mimoja @Mimoja
#/|/ Copyright (c) 2022 ole00 @ole00
#/|/
#/|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
#/|/
cmake_minimum_required(VERSION 3.13)
project(libslic3r)

@@ -26,6 +33,9 @@ set(lisbslic3r_sources
    AABBTreeLines.hpp
    AABBMesh.hpp
    AABBMesh.cpp
    Algorithm/PathSorting.hpp
    Algorithm/RegionExpansion.hpp
    Algorithm/RegionExpansion.cpp
    AnyPtr.hpp
    BoundingBox.cpp
    BoundingBox.hpp

@@ -45,6 +55,7 @@ set(lisbslic3r_sources
    ClipperUtils.hpp
    Clipper2Utils.cpp
    Clipper2Utils.hpp
    ClipperZUtils.hpp
    Config.cpp
    Config.hpp
    CurveAnalyzer.cpp
|
||||
|
|
|
@ -1,3 +1,14 @@
|
|||
///|/ Copyright (c) Prusa Research 2016 - 2023 Tomáš Mészáros @tamasmeszaros, Vojtěch Bubník @bubnikv, Pavel Mikuš @Godrak, Lukáš Matěna @lukasmatena, Lukáš Hejl @hejllukas, Filip Sykala @Jony01
|
||||
///|/ Copyright (c) Slic3r 2013 - 2015 Alessandro Ranellucci @alranel
|
||||
///|/ Copyright (c) 2015 Maksim Derbasov @ntfshard
|
||||
///|/
|
||||
///|/ ported from lib/Slic3r/Geometry/Clipper.pm:
|
||||
///|/ Copyright (c) Prusa Research 2016 - 2022 Vojtěch Bubník @bubnikv
|
||||
///|/ Copyright (c) Slic3r 2011 - 2014 Alessandro Ranellucci @alranel
|
||||
///|/ Copyright (c) 2012 - 2013 Mike Sheldrake @mesheldrake
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#include "ClipperUtils.hpp"
|
||||
#include "Geometry.hpp"
|
||||
#include "ShortestPath.hpp"
|
||||
|
@ -19,8 +30,6 @@
|
|||
#define CLIPPERUTILS_PROFILE_BLOCK(name)
|
||||
#endif
|
||||
|
||||
#define CLIPPER_OFFSET_SHORTEST_EDGE_FACTOR (0.005f)
|
||||
|
||||
namespace Slic3r {
|
||||
|
||||
#ifdef CLIPPER_UTILS_DEBUG
|
||||
|
@ -274,7 +283,7 @@ static ClipperLib::Paths raw_offset(PathsProvider &&paths, float offset, Clipper
|
|||
co.ArcTolerance = miterLimit;
|
||||
else
|
||||
co.MiterLimit = miterLimit;
|
||||
co.ShortestEdgeLength = double(std::abs(offset * CLIPPER_OFFSET_SHORTEST_EDGE_FACTOR));
|
||||
co.ShortestEdgeLength = std::abs(offset * ClipperOffsetShortestEdgeFactor);
|
||||
for (const ClipperLib::Path &path : paths) {
|
||||
co.Clear();
|
||||
// Execute reorients the contours so that the outer most contour has a positive area. Thus the output
|
||||
|
@ -425,7 +434,7 @@ static int offset_expolygon_inner(const Slic3r::ExPolygon &expoly, const float d
|
|||
co.ArcTolerance = miterLimit;
|
||||
else
|
||||
co.MiterLimit = miterLimit;
|
||||
co.ShortestEdgeLength = double(std::abs(delta * CLIPPER_OFFSET_SHORTEST_EDGE_FACTOR));
|
||||
co.ShortestEdgeLength = std::abs(delta * ClipperOffsetShortestEdgeFactor);
|
||||
co.AddPath(expoly.contour.points, joinType, ClipperLib::etClosedPolygon);
|
||||
co.Execute(contours, delta);
|
||||
}
|
||||
|
@ -446,7 +455,7 @@ static int offset_expolygon_inner(const Slic3r::ExPolygon &expoly, const float d
|
|||
co.ArcTolerance = miterLimit;
|
||||
else
|
||||
co.MiterLimit = miterLimit;
|
||||
co.ShortestEdgeLength = double(std::abs(delta * CLIPPER_OFFSET_SHORTEST_EDGE_FACTOR));
|
||||
co.ShortestEdgeLength = std::abs(delta * ClipperOffsetShortestEdgeFactor);
|
||||
co.AddPath(hole.points, joinType, ClipperLib::etClosedPolygon);
|
||||
ClipperLib::Paths out2;
|
||||
// Execute reorients the contours so that the outer most contour has a positive area. Thus the output
|
||||
|
@ -714,6 +723,8 @@ Slic3r::ExPolygons diff_ex(const Slic3r::Polygons &subject, const Slic3r::Polygo
|
|||
{ return _clipper_ex(ClipperLib::ctDifference, ClipperUtils::PolygonsProvider(subject), ClipperUtils::PolygonsProvider(clip), do_safety_offset); }
|
||||
Slic3r::ExPolygons diff_ex(const Slic3r::Polygons &subject, const Slic3r::Surfaces &clip, ApplySafetyOffset do_safety_offset)
|
||||
{ return _clipper_ex(ClipperLib::ctDifference, ClipperUtils::PolygonsProvider(subject), ClipperUtils::SurfacesProvider(clip), do_safety_offset); }
|
||||
Slic3r::ExPolygons diff_ex(const Slic3r::Polygon &subject, const Slic3r::ExPolygons &clip, ApplySafetyOffset do_safety_offset)
|
||||
{ return _clipper_ex(ClipperLib::ctDifference, ClipperUtils::SinglePathProvider(subject.points), ClipperUtils::ExPolygonsProvider(clip), do_safety_offset); }
|
||||
Slic3r::ExPolygons diff_ex(const Slic3r::Polygons &subject, const Slic3r::ExPolygons &clip, ApplySafetyOffset do_safety_offset)
|
||||
{ return _clipper_ex(ClipperLib::ctDifference, ClipperUtils::PolygonsProvider(subject), ClipperUtils::ExPolygonsProvider(clip), do_safety_offset); }
|
||||
Slic3r::ExPolygons diff_ex(const Slic3r::ExPolygon &subject, const Slic3r::Polygon &clip, ApplySafetyOffset do_safety_offset)
|
||||
|
@ -734,6 +745,8 @@ Slic3r::ExPolygons diff_ex(const Slic3r::Surfaces &subject, const Slic3r::Surfac
|
|||
{ return _clipper_ex(ClipperLib::ctDifference, ClipperUtils::SurfacesProvider(subject), ClipperUtils::SurfacesProvider(clip), do_safety_offset); }
|
||||
Slic3r::ExPolygons diff_ex(const Slic3r::SurfacesPtr &subject, const Slic3r::Polygons &clip, ApplySafetyOffset do_safety_offset)
|
||||
{ return _clipper_ex(ClipperLib::ctDifference, ClipperUtils::SurfacesPtrProvider(subject), ClipperUtils::PolygonsProvider(clip), do_safety_offset); }
|
||||
Slic3r::ExPolygons diff_ex(const Slic3r::SurfacesPtr &subject, const Slic3r::ExPolygons &clip, ApplySafetyOffset do_safety_offset)
|
||||
{ return _clipper_ex(ClipperLib::ctDifference, ClipperUtils::SurfacesPtrProvider(subject), ClipperUtils::ExPolygonsProvider(clip), do_safety_offset); }
|
||||
// BBS
|
||||
inline Slic3r::ExPolygons diff_ex(const Slic3r::Polygon& subject, const Slic3r::Polygons& clip, ApplySafetyOffset do_safety_offset)
|
||||
{
|
||||
|
@ -1108,7 +1121,7 @@ ClipperLib::Path mittered_offset_path_scaled(const Points &contour, const std::v
|
|||
};
|
||||
|
||||
// Minimum edge length, squared.
|
||||
double lmin = *std::max_element(deltas.begin(), deltas.end()) * CLIPPER_OFFSET_SHORTEST_EDGE_FACTOR;
|
||||
double lmin = *std::max_element(deltas.begin(), deltas.end()) * ClipperOffsetShortestEdgeFactor;
|
||||
double l2min = lmin * lmin;
|
||||
// Minimum angle to consider two edges to be parallel.
|
||||
// Vojtech's estimate.
|
||||
|
|
|
@ -1,3 +1,8 @@
|
|||
///|/ Copyright (c) Prusa Research 2016 - 2023 Tomáš Mészáros @tamasmeszaros, Vojtěch Bubník @bubnikv, Pavel Mikuš @Godrak, Lukáš Matěna @lukasmatena, Lukáš Hejl @hejllukas, Filip Sykala @Jony01
|
||||
///|/ Copyright (c) Slic3r 2013 - 2015 Alessandro Ranellucci @alranel
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#ifndef slic3r_ClipperUtils_hpp_
|
||||
#define slic3r_ClipperUtils_hpp_
|
||||
|
||||
|
@ -27,6 +32,9 @@ static constexpr const Slic3r::ClipperLib::JoinType DefaultLineJoinType = Sl
|
|||
// Miter limit is ignored for jtSquare.
|
||||
static constexpr const double DefaultLineMiterLimit = 0.;
|
||||
|
||||
// Decimation factor applied on input contour when doing offset, multiplied by the offset distance.
|
||||
static constexpr const double ClipperOffsetShortestEdgeFactor = 0.005;
|
||||
|
||||
enum class ApplySafetyOffset {
|
||||
No,
|
||||
Yes
|
||||
|
@ -371,6 +379,8 @@ inline Slic3r::Polygons shrink(const Slic3r::Polygons &polygons, const float d
|
|||
{ assert(delta > 0); return offset(polygons, -delta, joinType, miterLimit); }
|
||||
inline Slic3r::ExPolygons shrink_ex(const Slic3r::Polygons &polygons, const float delta, ClipperLib::JoinType joinType = DefaultJoinType, double miterLimit = DefaultMiterLimit)
|
||||
{ assert(delta > 0); return offset_ex(polygons, -delta, joinType, miterLimit); }
|
||||
inline Slic3r::ExPolygons shrink_ex(const Slic3r::ExPolygons &polygons, const float delta, ClipperLib::JoinType joinType = DefaultJoinType, double miterLimit = DefaultMiterLimit)
|
||||
{ assert(delta > 0); return offset_ex(polygons, -delta, joinType, miterLimit); }
|
||||
|
||||
// Wherever applicable, please use the opening() / closing() variants instead, they convey their purpose better.
|
||||
// Input polygons for negative offset shall be "normalized": There must be no overlap / intersections between the input polygons.
|
||||
|
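A hedged usage sketch of the newly added shrink_ex() overloads (names are illustrative): shrink a set of polygons by a positive distance and get ExPolygons back, mirroring the existing shrink()/offset_ex() pair.

// Illustrative only: shrink filled regions by half an extrusion spacing.
// `fill_regions` and `spacing` are example names, not taken from this commit.
Slic3r::ExPolygons inner = shrink_ex(fill_regions, 0.5f * spacing);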
@ -436,7 +446,8 @@ Slic3r::ExPolygons diff_ex(const Slic3r::Surfaces &subject, const Slic3r::ExPoly
|
|||
Slic3r::ExPolygons diff_ex(const Slic3r::ExPolygons &subject, const Slic3r::Surfaces &clip, ApplySafetyOffset do_safety_offset = ApplySafetyOffset::No);
|
||||
Slic3r::ExPolygons diff_ex(const Slic3r::Surfaces &subject, const Slic3r::Surfaces &clip, ApplySafetyOffset do_safety_offset = ApplySafetyOffset::No);
|
||||
Slic3r::ExPolygons diff_ex(const Slic3r::SurfacesPtr &subject, const Slic3r::Polygons &clip, ApplySafetyOffset do_safety_offset = ApplySafetyOffset::No);
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polyline& subject, const Slic3r::Polygons& clip);
|
||||
Slic3r::ExPolygons diff_ex(const Slic3r::SurfacesPtr &subject, const Slic3r::ExPolygons &clip, ApplySafetyOffset do_safety_offset = ApplySafetyOffset::No);
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polyline &subject, const Slic3r::Polygons &clip);
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polylines &subject, const Slic3r::Polygons &clip);
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polyline &subject, const Slic3r::ExPolygon &clip);
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polylines &subject, const Slic3r::ExPolygon &clip);
|
||||
|
@ -643,6 +654,6 @@ Polygons variable_offset_outer(const ExPolygon &expoly, const std::vector<std::
|
|||
ExPolygons variable_offset_outer_ex(const ExPolygon &expoly, const std::vector<std::vector<float>> &deltas, double miter_limit = 2.);
|
||||
ExPolygons variable_offset_inner_ex(const ExPolygon &expoly, const std::vector<std::vector<float>> &deltas, double miter_limit = 2.);
|
||||
|
||||
}
|
||||
} // namespace Slic3r
|
||||
|
||||
#endif
|
||||
#endif // slic3r_ClipperUtils_hpp_
|
||||
|
|
147
src/libslic3r/ClipperZUtils.hpp
Normal file
|
@ -0,0 +1,147 @@
|
|||
///|/ Copyright (c) Prusa Research 2022 - 2023 Vojtěch Bubník @bubnikv
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#ifndef slic3r_ClipperZUtils_hpp_
|
||||
#define slic3r_ClipperZUtils_hpp_
|
||||
|
||||
#include <numeric>
|
||||
#include <vector>
|
||||
|
||||
#include <clipper/clipper_z.hpp>
|
||||
#include <libslic3r/Point.hpp>
|
||||
|
||||
namespace Slic3r {
|
||||
|
||||
namespace ClipperZUtils {
|
||||
|
||||
using ZPoint = ClipperLib_Z::IntPoint;
|
||||
using ZPoints = ClipperLib_Z::Path;
|
||||
using ZPath = ClipperLib_Z::Path;
|
||||
using ZPaths = ClipperLib_Z::Paths;
|
||||
|
||||
inline bool zpoint_lower(const ZPoint &l, const ZPoint &r)
|
||||
{
|
||||
return l.x() < r.x() || (l.x() == r.x() && (l.y() < r.y() || (l.y() == r.y() && l.z() < r.z())));
|
||||
}
|
||||
|
||||
// Convert a single path to path with a given Z coordinate.
|
||||
// If Open, then duplicate the first point at the end.
|
||||
template<bool Open = false>
|
||||
inline ZPath to_zpath(const Points &path, coord_t z)
|
||||
{
|
||||
ZPath out;
|
||||
if (! path.empty()) {
|
||||
out.reserve(path.size() + (Open ? 1 : 0));
|
||||
for (const Point &p : path)
|
||||
out.emplace_back(p.x(), p.y(), z);
|
||||
if (Open)
|
||||
out.emplace_back(out.front());
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
// Convert multiple paths to paths with a given Z coordinate.
|
||||
// If Open, then duplicate the first point of each path at its end.
|
||||
template<bool Open = false>
|
||||
inline ZPaths to_zpaths(const VecOfPoints &paths, coord_t z)
|
||||
{
|
||||
ZPaths out;
|
||||
out.reserve(paths.size());
|
||||
for (const Points &path : paths)
|
||||
out.emplace_back(to_zpath<Open>(path, z));
|
||||
return out;
|
||||
}
|
||||
|
||||
// Convert multiple expolygons into z-paths with Z specified by an index of the source expolygon
|
||||
// offsetted by base_index.
|
||||
// If Open, then duplicate the first point of each path at its end.
|
||||
template<bool Open = false>
|
||||
inline ZPaths expolygons_to_zpaths(const ExPolygons &src, coord_t &base_idx)
|
||||
{
|
||||
ZPaths out;
|
||||
out.reserve(std::accumulate(src.begin(), src.end(), size_t(0),
|
||||
[](const size_t acc, const ExPolygon &expoly) { return acc + expoly.num_contours(); }));
|
||||
for (const ExPolygon &expoly : src) {
|
||||
out.emplace_back(to_zpath<Open>(expoly.contour.points, base_idx));
|
||||
for (const Polygon &hole : expoly.holes)
|
||||
out.emplace_back(to_zpath<Open>(hole.points, base_idx));
|
||||
++ base_idx;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
// Convert a single Z path back to a plain path, dropping the Z coordinate.
|
||||
// If Open, then duplicate the first point at the end.
|
||||
template<bool Open = false>
|
||||
inline Points from_zpath(const ZPoints &path)
|
||||
{
|
||||
Points out;
|
||||
if (! path.empty()) {
|
||||
out.reserve(path.size() + (Open ? 1 : 0));
|
||||
for (const ZPoint &p : path)
|
||||
out.emplace_back(p.x(), p.y());
|
||||
if (Open)
|
||||
out.emplace_back(out.front());
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
// Convert multiple Z paths back to plain paths, dropping the Z coordinates.
|
||||
// If Open, then duplicate the first point of each path at its end.
|
||||
template<bool Open = false>
|
||||
inline void from_zpaths(const ZPaths &paths, VecOfPoints &out)
|
||||
{
|
||||
out.reserve(out.size() + paths.size());
|
||||
for (const ZPoints &path : paths)
|
||||
out.emplace_back(from_zpath<Open>(path));
|
||||
}
|
||||
template<bool Open = false>
|
||||
inline VecOfPoints from_zpaths(const ZPaths &paths)
|
||||
{
|
||||
VecOfPoints out;
|
||||
from_zpaths(paths, out);
|
||||
return out;
|
||||
}
|
||||
|
||||
class ClipperZIntersectionVisitor {
|
||||
public:
|
||||
using Intersection = std::pair<coord_t, coord_t>;
|
||||
using Intersections = std::vector<Intersection>;
|
||||
ClipperZIntersectionVisitor(Intersections &intersections) : m_intersections(intersections) {}
|
||||
void reset() { m_intersections.clear(); }
|
||||
void operator()(const ZPoint &e1bot, const ZPoint &e1top, const ZPoint &e2bot, const ZPoint &e2top, ZPoint &pt) {
|
||||
coord_t srcs[4]{ e1bot.z(), e1top.z(), e2bot.z(), e2top.z() };
|
||||
coord_t *begin = srcs;
|
||||
coord_t *end = srcs + 4;
|
||||
//FIXME bubble sort manually?
|
||||
std::sort(begin, end);
|
||||
end = std::unique(begin, end);
|
||||
if (begin + 1 == end) {
|
||||
// Self intersection may happen on source contour. Just copy the Z value.
|
||||
pt.z() = *begin;
|
||||
} else {
|
||||
assert(begin + 2 == end);
|
||||
if (begin + 2 <= end) {
|
||||
// Store a negative 1-based index into the "intersections" vector here.
|
||||
m_intersections.emplace_back(srcs[0], srcs[1]);
|
||||
pt.z() = -coord_t(m_intersections.size());
|
||||
}
|
||||
}
|
||||
}
|
||||
ClipperLib_Z::ZFillCallback clipper_callback() {
|
||||
return [this](const ZPoint &e1bot, const ZPoint &e1top,
|
||||
const ZPoint &e2bot, const ZPoint &e2top, ZPoint &pt)
|
||||
{ return (*this)(e1bot, e1top, e2bot, e2top, pt); };
|
||||
}
|
||||
|
||||
const std::vector<std::pair<coord_t, coord_t>>& intersections() const { return m_intersections; }
|
||||
|
||||
private:
|
||||
std::vector<std::pair<coord_t, coord_t>> &m_intersections;
|
||||
};
|
||||
|
||||
} // namespace ClipperZUtils
|
||||
} // namespace Slic3r
|
||||
|
||||
#endif // slic3r_ClipperZUtils_hpp_
|
|
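A sketch of how these helpers are meant to be combined (illustrative; it assumes `lhs`/`rhs` ExPolygons from the caller and that ClipperLib_Z exposes the usual Clipper ZFillFunction hook used elsewhere in this diff): contours are tagged with a positive Z equal to their source index, and every intersection vertex receives a negative 1-based index into the visitor's intersections vector, so the caller can recover which pair of source contours produced it.

coord_t idx = 1;   // illustrative choice: start source indices at 1 so 0 never collides with a real source
ClipperZUtils::ZPaths subjects = ClipperZUtils::expolygons_to_zpaths(lhs, idx);
ClipperZUtils::ZPaths clips    = ClipperZUtils::expolygons_to_zpaths(rhs, idx);

ClipperZUtils::ClipperZIntersectionVisitor::Intersections intersections;
ClipperZUtils::ClipperZIntersectionVisitor visitor(intersections);

ClipperLib_Z::Clipper clipper;
clipper.ZFillFunction(visitor.clipper_callback());
clipper.AddPaths(subjects, ClipperLib_Z::ptSubject, true);
clipper.AddPaths(clips,    ClipperLib_Z::ptClip,    true);
ClipperLib_Z::Paths out;
clipper.Execute(ClipperLib_Z::ctIntersection, out, ClipperLib_Z::pftNonZero, ClipperLib_Z::pftNonZero);

// For a vertex p of `out`: p.z() > 0 means it came from the source contour tagged with that index;
// p.z() < 0 means it is an intersection, and intersections[-p.z() - 1] holds the two source indices.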
@ -301,7 +301,7 @@ template <class T>
|
|||
class ConfigOptionSingle : public ConfigOption {
|
||||
public:
|
||||
T value;
|
||||
explicit ConfigOptionSingle(T value) : value(value) {}
|
||||
explicit ConfigOptionSingle(T value) : value(std::move(value)) {}
|
||||
operator T() const { return this->value; }
|
||||
|
||||
void set(const ConfigOption *rhs) override
|
||||
|
@ -858,8 +858,8 @@ using ConfigOptionIntsNullable = ConfigOptionIntsTempl<true>;
|
|||
class ConfigOptionString : public ConfigOptionSingle<std::string>
|
||||
{
|
||||
public:
|
||||
ConfigOptionString() : ConfigOptionSingle<std::string>("") {}
|
||||
explicit ConfigOptionString(const std::string &value) : ConfigOptionSingle<std::string>(value) {}
|
||||
ConfigOptionString() : ConfigOptionSingle<std::string>(std::string{}) {}
|
||||
explicit ConfigOptionString(std::string value) : ConfigOptionSingle<std::string>(std::move(value)) {}
|
||||
|
||||
static ConfigOptionType static_type() { return coString; }
|
||||
ConfigOptionType type() const override { return static_type(); }
|
||||
|
|
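The two constructor changes above switch to the pass-by-value-and-move idiom. A minimal standalone sketch of the pattern (generic C++, not code from this commit):

#include <string>
#include <utility>

struct Opt {
    std::string value;
    explicit Opt(std::string v) : value(std::move(v)) {}   // move the argument into the member
};

// Opt a(std::string("PLA"));   // rvalue argument: the buffer is moved into the member, no copy
// std::string s = "PETG";
// Opt b(s);                    // lvalue argument: one copy into the parameter, then a cheap move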
|
@ -24,8 +24,6 @@
|
|||
#include "FillConcentricInternal.hpp"
|
||||
#include "FillConcentric.hpp"
|
||||
|
||||
#define NARROW_INFILL_AREA_THRESHOLD 3
|
||||
|
||||
namespace Slic3r {
|
||||
|
||||
struct SurfaceFillParams
|
||||
|
@ -123,14 +121,304 @@ struct SurfaceFill {
|
|||
ExPolygons no_overlap_expolygons;
|
||||
};
|
||||
|
||||
// BBS: used to judge whether the internal solid infill area is narrow
|
||||
static bool is_narrow_infill_area(const ExPolygon& expolygon)
|
||||
{
|
||||
ExPolygons offsets = offset_ex(expolygon, -scale_(NARROW_INFILL_AREA_THRESHOLD));
|
||||
if (offsets.empty())
|
||||
return true;
|
||||
|
||||
return false;
|
||||
// Detect narrow infill regions
|
||||
// Based on the anti-vibration algorithm from PrusaSlicer:
|
||||
// https://github.com/prusa3d/PrusaSlicer/blob/94290e09d75f23719c3d2ab2398737c8be4c3fd6/src/libslic3r/Fill/FillEnsuring.cpp#L100-L289
|
||||
void split_solid_surface(size_t layer_id, const SurfaceFill &fill, ExPolygons &normal_infill, ExPolygons &narrow_infill)
|
||||
{
|
||||
assert(fill.surface.surface_type == stInternalSolid);
|
||||
|
||||
switch (fill.params.pattern) {
|
||||
case ipRectilinear:
|
||||
case ipMonotonic:
|
||||
case ipMonotonicLine:
|
||||
case ipAlignedRectilinear:
|
||||
// Only straight-line-based infill patterns are supported
|
||||
break;
|
||||
|
||||
default:
|
||||
// For all other types, don't split
|
||||
return;
|
||||
}
|
||||
|
||||
Polygons normal_fill_areas; // Areas to be filled with the normal infill pattern
|
||||
|
||||
constexpr bool connect_extrusions = true;
|
||||
|
||||
auto segments_overlap = [](coord_t alow, coord_t ahigh, coord_t blow, coord_t bhigh) {
|
||||
return (alow >= blow && alow <= bhigh) || (ahigh >= blow && ahigh <= bhigh) || (blow >= alow && blow <= ahigh) ||
|
||||
(bhigh >= alow && bhigh <= ahigh);
|
||||
};
|
||||
|
||||
const coord_t scaled_spacing = scaled<coord_t>(fill.params.spacing);
|
||||
double distance_limit_reconnection = 2.0 * double(scaled_spacing);
|
||||
double squared_distance_limit_reconnection = distance_limit_reconnection * distance_limit_reconnection;
|
||||
// Calculate infill direction, see Fill::_infill_direction
|
||||
double base_angle = fill.params.angle + float(M_PI / 2.);
|
||||
// For patterns other than ipAlignedRectilinear, the angle alternates between layers
|
||||
if (fill.params.pattern != ipAlignedRectilinear) {
|
||||
size_t idx = layer_id / fill.surface.thickness_layers;
|
||||
base_angle += (idx & 1) ? float(M_PI / 2.) : 0;
|
||||
}
|
||||
const double aligning_angle = -base_angle + PI;
|
||||
|
||||
for (const ExPolygon &expolygon : fill.expolygons) {
|
||||
Polygons filled_area = to_polygons(expolygon);
|
||||
polygons_rotate(filled_area, aligning_angle);
|
||||
BoundingBox bb = get_extents(filled_area);
|
||||
|
||||
Polygons inner_area = intersection(filled_area, opening(filled_area, 2 * scaled_spacing, 3 * scaled_spacing));
|
||||
|
||||
inner_area = shrink(inner_area, scaled_spacing * 0.5 - scaled<double>(fill.params.overlap));
|
||||
|
||||
AABBTreeLines::LinesDistancer<Line> area_walls{to_lines(inner_area)};
|
||||
|
||||
const size_t n_vlines = (bb.max.x() - bb.min.x() + scaled_spacing - 1) / scaled_spacing;
|
||||
std::vector<Line> vertical_lines(n_vlines);
|
||||
coord_t y_min = bb.min.y();
|
||||
coord_t y_max = bb.max.y();
|
||||
for (size_t i = 0; i < n_vlines; i++) {
|
||||
coord_t x = bb.min.x() + i * double(scaled_spacing);
|
||||
vertical_lines[i].a = Point{x, y_min};
|
||||
vertical_lines[i].b = Point{x, y_max};
|
||||
}
|
||||
if (vertical_lines.size() > 0) {
|
||||
vertical_lines.push_back(vertical_lines.back());
|
||||
vertical_lines.back().a = Point{coord_t(bb.min.x() + n_vlines * double(scaled_spacing) + scaled_spacing * 0.5), y_min};
|
||||
vertical_lines.back().b = Point{vertical_lines.back().a.x(), y_max};
|
||||
}
|
||||
|
||||
std::vector<std::vector<Line>> polygon_sections(n_vlines);
|
||||
|
||||
for (size_t i = 0; i < n_vlines; i++) {
|
||||
const auto intersections = area_walls.intersections_with_line<true>(vertical_lines[i]);
|
||||
|
||||
for (int intersection_idx = 0; intersection_idx < int(intersections.size()) - 1; intersection_idx++) {
|
||||
const auto &a = intersections[intersection_idx];
|
||||
const auto &b = intersections[intersection_idx + 1];
|
||||
if (area_walls.outside((a.first + b.first) / 2) < 0) {
|
||||
if (std::abs(a.first.y() - b.first.y()) > scaled_spacing) {
|
||||
polygon_sections[i].emplace_back(a.first, b.first);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct Node
|
||||
{
|
||||
int section_idx;
|
||||
int line_idx;
|
||||
int skips_taken = 0;
|
||||
bool neighbours_explored = false;
|
||||
std::vector<std::pair<int, int>> neighbours{};
|
||||
};
|
||||
|
||||
coord_t length_filter = scale_(4);
|
||||
size_t skips_allowed = 2;
|
||||
size_t min_removal_count = 5;
|
||||
for (int section_idx = 0; section_idx < int(polygon_sections.size()); ++section_idx) {
|
||||
for (int line_idx = 0; line_idx < int(polygon_sections[section_idx].size()); ++line_idx) {
|
||||
if (const Line &line = polygon_sections[section_idx][line_idx]; line.a != line.b && line.length() < length_filter) {
|
||||
std::set<std::pair<int, int>> to_remove{{section_idx, line_idx}};
|
||||
std::vector<Node> to_visit{{section_idx, line_idx}};
|
||||
|
||||
bool initial_touches_long_lines = false;
|
||||
if (section_idx > 0) {
|
||||
for (int prev_line_idx = 0; prev_line_idx < int(polygon_sections[section_idx - 1].size()); ++prev_line_idx) {
|
||||
if (const Line &nl = polygon_sections[section_idx - 1][prev_line_idx];
|
||||
nl.a != nl.b && segments_overlap(line.a.y(), line.b.y(), nl.a.y(), nl.b.y())) {
|
||||
initial_touches_long_lines = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
while (!to_visit.empty()) {
|
||||
Node curr = to_visit.back();
|
||||
const Line &curr_l = polygon_sections[curr.section_idx][curr.line_idx];
|
||||
if (curr.neighbours_explored) {
|
||||
bool is_valid_for_removal = (curr_l.length() < length_filter) &&
|
||||
((int(to_remove.size()) - curr.skips_taken > int(min_removal_count)) ||
|
||||
(curr.neighbours.empty() && !initial_touches_long_lines));
|
||||
if (!is_valid_for_removal) {
|
||||
for (const auto &n : curr.neighbours) {
|
||||
if (to_remove.find(n) != to_remove.end()) {
|
||||
is_valid_for_removal = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!is_valid_for_removal) {
|
||||
to_remove.erase({curr.section_idx, curr.line_idx});
|
||||
}
|
||||
to_visit.pop_back();
|
||||
} else {
|
||||
to_visit.back().neighbours_explored = true;
|
||||
int curr_index = to_visit.size() - 1;
|
||||
bool can_use_skip = curr_l.length() <= length_filter && curr.skips_taken < int(skips_allowed);
|
||||
if (curr.section_idx + 1 < int(polygon_sections.size())) {
|
||||
for (int lidx = 0; lidx < int(polygon_sections[curr.section_idx + 1].size()); ++lidx) {
|
||||
if (const Line &nl = polygon_sections[curr.section_idx + 1][lidx];
|
||||
nl.a != nl.b && segments_overlap(curr_l.a.y(), curr_l.b.y(), nl.a.y(), nl.b.y()) &&
|
||||
(nl.length() < length_filter || can_use_skip)) {
|
||||
to_visit[curr_index].neighbours.push_back({curr.section_idx + 1, lidx});
|
||||
to_remove.insert({curr.section_idx + 1, lidx});
|
||||
Node next_node{curr.section_idx + 1, lidx, curr.skips_taken + (nl.length() >= length_filter)};
|
||||
to_visit.push_back(next_node);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (const auto &pair : to_remove) {
|
||||
Line &l = polygon_sections[pair.first][pair.second];
|
||||
l.a = l.b;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t section_idx = 0; section_idx < polygon_sections.size(); section_idx++) {
|
||||
polygon_sections[section_idx].erase(std::remove_if(polygon_sections[section_idx].begin(), polygon_sections[section_idx].end(),
|
||||
[](const Line &s) { return s.a == s.b; }),
|
||||
polygon_sections[section_idx].end());
|
||||
std::sort(polygon_sections[section_idx].begin(), polygon_sections[section_idx].end(),
|
||||
[](const Line &a, const Line &b) { return a.a.y() < b.b.y(); });
|
||||
}
|
||||
|
||||
Polygons reconstructed_area{};
|
||||
// reconstruct polygon from polygon sections
|
||||
{
|
||||
struct TracedPoly
|
||||
{
|
||||
Points lows;
|
||||
Points highs;
|
||||
};
|
||||
|
||||
std::vector<std::vector<Line>> polygon_sections_w_width = polygon_sections;
|
||||
for (auto &slice : polygon_sections_w_width) {
|
||||
for (Line &l : slice) {
|
||||
l.a -= Point{0.0, 0.5 * scaled_spacing};
|
||||
l.b += Point{0.0, 0.5 * scaled_spacing};
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<TracedPoly> current_traced_polys;
|
||||
for (const auto &polygon_slice : polygon_sections_w_width) {
|
||||
std::unordered_set<const Line *> used_segments;
|
||||
for (TracedPoly &traced_poly : current_traced_polys) {
|
||||
auto candidates_begin = std::upper_bound(polygon_slice.begin(), polygon_slice.end(), traced_poly.lows.back(),
|
||||
[](const Point &low, const Line &seg) { return seg.b.y() > low.y(); });
|
||||
auto candidates_end = std::upper_bound(polygon_slice.begin(), polygon_slice.end(), traced_poly.highs.back(),
|
||||
[](const Point &high, const Line &seg) { return seg.a.y() > high.y(); });
|
||||
|
||||
bool segment_added = false;
|
||||
for (auto candidate = candidates_begin; candidate != candidates_end && !segment_added; candidate++) {
|
||||
if (used_segments.find(&(*candidate)) != used_segments.end()) {
|
||||
continue;
|
||||
}
|
||||
if (connect_extrusions && (traced_poly.lows.back() - candidates_begin->a).cast<double>().squaredNorm() <
|
||||
squared_distance_limit_reconnection) {
|
||||
traced_poly.lows.push_back(candidates_begin->a);
|
||||
} else {
|
||||
traced_poly.lows.push_back(traced_poly.lows.back() + Point{scaled_spacing / 2, 0});
|
||||
traced_poly.lows.push_back(candidates_begin->a - Point{scaled_spacing / 2, 0});
|
||||
traced_poly.lows.push_back(candidates_begin->a);
|
||||
}
|
||||
|
||||
if (connect_extrusions && (traced_poly.highs.back() - candidates_begin->b).cast<double>().squaredNorm() <
|
||||
squared_distance_limit_reconnection) {
|
||||
traced_poly.highs.push_back(candidates_begin->b);
|
||||
} else {
|
||||
traced_poly.highs.push_back(traced_poly.highs.back() + Point{scaled_spacing / 2, 0});
|
||||
traced_poly.highs.push_back(candidates_begin->b - Point{scaled_spacing / 2, 0});
|
||||
traced_poly.highs.push_back(candidates_begin->b);
|
||||
}
|
||||
segment_added = true;
|
||||
used_segments.insert(&(*candidates_begin));
|
||||
}
|
||||
|
||||
if (!segment_added) {
|
||||
// Zero or multiple overlapping segments. Resolving this is nontrivial,
|
||||
// so we just close this polygon and possibly open several new ones. This should happen only rarely.
|
||||
traced_poly.lows.push_back(traced_poly.lows.back() + Point{scaled_spacing / 2, 0});
|
||||
traced_poly.highs.push_back(traced_poly.highs.back() + Point{scaled_spacing / 2, 0});
|
||||
Polygon &new_poly = reconstructed_area.emplace_back(std::move(traced_poly.lows));
|
||||
new_poly.points.insert(new_poly.points.end(), traced_poly.highs.rbegin(), traced_poly.highs.rend());
|
||||
traced_poly.lows.clear();
|
||||
traced_poly.highs.clear();
|
||||
}
|
||||
}
|
||||
|
||||
current_traced_polys.erase(std::remove_if(current_traced_polys.begin(), current_traced_polys.end(),
|
||||
[](const TracedPoly &tp) { return tp.lows.empty(); }),
|
||||
current_traced_polys.end());
|
||||
|
||||
for (const auto &segment : polygon_slice) {
|
||||
if (used_segments.find(&segment) == used_segments.end()) {
|
||||
TracedPoly &new_tp = current_traced_polys.emplace_back();
|
||||
new_tp.lows.push_back(segment.a - Point{scaled_spacing / 2, 0});
|
||||
new_tp.lows.push_back(segment.a);
|
||||
new_tp.highs.push_back(segment.b - Point{scaled_spacing / 2, 0});
|
||||
new_tp.highs.push_back(segment.b);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add polygons that have not been closed yet
|
||||
for (TracedPoly &traced_poly : current_traced_polys) {
|
||||
Polygon &new_poly = reconstructed_area.emplace_back(std::move(traced_poly.lows));
|
||||
new_poly.points.insert(new_poly.points.end(), traced_poly.highs.rbegin(), traced_poly.highs.rend());
|
||||
}
|
||||
}
|
||||
|
||||
polygons_append(normal_fill_areas, reconstructed_area);
|
||||
}
|
||||
|
||||
polygons_rotate(normal_fill_areas, -aligning_angle);
|
||||
|
||||
// Do the split
|
||||
ExPolygons normal_fill_areas_ex = union_safety_offset_ex(normal_fill_areas);
|
||||
ExPolygons narrow_fill_areas = diff_ex(fill.expolygons, normal_fill_areas_ex);
|
||||
|
||||
// Merge very small areas (smaller than a single line width) into the normal infill if they touch it
|
||||
for (auto iter = narrow_fill_areas.begin(); iter != narrow_fill_areas.end();) {
|
||||
auto shrinked_expoly = offset_ex(*iter, -scaled_spacing * 0.5);
|
||||
if (shrinked_expoly.empty()) {
|
||||
// Too small! Check if it touches any normal infills
|
||||
auto expanded_expoly = offset_ex(*iter, scaled_spacing * 0.3);
|
||||
Polygons normal_fill_area_clipped = ClipperUtils::clip_clipper_polygons_with_subject_bbox(normal_fill_areas_ex, get_extents(expanded_expoly));
|
||||
auto touch_check = intersection_ex(normal_fill_area_clipped, expanded_expoly);
|
||||
if (!touch_check.empty()) {
|
||||
normal_fill_areas_ex.emplace_back(*iter);
|
||||
iter = narrow_fill_areas.erase(iter);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
iter++;
|
||||
}
|
||||
|
||||
if (narrow_fill_areas.empty()) {
|
||||
// No split needed
|
||||
return;
|
||||
}
|
||||
|
||||
// Expand the normal infills a little bit to avoid gaps between normal and narrow infills
|
||||
normal_infill = intersection_ex(offset_ex(normal_fill_areas_ex, scaled_spacing * 0.1), fill.expolygons);
|
||||
narrow_infill = narrow_fill_areas;
|
||||
|
||||
#ifdef DEBUG_SURFACE_SPLIT
|
||||
{
|
||||
BoundingBox bbox = get_extents(fill.expolygons);
|
||||
bbox.offset(scale_(1.));
|
||||
::Slic3r::SVG svg(debug_out_path("surface_split_%d.svg", layer_id), bbox);
|
||||
svg.draw(to_lines(fill.expolygons), "red", scale_(0.1));
|
||||
svg.draw(normal_infill, "blue", 0.5);
|
||||
svg.draw(narrow_infill, "green", 0.5);
|
||||
svg.Close();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
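For reference, the 1D test that drives the column-to-column neighbour search above can be exercised in isolation; a standalone sketch with the same logic as the segments_overlap lambda (example values only):

#include <cassert>
#include <cstdint>

using coord_t = int64_t;

// Two closed intervals [alow, ahigh] and [blow, bhigh] overlap if either contains an endpoint of the other.
static bool segments_overlap(coord_t alow, coord_t ahigh, coord_t blow, coord_t bhigh)
{
    return (alow >= blow && alow <= bhigh) || (ahigh >= blow && ahigh <= bhigh) ||
           (blow >= alow && blow <= ahigh) || (bhigh >= alow && bhigh <= ahigh);
}

int main()
{
    assert(segments_overlap(0, 10, 5, 20));    // partial overlap
    assert(segments_overlap(0, 10, 2, 8));     // containment
    assert(!segments_overlap(0, 10, 11, 20));  // disjoint
}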
std::vector<SurfaceFill> group_fills(const Layer &layer)
|
||||
|
@ -357,25 +645,20 @@ std::vector<SurfaceFill> group_fills(const Layer &layer)
|
|||
}
|
||||
|
||||
// BBS: detect narrow internal solid infill area and use ipConcentricInternal pattern instead
|
||||
if (layer.object()->config().detect_narrow_internal_solid_infill) {
|
||||
/*if (layer.object()->config().detect_narrow_internal_solid_infill)*/ {
|
||||
size_t surface_fills_size = surface_fills.size();
|
||||
for (size_t i = 0; i < surface_fills_size; i++) {
|
||||
if (surface_fills[i].surface.surface_type != stInternalSolid)
|
||||
continue;
|
||||
|
||||
size_t expolygons_size = surface_fills[i].expolygons.size();
|
||||
std::vector<size_t> narrow_expolygons_index;
|
||||
narrow_expolygons_index.reserve(expolygons_size);
|
||||
// BBS: get the index list of narrow expolygon
|
||||
for (size_t j = 0; j < expolygons_size; j++)
|
||||
if (is_narrow_infill_area(surface_fills[i].expolygons[j]))
|
||||
narrow_expolygons_index.push_back(j);
|
||||
ExPolygons normal_infill;
|
||||
ExPolygons narrow_infill;
|
||||
split_solid_surface(layer.id(), surface_fills[i], normal_infill, narrow_infill);
|
||||
|
||||
if (narrow_expolygons_index.size() == 0) {
|
||||
if (narrow_infill.empty()) {
|
||||
// BBS: has no narrow expolygon
|
||||
continue;
|
||||
}
|
||||
else if (narrow_expolygons_index.size() == expolygons_size) {
|
||||
} else if (normal_infill.empty()) {
|
||||
// BBS: all expolygons are narrow, directly change the fill pattern
|
||||
surface_fills[i].params.pattern = ipConcentricInternal;
|
||||
}
|
||||
|
@ -389,14 +672,10 @@ std::vector<SurfaceFill> group_fills(const Layer &layer)
|
|||
surface_fills.back().surface.thickness = surface_fills[i].surface.thickness;
|
||||
surface_fills.back().region_id_group = surface_fills[i].region_id_group;
|
||||
surface_fills.back().no_overlap_expolygons = surface_fills[i].no_overlap_expolygons;
|
||||
for (size_t j = 0; j < narrow_expolygons_index.size(); j++) {
|
||||
// BBS: move the narrow expolygons to new surface_fills.back();
|
||||
surface_fills.back().expolygons.emplace_back(std::move(surface_fills[i].expolygons[narrow_expolygons_index[j]]));
|
||||
}
|
||||
for (int j = narrow_expolygons_index.size() - 1; j >= 0; j--) {
|
||||
// BBS: delete the narrow expolygons from old surface_fills
|
||||
surface_fills[i].expolygons.erase(surface_fills[i].expolygons.begin() + narrow_expolygons_index[j]);
|
||||
}
|
||||
// BBS: move the narrow expolygons to new surface_fills.back();
|
||||
surface_fills.back().expolygons = std::move(narrow_infill);
|
||||
// BBS: delete the narrow expolygons from old surface_fills
|
||||
surface_fills[i].expolygons = std::move(normal_infill);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -454,19 +733,11 @@ void Layer::make_fills(FillAdaptive::Octree* adaptive_fill_octree, FillAdaptive:
|
|||
f->layer_id = this->id();
|
||||
f->z = this->print_z;
|
||||
f->angle = surface_fill.params.angle;
|
||||
f->adapt_fill_octree = (surface_fill.params.pattern == ipSupportCubic) ? support_fill_octree : adaptive_fill_octree;
|
||||
f->adapt_fill_octree = (surface_fill.params.pattern == ipSupportCubic) ? support_fill_octree : adaptive_fill_octree;
|
||||
f->print_config = &this->object()->print()->config();
|
||||
f->print_object_config = &this->object()->config();
|
||||
|
||||
if (surface_fill.params.pattern == ipConcentricInternal) {
|
||||
FillConcentricInternal *fill_concentric = dynamic_cast<FillConcentricInternal *>(f.get());
|
||||
assert(fill_concentric != nullptr);
|
||||
fill_concentric->print_config = &this->object()->print()->config();
|
||||
fill_concentric->print_object_config = &this->object()->config();
|
||||
} else if (surface_fill.params.pattern == ipConcentric) {
|
||||
FillConcentric *fill_concentric = dynamic_cast<FillConcentric *>(f.get());
|
||||
assert(fill_concentric != nullptr);
|
||||
fill_concentric->print_config = &this->object()->print()->config();
|
||||
fill_concentric->print_object_config = &this->object()->config();
|
||||
} else if (surface_fill.params.pattern == ipLightning)
|
||||
if (surface_fill.params.pattern == ipLightning)
|
||||
dynamic_cast<FillLightning::Filler*>(f.get())->generator = lightning_generator;
|
||||
|
||||
// calculate flow spacing for infill pattern generation
|
||||
|
@ -496,8 +767,8 @@ void Layer::make_fills(FillAdaptive::Octree* adaptive_fill_octree, FillAdaptive:
|
|||
params.anchor_length = surface_fill.params.anchor_length;
|
||||
params.anchor_length_max = surface_fill.params.anchor_length_max;
|
||||
params.resolution = resolution;
|
||||
params.use_arachne = surface_fill.params.pattern == ipConcentric;
|
||||
params.layer_height = m_regions[surface_fill.region_id]->layer()->height;
|
||||
params.use_arachne = surface_fill.params.pattern == ipConcentric || surface_fill.params.pattern == ipConcentricInternal;
|
||||
params.layer_height = layerm->layer()->height;
|
||||
|
||||
// BBS
|
||||
params.flow = surface_fill.params.flow;
|
||||
|
@ -558,7 +829,7 @@ Polylines Layer::generate_sparse_infill_polylines_for_anchoring(FillAdaptive::Oc
|
|||
switch (surface_fill.params.pattern) {
|
||||
case ipCount: continue; break;
|
||||
case ipSupportBase: continue; break;
|
||||
//TODO: case ipEnsuring: continue; break;
|
||||
case ipConcentricInternal: continue; break;
|
||||
case ipLightning:
|
||||
case ipAdaptiveCubic:
|
||||
case ipSupportCubic:
|
||||
|
@ -572,7 +843,6 @@ Polylines Layer::generate_sparse_infill_polylines_for_anchoring(FillAdaptive::Oc
|
|||
case ipCubic:
|
||||
case ipLine:
|
||||
case ipConcentric:
|
||||
case ipConcentricInternal:
|
||||
case ipHoneycomb:
|
||||
case ip3DHoneycomb:
|
||||
case ipGyroid:
|
||||
|
@ -588,8 +858,8 @@ Polylines Layer::generate_sparse_infill_polylines_for_anchoring(FillAdaptive::Oc
|
|||
f->z = this->print_z;
|
||||
f->angle = surface_fill.params.angle;
|
||||
f->adapt_fill_octree = (surface_fill.params.pattern == ipSupportCubic) ? support_fill_octree : adaptive_fill_octree;
|
||||
// TODO: f->print_config = &this->object()->print()->config();
|
||||
// TODO: f->print_object_config = &this->object()->config();
|
||||
f->print_config = &this->object()->print()->config();
|
||||
f->print_object_config = &this->object()->config();
|
||||
|
||||
if (surface_fill.params.pattern == ipLightning)
|
||||
dynamic_cast<FillLightning::Filler *>(f.get())->generator = lightning_generator;
|
||||
|
|
|
@ -1,3 +1,12 @@
|
|||
///|/ Copyright (c) Prusa Research 2016 - 2023 Vojtěch Bubník @bubnikv, Lukáš Hejl @hejllukas, Tomáš Mészáros @tamasmeszaros, Lukáš Matěna @lukasmatena
|
||||
///|/ Copyright (c) SuperSlicer 2018 - 2019 Remi Durand @supermerill
|
||||
///|/
|
||||
///|/ ported from lib/Slic3r/Fill/Base.pm:
|
||||
///|/ Copyright (c) Prusa Research 2016 Vojtěch Bubník @bubnikv
|
||||
///|/ Copyright (c) Slic3r 2011 - 2014 Alessandro Ranellucci @alranel
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#include <stdio.h>
|
||||
#include <numeric>
|
||||
|
||||
|
|
|
@ -105,6 +105,10 @@ public:
|
|||
// Octree builds on mesh for usage in the adaptive cubic infill
|
||||
FillAdaptive::Octree* adapt_fill_octree = nullptr;
|
||||
|
||||
// PrintConfig and PrintObjectConfig are used by infills that use Arachne (Concentric and FillEnsuring).
|
||||
const PrintConfig *print_config = nullptr;
|
||||
const PrintObjectConfig *print_object_config = nullptr;
|
||||
|
||||
// BBS: all no overlap expolygons in same layer
|
||||
ExPolygons no_overlap_expolygons;
|
||||
|
||||
|
|
|
@ -1,3 +1,12 @@
|
|||
///|/ Copyright (c) Prusa Research 2016 - 2023 Vojtěch Bubník @bubnikv, Lukáš Hejl @hejllukas
|
||||
///|/
|
||||
///|/ ported from lib/Slic3r/Fill/Concentric.pm:
|
||||
///|/ Copyright (c) Prusa Research 2016 Vojtěch Bubník @bubnikv
|
||||
///|/ Copyright (c) Slic3r 2011 - 2015 Alessandro Ranellucci @alranel
|
||||
///|/ Copyright (c) 2012 Mark Hindess
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#include "../ClipperUtils.hpp"
|
||||
#include "../ExPolygon.hpp"
|
||||
#include "../Surface.hpp"
|
||||
|
@ -109,14 +118,8 @@ void FillConcentric::_fill_surface_single(const FillParams& params,
|
|||
continue;
|
||||
|
||||
ThickPolyline thick_polyline = Arachne::to_thick_polyline(*extrusion);
|
||||
if (extrusion->is_closed && thick_polyline.points.front() == thick_polyline.points.back() && thick_polyline.width.front() == thick_polyline.width.back()) {
|
||||
thick_polyline.points.pop_back();
|
||||
assert(thick_polyline.points.size() * 2 == thick_polyline.width.size());
|
||||
int nearest_idx = last_pos.nearest_point_index(thick_polyline.points);
|
||||
std::rotate(thick_polyline.points.begin(), thick_polyline.points.begin() + nearest_idx, thick_polyline.points.end());
|
||||
std::rotate(thick_polyline.width.begin(), thick_polyline.width.begin() + 2 * nearest_idx, thick_polyline.width.end());
|
||||
thick_polyline.points.emplace_back(thick_polyline.points.front());
|
||||
}
|
||||
if (extrusion->is_closed)
|
||||
thick_polyline.start_at_index(last_pos.nearest_point_index(thick_polyline.points));
|
||||
thick_polylines_out.emplace_back(std::move(thick_polyline));
|
||||
last_pos = thick_polylines_out.back().last_point();
|
||||
}
|
||||
|
|
|
@ -1,3 +1,13 @@
|
|||
///|/ Copyright (c) Prusa Research 2016 - 2023 Vojtěch Bubník @bubnikv, Lukáš Hejl @hejllukas
|
||||
///|/ Copyright (c) Slic3r 2016 Alessandro Ranellucci @alranel
|
||||
///|/
|
||||
///|/ ported from lib/Slic3r/Fill/Concentric.pm:
|
||||
///|/ Copyright (c) Prusa Research 2016 Vojtěch Bubník @bubnikv
|
||||
///|/ Copyright (c) Slic3r 2011 - 2015 Alessandro Ranellucci @alranel
|
||||
///|/ Copyright (c) 2012 Mark Hindess
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#ifndef slic3r_FillConcentric_hpp_
|
||||
#define slic3r_FillConcentric_hpp_
|
||||
|
||||
|
@ -25,12 +35,7 @@ protected:
|
|||
ExPolygon expolygon,
|
||||
ThickPolylines& thick_polylines_out) override;
|
||||
|
||||
bool no_sort() const override { return true; }
|
||||
|
||||
const PrintConfig* print_config = nullptr;
|
||||
const PrintObjectConfig* print_object_config = nullptr;
|
||||
|
||||
friend class Layer;
|
||||
bool no_sort() const override { return true; }
|
||||
};
|
||||
|
||||
} // namespace Slic3r
|
||||
|
|
|
@ -15,9 +15,6 @@ protected:
|
|||
Fill* clone() const override { return new FillConcentricInternal(*this); };
|
||||
bool no_sort() const override { return true; }
|
||||
|
||||
const PrintConfig *print_config = nullptr;
|
||||
const PrintObjectConfig *print_object_config = nullptr;
|
||||
|
||||
friend class Layer;
|
||||
};
|
||||
|
||||
|
|
|
@ -7,6 +7,7 @@
|
|||
|
||||
namespace Slic3r {
|
||||
|
||||
class PrintRegionConfig;
|
||||
class Surface;
|
||||
|
||||
class FillRectilinear : public Fill
|
||||
|
|
|
@ -1,3 +1,8 @@
|
|||
///|/ Copyright (c) Prusa Research 2016 - 2023 Vojtěch Bubník @bubnikv, Pavel Mikuš @Godrak, Lukáš Matěna @lukasmatena, Lukáš Hejl @hejllukas
|
||||
///|/ Copyright (c) Slic3r 2014 - 2016 Alessandro Ranellucci @alranel
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#include "Layer.hpp"
|
||||
#include "BridgeDetector.hpp"
|
||||
#include "ClipperUtils.hpp"
|
||||
|
@ -7,6 +12,7 @@
|
|||
#include "Surface.hpp"
|
||||
#include "BoundingBox.hpp"
|
||||
#include "SVG.hpp"
|
||||
#include "Algorithm/RegionExpansion.hpp"
|
||||
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
@ -116,6 +122,351 @@ void LayerRegion::make_perimeters(const SurfaceCollection &slices, SurfaceCollec
|
|||
g.process_classic();
|
||||
}
|
||||
|
||||
#if 1
|
||||
|
||||
// Extract surfaces of given type from surfaces, extract fill (layer) thickness of one of the surfaces.
|
||||
static ExPolygons fill_surfaces_extract_expolygons(Surfaces &surfaces, std::initializer_list<SurfaceType> surface_types, double &thickness)
|
||||
{
|
||||
size_t cnt = 0;
|
||||
for (const Surface &surface : surfaces)
|
||||
if (std::find(surface_types.begin(), surface_types.end(), surface.surface_type) != surface_types.end()) {
|
||||
++cnt;
|
||||
thickness = surface.thickness;
|
||||
}
|
||||
if (cnt == 0)
|
||||
return {};
|
||||
|
||||
ExPolygons out;
|
||||
out.reserve(cnt);
|
||||
for (Surface &surface : surfaces)
|
||||
if (std::find(surface_types.begin(), surface_types.end(), surface.surface_type) != surface_types.end())
|
||||
out.emplace_back(std::move(surface.expolygon));
|
||||
return out;
|
||||
}
|
||||
|
||||
// Extract bridging surfaces from "surfaces", expand them into "shells" using expansion_params,
|
||||
// detect bridges.
|
||||
// Trim "shells" by the expanded bridges.
|
||||
Surfaces expand_bridges_detect_orientations(
|
||||
Surfaces &surfaces,
|
||||
ExPolygons &shells,
|
||||
const Algorithm::RegionExpansionParameters &expansion_params_into_solid_infill,
|
||||
ExPolygons &sparse,
|
||||
const Algorithm::RegionExpansionParameters &expansion_params_into_sparse_infill,
|
||||
const float closing_radius)
|
||||
{
|
||||
using namespace Slic3r::Algorithm;
|
||||
|
||||
double thickness;
|
||||
ExPolygons bridges_ex = fill_surfaces_extract_expolygons(surfaces, {stBottomBridge}, thickness);
|
||||
if (bridges_ex.empty())
|
||||
return {};
|
||||
|
||||
// Calculate bridge anchors and their expansions in their respective shell region.
|
||||
WaveSeeds bridge_anchors = wave_seeds(bridges_ex, shells, expansion_params_into_solid_infill.tiny_expansion, true);
|
||||
std::vector<RegionExpansionEx> bridge_expansions = propagate_waves_ex(bridge_anchors, shells, expansion_params_into_solid_infill);
|
||||
bool expanded_into_shells = ! bridge_expansions.empty();
|
||||
bool expanded_into_sparse = false;
|
||||
{
|
||||
WaveSeeds bridge_anchors_sparse = wave_seeds(bridges_ex, sparse, expansion_params_into_sparse_infill.tiny_expansion, true);
|
||||
std::vector<RegionExpansionEx> bridge_expansions_sparse = propagate_waves_ex(bridge_anchors_sparse, sparse, expansion_params_into_sparse_infill);
|
||||
if (! bridge_expansions_sparse.empty()) {
|
||||
expanded_into_sparse = true;
|
||||
for (WaveSeed &seed : bridge_anchors_sparse)
|
||||
seed.boundary += uint32_t(shells.size());
|
||||
for (RegionExpansionEx &expansion : bridge_expansions_sparse)
|
||||
expansion.boundary_id += uint32_t(shells.size());
|
||||
append(bridge_anchors, std::move(bridge_anchors_sparse));
|
||||
append(bridge_expansions, std::move(bridge_expansions_sparse));
|
||||
}
|
||||
}
|
||||
|
||||
// Cache for detecting bridge orientation and merging regions with overlapping expansions.
|
||||
struct Bridge {
|
||||
ExPolygon expolygon;
|
||||
uint32_t group_id;
|
||||
std::vector<RegionExpansionEx>::const_iterator bridge_expansion_begin;
|
||||
double angle = -1;
|
||||
};
|
||||
std::vector<Bridge> bridges;
|
||||
{
|
||||
bridges.reserve(bridges_ex.size());
|
||||
uint32_t group_id = 0;
|
||||
for (ExPolygon &ex : bridges_ex)
|
||||
bridges.push_back({ std::move(ex), group_id ++, bridge_expansions.end() });
|
||||
bridges_ex.clear();
|
||||
}
|
||||
|
||||
// Group the bridge surfaces by overlaps.
|
||||
auto group_id = [&bridges](uint32_t src_id) {
|
||||
uint32_t group_id = bridges[src_id].group_id;
|
||||
while (group_id != src_id) {
|
||||
src_id = group_id;
|
||||
group_id = bridges[src_id].group_id;
|
||||
}
|
||||
bridges[src_id].group_id = group_id;
|
||||
return group_id;
|
||||
};
|
||||
|
||||
{
|
||||
// Cache of bboxes per expansion boundary.
|
||||
std::vector<BoundingBox> bboxes;
|
||||
// Detect overlaps of bridge anchors inside their respective shell regions.
|
||||
// bridge_expansions are sorted by boundary id and source id.
|
||||
for (auto it = bridge_expansions.begin(); it != bridge_expansions.end();) {
|
||||
// For each boundary region:
|
||||
auto it_begin = it;
|
||||
auto it_end = std::next(it_begin);
|
||||
for (; it_end != bridge_expansions.end() && it_end->boundary_id == it_begin->boundary_id; ++ it_end) ;
|
||||
bboxes.clear();
|
||||
bboxes.reserve(it_end - it_begin);
|
||||
for (auto it2 = it_begin; it2 != it_end; ++ it2)
|
||||
bboxes.emplace_back(get_extents(it2->expolygon.contour));
|
||||
// For each bridge anchor of the current source:
|
||||
for (; it != it_end; ++ it) {
|
||||
// A group id for this bridge.
|
||||
for (auto it2 = std::next(it); it2 != it_end; ++ it2)
|
||||
if (it->src_id != it2->src_id &&
|
||||
bboxes[it - it_begin].overlap(bboxes[it2 - it_begin]) &&
|
||||
// One may ignore holes, they are irrelevant for intersection test.
|
||||
! intersection(it->expolygon.contour, it2->expolygon.contour).empty()) {
|
||||
// The two bridge regions intersect. Give them the same (lower) group id.
|
||||
uint32_t id = group_id(it->src_id);
|
||||
uint32_t id2 = group_id(it2->src_id);
|
||||
if (id < id2)
|
||||
bridges[id2].group_id = id;
|
||||
else
|
||||
bridges[id].group_id = id2;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Detect bridge directions.
|
||||
{
|
||||
std::sort(bridge_anchors.begin(), bridge_anchors.end(), Algorithm::lower_by_src_and_boundary);
|
||||
auto it_bridge_anchor = bridge_anchors.begin();
|
||||
Lines lines;
|
||||
Polygons anchor_areas;
|
||||
for (uint32_t bridge_id = 0; bridge_id < uint32_t(bridges.size()); ++ bridge_id) {
|
||||
Bridge &bridge = bridges[bridge_id];
|
||||
// lines.clear();
|
||||
anchor_areas.clear();
|
||||
int32_t last_anchor_id = -1;
|
||||
for (; it_bridge_anchor != bridge_anchors.end() && it_bridge_anchor->src == bridge_id; ++ it_bridge_anchor) {
|
||||
if (last_anchor_id != int(it_bridge_anchor->boundary)) {
|
||||
last_anchor_id = int(it_bridge_anchor->boundary);
|
||||
append(anchor_areas, to_polygons(last_anchor_id < int32_t(shells.size()) ? shells[last_anchor_id] : sparse[last_anchor_id - int32_t(shells.size())]));
|
||||
}
|
||||
// if (Points &polyline = it_bridge_anchor->path; polyline.size() >= 2) {
|
||||
// reserve_more_power_of_2(lines, polyline.size() - 1);
|
||||
// for (size_t i = 1; i < polyline.size(); ++ i)
|
||||
// lines.push_back({ polyline[i - 1], polyline[1] });
|
||||
// }
|
||||
}
|
||||
lines = to_lines(diff_pl(to_polylines(bridge.expolygon), expand(anchor_areas, float(SCALED_EPSILON))));
|
||||
auto [bridging_dir, unsupported_dist] = detect_bridging_direction(lines, to_polygons(bridge.expolygon));
|
||||
bridge.angle = M_PI + std::atan2(bridging_dir.y(), bridging_dir.x());
|
||||
#if 0
|
||||
coordf_t stroke_width = scale_(0.06);
|
||||
BoundingBox bbox = get_extents(anchor_areas);
|
||||
bbox.merge(get_extents(bridge.expolygon));
|
||||
bbox.offset(scale_(1.));
|
||||
::Slic3r::SVG
|
||||
svg(debug_out_path(("bridge" + std::to_string(bridge.angle) + "_" /* + std::to_string(this->layer()->bottom_z())*/).c_str()),
|
||||
bbox);
|
||||
svg.draw(bridge.expolygon, "cyan");
|
||||
svg.draw(lines, "green", stroke_width);
|
||||
svg.draw(anchor_areas, "red");
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
// Merge the groups with the same group id, produce surfaces by merging source overhangs with their newly expanded anchors.
|
||||
Surfaces out;
|
||||
{
|
||||
Polygons acc;
|
||||
Surface templ{ stBottomBridge, {} };
|
||||
std::sort(bridge_expansions.begin(), bridge_expansions.end(), [](auto &l, auto &r) {
|
||||
return l.src_id < r.src_id || (l.src_id == r.src_id && l.boundary_id < r.boundary_id);
|
||||
});
|
||||
for (auto it = bridge_expansions.begin(); it != bridge_expansions.end(); ) {
|
||||
bridges[it->src_id].bridge_expansion_begin = it;
|
||||
uint32_t src_id = it->src_id;
|
||||
for (++ it; it != bridge_expansions.end() && it->src_id == src_id; ++ it) ;
|
||||
}
|
||||
for (uint32_t bridge_id = 0; bridge_id < uint32_t(bridges.size()); ++ bridge_id)
|
||||
if (group_id(bridge_id) == bridge_id) {
|
||||
// Head of the group.
|
||||
acc.clear();
|
||||
for (uint32_t bridge_id2 = bridge_id; bridge_id2 < uint32_t(bridges.size()); ++ bridge_id2)
|
||||
if (group_id(bridge_id2) == bridge_id) {
|
||||
append(acc, to_polygons(std::move(bridges[bridge_id2].expolygon)));
|
||||
auto it_bridge_expansion = bridges[bridge_id2].bridge_expansion_begin;
|
||||
assert(it_bridge_expansion == bridge_expansions.end() || it_bridge_expansion->src_id == bridge_id2);
|
||||
for (; it_bridge_expansion != bridge_expansions.end() && it_bridge_expansion->src_id == bridge_id2; ++ it_bridge_expansion)
|
||||
append(acc, to_polygons(std::move(it_bridge_expansion->expolygon)));
|
||||
}
|
||||
//FIXME try to be smart and pick the best bridging angle for all?
|
||||
templ.bridge_angle = bridges[bridge_id].angle;
|
||||
//NOTE: The current regularization of the shells can create small unassigned regions in the object (e.g. benchy)
|
||||
// without the following closing operation, those regions will stay unfilled and cause small holes in the expanded surface.
|
||||
// look for narrow_ensure_vertical_wall_thickness_region_radius filter.
|
||||
ExPolygons final = closing_ex(acc, closing_radius);
|
||||
// without safety offset, artifacts are generated (GH #2494)
|
||||
// union_safety_offset_ex(acc)
|
||||
for (ExPolygon &ex : final)
|
||||
out.emplace_back(templ, std::move(ex));
|
||||
}
|
||||
}
|
||||
|
||||
// Clip by the expanded bridges.
|
||||
if (expanded_into_shells)
|
||||
shells = diff_ex(shells, out);
|
||||
if (expanded_into_sparse)
|
||||
sparse = diff_ex(sparse, out);
|
||||
return out;
|
||||
}
|
||||
|
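The group_id lambda above walks each bridge's group_id chain to its representative; merging two overlapping bridges simply repoints one representative at the other, with the lower index winning. A standalone sketch of that bookkeeping (simplified, not the project's code):

#include <cstdint>
#include <vector>

struct BridgeRec { uint32_t group_id; };   // initialized so that bridges[i].group_id == i

static uint32_t find_group(const std::vector<BridgeRec> &bridges, uint32_t id)
{
    while (bridges[id].group_id != id)
        id = bridges[id].group_id;
    return id;
}

static void merge_groups(std::vector<BridgeRec> &bridges, uint32_t a, uint32_t b)
{
    uint32_t ra = find_group(bridges, a);
    uint32_t rb = find_group(bridges, b);
    if (ra < rb)
        bridges[rb].group_id = ra;   // the lower representative wins, as in the code above
    else if (rb < ra)
        bridges[ra].group_id = rb;
}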
||||
// Extract bridging surfaces from "surfaces", expand them into "shells" using expansion_params.
|
||||
// Trim "shells" by the expanded bridges.
|
||||
static Surfaces expand_merge_surfaces(
|
||||
Surfaces &surfaces,
|
||||
SurfaceType surface_type,
|
||||
ExPolygons &shells,
|
||||
const Algorithm::RegionExpansionParameters &expansion_params_into_solid_infill,
|
||||
ExPolygons &sparse,
|
||||
const Algorithm::RegionExpansionParameters &expansion_params_into_sparse_infill,
|
||||
const float closing_radius,
|
||||
const double bridge_angle = -1.)
|
||||
{
|
||||
using namespace Slic3r::Algorithm;
|
||||
|
||||
double thickness;
|
||||
ExPolygons src = fill_surfaces_extract_expolygons(surfaces, {surface_type}, thickness);
|
||||
if (src.empty())
|
||||
return {};
|
||||
|
||||
std::vector<RegionExpansion> expansions = propagate_waves(src, shells, expansion_params_into_solid_infill);
|
||||
bool expanded_into_shells = !expansions.empty();
|
||||
bool expanded_into_sparse = false;
|
||||
{
|
||||
std::vector<RegionExpansion> expansions2 = propagate_waves(src, sparse, expansion_params_into_sparse_infill);
|
||||
if (! expansions2.empty()) {
|
||||
expanded_into_sparse = true;
|
||||
for (RegionExpansion &expansion : expansions2)
|
||||
expansion.boundary_id += uint32_t(shells.size());
|
||||
append(expansions, std::move(expansions2));
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<ExPolygon> expanded = merge_expansions_into_expolygons(std::move(src), std::move(expansions));
|
||||
//NOTE: The current regularization of the shells can create small unassigned regions in the object (e.g. benchy)
|
||||
// without the following closing operation, those regions will stay unfilled and cause small holes in the expanded surface.
|
||||
// look for narrow_ensure_vertical_wall_thickness_region_radius filter.
|
||||
expanded = closing_ex(expanded, closing_radius);
|
||||
// Trim the shells by the expanded expolygons.
|
||||
if (expanded_into_shells)
|
||||
shells = diff_ex(shells, expanded);
|
||||
if (expanded_into_sparse)
|
||||
sparse = diff_ex(sparse, expanded);
|
||||
|
||||
Surface templ{ surface_type, {} };
|
||||
templ.bridge_angle = bridge_angle;
|
||||
Surfaces out;
|
||||
out.reserve(expanded.size());
|
||||
for (auto &expoly : expanded)
|
||||
out.emplace_back(templ, std::move(expoly));
|
||||
return out;
|
||||
}
|
||||
|
||||
void LayerRegion::process_external_surfaces(const Layer *lower_layer, const Polygons *lower_layer_covered)
|
||||
{
|
||||
using namespace Slic3r::Algorithm;
|
||||
|
||||
#ifdef SLIC3R_DEBUG_SLICE_PROCESSING
|
||||
export_region_fill_surfaces_to_svg_debug("4_process_external_surfaces-initial");
|
||||
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
|
||||
|
||||
// Width of the perimeters.
|
||||
float shell_width = 0;
|
||||
float expansion_min = 0;
|
||||
if (int num_perimeters = this->region().config().wall_loops; num_perimeters > 0) {
|
||||
Flow external_perimeter_flow = this->flow(frExternalPerimeter);
|
||||
Flow perimeter_flow = this->flow(frPerimeter);
|
||||
shell_width = 0.5f * external_perimeter_flow.scaled_width() + external_perimeter_flow.scaled_spacing();
|
||||
shell_width += perimeter_flow.scaled_spacing() * (num_perimeters - 1);
|
||||
expansion_min = perimeter_flow.scaled_spacing();
|
||||
} else {
|
||||
// TODO: Maybe there is a better solution when printing with zero perimeters, but this works reasonably well, given the situation
|
||||
shell_width = float(SCALED_EPSILON);
|
||||
expansion_min = float(SCALED_EPSILON);
|
||||
}
|
||||
|
||||
// Scaled expansions of the respective external surfaces.
|
||||
float expansion_top = shell_width * sqrt(2.);
|
||||
float expansion_bottom = expansion_top;
|
||||
float expansion_bottom_bridge = expansion_top;
|
||||
// Expand by waves of expansion_step size (expansion_step is scaled), but with no more steps than max_nr_expansion_steps.
|
||||
static constexpr const float expansion_step = scaled<float>(0.1);
|
||||
// Don't take more than max_nr_expansion_steps for a small expansion_step.
|
||||
static constexpr const size_t max_nr_expansion_steps = 5;
|
||||
// Radius (with added epsilon) to absorb empty regions emerging from regularization of ensuring, viz const float narrow_ensure_vertical_wall_thickness_region_radius = 0.5f * 0.65f * min_perimeter_infill_spacing;
|
||||
const float closing_radius = 0.55f * 0.65f * 1.05f * this->flow(frSolidInfill).scaled_spacing();
|
||||
|
||||
// Expand the top / bottom / bridge surfaces into the shell thickness solid infills.
|
||||
double layer_thickness;
|
||||
ExPolygons shells = union_ex(fill_surfaces_extract_expolygons(this->fill_surfaces.surfaces, { stInternalSolid }, layer_thickness));
|
||||
ExPolygons sparse = union_ex(fill_surfaces_extract_expolygons(this->fill_surfaces.surfaces, { stInternal }, layer_thickness));
|
||||
|
||||
SurfaceCollection bridges;
|
||||
const auto expansion_params_into_sparse_infill = RegionExpansionParameters::build(expansion_min, expansion_step, max_nr_expansion_steps);
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace) << "Processing external surface, detecting bridges. layer" << this->layer()->print_z;
|
||||
const double custom_angle = this->region().config().bridge_angle.value;
|
||||
const auto expansion_params_into_solid_infill = RegionExpansionParameters::build(expansion_bottom_bridge, expansion_step, max_nr_expansion_steps);
|
||||
bridges.surfaces = custom_angle > 0 ?
|
||||
expand_merge_surfaces(this->fill_surfaces.surfaces, stBottomBridge, shells, expansion_params_into_solid_infill, sparse, expansion_params_into_sparse_infill, closing_radius, Geometry::deg2rad(custom_angle)) :
|
||||
expand_bridges_detect_orientations(this->fill_surfaces.surfaces, shells, expansion_params_into_solid_infill, sparse, expansion_params_into_sparse_infill, closing_radius);
|
||||
BOOST_LOG_TRIVIAL(trace) << "Processing external surface, detecting bridges - done";
|
||||
#if 0
|
||||
{
|
||||
static int iRun = 0;
|
||||
bridges.export_to_svg(debug_out_path("bridges-after-grouping-%d.svg", iRun++), true);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
Surfaces bottoms = expand_merge_surfaces(this->fill_surfaces.surfaces, stBottom, shells,
|
||||
RegionExpansionParameters::build(expansion_bottom, expansion_step, max_nr_expansion_steps),
|
||||
sparse, expansion_params_into_sparse_infill, closing_radius);
|
||||
Surfaces tops = expand_merge_surfaces(this->fill_surfaces.surfaces, stTop, shells,
|
||||
RegionExpansionParameters::build(expansion_top, expansion_step, max_nr_expansion_steps),
|
||||
sparse, expansion_params_into_sparse_infill, closing_radius);
|
||||
|
||||
// m_fill_surfaces.remove_types({ stBottomBridge, stBottom, stTop, stInternal, stInternalSolid });
|
||||
this->fill_surfaces.clear();
|
||||
reserve_more(this->fill_surfaces.surfaces, shells.size() + sparse.size() + bridges.size() + bottoms.size() + tops.size());
|
||||
{
|
||||
Surface solid_templ(stInternalSolid, {});
|
||||
solid_templ.thickness = layer_thickness;
|
||||
this->fill_surfaces.append(std::move(shells), solid_templ);
|
||||
}
|
||||
{
|
||||
Surface sparse_templ(stInternal, {});
|
||||
sparse_templ.thickness = layer_thickness;
|
||||
this->fill_surfaces.append(std::move(sparse), sparse_templ);
|
||||
}
|
||||
this->fill_surfaces.append(std::move(bridges.surfaces));
|
||||
this->fill_surfaces.append(std::move(bottoms));
|
||||
this->fill_surfaces.append(std::move(tops));
|
||||
|
||||
#ifdef SLIC3R_DEBUG_SLICE_PROCESSING
|
||||
export_region_fill_surfaces_to_svg_debug("4_process_external_surfaces-final");
|
||||
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
|
||||
}
|
||||
#else
|
||||
|
||||
//#define EXTERNAL_SURFACES_OFFSET_PARAMETERS ClipperLib::jtMiter, 3.
|
||||
//#define EXTERNAL_SURFACES_OFFSET_PARAMETERS ClipperLib::jtMiter, 1.5
|
||||
#define EXTERNAL_SURFACES_OFFSET_PARAMETERS ClipperLib::jtSquare, 0.
|
||||
|
@ -410,6 +761,7 @@ void LayerRegion::process_external_surfaces(const Layer *lower_layer, const Poly
|
|||
export_region_fill_surfaces_to_svg_debug("3_process_external_surfaces-final");
|
||||
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
|
||||
}
|
||||
#endif
|
||||
|
||||
void LayerRegion::prepare_fill_surfaces()
|
||||
{
|
||||
|
|
|
@ -1,3 +1,10 @@
|
|||
///|/ Copyright (c) Prusa Research 2016 - 2023 Vojtěch Bubník @bubnikv, Pavel Mikuš @Godrak, Lukáš Hejl @hejllukas, Lukáš Matěna @lukasmatena
|
||||
///|/ Copyright (c) SuperSlicer 2023 Remi Durand @supermerill
|
||||
///|/ Copyright (c) 2021 Ilya @xorza
|
||||
///|/ Copyright (c) Slic3r 2015 - 2016 Alessandro Ranellucci @alranel
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#include "PerimeterGenerator.hpp"
|
||||
#include "AABBTreeLines.hpp"
|
||||
#include "BridgeDetector.hpp"
|
||||
|
@ -419,10 +426,12 @@ static ClipperLib_Z::Paths clip_extrusion(const ClipperLib_Z::Path& subject, con
|
|||
clipper.AddPath(subject, ClipperLib_Z::ptSubject, false);
|
||||
clipper.AddPaths(clip, ClipperLib_Z::ptClip, true);
|
||||
|
||||
ClipperLib_Z::PolyTree clipped_polytree;
|
||||
ClipperLib_Z::Paths clipped_paths;
|
||||
clipper.Execute(clipType, clipped_polytree, ClipperLib_Z::pftNonZero, ClipperLib_Z::pftNonZero);
|
||||
ClipperLib_Z::PolyTreeToPaths(clipped_polytree, clipped_paths);
|
||||
{
|
||||
ClipperLib_Z::PolyTree clipped_polytree;
|
||||
clipper.Execute(clipType, clipped_polytree, ClipperLib_Z::pftNonZero, ClipperLib_Z::pftNonZero);
|
||||
ClipperLib_Z::PolyTreeToPaths(std::move(clipped_polytree), clipped_paths);
|
||||
}
|
||||
|
||||
// Clipped path could contain vertices from the clip with a Z coordinate equal to zero.
|
||||
// For those vertices, we must assign value based on the subject.
|
||||
|
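The change above narrows the PolyTree's lifetime to a block and hands it to the move-aware PolyTreeToPaths() overload, so contours are moved rather than copied into the output and the tree is released immediately. A sketch of the pattern (variable names illustrative):

ClipperLib_Z::Paths clipped_paths;
{
    ClipperLib_Z::PolyTree polytree;
    clipper.Execute(clipType, polytree, ClipperLib_Z::pftNonZero, ClipperLib_Z::pftNonZero);
    ClipperLib_Z::PolyTreeToPaths(std::move(polytree), clipped_paths);
}   // polytree is destroyed here; its contours now live inside clipped_paths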
@ -1774,29 +1783,7 @@ void PerimeterGenerator::process_arachne()
|
|||
config->precise_outer_wall ? -float(ext_perimeter_width / 2. - bead_width_0 / 2.)
|
||||
: -float(ext_perimeter_width / 2. - ext_perimeter_spacing / 2.));
|
||||
|
||||
double min_nozzle_diameter = *std::min_element(print_config->nozzle_diameter.values.begin(), print_config->nozzle_diameter.values.end());
|
||||
Arachne::WallToolPathsParams input_params;
|
||||
{
|
||||
if (const auto& min_feature_size_opt = object_config->min_feature_size)
|
||||
input_params.min_feature_size = min_feature_size_opt.value * 0.01 * min_nozzle_diameter;
|
||||
|
||||
if (this->layer_id == 0) {
|
||||
if (const auto& initial_layer_min_bead_width_opt = object_config->initial_layer_min_bead_width)
|
||||
input_params.min_bead_width = initial_layer_min_bead_width_opt.value * 0.01 * min_nozzle_diameter;
|
||||
} else {
|
||||
if (const auto& min_bead_width_opt = object_config->min_bead_width)
|
||||
input_params.min_bead_width = min_bead_width_opt.value * 0.01 * min_nozzle_diameter;
|
||||
}
|
||||
|
||||
if (const auto& wall_transition_filter_deviation_opt = object_config->wall_transition_filter_deviation)
|
||||
input_params.wall_transition_filter_deviation = wall_transition_filter_deviation_opt.value * 0.01 * min_nozzle_diameter;
|
||||
|
||||
if (const auto& wall_transition_length_opt = object_config->wall_transition_length)
|
||||
input_params.wall_transition_length = wall_transition_length_opt.value * 0.01 * min_nozzle_diameter;
|
||||
|
||||
input_params.wall_transition_angle = this->object_config->wall_transition_angle.value;
|
||||
input_params.wall_distribution_count = this->object_config->wall_distribution_count.value;
|
||||
}
|
||||
Arachne::WallToolPathsParams input_params = Arachne::make_paths_params(this->layer_id, *object_config, *print_config);
|
||||
coord_t wall_0_inset = 0;
|
||||
//if (config->precise_outer_wall)
|
||||
// wall_0_inset = 0.5 * (ext_perimeter_width + this->perimeter_flow.scaled_width() - ext_perimeter_spacing -
|
||||
|
|
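The block removed above converted percent-type Arachne options into absolute lengths relative to the smallest nozzle diameter; that conversion presumably now lives inside Arachne::make_paths_params(). A worked sketch of the arithmetic with illustrative values:

// Example values only (not from this commit).
double min_nozzle_diameter  = 0.4;   // mm, smallest nozzle of the printer
double min_feature_size_pct = 25.;   // option stored as a percentage
double min_feature_size_mm  = min_feature_size_pct * 0.01 * min_nozzle_diameter;   // = 0.1 mm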
|
@ -1,3 +1,15 @@
|
|||
///|/ Copyright (c) Prusa Research 2016 - 2023 Vojtěch Bubník @bubnikv, Pavel Mikuš @Godrak, Filip Sykala @Jony01, Lukáš Hejl @hejllukas, Enrico Turri @enricoturri1966, Lukáš Matěna @lukasmatena, Tomáš Mészáros @tamasmeszaros
|
||||
///|/ Copyright (c) Slic3r 2013 - 2016 Alessandro Ranellucci @alranel
|
||||
///|/ Copyright (c) 2014 Petr Ledvina @ledvinap
|
||||
///|/ Copyright (c) 2014 Kamil Kwolek
|
||||
///|/ Copyright (c) 2013 Jose Luis Perez Diez
|
||||
///|/
|
||||
///|/ ported from lib/Slic3r/Point.pm:
|
||||
///|/ Copyright (c) Prusa Research 2018 Vojtěch Bubník @bubnikv
|
||||
///|/ Copyright (c) Slic3r 2011 - 2015 Alessandro Ranellucci @alranel
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#include "Point.hpp"
|
||||
#include "Line.hpp"
|
||||
#include "MultiPoint.hpp"
|
||||
|
@@ -188,18 +200,28 @@ bool has_duplicate_points(std::vector<Point> &&pts)
return false;
}

template<bool IncludeBoundary>
BoundingBox get_extents(const Points &pts)
{
return BoundingBox(pts);
BoundingBox out;
BoundingBox::construct<IncludeBoundary>(out, pts.begin(), pts.end());
return out;
}
template BoundingBox get_extents<false>(const Points &pts);
template BoundingBox get_extents<true>(const Points &pts);

BoundingBox get_extents(const std::vector<Points> &pts)
// If IncludeBoundary is set, a bounding box is defined even for a single point;
// otherwise a bounding box is only defined if it has a positive area.
template<bool IncludeBoundary>
BoundingBox get_extents(const VecOfPoints &pts)
{
BoundingBox bbox;
for (const Points &p : pts)
bbox.merge(get_extents(p));
bbox.merge(get_extents<IncludeBoundary>(p));
return bbox;
}
template BoundingBox get_extents<false>(const VecOfPoints &pts);
template BoundingBox get_extents<true>(const VecOfPoints &pts);

BoundingBoxf get_extents(const std::vector<Vec2d> &pts)
{
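A hypothetical usage note for the IncludeBoundary template parameter introduced above, assuming BoundingBox keeps its usual public defined/min/max members; the helper name is made up.

#include <cassert>
#include "libslic3r/Point.hpp"
#include "libslic3r/BoundingBox.hpp"

using namespace Slic3r;

void include_boundary_example()
{
    Points single { Point(100, 100) };
    // Default (IncludeBoundary = false): a single point spans zero area, so the box stays undefined.
    BoundingBox loose = get_extents(single);
    // IncludeBoundary = true: the box is defined even for a degenerate, single-point input.
    BoundingBox tight = get_extents<true>(single);
    assert(! loose.defined);
    assert(tight.defined && tight.min == tight.max);
}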
@ -1,3 +1,14 @@
|
|||
///|/ Copyright (c) Prusa Research 2016 - 2023 Vojtěch Bubník @bubnikv, Pavel Mikuš @Godrak, Enrico Turri @enricoturri1966, Lukáš Matěna @lukasmatena, Lukáš Hejl @hejllukas, Filip Sykala @Jony01, Tomáš Mészáros @tamasmeszaros, Vojtěch Král @vojtechkral
|
||||
///|/ Copyright (c) SuperSlicer 2019 Remi Durand @supermerill
|
||||
///|/ Copyright (c) Slic3r 2013 - 2016 Alessandro Ranellucci @alranel
|
||||
///|/ Copyright (c) 2016 Mark Walker
|
||||
///|/
|
||||
///|/ ported from lib/Slic3r/Point.pm:
|
||||
///|/ Copyright (c) Prusa Research 2018 Vojtěch Bubník @bubnikv
|
||||
///|/ Copyright (c) Slic3r 2011 - 2015 Alessandro Ranellucci @alranel
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#ifndef slic3r_Point_hpp_
|
||||
#define slic3r_Point_hpp_
|
||||
|
||||
|
@ -55,6 +66,8 @@ using Pointfs = std::vector<Vec2d>;
|
|||
using Vec2ds = std::vector<Vec2d>;
|
||||
using Pointf3s = std::vector<Vec3d>;
|
||||
|
||||
using VecOfPoints = std::vector<Points>;
|
||||
|
||||
using Matrix2f = Eigen::Matrix<float, 2, 2, Eigen::DontAlign>;
|
||||
using Matrix2d = Eigen::Matrix<double, 2, 2, Eigen::DontAlign>;
|
||||
using Matrix3f = Eigen::Matrix<float, 3, 3, Eigen::DontAlign>;
|
||||
|
@ -265,8 +278,20 @@ inline Point lerp(const Point &a, const Point &b, double t)
|
|||
return ((1. - t) * a.cast<double>() + t * b.cast<double>()).cast<coord_t>();
|
||||
}
|
||||
|
||||
// if IncludeBoundary, then a bounding box is defined even for a single point.
|
||||
// otherwise a bounding box is only defined if it has a positive area.
|
||||
template<bool IncludeBoundary = false>
|
||||
BoundingBox get_extents(const Points &pts);
|
||||
BoundingBox get_extents(const std::vector<Points> &pts);
|
||||
extern template BoundingBox get_extents<false>(const Points &pts);
|
||||
extern template BoundingBox get_extents<true>(const Points &pts);
|
||||
|
||||
// if IncludeBoundary, then a bounding box is defined even for a single point.
|
||||
// otherwise a bounding box is only defined if it has a positive area.
|
||||
template<bool IncludeBoundary = false>
|
||||
BoundingBox get_extents(const VecOfPoints &pts);
|
||||
extern template BoundingBox get_extents<false>(const VecOfPoints &pts);
|
||||
extern template BoundingBox get_extents<true>(const VecOfPoints &pts);
|
||||
|
||||
BoundingBoxf get_extents(const std::vector<Vec2d> &pts);
|
||||
|
||||
// Test for duplicate points in a vector of points.
|
||||
|
|
|
@ -1,3 +1,15 @@
|
|||
///|/ Copyright (c) Prusa Research 2016 - 2023 Vojtěch Bubník @bubnikv, Lukáš Hejl @hejllukas, Enrico Turri @enricoturri1966
|
||||
///|/ Copyright (c) Slic3r 2013 - 2016 Alessandro Ranellucci @alranel
|
||||
///|/ Copyright (c) 2015 Maksim Derbasov @ntfshard
|
||||
///|/ Copyright (c) 2014 Petr Ledvina @ledvinap
|
||||
///|/
|
||||
///|/ ported from lib/Slic3r/Polyline.pm:
|
||||
///|/ Copyright (c) Prusa Research 2018 Vojtěch Bubník @bubnikv
|
||||
///|/ Copyright (c) Slic3r 2011 - 2014 Alessandro Ranellucci @alranel
|
||||
///|/ Copyright (c) 2012 Mark Hindess
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#include "BoundingBox.hpp"
|
||||
#include "Polyline.hpp"
|
||||
#include "Exception.hpp"
|
||||
|
@@ -549,6 +561,19 @@ ThickLines ThickPolyline::thicklines() const
return lines;
}

void ThickPolyline::start_at_index(int index)
{
assert(index >= 0 && index < this->points.size());
assert(this->points.front() == this->points.back() && this->width.front() == this->width.back());
if (index != 0 && index + 1 != int(this->points.size()) && this->points.front() == this->points.back() && this->width.front() == this->width.back()) {
this->points.pop_back();
assert(this->points.size() * 2 == this->width.size());
std::rotate(this->points.begin(), this->points.begin() + index, this->points.end());
std::rotate(this->width.begin(), this->width.begin() + 2 * index, this->width.end());
this->points.emplace_back(this->points.front());
}
}

Lines3 Polyline3::lines() const
{
Lines3 lines;
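A hypothetical usage sketch for the new ThickPolyline::start_at_index() above; the coordinates and widths are made up, and the per-segment width layout (two entries per segment) follows the assert in the implementation.

#include <vector>
#include "libslic3r/Polyline.hpp"

using namespace Slic3r;

void start_at_index_example()
{
    ThickPolyline loop;
    coord_t s = coord_t(scale_(10.));   // 10 mm in scaled coordinates
    // A closed square: the first point is repeated at the end.
    loop.points = { Point(coord_t(0), coord_t(0)), Point(s, coord_t(0)), Point(s, s), Point(coord_t(0), s), Point(coord_t(0), coord_t(0)) };
    // Two width entries per segment, i.e. width.size() == 2 * (points.size() - 1).
    loop.width = std::vector<coordf_t>(2 * (loop.points.size() - 1), scale_(0.45));
    loop.endpoints = { false, false };  // a loop has no open end points
    loop.start_at_index(2);
    // The loop now starts (and ends) at the former third vertex while keeping the same geometry.
}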
@ -1,3 +1,13 @@
|
|||
///|/ Copyright (c) Prusa Research 2016 - 2023 Tomáš Mészáros @tamasmeszaros, Pavel Mikuš @Godrak, Vojtěch Bubník @bubnikv, Lukáš Hejl @hejllukas, Lukáš Matěna @lukasmatena, Oleksandra Iushchenko @YuSanka, Enrico Turri @enricoturri1966
|
||||
///|/ Copyright (c) Slic3r 2013 - 2016 Alessandro Ranellucci @alranel
|
||||
///|/
|
||||
///|/ ported from lib/Slic3r/Polyline.pm:
|
||||
///|/ Copyright (c) Prusa Research 2018 Vojtěch Bubník @bubnikv
|
||||
///|/ Copyright (c) Slic3r 2011 - 2014 Alessandro Ranellucci @alranel
|
||||
///|/ Copyright (c) 2012 Mark Hindess
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#ifndef slic3r_Polyline_hpp_
|
||||
#define slic3r_Polyline_hpp_
|
||||
|
||||
|
@@ -221,6 +231,25 @@ inline void polylines_append(Polylines &dst, Polylines &&src)
}
}

// Merge polylines at their respective end points.
// dst_first: is the merge point at dst.begin() or dst.end()?
// src_first: is the merge point at src.begin() or src.end()?
// The orientation of the resulting polyline is unspecified: the output polyline may start
// with either the src piece or the dst piece.
template<typename PointsType>
inline void polylines_merge(PointsType &dst, bool dst_first, PointsType &&src, bool src_first)
{
if (dst_first) {
if (src_first)
std::reverse(dst.begin(), dst.end());
else
std::swap(dst, src);
} else if (! src_first)
std::reverse(src.begin(), src.end());
// Merge src into dst.
append(dst, std::move(src));
}

const Point& leftmost_point(const Polylines &polylines);

bool remove_degenerate(Polylines &polylines);
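A hypothetical illustration of the dst_first/src_first convention documented above; the coordinates are made up, and note that the function only orients the two pieces and concatenates them, it does not deduplicate the shared join point.

#include <utility>
#include "libslic3r/Polyline.hpp"

using namespace Slic3r;

void polylines_merge_example()
{
    Points dst { Point(0, 0), Point(10, 0) };    // ends at (10, 0)
    Points src { Point(10, 0), Point(20, 0) };   // begins at (10, 0)
    // The merge point is at dst.end() and src.begin(), hence dst_first = false, src_first = true.
    polylines_merge(dst, /*dst_first=*/false, std::move(src), /*src_first=*/true);
    // dst is now (0,0), (10,0), (10,0), (20,0); the caller decides how to handle the duplicated join point.
}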
@@ -242,6 +271,11 @@ public:
width.clear();
}

// Make this closed ThickPolyline start at the specified index.
// Note that this is only applicable to a closed ThickPolyline;
// it has no effect on an open ThickPolyline.
void start_at_index(int index);

std::vector<coordf_t> width;
std::pair<bool,bool> endpoints;
};
@ -1,3 +1,11 @@
|
|||
///|/ Copyright (c) Prusa Research 2017 - 2023 Oleksandra Iushchenko @YuSanka, Lukáš Matěna @lukasmatena, Tomáš Mészáros @tamasmeszaros, Lukáš Hejl @hejllukas, Vojtěch Bubník @bubnikv, Pavel Mikuš @Godrak, David Kocík @kocikdav, Enrico Turri @enricoturri1966, Vojtěch Král @vojtechkral
|
||||
///|/ Copyright (c) 2021 Martin Budden
|
||||
///|/ Copyright (c) 2021 Ilya @xorza
|
||||
///|/ Copyright (c) 2019 John Drake @foxox
|
||||
///|/ Copyright (c) 2018 Martin Loidl @LoidlM
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#include <cassert>
|
||||
|
||||
#include "Config.hpp"
|
||||
|
@ -718,7 +726,7 @@ bool Preset::has_cali_lines(PresetBundle* preset_bundle)
|
|||
static std::vector<std::string> s_Preset_print_options {
|
||||
"layer_height", "initial_layer_print_height", "wall_loops", "slice_closing_radius", "spiral_mode", "slicing_mode",
|
||||
"top_shell_layers", "top_shell_thickness", "bottom_shell_layers", "bottom_shell_thickness",
|
||||
"extra_perimeters_on_overhangs", "ensure_vertical_shell_thickness", "reduce_crossing_wall", "detect_thin_wall", "detect_overhang_wall",
|
||||
"extra_perimeters_on_overhangs", "reduce_crossing_wall", "detect_thin_wall", "detect_overhang_wall",
|
||||
"seam_position", "staggered_inner_seams", "wall_infill_order", "sparse_infill_density", "sparse_infill_pattern", "top_surface_pattern", "bottom_surface_pattern",
|
||||
"infill_direction",
|
||||
"minimum_sparse_infill_area", "reduce_infill_retraction","internal_solid_infill_pattern",
|
||||
|
@ -748,7 +756,6 @@ static std::vector<std::string> s_Preset_print_options {
|
|||
"flush_into_infill", "flush_into_objects", "flush_into_support",
|
||||
"tree_support_branch_angle", "tree_support_angle_slow", "tree_support_wall_count", "tree_support_top_rate", "tree_support_branch_distance", "tree_support_tip_diameter",
|
||||
"tree_support_branch_diameter", "tree_support_branch_diameter_angle", "tree_support_branch_diameter_double_wall",
|
||||
"detect_narrow_internal_solid_infill",
|
||||
"gcode_add_line_number", "enable_arc_fitting", "infill_combination", /*"adaptive_layer_height",*/
|
||||
"support_bottom_interface_spacing", "enable_overhang_speed", "slowdown_for_curled_perimeters", "overhang_1_4_speed", "overhang_2_4_speed", "overhang_3_4_speed", "overhang_4_4_speed",
|
||||
"initial_layer_infill_speed", "only_one_wall_top",
|
||||
|
|
|
@ -1,3 +1,24 @@
|
|||
///|/ Copyright (c) Prusa Research 2016 - 2023 Vojtěch Bubník @bubnikv, Lukáš Matěna @lukasmatena, Lukáš Hejl @hejllukas, Tomáš Mészáros @tamasmeszaros, Oleksandra Iushchenko @YuSanka, Pavel Mikuš @Godrak, David Kocík @kocikdav, Enrico Turri @enricoturri1966, Filip Sykala @Jony01, Vojtěch Král @vojtechkral
|
||||
///|/ Copyright (c) 2023 Pedro Lamas @PedroLamas
|
||||
///|/ Copyright (c) 2023 Mimoja @Mimoja
|
||||
///|/ Copyright (c) 2020 - 2021 Sergey Kovalev @RandoMan70
|
||||
///|/ Copyright (c) 2021 Niall Sheridan @nsheridan
|
||||
///|/ Copyright (c) 2021 Martin Budden
|
||||
///|/ Copyright (c) 2021 Ilya @xorza
|
||||
///|/ Copyright (c) 2020 Paul Arden @ardenpm
|
||||
///|/ Copyright (c) 2020 rongith
|
||||
///|/ Copyright (c) 2019 Spencer Owen @spuder
|
||||
///|/ Copyright (c) 2019 Stephan Reichhelm @stephanr
|
||||
///|/ Copyright (c) 2018 Martin Loidl @LoidlM
|
||||
///|/ Copyright (c) SuperSlicer 2018 Remi Durand @supermerill
|
||||
///|/ Copyright (c) 2016 - 2017 Joseph Lenox @lordofhyphens
|
||||
///|/ Copyright (c) Slic3r 2013 - 2016 Alessandro Ranellucci @alranel
|
||||
///|/ Copyright (c) 2016 Vanessa Ezekowitz @VanessaE
|
||||
///|/ Copyright (c) 2015 Alexander Rössler @machinekoder
|
||||
///|/ Copyright (c) 2014 Petr Ledvina @ledvinap
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
#include "PrintConfig.hpp"
|
||||
#include "Config.hpp"
|
||||
#include "I18N.hpp"
|
||||
|
@ -1141,14 +1162,6 @@ void PrintConfigDef::init_fff_params()
|
|||
def->mode = comAdvanced;
|
||||
def->set_default_value(new ConfigOptionStrings { " " });
|
||||
|
||||
def = this->add("ensure_vertical_shell_thickness", coBool);
|
||||
def->label = L("Ensure vertical shell thickness");
|
||||
def->category = L("Strength");
|
||||
def->tooltip = L("Add solid infill near sloping surfaces to guarantee the vertical shell thickness "
|
||||
"(top+bottom solid layers)");
|
||||
def->mode = comAdvanced;
|
||||
def->set_default_value(new ConfigOptionBool(true));
|
||||
|
||||
auto def_top_fill_pattern = def = this->add("top_surface_pattern", coEnum);
|
||||
def->label = L("Top surface pattern");
|
||||
def->category = L("Strength");
|
||||
|
@ -4346,15 +4359,6 @@ def = this->add("filament_loading_speed", coFloats);
|
|||
default: assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
def = this->add("detect_narrow_internal_solid_infill", coBool);
|
||||
def->label = L("Detect narrow internal solid infill");
|
||||
def->category = L("Strength");
|
||||
def->tooltip = L("This option will auto detect narrow internal solid infill area."
|
||||
" If enabled, concentric pattern will be used for the area to speed printing up."
|
||||
" Otherwise, rectilinear pattern is used defaultly.");
|
||||
def->mode = comAdvanced;
|
||||
def->set_default_value(new ConfigOptionBool(true));
|
||||
}
|
||||
|
||||
void PrintConfigDef::init_extruder_option_keys()
|
||||
|
@@ -5155,7 +5159,8 @@ void PrintConfigDef::handle_legacy(t_config_option_key &opt_key, std::string &va
"remove_freq_sweep", "remove_bed_leveling", "remove_extrusion_calibration",
"support_transition_line_width", "support_transition_speed", "bed_temperature", "bed_temperature_initial_layer",
"can_switch_nozzle_type", "can_add_auxiliary_fan", "extra_flush_volume", "spaghetti_detector", "adaptive_layer_height",
"z_hop_type", "z_lift_type", "bed_temperature_difference"
"z_hop_type", "z_lift_type", "bed_temperature_difference",
"detect_narrow_internal_solid_infill", "ensure_vertical_shell_thickness",
};

if (ignore.find(opt_key) != ignore.end()) {
@ -1,3 +1,20 @@
|
|||
///|/ Copyright (c) Prusa Research 2016 - 2023 Vojtěch Bubník @bubnikv, Lukáš Matěna @lukasmatena, Lukáš Hejl @hejllukas, Tomáš Mészáros @tamasmeszaros, Pavel Mikuš @Godrak, David Kocík @kocikdav, Oleksandra Iushchenko @YuSanka, Vojtěch Král @vojtechkral, Enrico Turri @enricoturri1966
|
||||
///|/ Copyright (c) 2023 Pedro Lamas @PedroLamas
|
||||
///|/ Copyright (c) 2020 Sergey Kovalev @RandoMan70
|
||||
///|/ Copyright (c) 2021 Martin Budden
|
||||
///|/ Copyright (c) 2021 Ilya @xorza
|
||||
///|/ Copyright (c) 2020 Paul Arden @ardenpm
|
||||
///|/ Copyright (c) 2019 Spencer Owen @spuder
|
||||
///|/ Copyright (c) 2019 Stephan Reichhelm @stephanr
|
||||
///|/ Copyright (c) 2018 Martin Loidl @LoidlM
|
||||
///|/ Copyright (c) SuperSlicer 2018 Remi Durand @supermerill
|
||||
///|/ Copyright (c) 2016 - 2017 Joseph Lenox @lordofhyphens
|
||||
///|/ Copyright (c) Slic3r 2013 - 2015 Alessandro Ranellucci @alranel
|
||||
///|/ Copyright (c) 2015 Maksim Derbasov @ntfshard
|
||||
///|/ Copyright (c) 2015 Alexander Rössler @machinekoder
|
||||
///|/
|
||||
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
|
||||
///|/
|
||||
// Configuration store of Slic3r.
|
||||
//
|
||||
// The configuration store is either static or dynamic.
|
||||
|
@ -53,7 +70,7 @@ enum InfillPattern : int {
|
|||
ipConcentric, ipRectilinear, ipGrid, ipLine, ipCubic, ipTriangles, ipStars, ipGyroid, ipHoneycomb, ipAdaptiveCubic, ipMonotonic, ipMonotonicLine, ipAlignedRectilinear, ip3DHoneycomb,
|
||||
ipHilbertCurve, ipArchimedeanChords, ipOctagramSpiral, ipSupportCubic, ipSupportBase, ipConcentricInternal,
|
||||
ipLightning,
|
||||
ipCount,
|
||||
ipCount,
|
||||
};
|
||||
|
||||
enum class IroningType {
|
||||
|
@ -723,7 +740,6 @@ PRINT_CONFIG_CLASS_DEFINE(
|
|||
((ConfigOptionBool, tree_support_adaptive_layer_height))
|
||||
((ConfigOptionBool, tree_support_auto_brim))
|
||||
((ConfigOptionFloat, tree_support_brim_width))
|
||||
((ConfigOptionBool, detect_narrow_internal_solid_infill))
|
||||
// ((ConfigOptionBool, adaptive_layer_height))
|
||||
((ConfigOptionFloat, support_bottom_interface_spacing))
|
||||
((ConfigOptionEnum<PerimeterGeneratorType>, wall_generator))
|
||||
|
@ -755,7 +771,6 @@ PRINT_CONFIG_CLASS_DEFINE(
|
|||
((ConfigOptionFloat, bridge_flow))
|
||||
((ConfigOptionFloat, bridge_speed))
|
||||
((ConfigOptionFloatOrPercent, internal_bridge_speed))
|
||||
((ConfigOptionBool, ensure_vertical_shell_thickness))
|
||||
((ConfigOptionEnum<InfillPattern>, top_surface_pattern))
|
||||
((ConfigOptionEnum<InfillPattern>, bottom_surface_pattern))
|
||||
((ConfigOptionEnum<InfillPattern>, internal_solid_infill_pattern))
|
||||
|
|
|
@@ -434,6 +434,22 @@ void PrintObject::prepare_infill()
m_print->throw_if_canceled();
}

// Add solid fills to ensure the shell vertical thickness.
this->discover_vertical_shells();
m_print->throw_if_canceled();

// Debugging output.
#ifdef SLIC3R_DEBUG_SLICE_PROCESSING
for (size_t region_id = 0; region_id < this->num_printing_regions(); ++ region_id) {
for (const Layer *layer : m_layers) {
LayerRegion *layerm = layer->m_regions[region_id];
layerm->export_region_slices_to_svg_debug("3_discover_vertical_shells-final");
layerm->export_region_fill_surfaces_to_svg_debug("3_discover_vertical_shells-final");
} // for each layer
} // for each region
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */

// this will detect bridges and reverse bridges
// and rearrange top/bottom/internal surfaces
// It produces enlarged overlapping bridging areas.
@ -446,17 +462,13 @@ void PrintObject::prepare_infill()
|
|||
this->process_external_surfaces();
|
||||
m_print->throw_if_canceled();
|
||||
|
||||
// Add solid fills to ensure the shell vertical thickness.
|
||||
this->discover_vertical_shells();
|
||||
m_print->throw_if_canceled();
|
||||
|
||||
// Debugging output.
|
||||
#ifdef SLIC3R_DEBUG_SLICE_PROCESSING
|
||||
for (size_t region_id = 0; region_id < this->num_printing_regions(); ++ region_id) {
|
||||
for (const Layer *layer : m_layers) {
|
||||
LayerRegion *layerm = layer->m_regions[region_id];
|
||||
layerm->export_region_slices_to_svg_debug("6_discover_vertical_shells-final");
|
||||
layerm->export_region_fill_surfaces_to_svg_debug("6_discover_vertical_shells-final");
|
||||
layerm->export_region_slices_to_svg_debug("3_process_external_surfaces-final");
|
||||
layerm->export_region_fill_surfaces_to_svg_debug("3_process_external_surfaces-final");
|
||||
} // for each layer
|
||||
} // for each region
|
||||
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
|
||||
|
@ -1045,7 +1057,6 @@ bool PrintObject::invalidate_state_by_config_options(
|
|||
|| opt_key == "solid_infill_filament"
|
||||
|| opt_key == "sparse_infill_line_width"
|
||||
|| opt_key == "infill_direction"
|
||||
|| opt_key == "ensure_vertical_shell_thickness"
|
||||
|| opt_key == "bridge_angle"
|
||||
//BBS
|
||||
|| opt_key == "bridge_density") {
|
||||
|
@ -1419,7 +1430,7 @@ void PrintObject::process_external_surfaces()
|
|||
if (has_voids && m_layers.size() > 1) {
|
||||
// All but stInternal fill surfaces will get expanded and possibly trimmed.
|
||||
std::vector<unsigned char> layer_expansions_and_voids(m_layers.size(), false);
|
||||
for (size_t layer_idx = 0; layer_idx < m_layers.size(); ++ layer_idx) {
|
||||
for (size_t layer_idx = 1; layer_idx < m_layers.size(); ++ layer_idx) {
|
||||
const Layer *layer = m_layers[layer_idx];
|
||||
bool expansions = false;
|
||||
bool voids = false;
|
||||
|
@@ -1445,6 +1456,8 @@ void PrintObject::process_external_surfaces()
[this, &surfaces_covered, &layer_expansions_and_voids, unsupported_width](const tbb::blocked_range<size_t>& range) {
for (size_t layer_idx = range.begin(); layer_idx < range.end(); ++ layer_idx)
if (layer_expansions_and_voids[layer_idx + 1]) {
// Layer above is partially filled with solid infill (top, bottom, bridging...),
// while some sparse infill regions are empty (0% infill).
m_print->throw_if_canceled();
Polygons voids;
for (const LayerRegion *layerm : m_layers[layer_idx]->regions()) {
@ -1470,7 +1483,9 @@ void PrintObject::process_external_surfaces()
|
|||
m_print->throw_if_canceled();
|
||||
// BOOST_LOG_TRIVIAL(trace) << "Processing external surface, layer" << m_layers[layer_idx]->print_z;
|
||||
m_layers[layer_idx]->get_region(int(region_id))->process_external_surfaces(
|
||||
// lower layer
|
||||
(layer_idx == 0) ? nullptr : m_layers[layer_idx - 1],
|
||||
// lower layer polygons with density > 0%
|
||||
(layer_idx == 0 || surfaces_covered.empty() || surfaces_covered[layer_idx - 1].empty()) ? nullptr : &surfaces_covered[layer_idx - 1]);
|
||||
}
|
||||
}
|
||||
|
@ -1495,36 +1510,14 @@ void PrintObject::discover_vertical_shells()
|
|||
};
|
||||
bool spiral_mode = this->print()->config().spiral_mode.value;
|
||||
size_t num_layers = spiral_mode ? std::min(size_t(this->printing_region(0).config().bottom_shell_layers), m_layers.size()) : m_layers.size();
|
||||
coordf_t min_layer_height = this->slicing_parameters().min_layer_height;
|
||||
// Does this region possibly produce more than 1 top or bottom layer?
|
||||
auto has_extra_layers_fn = [min_layer_height](const PrintRegionConfig &config) {
|
||||
auto num_extra_layers = [min_layer_height](int num_solid_layers, coordf_t min_shell_thickness) {
|
||||
if (num_solid_layers == 0)
|
||||
return 0;
|
||||
int n = num_solid_layers - 1;
|
||||
int n2 = int(ceil(min_shell_thickness / min_layer_height));
|
||||
return std::max(n, n2 - 1);
|
||||
};
|
||||
return num_extra_layers(config.top_shell_layers, config.top_shell_thickness) +
|
||||
num_extra_layers(config.bottom_shell_layers, config.bottom_shell_thickness) > 0;
|
||||
};
|
||||
std::vector<DiscoverVerticalShellsCacheEntry> cache_top_botom_regions(num_layers, DiscoverVerticalShellsCacheEntry());
|
||||
bool top_bottom_surfaces_all_regions = this->num_printing_regions() > 1 && ! m_config.interface_shells.value;
|
||||
// static constexpr const float top_bottom_expansion_coeff = 1.05f;
|
||||
// Just a tiny fraction of an infill extrusion width to merge neighbor regions reliably.
|
||||
static constexpr const float top_bottom_expansion_coeff = 0.05f;
|
||||
if (top_bottom_surfaces_all_regions) {
|
||||
// This is a multi-material print and interface_shells are disabled, meaning that the vertical shell thickness
|
||||
// is calculated over all materials.
|
||||
// Is the "ensure vertical wall thickness" applicable to any region?
|
||||
bool has_extra_layers = false;
|
||||
for (size_t region_id = 0; region_id < this->num_printing_regions(); ++region_id) {
|
||||
const PrintRegionConfig &config = this->printing_region(region_id).config();
|
||||
if (config.ensure_vertical_shell_thickness.value && has_extra_layers_fn(config)) {
|
||||
has_extra_layers = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (! has_extra_layers)
|
||||
// The "ensure vertical wall thickness" feature is not applicable to any of the regions. Quit.
|
||||
return;
|
||||
BOOST_LOG_TRIVIAL(debug) << "Discovering vertical shells in parallel - start : cache top / bottom";
|
||||
//FIXME Improve the heuristics for a grain size.
|
||||
size_t grain_size = std::max(num_layers / 16, size_t(1));
|
||||
|
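For reference, the num_extra_layers heuristic above can be restated as a standalone function with a worked example; the numbers below are hypothetical and the lambda in the diff is the authoritative version.

#include <algorithm>
#include <cmath>

// Extra top/bottom layers implied by a solid-layer count and a minimum shell thickness.
static int num_extra_layers(int num_solid_layers, double min_shell_thickness, double min_layer_height)
{
    if (num_solid_layers == 0)
        return 0;
    int by_count     = num_solid_layers - 1;
    int by_thickness = int(std::ceil(min_shell_thickness / min_layer_height)) - 1;
    return std::max(by_count, by_thickness);
}

// Example: with top_shell_layers = 3, top_shell_thickness = 1.0 mm and min_layer_height = 0.07 mm,
// the count gives 2 extra layers while the thickness gives ceil(1.0 / 0.07) - 1 = 14,
// so the thickness requirement dominates and the region qualifies for the vertical shell pass.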
@ -1545,14 +1538,14 @@ void PrintObject::discover_vertical_shells()
|
|||
++ debug_idx;
|
||||
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
|
||||
for (size_t region_id = 0; region_id < num_regions; ++ region_id) {
|
||||
LayerRegion &layerm = *layer.m_regions[region_id];
|
||||
float min_perimeter_infill_spacing = float(layerm.flow(frSolidInfill).scaled_spacing()) * 1.05f;
|
||||
LayerRegion &layerm = *layer.m_regions[region_id];
|
||||
float top_bottom_expansion = float(layerm.flow(frSolidInfill).scaled_spacing()) * top_bottom_expansion_coeff;
|
||||
// Top surfaces.
|
||||
append(cache.top_surfaces, offset(layerm.slices.filter_by_type(stTop), min_perimeter_infill_spacing));
|
||||
append(cache.top_surfaces, offset(layerm.fill_surfaces.filter_by_type(stTop), min_perimeter_infill_spacing));
|
||||
append(cache.top_surfaces, offset(layerm.slices.filter_by_type(stTop), top_bottom_expansion));
|
||||
// append(cache.top_surfaces, offset(layerm.fill_surfaces.filter_by_type(stTop), top_bottom_expansion));
|
||||
// Bottom surfaces.
|
||||
append(cache.bottom_surfaces, offset(layerm.slices.filter_by_types(surfaces_bottom), min_perimeter_infill_spacing));
|
||||
append(cache.bottom_surfaces, offset(layerm.fill_surfaces.filter_by_types(surfaces_bottom), min_perimeter_infill_spacing));
|
||||
append(cache.bottom_surfaces, offset(layerm.slices.filter_by_types(surfaces_bottom), top_bottom_expansion));
|
||||
// append(cache.bottom_surfaces, offset(layerm.fill_surfaces.filter_by_types(surfaces_bottom), top_bottom_expansion));
|
||||
// Calculate the maximum perimeter offset as if the slice was extruded with a single extruder only.
|
||||
// First find the maximum number of perimeters per region slice.
|
||||
unsigned int perimeters = 0;
|
||||
|
@ -1594,16 +1587,6 @@ void PrintObject::discover_vertical_shells()
|
|||
}
|
||||
|
||||
for (size_t region_id = 0; region_id < this->num_printing_regions(); ++ region_id) {
|
||||
PROFILE_BLOCK(discover_vertical_shells_region);
|
||||
|
||||
const PrintRegion ®ion = this->printing_region(region_id);
|
||||
if (! region.config().ensure_vertical_shell_thickness.value)
|
||||
// This region will be handled by discover_horizontal_shells().
|
||||
continue;
|
||||
if (! has_extra_layers_fn(region.config()))
|
||||
// Zero or 1 layer, there is no additional vertical wall thickness enforced.
|
||||
continue;
|
||||
|
||||
//FIXME Improve the heuristics for a grain size.
|
||||
size_t grain_size = std::max(num_layers / 16, size_t(1));
|
||||
|
||||
|
@ -1617,16 +1600,16 @@ void PrintObject::discover_vertical_shells()
|
|||
const std::initializer_list<SurfaceType> surfaces_bottom { stBottom, stBottomBridge };
|
||||
for (size_t idx_layer = range.begin(); idx_layer < range.end(); ++ idx_layer) {
|
||||
m_print->throw_if_canceled();
|
||||
Layer &layer = *m_layers[idx_layer];
|
||||
LayerRegion &layerm = *layer.m_regions[region_id];
|
||||
float min_perimeter_infill_spacing = float(layerm.flow(frSolidInfill).scaled_spacing()) * 1.05f;
|
||||
Layer &layer = *m_layers[idx_layer];
|
||||
LayerRegion &layerm = *layer.m_regions[region_id];
|
||||
float top_bottom_expansion = float(layerm.flow(frSolidInfill).scaled_spacing()) * top_bottom_expansion_coeff;
|
||||
// Top surfaces.
|
||||
auto &cache = cache_top_botom_regions[idx_layer];
|
||||
cache.top_surfaces = offset(layerm.slices.filter_by_type(stTop), min_perimeter_infill_spacing);
|
||||
append(cache.top_surfaces, offset(layerm.fill_surfaces.filter_by_type(stTop), min_perimeter_infill_spacing));
|
||||
cache.top_surfaces = offset(layerm.slices.filter_by_type(stTop), top_bottom_expansion);
|
||||
// append(cache.top_surfaces, offset(layerm.fill_surfaces.filter_by_type(stTop), top_bottom_expansion));
|
||||
// Bottom surfaces.
|
||||
cache.bottom_surfaces = offset(layerm.slices.filter_by_types(surfaces_bottom), min_perimeter_infill_spacing);
|
||||
append(cache.bottom_surfaces, offset(layerm.fill_surfaces.filter_by_types(surfaces_bottom), min_perimeter_infill_spacing));
|
||||
cache.bottom_surfaces = offset(layerm.slices.filter_by_types(surfaces_bottom), top_bottom_expansion);
|
||||
// append(cache.bottom_surfaces, offset(layerm.fill_surfaces.filter_by_types(surfaces_bottom), top_bottom_expansion));
|
||||
// Holes over all regions. Only collect them once, they are valid for all region_id iterations.
|
||||
if (cache.holes.empty()) {
|
||||
for (size_t region_id = 0; region_id < layer.regions().size(); ++ region_id)
|
||||
|
@ -1639,13 +1622,13 @@ void PrintObject::discover_vertical_shells()
|
|||
}
|
||||
|
||||
BOOST_LOG_TRIVIAL(debug) << "Discovering vertical shells for region " << region_id << " in parallel - start : ensure vertical wall thickness";
|
||||
grain_size = 1;
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<size_t>(0, num_layers, grain_size),
|
||||
[this, region_id, &cache_top_botom_regions]
|
||||
(const tbb::blocked_range<size_t>& range) {
|
||||
// printf("discover_vertical_shells from %d to %d\n", range.begin(), range.end());
|
||||
for (size_t idx_layer = range.begin(); idx_layer < range.end(); ++ idx_layer) {
|
||||
PROFILE_BLOCK(discover_vertical_shells_region_layer);
|
||||
m_print->throw_if_canceled();
|
||||
#ifdef SLIC3R_DEBUG_SLICE_PROCESSING
|
||||
static size_t debug_idx = 0;
|
||||
|
@ -1657,12 +1640,12 @@ void PrintObject::discover_vertical_shells()
|
|||
const PrintRegionConfig ®ion_config = layerm->region().config();
|
||||
|
||||
#ifdef SLIC3R_DEBUG_SLICE_PROCESSING
|
||||
layerm->export_region_slices_to_svg_debug("4_discover_vertical_shells-initial");
|
||||
layerm->export_region_fill_surfaces_to_svg_debug("4_discover_vertical_shells-initial");
|
||||
layerm->export_region_slices_to_svg_debug("3_discover_vertical_shells-initial");
|
||||
layerm->export_region_fill_surfaces_to_svg_debug("3_discover_vertical_shells-initial");
|
||||
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
|
||||
|
||||
Flow solid_infill_flow = layerm->flow(frSolidInfill);
|
||||
coord_t infill_line_spacing = solid_infill_flow.scaled_spacing();
|
||||
coord_t infill_line_spacing = solid_infill_flow.scaled_spacing();
|
||||
// Find a union of perimeters below / above this surface to guarantee a minimum shell thickness.
|
||||
Polygons shell;
|
||||
Polygons holes;
|
||||
|
@ -1670,88 +1653,117 @@ void PrintObject::discover_vertical_shells()
|
|||
ExPolygons shell_ex;
|
||||
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
|
||||
float min_perimeter_infill_spacing = float(infill_line_spacing) * 1.05f;
|
||||
{
|
||||
PROFILE_BLOCK(discover_vertical_shells_region_layer_collect);
|
||||
#if 0
|
||||
// #ifdef SLIC3R_DEBUG_SLICE_PROCESSING
|
||||
{
|
||||
Slic3r::SVG svg_cummulative(debug_out_path("discover_vertical_shells-perimeters-before-union-run%d.svg", debug_idx), this->bounding_box());
|
||||
for (int n = (int)idx_layer - n_extra_bottom_layers; n <= (int)idx_layer + n_extra_top_layers; ++ n) {
|
||||
if (n < 0 || n >= (int)m_layers.size())
|
||||
continue;
|
||||
ExPolygons &expolys = m_layers[n]->perimeter_expolygons;
|
||||
for (size_t i = 0; i < expolys.size(); ++ i) {
|
||||
Slic3r::SVG svg(debug_out_path("discover_vertical_shells-perimeters-before-union-run%d-layer%d-expoly%d.svg", debug_idx, n, i), get_extents(expolys[i]));
|
||||
svg.draw(expolys[i]);
|
||||
svg.draw_outline(expolys[i].contour, "black", scale_(0.05));
|
||||
svg.draw_outline(expolys[i].holes, "blue", scale_(0.05));
|
||||
svg.Close();
|
||||
{
|
||||
Slic3r::SVG svg_cummulative(debug_out_path("discover_vertical_shells-perimeters-before-union-run%d.svg", debug_idx), this->bounding_box());
|
||||
for (int n = (int)idx_layer - n_extra_bottom_layers; n <= (int)idx_layer + n_extra_top_layers; ++ n) {
|
||||
if (n < 0 || n >= (int)m_layers.size())
|
||||
continue;
|
||||
ExPolygons &expolys = m_layers[n]->perimeter_expolygons;
|
||||
for (size_t i = 0; i < expolys.size(); ++ i) {
|
||||
Slic3r::SVG svg(debug_out_path("discover_vertical_shells-perimeters-before-union-run%d-layer%d-expoly%d.svg", debug_idx, n, i), get_extents(expolys[i]));
|
||||
svg.draw(expolys[i]);
|
||||
svg.draw_outline(expolys[i].contour, "black", scale_(0.05));
|
||||
svg.draw_outline(expolys[i].holes, "blue", scale_(0.05));
|
||||
svg.Close();
|
||||
|
||||
svg_cummulative.draw(expolys[i]);
|
||||
svg_cummulative.draw_outline(expolys[i].contour, "black", scale_(0.05));
|
||||
svg_cummulative.draw_outline(expolys[i].holes, "blue", scale_(0.05));
|
||||
}
|
||||
svg_cummulative.draw(expolys[i]);
|
||||
svg_cummulative.draw_outline(expolys[i].contour, "black", scale_(0.05));
|
||||
svg_cummulative.draw_outline(expolys[i].holes, "blue", scale_(0.05));
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
|
||||
polygons_append(holes, cache_top_botom_regions[idx_layer].holes);
|
||||
if (int n_top_layers = region_config.top_shell_layers.value; n_top_layers > 0) {
|
||||
// Gather top regions projected to this layer.
|
||||
coordf_t print_z = layer->print_z;
|
||||
for (int i = int(idx_layer) + 1;
|
||||
i < int(cache_top_botom_regions.size()) &&
|
||||
(i < int(idx_layer) + n_top_layers ||
|
||||
m_layers[i]->print_z - print_z < region_config.top_shell_thickness - EPSILON);
|
||||
++ i) {
|
||||
const DiscoverVerticalShellsCacheEntry &cache = cache_top_botom_regions[i];
|
||||
if (! holes.empty())
|
||||
holes = intersection(holes, cache.holes);
|
||||
if (! cache.top_surfaces.empty()) {
|
||||
polygons_append(shell, cache.top_surfaces);
|
||||
// Running the union_ using the Clipper library piece by piece is cheaper
|
||||
// than running the union_ all at once.
|
||||
shell = union_(shell);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (int n_bottom_layers = region_config.bottom_shell_layers.value; n_bottom_layers > 0) {
|
||||
// Gather bottom regions projected to this layer.
|
||||
coordf_t bottom_z = layer->bottom_z();
|
||||
for (int i = int(idx_layer) - 1;
|
||||
i >= 0 &&
|
||||
(i > int(idx_layer) - n_bottom_layers ||
|
||||
bottom_z - m_layers[i]->bottom_z() < region_config.bottom_shell_thickness - EPSILON);
|
||||
-- i) {
|
||||
const DiscoverVerticalShellsCacheEntry &cache = cache_top_botom_regions[i];
|
||||
if (! holes.empty())
|
||||
holes = intersection(holes, cache.holes);
|
||||
if (! cache.bottom_surfaces.empty()) {
|
||||
polygons_append(shell, cache.bottom_surfaces);
|
||||
// Running the union_ using the Clipper library piece by piece is cheaper
|
||||
// than running the union_ all at once.
|
||||
shell = union_(shell);
|
||||
}
|
||||
}
|
||||
}
|
||||
#ifdef SLIC3R_DEBUG_SLICE_PROCESSING
|
||||
{
|
||||
Slic3r::SVG svg(debug_out_path("discover_vertical_shells-perimeters-before-union-%d.svg", debug_idx), get_extents(shell));
|
||||
svg.draw(shell);
|
||||
svg.draw_outline(shell, "black", scale_(0.05));
|
||||
svg.Close();
|
||||
polygons_append(holes, cache_top_botom_regions[idx_layer].holes);
|
||||
auto combine_holes = [&holes](const Polygons &holes2) {
|
||||
if (holes.empty() || holes2.empty())
|
||||
holes.clear();
|
||||
else
|
||||
holes = intersection(holes, holes2);
|
||||
};
|
||||
auto combine_shells = [&shell](const Polygons &shells2) {
|
||||
if (shell.empty())
|
||||
shell = std::move(shells2);
|
||||
else if (! shells2.empty()) {
|
||||
polygons_append(shell, shells2);
|
||||
// Running the union_ using the Clipper library piece by piece is cheaper
|
||||
// than running the union_ all at once.
|
||||
shell = union_(shell);
|
||||
}
|
||||
};
|
||||
static constexpr const bool one_more_layer_below_top_bottom_surfaces = false;
|
||||
if (int n_top_layers = region_config.top_shell_layers.value; n_top_layers > 0) {
|
||||
// Gather top regions projected to this layer.
|
||||
coordf_t print_z = layer->print_z;
|
||||
int i = int(idx_layer) + 1;
|
||||
int itop = int(idx_layer) + n_top_layers;
|
||||
bool at_least_one_top_projected = false;
|
||||
for (; i < int(cache_top_botom_regions.size()) &&
|
||||
(i < itop || m_layers[i]->print_z - print_z < region_config.top_shell_thickness - EPSILON);
|
||||
++ i) {
|
||||
at_least_one_top_projected = true;
|
||||
const DiscoverVerticalShellsCacheEntry &cache = cache_top_botom_regions[i];
|
||||
combine_holes(cache.holes);
|
||||
combine_shells(cache.top_surfaces);
|
||||
}
|
||||
if (!at_least_one_top_projected && i < int(cache_top_botom_regions.size())) {
|
||||
// Let's consider this a special case: with only one top solid layer and minimal shell thickness settings, the
// boundaries of the solid layers are not anchored over/under perimeters, so let's fix that by adding at least one
// perimeter width of area.
|
||||
Polygons anchor_area = intersection(expand(cache_top_botom_regions[idx_layer].top_surfaces,
|
||||
layerm->flow(frExternalPerimeter).scaled_spacing()),
|
||||
to_polygons(m_layers[i]->lslices));
|
||||
combine_shells(anchor_area);
|
||||
}
|
||||
|
||||
if (one_more_layer_below_top_bottom_surfaces)
|
||||
if (i < int(cache_top_botom_regions.size()) &&
|
||||
(i <= itop || m_layers[i]->bottom_z() - print_z < region_config.top_shell_thickness - EPSILON))
|
||||
combine_holes(cache_top_botom_regions[i].holes);
|
||||
}
|
||||
if (int n_bottom_layers = region_config.bottom_shell_layers.value; n_bottom_layers > 0) {
|
||||
// Gather bottom regions projected to this layer.
|
||||
coordf_t bottom_z = layer->bottom_z();
|
||||
int i = int(idx_layer) - 1;
|
||||
int ibottom = int(idx_layer) - n_bottom_layers;
|
||||
bool at_least_one_bottom_projected = false;
|
||||
for (; i >= 0 &&
|
||||
(i > ibottom || bottom_z - m_layers[i]->bottom_z() < region_config.bottom_shell_thickness - EPSILON);
|
||||
-- i) {
|
||||
at_least_one_bottom_projected = true;
|
||||
const DiscoverVerticalShellsCacheEntry &cache = cache_top_botom_regions[i];
|
||||
combine_holes(cache.holes);
|
||||
combine_shells(cache.bottom_surfaces);
|
||||
}
|
||||
|
||||
if (!at_least_one_bottom_projected && i >= 0) {
|
||||
Polygons anchor_area = intersection(expand(cache_top_botom_regions[idx_layer].bottom_surfaces,
|
||||
layerm->flow(frExternalPerimeter).scaled_spacing()),
|
||||
to_polygons(m_layers[i]->lslices));
|
||||
combine_shells(anchor_area);
|
||||
}
|
||||
|
||||
if (one_more_layer_below_top_bottom_surfaces)
|
||||
if (i >= 0 &&
|
||||
(i > ibottom || bottom_z - m_layers[i]->print_z < region_config.bottom_shell_thickness - EPSILON))
|
||||
combine_holes(cache_top_botom_regions[i].holes);
|
||||
}
|
||||
#ifdef SLIC3R_DEBUG_SLICE_PROCESSING
|
||||
{
|
||||
Slic3r::SVG svg(debug_out_path("discover_vertical_shells-perimeters-before-union-%d.svg", debug_idx), get_extents(shell));
|
||||
svg.draw(shell);
|
||||
svg.draw_outline(shell, "black", scale_(0.05));
|
||||
svg.Close();
|
||||
}
|
||||
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
|
||||
#if 0
|
||||
{
|
||||
PROFILE_BLOCK(discover_vertical_shells_region_layer_shell_);
|
||||
// shell = union_(shell, true);
|
||||
shell = union_(shell, false);
|
||||
}
|
||||
// shell = union_(shell, true);
|
||||
shell = union_(shell, false);
|
||||
#endif
|
||||
#ifdef SLIC3R_DEBUG_SLICE_PROCESSING
|
||||
shell_ex = union_safety_offset_ex(shell);
|
||||
shell_ex = union_safety_offset_ex(shell);
|
||||
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
|
||||
}
|
||||
|
||||
//if (shell.empty())
|
||||
// continue;
|
||||
|
@ -1808,31 +1820,59 @@ void PrintObject::discover_vertical_shells()
|
|||
#ifdef SLIC3R_DEBUG_SLICE_PROCESSING
|
||||
Polygons shell_before = shell;
|
||||
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
|
||||
#if 1
|
||||
// Intentionally inflate a bit more than how much the region has been shrunk,
|
||||
// so there will be some overlap between this solid infill and the other infill regions (mainly the sparse infill).
|
||||
shell = opening(union_(shell), 0.5f * min_perimeter_infill_spacing, 0.8f * min_perimeter_infill_spacing, ClipperLib::jtSquare);
|
||||
if (shell.empty())
|
||||
continue;
|
||||
#else
|
||||
// Ensure each region is at least 3x infill line width wide, so it could be filled in.
|
||||
// float margin = float(infill_line_spacing) * 3.f;
|
||||
float margin = float(infill_line_spacing) * 1.5f;
|
||||
// we use a higher miterLimit here to handle areas with acute angles
|
||||
// in those cases, the default miterLimit would cut the corner and we'd
|
||||
// get a triangle in $too_narrow; if we grow it below then the shell
|
||||
// would have a different shape from the external surface and we'd still
|
||||
// have the same angle, so the next shell would be grown even more and so on.
|
||||
Polygons too_narrow = diff(shell, opening(shell, margin, ClipperLib::jtMiter, 5.), true);
|
||||
if (! too_narrow.empty()) {
|
||||
// grow the collapsing parts and add the extra area to the neighbor layer
|
||||
// as well as to our original surfaces so that we support this
|
||||
// additional area in the next shell too
|
||||
// make sure our grown surfaces don't exceed the fill area
|
||||
polygons_append(shell, intersection(offset(too_narrow, margin), polygonsInternal));
|
||||
ExPolygons regularized_shell;
|
||||
{
|
||||
// Open to remove (filter out) regions narrower than a bit less than an infill extrusion line width.
|
||||
// Such narrow regions are difficult to fill in with a gap fill algorithm (or Arachne), however they are most likely
|
||||
// not needed for print stability / quality.
|
||||
const float narrow_ensure_vertical_wall_thickness_region_radius = 0.5f * 0.65f * min_perimeter_infill_spacing;
|
||||
// Then close gaps narrower than 1.2 * line width, such gaps are difficult to fill in with sparse infill,
|
||||
// thus they will be merged into the solid infill.
|
||||
const float narrow_sparse_infill_region_radius = 0.5f * 1.2f * min_perimeter_infill_spacing;
|
||||
// Finally expand the infill a bit to remove tiny gaps between solid infill and the other regions.
|
||||
const float tiny_overlap_radius = 0.2f * min_perimeter_infill_spacing;
|
||||
regularized_shell = shrink_ex(offset2_ex(union_ex(shell),
|
||||
// Open to remove (filter out) regions narrower than an infill extrusion line width.
|
||||
-narrow_ensure_vertical_wall_thickness_region_radius,
|
||||
// Then close gaps narrower than 1.2 * line width, such gaps are difficult to fill in with sparse infill.
|
||||
narrow_ensure_vertical_wall_thickness_region_radius + narrow_sparse_infill_region_radius, ClipperLib::jtSquare),
|
||||
// Finally expand the infill a bit to remove tiny gaps between solid infill and the other regions.
|
||||
narrow_sparse_infill_region_radius - tiny_overlap_radius, ClipperLib::jtSquare);
|
||||
|
||||
Polygons object_volume;
|
||||
Polygons internal_volume;
|
||||
{
|
||||
Polygons shrinked_bottom_slice = idx_layer > 0 ? to_polygons(m_layers[idx_layer - 1]->lslices) : Polygons{};
|
||||
Polygons shrinked_upper_slice = (idx_layer + 1) < m_layers.size() ?
|
||||
to_polygons(m_layers[idx_layer + 1]->lslices) :
|
||||
Polygons{};
|
||||
object_volume = intersection(shrinked_bottom_slice, shrinked_upper_slice);
|
||||
internal_volume = closing(polygonsInternal, SCALED_EPSILON);
|
||||
}
|
||||
|
||||
// The regularization operation may cause scattered tiny drops on the smooth parts of the model; filter them out.
// A region is removed if it satisfies both of the following conditions:
// 1. its area is very small,
//    OR its area is quite small and it is fully wrapped inside the model (not visible);
//    the in-model condition is there because of small sloping surfaces, e.g. the top of the hull of the Benchy,
// 2. it does not fully cover an internal polygon.
//    This is there mainly for very thin parts, where the solid layers would otherwise be missing if the part area is quite small.
|
||||
regularized_shell.erase(std::remove_if(regularized_shell.begin(), regularized_shell.end(),
|
||||
[&internal_volume, &min_perimeter_infill_spacing,
|
||||
&object_volume](const ExPolygon &p) {
|
||||
return (p.area() < min_perimeter_infill_spacing * scaled(1.5) ||
|
||||
(p.area() < min_perimeter_infill_spacing * scaled(8.0) &&
|
||||
diff(to_polygons(p), object_volume).empty())) &&
|
||||
diff(internal_volume,
|
||||
expand(to_polygons(p), min_perimeter_infill_spacing))
|
||||
.size() >= internal_volume.size();
|
||||
}),
|
||||
regularized_shell.end());
|
||||
}
|
||||
#endif
|
||||
ExPolygons new_internal_solid = intersection_ex(polygonsInternal, shell);
|
||||
if (regularized_shell.empty())
|
||||
continue;
|
||||
|
||||
ExPolygons new_internal_solid = intersection_ex(polygonsInternal, regularized_shell);
|
||||
#ifdef SLIC3R_DEBUG_SLICE_PROCESSING
|
||||
{
|
||||
Slic3r::SVG svg(debug_out_path("discover_vertical_shells-regularized-%d.svg", debug_idx), get_extents(shell_before));
|
||||
|
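The regularization in the hunk above is the core of the ported algorithm: an opening pass drops shell slivers too narrow to fill, a closing pass absorbs gaps that sparse infill could not cover, and a final shrink leaves only a small overlap with the neighbouring infill regions. A hedged restatement as a small helper, using the same radii the diff derives from min_perimeter_infill_spacing (the helper name is hypothetical; offset2_ex, shrink_ex and union_ex are the ClipperUtils wrappers already used in the code above):

static ExPolygons regularize_shell_sketch(const Polygons &shell, float min_perimeter_infill_spacing)
{
    const float open_radius    = 0.5f * 0.65f * min_perimeter_infill_spacing; // filter out regions narrower than ~0.65 line widths
    const float close_radius   = 0.5f * 1.2f  * min_perimeter_infill_spacing; // close gaps narrower than ~1.2 line widths
    const float overlap_radius = 0.2f * min_perimeter_infill_spacing;         // keep a small overlap with the other fill regions
    return shrink_ex(
        offset2_ex(union_ex(shell),
                   -open_radius,                // erode: narrow slivers disappear ...
                   open_radius + close_radius,  // ... then dilate past the original outline to close small gaps
                   ClipperLib::jtSquare),
        close_radius - overlap_radius, ClipperLib::jtSquare);
}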
@ -1847,8 +1887,8 @@ void PrintObject::discover_vertical_shells()
|
|||
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
|
||||
|
||||
// Trim the internal & internalvoid by the shell.
|
||||
Slic3r::ExPolygons new_internal = diff_ex(layerm->fill_surfaces.filter_by_type(stInternal), shell);
|
||||
Slic3r::ExPolygons new_internal_void = diff_ex(layerm->fill_surfaces.filter_by_type(stInternalVoid), shell);
|
||||
Slic3r::ExPolygons new_internal = diff_ex(layerm->fill_surfaces.filter_by_type(stInternal), regularized_shell);
|
||||
Slic3r::ExPolygons new_internal_void = diff_ex(layerm->fill_surfaces.filter_by_type(stInternalVoid), regularized_shell);
|
||||
|
||||
#ifdef SLIC3R_DEBUG_SLICE_PROCESSING
|
||||
{
|
||||
|
@ -1871,16 +1911,12 @@ void PrintObject::discover_vertical_shells()
|
|||
#ifdef SLIC3R_DEBUG_SLICE_PROCESSING
|
||||
for (size_t idx_layer = 0; idx_layer < m_layers.size(); ++idx_layer) {
|
||||
LayerRegion *layerm = m_layers[idx_layer]->get_region(region_id);
|
||||
layerm->export_region_slices_to_svg_debug("4_discover_vertical_shells-final");
|
||||
layerm->export_region_fill_surfaces_to_svg_debug("4_discover_vertical_shells-final");
|
||||
layerm->export_region_slices_to_svg_debug("3_discover_vertical_shells-final");
|
||||
layerm->export_region_fill_surfaces_to_svg_debug("3_discover_vertical_shells-final");
|
||||
}
|
||||
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
|
||||
} // for each region
|
||||
|
||||
// Write the profiler measurements to file
|
||||
// PROFILE_UPDATE();
|
||||
// PROFILE_OUTPUT(debug_out_path("discover_vertical_shells-profile.txt").c_str());
|
||||
}
|
||||
} // void PrintObject::discover_vertical_shells()
|
||||
|
||||
// #define DEBUG_BRIDGE_OVER_INFILL
|
||||
#ifdef DEBUG_BRIDGE_OVER_INFILL
|
||||
|
@ -3071,179 +3107,20 @@ void PrintObject::discover_horizontal_shells()
|
|||
surface.surface_type = type;
|
||||
}
|
||||
#endif
|
||||
|
||||
// If ensure_vertical_shell_thickness, then the rest has already been performed by discover_vertical_shells().
|
||||
if (region_config.ensure_vertical_shell_thickness.value)
|
||||
continue;
|
||||
|
||||
coordf_t print_z = layer->print_z;
|
||||
coordf_t bottom_z = layer->bottom_z();
|
||||
for (size_t idx_surface_type = 0; idx_surface_type < 3; ++ idx_surface_type) {
|
||||
m_print->throw_if_canceled();
|
||||
SurfaceType type = (idx_surface_type == 0) ? stTop : (idx_surface_type == 1) ? stBottom : stBottomBridge;
|
||||
int num_solid_layers = (type == stTop) ? region_config.top_shell_layers.value : region_config.bottom_shell_layers.value;
|
||||
if (num_solid_layers == 0)
|
||||
continue;
|
||||
// Find slices of current type for current layer.
|
||||
// Use slices instead of fill_surfaces, because they also include the perimeter area,
|
||||
// which needs to be propagated in shells; we need to grow slices like we did for
|
||||
// fill_surfaces though. Using both ungrown slices and grown fill_surfaces will
|
||||
// not work in some situations, as there won't be any grown region in the perimeter
|
||||
// area (this was seen in a model where the top layer had one extra perimeter, thus
|
||||
// its fill_surfaces were thinner than the lower layer's infill), however it's the best
|
||||
// solution so far. Growing the external slices by EXTERNAL_INFILL_MARGIN will put
|
||||
// too much solid infill inside nearly-vertical slopes.
|
||||
|
||||
// Surfaces including the area of perimeters. Everything, that is visible from the top / bottom
|
||||
// (not covered by a layer above / below).
|
||||
// This does not contain the areas covered by perimeters!
|
||||
Polygons solid;
|
||||
for (const Surface &surface : layerm->slices.surfaces)
|
||||
if (surface.surface_type == type)
|
||||
polygons_append(solid, to_polygons(surface.expolygon));
|
||||
// Infill areas (slices without the perimeters).
|
||||
for (const Surface &surface : layerm->fill_surfaces.surfaces)
|
||||
if (surface.surface_type == type)
|
||||
polygons_append(solid, to_polygons(surface.expolygon));
|
||||
if (solid.empty())
|
||||
continue;
|
||||
// Slic3r::debugf "Layer %d has %s surfaces\n", $i, ($type == stTop) ? 'top' : 'bottom';
|
||||
|
||||
// Scatter top / bottom regions to other layers. Scattering process is inherently serial, it is difficult to parallelize without locking.
|
||||
for (int n = (type == stTop) ? int(i) - 1 : int(i) + 1;
|
||||
(type == stTop) ?
|
||||
(n >= 0 && (int(i) - n < num_solid_layers ||
|
||||
print_z - m_layers[n]->print_z < region_config.top_shell_thickness.value - EPSILON)) :
|
||||
(n < int(m_layers.size()) && (n - int(i) < num_solid_layers ||
|
||||
m_layers[n]->bottom_z() - bottom_z < region_config.bottom_shell_thickness.value - EPSILON));
|
||||
(type == stTop) ? -- n : ++ n)
|
||||
{
|
||||
// Slic3r::debugf " looking for neighbors on layer %d...\n", $n;
|
||||
// Reference to the lower layer of a TOP surface, or an upper layer of a BOTTOM surface.
|
||||
LayerRegion *neighbor_layerm = m_layers[n]->regions()[region_id];
|
||||
|
||||
// find intersection between neighbor and current layer's surfaces
|
||||
// intersections have contours and holes
|
||||
// we update $solid so that we limit the next neighbor layer to the areas that were
|
||||
// found on this one - in other words, solid shells on one layer (for a given external surface)
|
||||
// are always a subset of the shells found on the previous shell layer
|
||||
// this approach allows for DWIM in hollow sloping vases, where we want bottom
|
||||
// shells to be generated in the base but not in the walls (where there are many
|
||||
// narrow bottom surfaces): reassigning $solid will consider the 'shadow' of the
|
||||
// upper perimeter as an obstacle and shell will not be propagated to more upper layers
|
||||
//FIXME How does it work for stInternalBRIDGE? This is set for sparse infill. Likely this does not work.
|
||||
Polygons new_internal_solid;
|
||||
{
|
||||
Polygons internal;
|
||||
for (const Surface &surface : neighbor_layerm->fill_surfaces.surfaces)
|
||||
if (surface.surface_type == stInternal || surface.surface_type == stInternalSolid)
|
||||
polygons_append(internal, to_polygons(surface.expolygon));
|
||||
new_internal_solid = intersection(solid, internal, ApplySafetyOffset::Yes);
|
||||
}
|
||||
if (new_internal_solid.empty()) {
|
||||
// No internal solid needed on this layer. In order to decide whether to continue
|
||||
// searching on the next neighbor (thus enforcing the configured number of solid
|
||||
// layers, use different strategies according to configured infill density:
|
||||
if (region_config.sparse_infill_density.value == 0) {
|
||||
// If user expects the object to be void (for example a hollow sloping vase),
|
||||
// don't continue the search. In this case, we only generate the external solid
|
||||
// shell if the object would otherwise show a hole (gap between perimeters of
|
||||
// the two layers), and internal solid shells are a subset of the shells found
|
||||
// on each previous layer.
|
||||
goto EXTERNAL;
|
||||
} else {
|
||||
// If we have internal infill, we can generate internal solid shells freely.
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (region_config.sparse_infill_density.value == 0) {
|
||||
// if we're printing a hollow object we discard any solid shell thinner
|
||||
// than a perimeter width, since it's probably just crossing a sloping wall
|
||||
// and it's not wanted in a hollow print even if it would make sense when
|
||||
// obeying the solid shell count option strictly (DWIM!)
|
||||
float margin = float(neighbor_layerm->flow(frExternalPerimeter).scaled_width());
|
||||
Polygons too_narrow = diff(
|
||||
new_internal_solid,
|
||||
opening(new_internal_solid, margin, margin + ClipperSafetyOffset, jtMiter, 5));
|
||||
// Trim the regularized region by the original region.
|
||||
if (! too_narrow.empty())
|
||||
new_internal_solid = solid = diff(new_internal_solid, too_narrow);
|
||||
}
|
||||
|
||||
// make sure the new internal solid is wide enough, as it might get collapsed
|
||||
// when spacing is added in Fill.pm
|
||||
{
|
||||
//FIXME Vojtech: Disable this and you will be sorry.
|
||||
float margin = 3.f * layerm->flow(frSolidInfill).scaled_width(); // require at least this size
|
||||
// we use a higher miterLimit here to handle areas with acute angles
|
||||
// in those cases, the default miterLimit would cut the corner and we'd
|
||||
// get a triangle in $too_narrow; if we grow it below then the shell
|
||||
// would have a different shape from the external surface and we'd still
|
||||
// have the same angle, so the next shell would be grown even more and so on.
|
||||
Polygons too_narrow = diff(
|
||||
new_internal_solid,
|
||||
opening(new_internal_solid, margin, margin + ClipperSafetyOffset, ClipperLib::jtMiter, 5));
|
||||
if (! too_narrow.empty()) {
|
||||
// grow the collapsing parts and add the extra area to the neighbor layer
// as well as to our original surfaces so that we support this
// additional area in the next shell too
// make sure our grown surfaces don't exceed the fill area
Polygons internal;
for (const Surface &surface : neighbor_layerm->fill_surfaces.surfaces)
if (surface.is_internal() && !surface.is_bridge())
polygons_append(internal, to_polygons(surface.expolygon));
polygons_append(new_internal_solid,
intersection(
expand(too_narrow, +margin),
// Discard bridges as they are grown for anchoring and we can't
// remove such anchors. (This may happen when a bridge is being
// anchored onto a wall where little space remains after the bridge
// is grown, and that little space is an internal solid shell so
// it triggers this too_narrow logic.)
internal));
// solid = new_internal_solid;
}
}

// internal-solid are the union of the existing internal-solid surfaces
// and new ones
SurfaceCollection backup = std::move(neighbor_layerm->fill_surfaces);
polygons_append(new_internal_solid, to_polygons(backup.filter_by_type(stInternalSolid)));
ExPolygons internal_solid = union_ex(new_internal_solid);
// assign new internal-solid surfaces to layer
neighbor_layerm->fill_surfaces.set(internal_solid, stInternalSolid);
// subtract intersections from layer surfaces to get resulting internal surfaces
Polygons polygons_internal = to_polygons(std::move(internal_solid));
ExPolygons internal = diff_ex(backup.filter_by_type(stInternal), polygons_internal, ApplySafetyOffset::Yes);
// assign resulting internal surfaces to layer
neighbor_layerm->fill_surfaces.append(internal, stInternal);
polygons_append(polygons_internal, to_polygons(std::move(internal)));
// assign top and bottom surfaces to layer
backup.keep_types({ stTop, stBottom, stBottomBridge });
std::vector<SurfacesPtr> top_bottom_groups;
backup.group(&top_bottom_groups);
for (SurfacesPtr &group : top_bottom_groups)
neighbor_layerm->fill_surfaces.append(
diff_ex(group, polygons_internal),
// Use an existing surface as a template, it carries the bridge angle etc.
*group.front());
}
EXTERNAL:;
} // foreach type (stTop, stBottom, stBottomBridge)
// The rest has already been performed by discover_vertical_shells().
} // for each layer
} // for each region
} // for each region

#ifdef SLIC3R_DEBUG_SLICE_PROCESSING
for (size_t region_id = 0; region_id < this->num_printing_regions(); ++ region_id) {
for (size_t region_id = 0; region_id < this->num_printing_regions(); ++region_id) {
for (const Layer *layer : m_layers) {
const LayerRegion *layerm = layer->m_regions[region_id];
layerm->export_region_slices_to_svg_debug("5_discover_horizontal_shells");
layerm->export_region_fill_surfaces_to_svg_debug("5_discover_horizontal_shells");
} // for each layer
} // for each region
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
}
} // for each region
#endif /* SLIC3R_DEBUG_SLICE_PROCESSING */
} // void PrintObject::discover_horizontal_shells()

// combine fill surfaces across layers to honor the "infill every N layers" option
// Idempotence of this method is guaranteed by the fact that we don't remove things from
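
A note on the hunk above: the "grow the collapsing parts" block follows a grow-and-clamp pattern. The too-narrow solid regions are dilated by the anchoring margin, then intersected with the neighbor layer's internal, non-bridge fill area, so the grown shell can never spill outside printable infill. The following is a minimal sketch of that pattern only, assuming libslic3r's ClipperUtils/Surface helpers (expand, intersection, polygons_append, to_polygons) as used in the diff; it is an illustration, not the ported code itself.

// Sketch only: Polygons, Surface, Surfaces, expand() and intersection() are
// assumed from libslic3r (ClipperUtils.hpp / Surface.hpp); exact signatures
// may differ between versions.
#include "libslic3r/ClipperUtils.hpp"
#include "libslic3r/Surface.hpp"

namespace Slic3r {

// Dilate the collapsing regions by 'margin', but keep only the part that
// lies inside the neighbor's internal (non-bridge) fill area.
static Polygons grow_and_clamp_to_internal(const Polygons &too_narrow,
                                           const Surfaces &neighbor_fill_surfaces,
                                           float           margin)
{
    Polygons internal;
    for (const Surface &surface : neighbor_fill_surfaces)
        if (surface.is_internal() && !surface.is_bridge())
            polygons_append(internal, to_polygons(surface.expolygon));
    // Clamp the grown area so it never exceeds the fill area.
    return intersection(expand(too_narrow, margin), internal);
}

} // namespace Slic3r
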
@@ -1,3 +1,8 @@
///|/ Copyright (c) Prusa Research 2016 - 2023 Oleksandra Iushchenko @YuSanka, Vojtěch Bubník @bubnikv, Filip Sykala @Jony01, David Kocík @kocikdav, Enrico Turri @enricoturri1966, Tomáš Mészáros @tamasmeszaros, Lukáš Matěna @lukasmatena, Vojtěch Král @vojtechkral
///|/ Copyright (c) 2019 Sijmen Schoon
///|/
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
///|/
#ifndef slic3r_Utils_hpp_
#define slic3r_Utils_hpp_
@@ -287,6 +292,16 @@ template <class VectorType> void reserve_power_of_2(VectorType &vector, size_t n
vector.reserve(next_highest_power_of_2(n));
}

template<class VectorType> void reserve_more(VectorType &vector, size_t n)
{
vector.reserve(vector.size() + n);
}

template<class VectorType> void reserve_more_power_of_2(VectorType &vector, size_t n)
{
vector.reserve(next_highest_power_of_2(vector.size() + n));
}

template<typename INDEX_TYPE>
inline INDEX_TYPE prev_idx_modulo(INDEX_TYPE idx, const INDEX_TYPE count)
{
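
For reference, reserve_more added above grows a vector's capacity by the number of elements about to be appended, so a batch of push_back calls triggers at most one reallocation; reserve_more_power_of_2 does the same but rounds the new capacity up to a power of two. A small self-contained usage sketch follows; the helper is copied from the hunk above, while main() and the element counts are hypothetical.

#include <cstddef>
#include <vector>

// Copy of the helper added above so the example compiles on its own.
template<class VectorType> void reserve_more(VectorType &vector, size_t n)
{
    vector.reserve(vector.size() + n);
}

int main()
{
    std::vector<int> values = {1, 2, 3};
    // About to append 100 elements: reserve once instead of growing repeatedly.
    reserve_more(values, 100);
    for (int i = 0; i < 100; ++i)
        values.push_back(i);
    return 0;
}
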
@@ -1,3 +1,7 @@
///|/ Copyright (c) Prusa Research 2019 - 2023 Lukáš Hejl @hejllukas, Vojtěch Bubník @bubnikv, Lukáš Matěna @lukasmatena, Oleksandra Iushchenko @YuSanka, Pavel Mikuš @Godrak, Tomáš Mészáros @tamasmeszaros
///|/
///|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
///|/
// #include "libslic3r/GCodeSender.hpp"
#include "ConfigManipulation.hpp"
#include "I18N.hpp"
@@ -273,7 +277,6 @@ void ConfigManipulation::update_print_fff_config(DynamicPrintConfig* config, con
sparse_infill_density == 0 &&
! config->opt_bool("enable_support") &&
config->opt_int("enforce_support_layers") == 0 &&
config->opt_bool("ensure_vertical_shell_thickness") &&
! config->opt_bool("detect_thin_wall") &&
config->opt_enum<TimelapseType>("timelapse_type") == TimelapseType::tlTraditional))
{
@@ -300,7 +303,6 @@ void ConfigManipulation::update_print_fff_config(DynamicPrintConfig* config, con
new_conf.set_key_value("sparse_infill_density", new ConfigOptionPercent(0));
|
||||
new_conf.set_key_value("enable_support", new ConfigOptionBool(false));
|
||||
new_conf.set_key_value("enforce_support_layers", new ConfigOptionInt(0));
|
||||
new_conf.set_key_value("ensure_vertical_shell_thickness", new ConfigOptionBool(true));
|
||||
new_conf.set_key_value("detect_thin_wall", new ConfigOptionBool(false));
|
||||
new_conf.set_key_value("timelapse_type", new ConfigOptionEnum<TimelapseType>(tlTraditional));
|
||||
sparse_infill_density = 0;
|
||||
|
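
The two hunks above drop ensure_vertical_shell_thickness from the spiral-vase prerequisite check and from the automatic fix-up, since the option is removed by this port. The surrounding logic is a plain check-then-apply on a DynamicPrintConfig; the sketch below shows that idea only, using the opt_bool/set_key_value calls visible in the diff. The helper name, the apply() call and the choice of keys are illustrative assumptions, not the ConfigManipulation implementation.

// Sketch only: DynamicPrintConfig, ConfigOptionBool, opt_bool() and
// set_key_value() are libslic3r API as used above; this helper is hypothetical.
#include "libslic3r/PrintConfig.hpp"

using namespace Slic3r;

static void force_spiral_vase_prerequisites(DynamicPrintConfig &config)
{
    DynamicPrintConfig new_conf = config;
    // Spiral vase needs thin wall detection and supports disabled.
    if (config.opt_bool("detect_thin_wall"))
        new_conf.set_key_value("detect_thin_wall", new ConfigOptionBool(false));
    if (config.opt_bool("enable_support"))
        new_conf.set_key_value("enable_support", new ConfigOptionBool(false));
    config.apply(new_conf);
}
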
@@ -520,7 +522,7 @@ void ConfigManipulation::toggle_print_fff_options(DynamicPrintConfig *config, co
}

bool have_perimeters = config->opt_int("wall_loops") > 0;
for (auto el : { "extra_perimeters_on_overhangs", "ensure_vertical_shell_thickness", "detect_thin_wall", "detect_overhang_wall",
for (auto el : { "extra_perimeters_on_overhangs", "detect_thin_wall", "detect_overhang_wall",
"seam_position", "staggered_inner_seams", "wall_infill_order", "outer_wall_line_width",
"inner_wall_speed", "outer_wall_speed", "small_perimeter_speed", "small_perimeter_threshold" })
toggle_field(el, have_perimeters);
@@ -1939,8 +1939,6 @@ void TabPrint::build()
optgroup->append_single_option_line("bridge_angle");
|
||||
optgroup->append_single_option_line("minimum_sparse_infill_area");
|
||||
optgroup->append_single_option_line("infill_combination");
|
||||
optgroup->append_single_option_line("detect_narrow_internal_solid_infill");
|
||||
optgroup->append_single_option_line("ensure_vertical_shell_thickness");
|
||||
|
||||
page = add_options_page(L("Speed"), "empty");
|
||||
optgroup = page->new_optgroup(L("Initial layer speed"), L"param_speed_first", 15);
|
||||
|
|