App: move from float to double for accuracy parameter, add a virtual method to determine a default accuracy
This commit is contained in:
@@ -235,7 +235,7 @@ void PointKernel::save(std::ostream& out) const
 void PointKernel::getPoints(std::vector<Base::Vector3d> &Points,
                             std::vector<Base::Vector3d> &/*Normals*/,
-                            float /*Accuracy*/, uint16_t /*flags*/) const
+                            double /*Accuracy*/, uint16_t /*flags*/) const
 {
     unsigned long ctpoints = _Points.size();
     Points.reserve(ctpoints);
||||
@@ -92,7 +92,7 @@ public:
     void getPoints(std::vector<Base::Vector3d> &Points,
                    std::vector<Base::Vector3d> &Normals,
-                   float Accuracy, uint16_t flags=0) const override;
+                   double Accuracy, uint16_t flags=0) const override;
     void transformGeometry(const Base::Matrix4D &rclMat) override;
     Base::BoundBox3d getBoundBox()const override;
Reference in New Issue
Block a user