Merge branch 'master' into DraftModifiersAppPart

This commit is contained in:
Yorik van Havre
2020-01-15 10:07:28 +01:00
committed by GitHub
31 changed files with 4302 additions and 2350 deletions

View File

@@ -94,16 +94,16 @@
</widget>
</item>
<item row="0" column="1">
<widget class="Gui::QuantitySpinBox" name="xPos"/>
<widget class="Gui::QuantitySpinBox" name="xPos" native="true"/>
</item>
<item row="2" column="1">
<widget class="Gui::QuantitySpinBox" name="yPos"/>
<widget class="Gui::QuantitySpinBox" name="yPos" native="true"/>
</item>
<item row="3" column="1">
<widget class="Gui::QuantitySpinBox" name="zPos"/>
<widget class="Gui::QuantitySpinBox" name="zPos" native="true"/>
</item>
<item row="4" column="1">
<widget class="Gui::QuantitySpinBox" name="axialPos"/>
<widget class="Gui::QuantitySpinBox" name="axialPos" native="true"/>
</item>
<item row="5" column="1">
<widget class="QPushButton" name="applyAxial">
@@ -179,13 +179,13 @@
</widget>
</item>
<item row="0" column="1">
<widget class="Gui::QuantitySpinBox" name="xCnt"/>
<widget class="Gui::QuantitySpinBox" name="xCnt" native="true"/>
</item>
<item row="1" column="1">
<widget class="Gui::QuantitySpinBox" name="yCnt"/>
<widget class="Gui::QuantitySpinBox" name="yCnt" native="true"/>
</item>
<item row="2" column="1">
<widget class="Gui::QuantitySpinBox" name="zCnt"/>
<widget class="Gui::QuantitySpinBox" name="zCnt" native="true"/>
</item>
<item row="3" column="0" colspan="2">
<widget class="QCheckBox" name="centerOfMass">
@@ -228,7 +228,7 @@
<item row="1" column="0">
<widget class="QStackedWidget" name="stackedWidget">
<property name="currentIndex">
<number>0</number>
<number>1</number>
</property>
<widget class="QWidget" name="page">
<layout class="QGridLayout">
@@ -298,7 +298,7 @@
</widget>
</item>
<item row="1" column="1">
<widget class="Gui::QuantitySpinBox" name="angle"/>
<widget class="Gui::QuantitySpinBox" name="angle" native="true"/>
</item>
</layout>
</item>
@@ -347,7 +347,7 @@
</sizepolicy>
</property>
<property name="text">
<string>Pitch:</string>
<string>Around y-axis:</string>
</property>
</widget>
</item>
@@ -360,7 +360,7 @@
</sizepolicy>
</property>
<property name="text">
<string>Yaw:</string>
<string>Around z-axis:</string>
</property>
</widget>
</item>
@@ -373,26 +373,26 @@
</sizepolicy>
</property>
<property name="text">
<string>Roll:</string>
<string>Around x-axis:</string>
</property>
</widget>
</item>
<item row="0" column="1">
<widget class="Gui::QuantitySpinBox" name="rollAngle">
<widget class="Gui::QuantitySpinBox" name="rollAngle" native="true">
<property name="toolTip">
<string>Rotation around the x-axis</string>
</property>
</widget>
</item>
<item row="1" column="1">
<widget class="Gui::QuantitySpinBox" name="pitchAngle">
<widget class="Gui::QuantitySpinBox" name="pitchAngle" native="true">
<property name="toolTip">
<string>Rotation around the y-axis</string>
</property>
</widget>
</item>
<item row="2" column="1">
<widget class="Gui::QuantitySpinBox" name="yawAngle">
<widget class="Gui::QuantitySpinBox" name="yawAngle" native="true">
<property name="toolTip">
<string>Rotation around the z-axis</string>
</property>
@@ -414,7 +414,7 @@
</item>
<item>
<property name="text">
<string>Euler angles (XY'Z'')</string>
<string>Euler angles (xy'z'')</string>
</property>
</item>
</widget>

View File

@@ -31,6 +31,7 @@
# include <QFile>
# include <QImage>
# include <QImageWriter>
# include <QPainter>
#endif
#if !defined(FC_OS_MACOSX)
@@ -405,7 +406,7 @@ void SoQtOffscreenRenderer::init(const SbViewportRegion & vpr,
else {
this->renderaction = new SoGLRenderAction(vpr);
this->renderaction->setCacheContext(SoGLCacheContextElement::getUniqueCacheContext());
this->renderaction->setTransparencyType(SoGLRenderAction::SORTED_OBJECT_BLEND);
this->renderaction->setTransparencyType(SoGLRenderAction::SORTED_OBJECT_SORTED_TRIANGLE_BLEND);
}
this->didallocation = glrenderaction ? false : true;
@@ -818,9 +819,6 @@ SoQtOffscreenRenderer::writeToImage (QImage& img) const
}
}
}
else if (PRIVATE(this)->backgroundcolor[3] == 1.0) {
img = img.convertToFormat(QImage::Format_RGB32);
}
}
/*!

View File

@@ -1608,6 +1608,7 @@ void View3DInventorViewer::savePicture(int w, int h, int s, const QColor& bg, QI
renderer.setViewportRegion(vp);
renderer.getGLRenderAction()->setSmoothing(true);
renderer.getGLRenderAction()->setNumPasses(s);
renderer.getGLRenderAction()->setTransparencyType(SoGLRenderAction::SORTED_OBJECT_SORTED_TRIANGLE_BLEND);
if (bgColor.isValid())
renderer.setBackgroundColor(SbColor(bgColor.redF(), bgColor.greenF(), bgColor.blueF()));
if (!renderer.render(root))
@@ -1616,6 +1617,15 @@ void View3DInventorViewer::savePicture(int w, int h, int s, const QColor& bg, QI
renderer.writeToImage(img);
root->unref();
}
if (!bgColor.isValid() || bgColor.alphaF() == 1.0) {
QImage image(img.width(), img.height(), QImage::Format_RGB32);
QPainter painter(&image);
painter.fillRect(image.rect(), Qt::black);
painter.drawImage(0, 0, img);
painter.end();
img = image;
}
}
catch (...) {
root->unref();
@@ -2048,6 +2058,13 @@ QImage View3DInventorViewer::grabFramebuffer()
renderToFramebuffer(&fbo);
res = fbo.toImage(false);
QImage image(res.width(), res.height(), QImage::Format_RGB32);
QPainter painter(&image);
painter.fillRect(image.rect(),Qt::black);
painter.drawImage(0, 0, res);
painter.end();
res = image;
}
#endif
@@ -2111,8 +2128,14 @@ void View3DInventorViewer::imageFromFramebuffer(int width, int height, int sampl
bits++;
}
}
} else if (alpha == 255)
img = img.convertToFormat(QImage::Format_RGB32);
} else if (alpha == 255) {
QImage image(img.width(), img.height(), QImage::Format_RGB32);
QPainter painter(&image);
painter.fillRect(image.rect(),Qt::black);
painter.drawImage(0, 0, img);
painter.end();
img = image;
}
}
void View3DInventorViewer::renderToFramebuffer(QtGLFramebufferObject* fbo)

View File

@@ -52,6 +52,7 @@ SET(Draft_utilities
draftutils/utils.py
draftutils/gui_utils.py
draftutils/todo.py
draftutils/translate.py
)
SET(Draft_objects

View File

@@ -46,22 +46,23 @@ __url__ = "https://www.freecadweb.org"
"""The Draft module offers a range of tools to create and manipulate basic 2D objects"""
import FreeCAD, math, sys, os, DraftVecUtils, WorkingPlane
import draftutils.translate
from FreeCAD import Vector
from PySide.QtCore import QT_TRANSLATE_NOOP
if FreeCAD.GuiUp:
import FreeCADGui, Draft_rc
from PySide import QtCore
from PySide.QtCore import QT_TRANSLATE_NOOP
gui = True
#from DraftGui import translate
else:
def QT_TRANSLATE_NOOP(ctxt,txt):
return txt
# def QT_TRANSLATE_NOOP(ctxt,txt):
# return txt
#print("FreeCAD Gui not present. Draft module will have some features disabled.")
gui = False
def translate(ctx,txt):
return txt
translate = draftutils.translate.translate
#---------------------------------------------------------------------------
# Backwards compatibility

View File

@@ -49,48 +49,9 @@ import DraftVecUtils
from PySide import QtCore, QtGui, QtSvg
try:
_encoding = QtGui.QApplication.UnicodeUTF8 if six.PY2 else None
def translate(context, text, utf8_decode=True):
"""convenience function for Qt translator
context: str
context is typically a class name (e.g., "MyDialog")
text: str
text which gets translated
utf8_decode: bool [False]
if set to true utf8 encoded unicode will be returned. This option does not have influence
on python3 as for python3 we are returning utf-8 encoded unicode by default!
"""
if six.PY3:
return QtGui.QApplication.translate(context, text, None)
elif utf8_decode:
return QtGui.QApplication.translate(context, text, None, _encoding)
else:
return QtGui.QApplication.translate(context, text, None, _encoding).encode("utf8")
import draftutils.translate
translate = draftutils.translate.translate
except AttributeError:
def translate(context, text, utf8_decode=False):
"""convenience function for Qt translator
context: str
context is typically a class name (e.g., "MyDialog")
text: str
text which gets translated
utf8_decode: bool [False]
if set to true utf8 encoded unicode will be returned. This option does not have influence
on python3 as for python3 we are returning utf-8 encoded unicode by default!
"""
if six.PY3:
return QtGui.QApplication.translate(context, text, None)
elif QtCore.qVersion() > "4":
if utf8_decode:
return QtGui.QApplication.translate(context, text, None)
else:
return QtGui.QApplication.translate(context, text, None).encode("utf8")
else:
if utf8_decode:
return QtGui.QApplication.translate(context, text, None, _encoding)
else:
return QtGui.QApplication.translate(context, text, None, _encoding).encode("utf8")
import draftutils.utils
utf8_decode = draftutils.utils.utf8_decode

View File

@@ -0,0 +1,200 @@
"""This module provides translate functions for the Draft Workbench.
This module contains auxiliary functions to translate strings
using the QtGui module.
"""
## @package translate
# \ingroup DRAFT
# \brief This module provides translate functions for the Draft Workbench
# ***************************************************************************
# * (c) 2009 Yorik van Havre <yorik@uncreated.net> *
# * (c) 2019 Eliud Cabrera Castillo <e.cabrera-castillo@tum.de> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
from PySide import QtCore
from PySide import QtGui
import six
Qtranslate = QtGui.QApplication.translate
# This property only exists in Qt4, which is normally paired
# with Python 2.
# But if Python 2 is used with Qt5 (rare),
# this assignment will fail.
try:
_encoding = QtGui.QApplication.UnicodeUTF8
except AttributeError:
_encoding = None
def translate(context, text, utf8_decode=False):
"""Convenience function for the Qt translate function.
It wraps around `QtGui.QApplication.translate`,
which is the same as `QtCore.QCoreApplication.translate`.
Parameters
----------
context : str
In C++ it is typically a class name.
But it can also be any other string to categorize the translation,
for example, the name of a workbench, tool, or function
that is being translated. Usually it will be the name
of the workbench.
text : str
Text that will be translated. It could be a single word,
a full sentence, paragraph, or multiple paragraphs with new lines.
Usually the last endline character '\\\\n'
that finishes the string doesn't need to be included
for translation.
utf8_decode : bool
It defaults to `False`.
This must be set to `True` to indicate that the `text`
is an `'utf8'` encoded string, so it should be returned as such.
This option is ignored when using Python 3
as with Python 3 all strings are `'utf8'` by default.
Returns
-------
str
A unicode string returned by `QtGui.QApplication.translate`.
If `utf8_decode` is `True`, the resulting string will be encoded
in `'utf8'`, and a `bytes` object will be returned.
::
Qtranslate = QtGui.QApplication.translate
return Qtranslate(context, text, None).encode("utf8")
Unicode strings
---------------
Whether it is Qt4 or Qt5, the `translate` function
always returns a unicode string.
The difference is how it handles the input.
Reference: https://pyside.github.io/docs/pyside/PySide/QtCore/
In Qt4 the translate function has a 4th parameter to define the encoding
of the input string.
>>> QtCore.QCoreApplication.translate(context, text, None, UnicodeUTF8)
>>> QtGui.QApplication.translate(context, text, None, UnicodeUTF8)
Reference: https://doc.qt.io/qtforpython/PySide2/QtCore
In Qt5 the strings are always assumed unicode, so the 4th parameter
is for a different use, and it is not used.
>>> QtCore.QCoreApplication.translate(context, text, None)
>>> QtGui.QApplication.translate(context, text, None)
"""
# Python 3 and Qt5
# The text is a utf8 string, and since it is Qt5
# the translate function doesn't use the 4th parameter
if six.PY3:
return Qtranslate(context, text, None)
# Python 2
elif QtCore.qVersion() > "4":
# Python 2 and Qt5
if utf8_decode:
# The text is a utf8 string, and since it is Qt5
# the translate function doesn't use the 4th parameter
return Qtranslate(context, text, None)
else:
# The text is not a unicode string, and since it is Qt5
# the translate function doesn't use the 4th parameter.
# Therefore the output string needs to be encoded manually
# as utf8 bytes before returning.
return Qtranslate(context, text, None).encode("utf8")
else:
# Python 2 and Qt4
if utf8_decode:
# The text is a utf8 string, and since it is Qt4
# the translate function uses the 4th parameter
# to handle the input encoding.
return Qtranslate(context, text, None, _encoding)
else:
# The text is not a unicode string, and since it is Qt4
# the translate function uses the 4th parameter
# to handle the encoding.
# In this case, the `encoding` is `None`, therefore
# the output string needs to be encoded manually
# as utf8 bytes before returning.
return Qtranslate(context, text, None, _encoding).encode("utf8")
# Original code no longer used. It is listed here for reference
# to show how the different pairings Py2/Qt4, Py3/Qt5, Py2/Qt5, Py3/Qt4
# were handled in the past.
# If there is a problem with the code above, this code can be made active
# again, and the code above can be commented out.
#
# =============================================================================
# try:
# _encoding = QtGui.QApplication.UnicodeUTF8 if six.PY2 else None
# def translate(context, text, utf8_decode=True):
# """convenience function for Qt translator
# context: str
# context is typically a class name (e.g., "MyDialog")
# text: str
# text which gets translated
# utf8_decode: bool [False]
# if set to true utf8 encoded unicode will be returned.
# This option does not have influence
# on python3 as for python3 we are returning utf-8 encoded
# unicode by default!
# """
# if six.PY3:
# return Qtranslate(context, text, None)
# elif utf8_decode:
# return Qtranslate(context, text, None, _encoding)
# else:
# return Qtranslate(context, text, None, _encoding).encode("utf8")
#
# except AttributeError:
# def translate(context, text, utf8_decode=False):
# """convenience function for Qt translator
# context: str
# context is typically a class name (e.g., "MyDialog")
# text: str
# text which gets translated
# utf8_decode: bool [False]
# if set to true utf8 encoded unicode will be returned.
# This option does not have influence
# on python3 as for python3 we are returning utf-8 encoded
# unicode by default!
# """
# if six.PY3:
# return Qtranslate(context, text, None)
# elif QtCore.qVersion() > "4":
# if utf8_decode:
# return Qtranslate(context, text, None)
# else:
# return Qtranslate(context, text, None).encode("utf8")
# else:
# if utf8_decode:
# return Qtranslate(context, text, None, _encoding)
# else:
# return Qtranslate(context, text, None,
# _encoding).encode("utf8")
# =============================================================================

View File

@@ -72,6 +72,11 @@ def importFrd(
from . import importToolsFem
import ObjectsFem
if analysis:
doc = analysis.Document
else:
doc = FreeCAD.ActiveDocument
m = read_frd_result(filename)
result_mesh_object = None
res_obj = None
@@ -79,7 +84,7 @@ def importFrd(
if len(m["Nodes"]) > 0:
mesh = importToolsFem.make_femmesh(m)
result_mesh_object = ObjectsFem.makeMeshResult(
FreeCAD.ActiveDocument,
doc,
"ResultMesh"
)
result_mesh_object.FemMesh = mesh
@@ -114,7 +119,7 @@ def importFrd(
.format(result_name_prefix)
)
res_obj = ObjectsFem.makeResultMechanical(FreeCAD.ActiveDocument, results_name)
res_obj = ObjectsFem.makeResultMechanical(doc, results_name)
res_obj.Mesh = result_mesh_object
res_obj = importToolsFem.fill_femresult_mechanical(res_obj, result_set)
if analysis:
@@ -186,7 +191,7 @@ def importFrd(
results_name = ("{}_Results".format(result_name_prefix))
else:
results_name = ("Results".format(result_name_prefix))
res_obj = ObjectsFem.makeResultMechanical(FreeCAD.ActiveDocument, results_name)
res_obj = ObjectsFem.makeResultMechanical(doc, results_name)
res_obj.Mesh = result_mesh_object
# TODO, node numbers in result obj could be set
if analysis:
@@ -196,7 +201,7 @@ def importFrd(
if analysis:
import FemGui
FemGui.setActiveAnalysis(analysis)
FreeCAD.ActiveDocument.recompute()
doc.recompute()
else:
Console.PrintError(

View File

@@ -20,7 +20,9 @@ SOURCE_GROUP("" FILES ${OpenSCAD_SRCS})
SET(ply_SRCS
ply/lex.py
ply/README
ply/README.md
ply/ANNOUNCE
ply/CHANGES
ply/yacc.py
ply/__init__.py
)

View File

@@ -1,11 +1,11 @@
February 17, 2011
February 15, 2018
Announcing : PLY-3.4 (Python Lex-Yacc)
Announcing : PLY-3.11 (Python Lex-Yacc)
http://www.dabeaz.com/ply
I'm pleased to announce PLY-3.4--a pure Python implementation of the
common parsing tools lex and yacc. PLY-3.4 is a minor bug fix
I'm pleased to announce PLY-3.11--a pure Python implementation of the
common parsing tools lex and yacc. PLY-3.11 is a minor bug fix
release. It supports both Python 2 and Python 3.
If you are new to PLY, here are a few highlights:

View File

@@ -1,3 +1,317 @@
Version 3.11
---------------------
02/15/18 beazley
Fixed some minor bugs related to re flags and token order.
Github pull requests #151 and #153.
02/15/18 beazley
Added a set_lexpos() method to grammar symbols. Github issue #148.
04/13/17 beazley
Mostly minor bug fixes and small code cleanups.
Version 3.10
---------------------
01/31/17: beazley
Changed grammar signature computation to not involve hashing
functions. Parts are just combined into a big string.
10/07/16: beazley
Fixed Issue #101: Incorrect shift-reduce conflict resolution with
precedence specifier.
PLY was incorrectly resolving shift-reduce conflicts in certain
cases. For example, in the example/calc/calc.py example, you
could trigger it doing this:
calc > -3 - 4
1 (correct answer should be -7)
calc >
Issue and suggested patch contributed by https://github.com/RomaVis
Version 3.9
---------------------
08/30/16: beazley
Exposed the parser state number as the parser.state attribute
in productions and error functions. For example:
def p_somerule(p):
'''
rule : A B C
'''
print('State:', p.parser.state)
May address issue #65 (publish current state in error callback).
08/30/16: beazley
Fixed Issue #88. Python3 compatibility with ply/cpp.
08/30/16: beazley
Fixed Issue #93. Ply can crash if SyntaxError is raised inside
a production. Not actually sure if the original implementation
worked as documented at all. Yacc has been modified to follow
the spec as outlined in the CHANGES noted for 11/27/07 below.
08/30/16: beazley
Fixed Issue #97. Failure with code validation when the original
source files aren't present. Validation step now ignores
the missing file.
08/30/16: beazley
Minor fixes to version numbers.
Version 3.8
---------------------
10/02/15: beazley
Fixed issues related to Python 3.5. Patch contributed by Barry Warsaw.
Version 3.7
---------------------
08/25/15: beazley
Fixed problems when reading table files from pickled data.
05/07/15: beazley
Fixed regression in handling of table modules if specified as module
objects. See https://github.com/dabeaz/ply/issues/63
Version 3.6
---------------------
04/25/15: beazley
If PLY is unable to create the 'parser.out' or 'parsetab.py' files due
to permission issues, it now just issues a warning message and
continues to operate. This could happen if a module using PLY
is installed in a funny way where tables have to be regenerated, but
for whatever reason, the user doesn't have write permission on
the directory where PLY wants to put them.
04/24/15: beazley
Fixed some issues related to use of packages and table file
modules. Just to emphasize, PLY now generates its special
files such as 'parsetab.py' and 'lextab.py' in the *SAME*
directory as the source file that uses lex() and yacc().
If for some reason, you want to change the name of the table
module, use the tabmodule and lextab options:
lexer = lex.lex(lextab='spamlextab')
parser = yacc.yacc(tabmodule='spamparsetab')
If you specify a simple name as shown, the module will still be
created in the same directory as the file invoking lex() or yacc().
If you want the table files to be placed into a different package,
then give a fully qualified package name. For example:
lexer = lex.lex(lextab='pkgname.files.lextab')
parser = yacc.yacc(tabmodule='pkgname.files.parsetab')
For this to work, 'pkgname.files' must already exist as a valid
Python package (i.e., the directories must already exist and be
set up with the proper __init__.py files, etc.).
Version 3.5
---------------------
04/21/15: beazley
Added support for defaulted_states in the parser. A
defaulted_state is a state where the only legal action is a
reduction of a single grammar rule across all valid input
tokens. For such states, the rule is reduced and the
reading of the next lookahead token is delayed until it is
actually needed at a later point in time.
This delay in consuming the next lookahead token is a
potentially important feature in advanced parsing
applications that require tight interaction between the
lexer and the parser. For example, a grammar rule can
modify the lexer state upon reduction and have such changes
take effect before the next input token is read.
*** POTENTIAL INCOMPATIBILITY ***
One potential danger of defaulted_states is that syntax
errors might be deferred to a later point of processing
than where they were detected in past versions of PLY.
Thus, it's possible that your error handling could change
slightly on the same inputs. defaulted_states do not change
the overall parsing of the input (i.e., the same grammar is
accepted).
If for some reason, you need to disable defaulted states,
you can do this:
parser = yacc.yacc()
parser.defaulted_states = {}
04/21/15: beazley
Fixed debug logging in the parser. It wasn't properly reporting goto states
on grammar rule reductions.
04/20/15: beazley
Added actions to be defined to character literals (Issue #32). For example:
literals = [ '{', '}' ]
def t_lbrace(t):
r'\{'
# Some action
t.type = '{'
return t
def t_rbrace(t):
r'\}'
# Some action
t.type = '}'
return t
04/19/15: beazley
Import of the 'parsetab.py' file is now constrained to only consider the
directory specified by the outputdir argument to yacc(). If not supplied,
the import will only consider the directory in which the grammar is defined.
This should greatly reduce problems with the wrong parsetab.py file being
imported by mistake. For example, if it's found somewhere else on the path
by accident.
*** POTENTIAL INCOMPATIBILITY *** It's possible that this might break some
packaging/deployment setup if PLY was instructed to place its parsetab.py
in a different location. You'll have to specify a proper outputdir= argument
to yacc() to fix this if needed.
04/19/15: beazley
Changed default output directory to be the same as that in which the
yacc grammar is defined. If your grammar is in a file 'calc.py',
then the parsetab.py and parser.out files should be generated in the
same directory as that file. The destination directory can be changed
using the outputdir= argument to yacc().
04/19/15: beazley
Changed the parsetab.py file signature slightly so that the parsetab won't
regenerate if created on a different major version of Python (ie., a
parsetab created on Python 2 will work with Python 3).
04/16/15: beazley
Fixed Issue #44 call_errorfunc() should return the result of errorfunc()
04/16/15: beazley
Support for versions of Python <2.7 is officially dropped. PLY may work, but
the unit tests requires Python 2.7 or newer.
04/16/15: beazley
Fixed bug related to calling yacc(start=...). PLY wasn't regenerating the
table file correctly for this case.
04/16/15: beazley
Added skipped tests for PyPy and Java. Related to use of Python's -O option.
05/29/13: beazley
Added filter to make unit tests pass under 'python -3'.
Reported by Neil Muller.
05/29/13: beazley
Fixed CPP_INTEGER regex in ply/cpp.py (Issue 21).
Reported by @vbraun.
05/29/13: beazley
Fixed yacc validation bugs when from __future__ import unicode_literals
is being used. Reported by Kenn Knowles.
05/29/13: beazley
Added support for Travis-CI. Contributed by Kenn Knowles.
05/29/13: beazley
Added a .gitignore file. Suggested by Kenn Knowles.
05/29/13: beazley
Fixed validation problems for source files that include a
different source code encoding specifier. Fix relies on
the inspect module. Should work on Python 2.6 and newer.
Not sure about older versions of Python.
Contributed by Michael Droettboom
05/21/13: beazley
Fixed unit tests for yacc to eliminate random failures due to dict hash value
randomization in Python 3.3
Reported by Arfrever
10/15/12: beazley
Fixed comment whitespace processing bugs in ply/cpp.py.
Reported by Alexei Pososin.
10/15/12: beazley
Fixed token names in ply/ctokens.py to match rule names.
Reported by Alexei Pososin.
04/26/12: beazley
Changes to functions available in panic mode error recover. In previous versions
of PLY, the following global functions were available for use in the p_error() rule:
yacc.errok() # Reset error state
yacc.token() # Get the next token
yacc.restart() # Reset the parsing stack
The use of global variables was problematic for code involving multiple parsers
and frankly was a poor design overall. These functions have been moved to methods
of the parser instance created by the yacc() function. You should write code like
this:
def p_error(p):
...
parser.errok()
parser = yacc.yacc()
*** POTENTIAL INCOMPATIBILITY *** The original global functions now issue a
DeprecationWarning.
04/19/12: beazley
Fixed some problems with line and position tracking and the use of error
symbols. If you have a grammar rule involving an error rule like this:
def p_assignment_bad(p):
'''assignment : location EQUALS error SEMI'''
...
You can now do line and position tracking on the error token. For example:
def p_assignment_bad(p):
'''assignment : location EQUALS error SEMI'''
start_line = p.lineno(3)
start_pos = p.lexpos(3)
If the tracking=True option is supplied to parse(), you can additionally get
spans:
def p_assignment_bad(p):
'''assignment : location EQUALS error SEMI'''
start_line, end_line = p.linespan(3)
start_pos, end_pos = p.lexspan(3)
Note that error handling is still a hairy thing in PLY. This won't work
unless your lexer is providing accurate information. Please report bugs.
Suggested by a bug reported by Davis Herring.
04/18/12: beazley
Change to doc string handling in lex module. Regex patterns are now first
pulled from a function's .regex attribute. If that doesn't exist, then
.doc is checked as a fallback. The @TOKEN decorator now sets the .regex
attribute of a function instead of its doc string.
Changed suggested by Kristoffer Ellersgaard Koch.
04/18/12: beazley
Fixed issue #1: Fixed _tabversion. It should use __tabversion__ instead of __version__
Reported by Daniele Tricoli
04/18/12: beazley
Fixed issue #8: Literals empty list causes IndexError
Reported by Walter Nissen.
04/18/12: beazley
Fixed issue #12: Typo in code snippet in documentation
Reported by florianschanda.
04/18/12: beazley
Fixed issue #10: Correctly escape t_XOREQUAL pattern.
Reported by Andy Kittner.
Version 3.4
---------------------
02/17/11: beazley
@@ -134,7 +448,7 @@ Version 3.0
to specify a logging object for the 'parser.out' output.
01/09/09: beazley
*HUGE* refactoring of the ply.yacc() implementation. The high-level
*HUGE* refactoring of the ply.yacc() implementation. The high-level
user interface is backwards compatible, but the internals are completely
reorganized into classes. No more global variables. The internals
are also more extensible. For example, you can use the classes to
@@ -174,7 +488,7 @@ Version 3.0
directly. Preparation for Python 3.0 support.
11/04/08: beazley
Fixed a bug with referring to symbols on the parsing stack using negative
Fixed a bug with referring to symbols on the parsing stack using negative
indices.
05/29/08: beazley

View File

@@ -1,22 +0,0 @@
Metadata-Version: 1.0
Name: ply
Version: 3.4
Summary: Python Lex & Yacc
Home-page: http://www.dabeaz.com/ply/
Author: David Beazley
Author-email: dave@dabeaz.com
License: BSD
Description:
PLY is yet another implementation of lex and yacc for Python. Some notable
features include the fact that it's implemented entirely in Python and it
uses LALR(1) parsing which is efficient and well suited for larger grammars.
PLY provides most of the standard lex/yacc features including support for empty
productions, precedence rules, error recovery, and support for ambiguous grammars.
PLY is extremely easy to use and provides very extensive error checking.
It is compatible with both Python 2 and Python 3.
Platform: UNKNOWN
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 2

View File

@@ -1,6 +1,8 @@
PLY (Python Lex-Yacc) Version 3.4
# PLY (Python Lex-Yacc) Version 3.11
Copyright (C) 2001-2011,
[![Build Status](https://travis-ci.org/dabeaz/ply.svg?branch=master)](https://travis-ci.org/dabeaz/ply)
Copyright (C) 2001-2018
David M. Beazley (Dabeaz LLC)
All rights reserved.
@@ -96,7 +98,7 @@ A simple example is found at the end of this document
Requirements
============
PLY requires the use of Python 2.2 or greater. However, you should
PLY requires the use of Python 2.6 or greater. However, you should
use the latest Python release if possible. It should work on just
about any platform. PLY has been tested with both CPython and Jython.
It also seems to work with IronPython.
@@ -112,7 +114,11 @@ book "Compilers : Principles, Techniques, and Tools" by Aho, Sethi, and
Ullman. The topics found in "Lex & Yacc" by Levine, Mason, and Brown
may also be useful.
A Google group for PLY can be found at
The GitHub page for PLY can be found at:
https://github.com/dabeaz/ply
An old and relatively inactive discussion group for PLY is found at:
http://groups.google.com/group/ply-hack
@@ -130,7 +136,7 @@ and testing a revised LALR(1) implementation for PLY-2.0.
Special Note for PLY-3.0
========================
PLY-3.0 the first PLY release to support Python 3. However, backwards
compatibility with Python 2.2 is still preserved. PLY provides dual
compatibility with Python 2.6 is still preserved. PLY provides dual
Python 2/3 compatibility by restricting its implementation to a common
subset of basic language features. You should not convert PLY using
2to3--it is not necessary and may in fact break the implementation.
@@ -141,109 +147,109 @@ Example
Here is a simple example showing a PLY implementation of a calculator
with variables.
# -----------------------------------------------------------------------------
# calc.py
#
# A simple calculator with variables.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# calc.py
#
# A simple calculator with variables.
# -----------------------------------------------------------------------------
tokens = (
'NAME','NUMBER',
'PLUS','MINUS','TIMES','DIVIDE','EQUALS',
'LPAREN','RPAREN',
)
tokens = (
'NAME','NUMBER',
'PLUS','MINUS','TIMES','DIVIDE','EQUALS',
'LPAREN','RPAREN',
)
# Tokens
# Tokens
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_EQUALS = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_EQUALS = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
def t_NUMBER(t):
r'\d+'
t.value = int(t.value)
return t
def t_NUMBER(t):
r'\d+'
t.value = int(t.value)
return t
# Ignored characters
t_ignore = " \t"
# Ignored characters
t_ignore = " \t"
def t_newline(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexer
import ply.lex as lex
lex.lex()
def t_newline(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
# Precedence rules for the arithmetic operators
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# dictionary of names (for storing variables)
names = { }
# Build the lexer
import ply.lex as lex
lex.lex()
def p_statement_assign(p):
'statement : NAME EQUALS expression'
names[p[1]] = p[3]
# Precedence rules for the arithmetic operators
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
def p_statement_expr(p):
'statement : expression'
print(p[1])
# dictionary of names (for storing variables)
names = { }
def p_expression_binop(p):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if p[2] == '+' : p[0] = p[1] + p[3]
elif p[2] == '-': p[0] = p[1] - p[3]
elif p[2] == '*': p[0] = p[1] * p[3]
elif p[2] == '/': p[0] = p[1] / p[3]
def p_statement_assign(p):
'statement : NAME EQUALS expression'
names[p[1]] = p[3]
def p_expression_uminus(p):
'expression : MINUS expression %prec UMINUS'
p[0] = -p[2]
def p_statement_expr(p):
'statement : expression'
print(p[1])
def p_expression_group(p):
'expression : LPAREN expression RPAREN'
p[0] = p[2]
def p_expression_binop(p):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if p[2] == '+' : p[0] = p[1] + p[3]
elif p[2] == '-': p[0] = p[1] - p[3]
elif p[2] == '*': p[0] = p[1] * p[3]
elif p[2] == '/': p[0] = p[1] / p[3]
def p_expression_number(p):
'expression : NUMBER'
p[0] = p[1]
def p_expression_uminus(p):
'expression : MINUS expression %prec UMINUS'
p[0] = -p[2]
def p_expression_name(p):
'expression : NAME'
try:
p[0] = names[p[1]]
except LookupError:
print("Undefined name '%s'" % p[1])
p[0] = 0
def p_expression_group(p):
'expression : LPAREN expression RPAREN'
p[0] = p[2]
def p_error(p):
print("Syntax error at '%s'" % p.value)
def p_expression_number(p):
'expression : NUMBER'
p[0] = p[1]
import ply.yacc as yacc
yacc.yacc()
def p_expression_name(p):
'expression : NAME'
try:
p[0] = names[p[1]]
except LookupError:
print("Undefined name '%s'" % p[1])
p[0] = 0
while 1:
try:
s = raw_input('calc > ') # use input() on Python 3
except EOFError:
break
yacc.parse(s)
def p_error(p):
print("Syntax error at '%s'" % p.value)
import ply.yacc as yacc
yacc.yacc()
while True:
try:
s = raw_input('calc > ') # use input() on Python 3
except EOFError:
break
yacc.parse(s)
Bug Reports and Patches
@@ -252,12 +258,10 @@ My goal with PLY is to simply have a decent lex/yacc implementation
for Python. As a general rule, I don't spend huge amounts of time
working on it unless I receive very specific bug reports and/or
patches to fix problems. I also try to incorporate submitted feature
requests and enhancements into each new version. To contact me about
bugs and/or new features, please send email to dave@dabeaz.com.
In addition there is a Google group for discussing PLY related issues at
http://groups.google.com/group/ply-hack
requests and enhancements into each new version. Please visit the PLY
github page at https://github.com/dabeaz/ply to submit issues and pull
requests. To contact me about bugs and/or new features, please send
email to dave@dabeaz.com.
-- Dave

View File

@@ -1,16 +0,0 @@
The PLY to-do list:
1. Finish writing the C Preprocessor module. Started in the
file ply/cpp.py
2. Create and document libraries of useful tokens.
3. Expand the examples/yply tool that parses bison/yacc
files.
4. Think of various diabolical things to do with the
new yacc internals. For example, it is now possible
to specify grammars using completely different schemes
than the reflection approach used by PLY.

View File

@@ -1,36 +1,5 @@
# -----------------------------------------------------------------------------
# ply: __init__.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
# PLY package
# Author: David Beazley (dave@dabeaz.com)
__version__ = '3.11'
__all__ = ['lex','yacc']

914
src/Mod/OpenSCAD/ply/cpp.py Normal file
View File

@@ -0,0 +1,914 @@
# -----------------------------------------------------------------------------
# cpp.py
#
# Author: David Beazley (http://www.dabeaz.com)
# Copyright (C) 2007
# All rights reserved
#
# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
from __future__ import generators
import sys
# Some Python 3 compatibility shims
if sys.version_info.major < 3:
STRING_TYPES = (str, unicode)
else:
STRING_TYPES = str
xrange = range
# -----------------------------------------------------------------------------
# Default preprocessor lexer definitions. These tokens are enough to get
# a basic preprocessor working. Other modules may import these if they want
# -----------------------------------------------------------------------------
tokens = (
'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
)
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
# Whitespace
def t_CPP_WS(t):
    r'\s+'
    # PLY uses the docstring above as this token's regex; do not alter it.
    # Any newlines inside the whitespace run advance the line counter.
    newline_count = t.value.count("\n")
    t.lexer.lineno += newline_count
    return t
t_CPP_POUND = r'\#'
t_CPP_DPOUND = r'\#\#'
# Identifier
t_CPP_ID = r'[A-Za-z_][\w_]*'
# Integer literal
# Integer literal: decimal or 0x/0X hexadecimal, with optional u/l suffixes.
# PLY uses the function docstring as the token's regular expression, so the
# pattern below must stay exactly as written.
def CPP_INTEGER(t):
    r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
    # The lexeme is kept verbatim (no int() conversion): the preprocessor
    # re-emits source text and must preserve suffixes and hex spelling.
    return t

# Registered under the conventional t_<TOKEN> name expected by ply.lex.
t_CPP_INTEGER = CPP_INTEGER
# Floating literal
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
def t_CPP_STRING(t):
    r'\"([^\\\n]|(\\(.|\n)))*?\"'
    # Docstring is the token regex (PLY convention); keep it byte-identical.
    # String literals may contain escaped newlines, so track them.
    embedded_newlines = t.value.count("\n")
    t.lexer.lineno += embedded_newlines
    return t
# Character constant 'c' or L'c'
def t_CPP_CHAR(t):
    r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
    # Docstring is the token regex (PLY convention); keep it byte-identical.
    # A character constant can span lines via escaped newlines.
    embedded_newlines = t.value.count("\n")
    t.lexer.lineno += embedded_newlines
    return t
# Comment
def t_CPP_COMMENT1(t):
    r'(/\*(.|\n)*?\*/)'
    # Block (/* */) comment.  Re-emit it as equivalent whitespace so line
    # numbering downstream stays correct: one '\n' per line spanned, or a
    # single space for a one-line comment.
    line_breaks = t.value.count("\n")
    t.lexer.lineno += line_breaks
    t.type = 'CPP_WS'
    if line_breaks:
        t.value = '\n' * line_breaks
    else:
        t.value = ' '
    return t
# Line comment
def t_CPP_COMMENT2(t):
    r'(//.*?(\n|$))'
    # Line (//) comment: collapse to a single newline.  The lexer's line
    # counter is not touched here; the emitted '\n' token carries the break.
    t.type = 'CPP_WS'
    t.value = '\n'
    return t
def t_error(t):
    # Fallback for characters not matched by any rule above: emit the single
    # offending character as its own token (type == value == the character)
    # and advance the lexer past it instead of aborting.
    ch = t.value[0]
    t.type = ch
    t.value = ch
    t.lexer.skip(1)
    return t
import re
import copy
import time
import os.path
# -----------------------------------------------------------------------------
# trigraph()
#
# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
# ??/ \
# ??' ^
# ??( [
# ??) ]
# ??! |
# ??< {
# ??> }
# ??- ~
# -----------------------------------------------------------------------------
# Trigraph handling.  The pattern matches any "??x" sequence where x is one
# of the nine ISO C trigraph trailer characters; the table maps that trailer
# to its replacement.
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
_trigraph_rep = {
    '=': '#', '/': '\\', "'": '^',
    '(': '[', ')': ']', '!': '|',
    '<': '{', '>': '}', '-': '~',
}

def trigraph(input):
    """Return *input* with every ISO trigraph sequence (e.g. ``??=`` -> ``#``)
    replaced by the character it denotes."""
    def _sub(match):
        return _trigraph_rep[match.group()[-1]]
    return _trigraph_pat.sub(_sub, input)
# ------------------------------------------------------------------
# Macro object
#
# This object holds information about preprocessor macros
#
# .name - Macro name (string)
# .value - Macro value (a list of tokens)
# .arglist - List of argument names
# .variadic - Boolean indicating whether or not variadic macro
# .vararg - Name of the variadic parameter
#
# When a macro is created, the macro replacement token sequence is
# pre-scanned and used to create patch lists that are later used
# during macro expansion
# ------------------------------------------------------------------
class Macro(object):
    """Record describing one preprocessor macro.

    Attributes:
        name     -- macro name (string)
        value    -- replacement token list
        arglist  -- argument names, or None for an object-like macro
        variadic -- True if the macro was declared with "..."
        vararg   -- name of the variadic parameter (set only if variadic)
        source   -- file the macro was defined in (filled in later)
    """
    def __init__(self, name, value, arglist=None, variadic=False):
        self.name = name
        self.value = value
        self.arglist = arglist
        self.variadic = variadic
        self.source = None
        if variadic:
            # The last declared argument collects the variable arguments.
            self.vararg = arglist[-1]
# ------------------------------------------------------------------
# Preprocessor object
#
# Object representing a preprocessor. Contains macro definitions,
# include directories, and other information
# ------------------------------------------------------------------
class Preprocessor(object):
def __init__(self,lexer=None):
    """Create a preprocessor bound to *lexer*.

    NOTE(review): when lexer is None this falls back to ``lex.lexer``,
    but ``ply.lex`` is only imported in this module's __main__ block --
    the default appears to rely on the caller's environment; confirm.
    """
    if lexer is None:
        lexer = lex.lexer
    self.lexer = lexer
    self.macros = { }          # macro name -> Macro instance
    self.path = []             # permanent include search path
    self.temp_path = []        # transient path (dir of file being included)
    # Probe the lexer for selected tokens
    self.lexprobe()
    # Predefine __DATE__/__TIME__ from the current local time.
    tm = time.localtime()
    self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
    self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
    self.parser = None         # active parsegen() generator, set by parse()
# -----------------------------------------------------------------------------
# tokenize()
#
# Utility function. Given a string of text, tokenize into a list of tokens
# -----------------------------------------------------------------------------
def tokenize(self, text):
    """Run *text* through the attached lexer and return the token list."""
    self.lexer.input(text)
    result = []
    tok = self.lexer.token()
    while tok:
        result.append(tok)
        tok = self.lexer.token()
    return result
# ---------------------------------------------------------------------
# error()
#
# Report a preprocessor error/warning of some kind
# ----------------------------------------------------------------------
def error(self, file, line, msg):
    """Report a preprocessor error/warning to stdout.

    Subclasses may override this to redirect or collect diagnostics.
    """
    message = "%s:%d %s" % (file, line, msg)
    print(message)
# ----------------------------------------------------------------------
# lexprobe()
#
# This method probes the preprocessor lexer object to discover
# the token types of symbols that are important to the preprocessor.
# If this works right, the preprocessor will simply "work"
# with any suitable lexer regardless of how tokens have been named.
# ----------------------------------------------------------------------
def lexprobe(self):
    """Probe the attached lexer to learn its token type names.

    Feeds small sample inputs through the lexer and records which token
    type each produces, so the preprocessor works with any suitably
    configured lexer regardless of how its tokens are named.  Results are
    stored on self.t_ID, self.t_INTEGER / self.t_INTEGER_TYPE,
    self.t_STRING, self.t_SPACE, self.t_NEWLINE and self.t_WS.
    Failures are reported by printing (no exception is raised).
    """
    # Determine the token type for identifiers
    self.lexer.input("identifier")
    tok = self.lexer.token()
    if not tok or tok.value != "identifier":
        print("Couldn't determine identifier type")
    else:
        self.t_ID = tok.type
    # Determine the token type for integers
    self.lexer.input("12345")
    tok = self.lexer.token()
    if not tok or int(tok.value) != 12345:
        print("Couldn't determine integer type")
    else:
        self.t_INTEGER = tok.type
        # Remember the Python type of integer token values so synthetic
        # integer tokens (e.g. from evalexpr) match the lexer's convention.
        self.t_INTEGER_TYPE = type(tok.value)
    # Determine the token type for strings enclosed in double quotes
    self.lexer.input("\"filename\"")
    tok = self.lexer.token()
    if not tok or tok.value != "\"filename\"":
        print("Couldn't determine string type")
    else:
        self.t_STRING = tok.type
    # Determine the token type for whitespace--if any
    self.lexer.input(" ")
    tok = self.lexer.token()
    if not tok or tok.value != " ":
        self.t_SPACE = None
    else:
        self.t_SPACE = tok.type
    # Determine the token type for newlines
    self.lexer.input("\n")
    tok = self.lexer.token()
    if not tok or tok.value != "\n":
        self.t_NEWLINE = None
        print("Couldn't determine token for newlines")
    else:
        self.t_NEWLINE = tok.type
    # t_WS groups both whitespace types for quick membership tests.
    self.t_WS = (self.t_SPACE, self.t_NEWLINE)
    # Check for other characters used by the preprocessor
    chars = [ '<','>','#','##','\\','(',')',',','.']
    for c in chars:
        self.lexer.input(c)
        tok = self.lexer.token()
        if not tok or tok.value != c:
            print("Unable to lex '%s' required for preprocessor" % c)
# ----------------------------------------------------------------------
# add_path()
#
# Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self,path):
    """Append *path* to the permanent include-file search path."""
    self.path.append(path)
# ----------------------------------------------------------------------
# group_lines()
#
# Given an input string, this function splits it into lines. Trailing whitespace
# is removed. Any line ending with \ is grouped with the next line. This
# function forms the lowest level of the preprocessor---grouping text into
# a line-by-line format.
# ----------------------------------------------------------------------
def group_lines(self,input):
    """Split *input* into logical lines (generator of token lists).

    Trailing whitespace is stripped, and any physical line ending in a
    backslash is folded into the next one.  Each yielded item is the
    token list for one logical line, including its terminating newline
    token (if present).
    """
    lex = self.lexer.clone()
    lines = [x.rstrip() for x in input.splitlines()]
    for i in xrange(len(lines)):
        j = i+1
        # Fold continuation lines into line i; absorbed lines are blanked
        # (not removed) so overall line numbering is preserved.
        while lines[i].endswith('\\') and (j < len(lines)):
            lines[i] = lines[i][:-1]+lines[j]
            lines[j] = ""
            j += 1
    input = "\n".join(lines)
    lex.input(input)
    lex.lineno = 1
    current_line = []
    while True:
        tok = lex.token()
        if not tok:
            break
        current_line.append(tok)
        # A whitespace token containing '\n' ends the logical line.
        if tok.type in self.t_WS and '\n' in tok.value:
            yield current_line
            current_line = []
    if current_line:
        yield current_line
# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
def tokenstrip(self, tokens):
    """Remove leading and trailing whitespace tokens in place.

    Returns the same (mutated) list for convenience.
    """
    lead = 0
    while lead < len(tokens) and tokens[lead].type in self.t_WS:
        lead += 1
    del tokens[:lead]
    trail = len(tokens)
    while trail > 0 and tokens[trail - 1].type in self.t_WS:
        trail -= 1
    del tokens[trail:]
    return tokens
# ----------------------------------------------------------------------
# collect_args()
#
# Collects comma separated arguments from a list of tokens. The arguments
# must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions)
# where tokencount is the number of tokens consumed, args is a list of arguments,
# and positions is a list of integers containing the starting index of each
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
# from each argument.
#
# This function properly handles nested parenthesis and commas---these do not
# define new arguments.
# ----------------------------------------------------------------------
def collect_args(self,tokenlist):
    """Collect comma-separated macro arguments from *tokenlist*.

    Returns a tuple (tokencount, args, positions): tokencount is the
    number of tokens consumed (0 on error), args is a list of argument
    token lists (each whitespace-stripped), and positions holds the
    starting index of each argument.  Commas inside nested parentheses
    do not split arguments.  Errors are reported via self.error().
    """
    args = []
    positions = []
    current_arg = []
    nesting = 1
    tokenlen = len(tokenlist)
    # Search for the opening '('.
    i = 0
    while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
        i += 1
    if (i < tokenlen) and (tokenlist[i].value == '('):
        positions.append(i+1)
    else:
        self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
        return 0, [], []
    i += 1
    while i < tokenlen:
        t = tokenlist[i]
        if t.value == '(':
            current_arg.append(t)
            nesting += 1
        elif t.value == ')':
            nesting -= 1
            if nesting == 0:
                # Closing paren of the argument list: flush the final
                # argument (if non-empty) and report tokens consumed.
                if current_arg:
                    args.append(self.tokenstrip(current_arg))
                    positions.append(i)
                return i+1,args,positions
            current_arg.append(t)
        elif t.value == ',' and nesting == 1:
            # Top-level comma separates arguments.
            args.append(self.tokenstrip(current_arg))
            positions.append(i+1)
            current_arg = []
        else:
            current_arg.append(t)
        i += 1
    # Missing end argument
    self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
    return 0, [],[]
# ----------------------------------------------------------------------
# macro_prescan()
#
# Examine the macro value (token sequence) and identify patch points
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
def macro_prescan(self,macro):
macro.patch = [] # Standard macro arguments
macro.str_patch = [] # String conversion expansion
macro.var_comma_patch = [] # Variadic macro comma patch
i = 0
while i < len(macro.value):
if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
argnum = macro.arglist.index(macro.value[i].value)
# Conversion of argument to a string
if i > 0 and macro.value[i-1].value == '#':
macro.value[i] = copy.copy(macro.value[i])
macro.value[i].type = self.t_STRING
del macro.value[i-1]
macro.str_patch.append((argnum,i-1))
continue
# Concatenation
elif (i > 0 and macro.value[i-1].value == '##'):
macro.patch.append(('c',argnum,i-1))
del macro.value[i-1]
i -= 1
continue
elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
macro.patch.append(('c',argnum,i))
del macro.value[i + 1]
continue
# Standard expansion
else:
macro.patch.append(('e',argnum,i))
elif macro.value[i].value == '##':
if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
(macro.value[i+1].value == macro.vararg):
macro.var_comma_patch.append(i-1)
i += 1
macro.patch.sort(key=lambda x: x[2],reverse=True)
# ----------------------------------------------------------------------
# macro_expand_args()
#
# Given a Macro and list of arguments (each a token list), this method
# returns an expanded version of a macro. The return value is a token sequence
# representing the replacement macro tokens
# ----------------------------------------------------------------------
def macro_expand_args(self,macro,args):
# Make a copy of the macro token sequence
rep = [copy.copy(_x) for _x in macro.value]
# Make string expansion patches. These do not alter the length of the replacement sequence
str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
rep[i] = copy.copy(rep[i])
rep[i].value = str_expansion[argnum]
# Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid
comma_patch = False
if macro.variadic and not args[-1]:
for i in macro.var_comma_patch:
rep[i] = None
comma_patch = True
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
if ptype == 'c':
rep[i:i+1] = args[argnum]
# Normal expansion. Argument is macro expanded first
elif ptype == 'e':
if argnum not in expanded:
expanded[argnum] = self.expand_macros(args[argnum])
rep[i:i+1] = expanded[argnum]
# Get rid of removed comma if necessary
if comma_patch:
rep = [_i for _i in rep if _i]
return rep
# ----------------------------------------------------------------------
# expand_macros()
#
# Given a list of tokens, this function performs macro expansion.
# The expanded argument is a dictionary that contains macros already
# expanded. This is used to prevent infinite recursion.
# ----------------------------------------------------------------------
def expand_macros(self,tokens,expanded=None):
if expanded is None:
expanded = {}
i = 0
while i < len(tokens):
t = tokens[i]
if t.type == self.t_ID:
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True
m = self.macros[t.value]
if not m.arglist:
# A simple macro
ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
for e in ex:
e.lineno = t.lineno
tokens[i:i+1] = ex
i += len(ex)
else:
# A macro with arguments
j = i + 1
while j < len(tokens) and tokens[j].type in self.t_WS:
j += 1
if j < len(tokens) and tokens[j].value == '(':
tokcount,args,positions = self.collect_args(tokens[j:])
if not m.variadic and len(args) != len(m.arglist):
self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
i = j + tokcount
elif m.variadic and len(args) < len(m.arglist)-1:
if len(m.arglist) > 2:
self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
else:
self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
i = j + tokcount
else:
if m.variadic:
if len(args) == len(m.arglist)-1:
args.append([])
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]
# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
for r in rep:
r.lineno = t.lineno
tokens[i:j+tokcount] = rep
i += len(rep)
else:
# This is not a macro. It is just a word which
# equals to name of the macro. Hence, go to the
# next token.
i += 1
del expanded[t.value]
continue
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)
i += 1
return tokens
# ----------------------------------------------------------------------
# evalexpr()
#
# Evaluate an expression token sequence for the purposes of evaluating
# integral expressions.
# ----------------------------------------------------------------------
def evalexpr(self,tokens):
    """Evaluate a preprocessor conditional expression to an integer.

    Resolves defined(X) / defined X to 1L/0L, expands macros, replaces
    any remaining identifiers with 0 (as in C), strips integer suffixes,
    rewrites &&/||/! into Python operators, then evaluates the result.
    Returns 0 (and reports an error) if evaluation fails.
    """
    # tokens = tokenize(line)
    # Search for defined macros
    i = 0
    while i < len(tokens):
        if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
            j = i + 1
            needparen = False
            result = "0L"
            while j < len(tokens):
                if tokens[j].type in self.t_WS:
                    j += 1
                    continue
                elif tokens[j].type == self.t_ID:
                    if tokens[j].value in self.macros:
                        result = "1L"
                    else:
                        result = "0L"
                    # Bare "defined NAME" ends here; parenthesized form
                    # continues until the ')'.
                    if not needparen: break
                elif tokens[j].value == '(':
                    needparen = True
                elif tokens[j].value == ')':
                    break
                else:
                    self.error(self.source,tokens[i].lineno,"Malformed defined()")
                j += 1
            # Replace the whole defined(...) construct with its result.
            tokens[i].type = self.t_INTEGER
            tokens[i].value = self.t_INTEGER_TYPE(result)
            del tokens[i+1:j+1]
        i += 1
    tokens = self.expand_macros(tokens)
    for i,t in enumerate(tokens):
        if t.type == self.t_ID:
            # Unknown identifiers evaluate to 0, matching C semantics.
            tokens[i] = copy.copy(t)
            tokens[i].type = self.t_INTEGER
            tokens[i].value = self.t_INTEGER_TYPE("0L")
        elif t.type == self.t_INTEGER:
            tokens[i] = copy.copy(t)
            # Strip off any trailing suffixes (u/U/l/L) so eval() accepts it.
            tokens[i].value = str(tokens[i].value)
            while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
                tokens[i].value = tokens[i].value[:-1]
    expr = "".join([str(x.value) for x in tokens])
    expr = expr.replace("&&"," and ")
    expr = expr.replace("||"," or ")
    expr = expr.replace("!"," not ")
    try:
        # SECURITY NOTE(review): eval() on preprocessor input can execute
        # arbitrary Python if the source text is untrusted; acceptable
        # only when preprocessing trusted files.
        result = eval(expr)
    except Exception:
        self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
        result = 0
    return result
# ----------------------------------------------------------------------
# parsegen()
#
# Parse an input string.
# ----------------------------------------------------------------------
def parsegen(self,input,source=None):
# Replace trigraph sequences
t = trigraph(input)
lines = self.group_lines(t)
if not source:
source = ""
self.define("__FILE__ \"%s\"" % source)
self.source = source
chunk = []
enable = True
iftrigger = False
ifstack = []
for x in lines:
for i,tok in enumerate(x):
if tok.type not in self.t_WS: break
if tok.value == '#':
# Preprocessor directive
# insert necessary whitespace instead of eaten tokens
for tok in x:
if tok.type in self.t_WS and '\n' in tok.value:
chunk.append(tok)
dirtokens = self.tokenstrip(x[i+1:])
if dirtokens:
name = dirtokens[0].value
args = self.tokenstrip(dirtokens[1:])
else:
name = ""
args = []
if name == 'define':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.define(args)
elif name == 'include':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
oldfile = self.macros['__FILE__']
for tok in self.include(args):
yield tok
self.macros['__FILE__'] = oldfile
self.source = source
elif name == 'undef':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.undef(args)
elif name == 'ifdef':
ifstack.append((enable,iftrigger))
if enable:
if not args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'ifndef':
ifstack.append((enable,iftrigger))
if enable:
if args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'if':
ifstack.append((enable,iftrigger))
if enable:
result = self.evalexpr(args)
if not result:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'elif':
if ifstack:
if ifstack[-1][0]: # We only pay attention if outer "if" allows this
if enable: # If already true, we flip enable False
enable = False
elif not iftrigger: # If False, but not triggered yet, we'll check expression
result = self.evalexpr(args)
if result:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
elif name == 'else':
if ifstack:
if ifstack[-1][0]:
if enable:
enable = False
elif not iftrigger:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
elif name == 'endif':
if ifstack:
enable,iftrigger = ifstack.pop()
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
else:
# Unknown preprocessor directive
pass
else:
# Normal text
if enable:
chunk.extend(x)
for tok in self.expand_macros(chunk):
yield tok
chunk = []
# ----------------------------------------------------------------------
# include()
#
# Implementation of file-inclusion
# ----------------------------------------------------------------------
def include(self, tokens):
    """Implement #include: resolve the filename and yield the tokens
    produced by preprocessing the included file.

    The <file> form searches self.path before self.temp_path; the "file"
    form searches self.temp_path (directory of the including file) first.
    Malformed directives and unresolvable files are reported by printing.
    """
    # Try to extract the filename and then process an include file
    if not tokens:
        return
    if tokens:
        # Expand macros unless the directive already starts with < or "..."
        if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
            tokens = self.expand_macros(tokens)
        if tokens[0].value == '<':
            # Include <...>
            i = 1
            while i < len(tokens):
                if tokens[i].value == '>':
                    break
                i += 1
            else:
                print("Malformed #include <...>")
                return
            filename = "".join([x.value for x in tokens[1:i]])
            path = self.path + [""] + self.temp_path
        elif tokens[0].type == self.t_STRING:
            filename = tokens[0].value[1:-1]
            path = self.temp_path + [""] + self.path
        else:
            print("Malformed #include statement")
            return
        for p in path:
            iname = os.path.join(p, filename)
            try:
                # Use a context manager so the handle is closed promptly;
                # the original open(iname,"r").read() leaked the file object.
                with open(iname, "r") as f:
                    data = f.read()
                dname = os.path.dirname(iname)
                if dname:
                    # Make the included file's directory searchable for
                    # its own nested "..." includes.
                    self.temp_path.insert(0, dname)
                for tok in self.parsegen(data, filename):
                    yield tok
                if dname:
                    del self.temp_path[0]
                break
            except IOError:
                pass
        else:
            print("Couldn't find '%s'" % filename)
# ----------------------------------------------------------------------
# define()
#
# Define a new macro
# ----------------------------------------------------------------------
def define(self,tokens):
if isinstance(tokens,STRING_TYPES):
tokens = self.tokenize(tokens)
linetok = tokens
try:
name = linetok[0]
if len(linetok) > 1:
mtype = linetok[1]
else:
mtype = None
if not mtype:
m = Macro(name.value,[])
self.macros[name.value] = m
elif mtype.type in self.t_WS:
# A normal macro
m = Macro(name.value,self.tokenstrip(linetok[2:]))
self.macros[name.value] = m
elif mtype.value == '(':
# A macro with arguments
tokcount, args, positions = self.collect_args(linetok[1:])
variadic = False
for a in args:
if variadic:
print("No more arguments may follow a variadic argument")
break
astr = "".join([str(_i.value) for _i in a])
if astr == "...":
variadic = True
a[0].type = self.t_ID
a[0].value = '__VA_ARGS__'
variadic = True
del a[1:]
continue
elif astr[-3:] == "..." and a[0].type == self.t_ID:
variadic = True
del a[1:]
# If, for some reason, "." is part of the identifier, strip off the name for the purposes
# of macro expansion
if a[0].value[-3:] == '...':
a[0].value = a[0].value[:-3]
continue
if len(a) > 1 or a[0].type != self.t_ID:
print("Invalid macro argument")
break
else:
mvalue = self.tokenstrip(linetok[1+tokcount:])
i = 0
while i < len(mvalue):
if i+1 < len(mvalue):
if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
del mvalue[i]
continue
elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
del mvalue[i+1]
i += 1
m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
self.macro_prescan(m)
self.macros[name.value] = m
else:
print("Bad macro definition")
except LookupError:
print("Bad macro definition")
# ----------------------------------------------------------------------
# undef()
#
# Undefine a macro
# ----------------------------------------------------------------------
def undef(self, tokens):
    """Implement #undef: forget the named macro; unknown names are ignored."""
    macro_name = tokens[0].value
    # pop() with a default mirrors the original try/del-except-pass.
    self.macros.pop(macro_name, None)
# ----------------------------------------------------------------------
# parse()
#
# Parse input text.
# ----------------------------------------------------------------------
def parse(self, input, source=None, ignore=None):
    """Begin preprocessing *input*.

    Parameters:
        input  -- source text to preprocess
        source -- filename used for __FILE__ and diagnostics
        ignore -- collection of token types that token() should skip

    Tokens are then retrieved incrementally via token().
    FIX: 'ignore' previously defaulted to a shared mutable dict ({});
    defaulting to None and substituting a fresh dict avoids accidental
    cross-call state sharing while remaining backward compatible.
    """
    self.ignore = {} if ignore is None else ignore
    self.parser = self.parsegen(input, source)
# ----------------------------------------------------------------------
# token()
#
# Method to return individual tokens
# ----------------------------------------------------------------------
def token(self):
    """Return the next token not in self.ignore, or None at end of input."""
    # Iterating the generator handles StopIteration implicitly; once it is
    # exhausted we drop the reference and signal end-of-input with None.
    for tok in self.parser:
        if tok.type not in self.ignore:
            return tok
    self.parser = None
    return None
if __name__ == '__main__':
import ply.lex as lex
lexer = lex.lex()
# Run a preprocessor
import sys
f = open(sys.argv[1])
input = f.read()
p = Preprocessor(lexer)
p.parse(input,sys.argv[1])
while True:
tok = p.token()
if not tok: break
print(p.source, tok)

View File

@@ -0,0 +1,127 @@
# ----------------------------------------------------------------------
# ctokens.py
#
# Token specifications for symbols in ANSI C and C++. This file is
# meant to be used as a library in other tokenizers.
# ----------------------------------------------------------------------
# Reserved words
tokens = [
# Literals (identifier, integer constant, float constant, string constant, char const)
'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',
# Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
# Increment/decrement (++,--)
'INCREMENT', 'DECREMENT',
# Structure dereference (->)
'ARROW',
# Ternary operator (?)
'TERNARY',
# Delimiters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'PERIOD', 'SEMI', 'COLON',
# Ellipsis (...)
'ELLIPSIS',
]
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='
# Increment/decrement
t_INCREMENT = r'\+\+'
t_DECREMENT = r'--'
# ->
t_ARROW = r'->'
# ?
t_TERNARY = r'\?'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
# Integer literal
t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
t_STRING = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
# Comment (C-Style)
def t_COMMENT(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
return t
# Comment (C++-Style)
def t_CPPCOMMENT(t):
r'//.*\n'
t.lexer.lineno += 1
return t

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,10 @@
[bdist_wheel]
universal = 1
[metadata]
description-file = README.md
[egg_info]
tag_build =
tag_date = 0

View File

@@ -1,36 +1,3 @@
# -----------------------------------------------------------------------------
# ply: setup.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
try:
from setuptools import setup
except ImportError:
@@ -50,7 +17,7 @@ PLY is extremely easy to use and provides very extensive error checking.
It is compatible with both Python 2 and Python 3.
""",
license="""BSD""",
version = "3.4",
version = "3.11",
author = "David Beazley",
author_email = "dave@dabeaz.com",
maintainer = "David Beazley",

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,69 @@
# ply: ygen.py
#
# This is a support program that auto-generates different versions of the YACC parsing
# function with different features removed for the purposes of performance.
#
# Users should edit the method LRParser.parsedebug() in yacc.py. The source code
# for that method is then used to create the other methods. See the comments in
# yacc.py for further details.
import os.path
import shutil
def get_source_range(lines, tag):
srclines = enumerate(lines)
start_tag = '#--! %s-start' % tag
end_tag = '#--! %s-end' % tag
for start_index, line in srclines:
if line.strip().startswith(start_tag):
break
for end_index, line in srclines:
if line.strip().endswith(end_tag):
break
return (start_index + 1, end_index)
def filter_section(lines, tag):
filtered_lines = []
include = True
tag_text = '#--! %s' % tag
for line in lines:
if line.strip().startswith(tag_text):
include = not include
elif include:
filtered_lines.append(line)
return filtered_lines
def main():
dirname = os.path.dirname(__file__)
shutil.copy2(os.path.join(dirname, 'yacc.py'), os.path.join(dirname, 'yacc.py.bak'))
with open(os.path.join(dirname, 'yacc.py'), 'r') as f:
lines = f.readlines()
parse_start, parse_end = get_source_range(lines, 'parsedebug')
parseopt_start, parseopt_end = get_source_range(lines, 'parseopt')
parseopt_notrack_start, parseopt_notrack_end = get_source_range(lines, 'parseopt-notrack')
# Get the original source
orig_lines = lines[parse_start:parse_end]
# Filter the DEBUG sections out
parseopt_lines = filter_section(orig_lines, 'DEBUG')
# Filter the TRACKING sections out
parseopt_notrack_lines = filter_section(parseopt_lines, 'TRACKING')
# Replace the parser source sections with updated versions
lines[parseopt_notrack_start:parseopt_notrack_end] = parseopt_notrack_lines
lines[parseopt_start:parseopt_end] = parseopt_lines
lines = [line.rstrip()+'\n' for line in lines]
with open(os.path.join(dirname, 'yacc.py'), 'w') as f:
f.writelines(lines)
print('Updated yacc.py')
if __name__ == '__main__':
main()

File diff suppressed because it is too large Load Diff

View File

@@ -121,7 +121,7 @@
<string/>
</property>
<property name="title">
<string>Attachment Offset:</string>
<string>Attachment Offset (in local coordinates):</string>
</property>
<layout class="QGridLayout" name="gridLayout">
<item row="1" column="0">
@@ -133,7 +133,7 @@
</sizepolicy>
</property>
<property name="text">
<string>X:</string>
<string>In x-direction:</string>
</property>
<property name="buddy">
<cstring>labelOffset</cstring>
@@ -149,7 +149,7 @@
</sizepolicy>
</property>
<property name="text">
<string>Y:</string>
<string>In y-direction:</string>
</property>
</widget>
</item>
@@ -181,7 +181,7 @@
</sizepolicy>
</property>
<property name="text">
<string>Z:</string>
<string>In z-direction:</string>
</property>
</widget>
</item>
@@ -213,7 +213,7 @@
</sizepolicy>
</property>
<property name="text">
<string>Roll:</string>
<string>Around x-axis:</string>
</property>
</widget>
</item>
@@ -226,7 +226,7 @@
</sizepolicy>
</property>
<property name="text">
<string>Pitch:</string>
<string>Around y-axis:</string>
</property>
</widget>
</item>
@@ -239,7 +239,7 @@
</sizepolicy>
</property>
<property name="text">
<string>Yaw:</string>
<string>Around z-axis:</string>
</property>
</widget>
</item>

View File

@@ -617,15 +617,6 @@ void SoBrepFaceSet::GLRender(SoGLRenderAction *action)
// material override with transparency won't work.
mb.sendFirst();
if(SoShapeStyleElement::get(state)->getFlags()
& (SoShapeStyleElement::TRANSP_TEXTURE|SoShapeStyleElement::TRANSP_MATERIAL))
{
// For some reason, there is an inconsistency in blending state between
// OpenGL and Coin, especially when doing offscreen rendering.
if(!glIsEnabled(GL_BLEND))
glEnable(GL_BLEND);
}
// When setting transparency shouldGLRender() handles the rendering and returns false.
// Therefore generatePrimitives() needs to be re-implemented to handle the materials
// correctly.

View File

@@ -108,10 +108,12 @@ void TaskAttacher::makeRefStrings(std::vector<QString>& refstrings, std::vector<
}
}
TaskAttacher::TaskAttacher(Gui::ViewProviderDocumentObject *ViewProvider,QWidget *parent, QString picture, QString text)
TaskAttacher::TaskAttacher(Gui::ViewProviderDocumentObject *ViewProvider, QWidget *parent,
QString picture, QString text, TaskAttacher::VisibilityFunction visFunc)
: TaskBox(Gui::BitmapFactory().pixmap(picture.toLatin1()), text, true, parent),
SelectionObserver(ViewProvider),
ViewProvider(ViewProvider)
ViewProvider(ViewProvider),
visibilityFunc(visFunc)
{
//check if we are attachable
if (!ViewProvider->getObject()->hasExtension(Part::AttachExtension::getExtensionClassTypeId()))
@@ -318,7 +320,7 @@ bool TaskAttacher::updatePreview()
ui->message->setStyleSheet(QString::fromLatin1("QLabel{color: green;}"));
}
}
QString splmLabelText = attached ? tr("Attachment Offset:") : tr("Attachment Offset (inactive - not attached):");
QString splmLabelText = attached ? tr("Attachment Offset (in local coordinates):") : tr("Attachment Offset (inactive - not attached):");
ui->groupBox_AttachmentOffset->setTitle(splmLabelText);
ui->groupBox_AttachmentOffset->setEnabled(attached);
@@ -935,25 +937,11 @@ void TaskAttacher::changeEvent(QEvent *e)
void TaskAttacher::visibilityAutomation(bool opening_not_closing)
{
if (opening_not_closing) {
//crash guards
if (!ViewProvider)
return;
if (!ViewProvider->getObject())
return;
if (!ViewProvider->getObject()->getNameInDocument())
return;
auto editDoc = Gui::Application::Instance->editDocument();
App::DocumentObject *editObj = ViewProvider->getObject();
std::string editSubName;
ViewProviderDocumentObject *editVp = 0;
if(editDoc) {
editDoc->getInEdit(&editVp,&editSubName);
if(editVp)
editObj = editVp->getObject();
}
try{
auto defvisfunc = [] (bool opening_not_closing,
Gui::ViewProviderDocumentObject* vp,
App::DocumentObject *editObj,
const std::string& editSubName) {
if (opening_not_closing) {
QString code = QString::fromLatin1(
"import Show\n"
"tv = Show.TempoVis(App.ActiveDocument, tag= 'PartGui::TaskAttacher')\n"
@@ -970,11 +958,39 @@ void TaskAttacher::visibilityAutomation(bool opening_not_closing)
"\t\t\ttv.show([lnk[0] for lnk in tvObj.Support])\n"
"del(tvObj)"
).arg(
QString::fromLatin1(Gui::Command::getObjectCmd(ViewProvider->getObject()).c_str()),
QString::fromLatin1(Gui::Command::getObjectCmd(vp->getObject()).c_str()),
QString::fromLatin1(Gui::Command::getObjectCmd(editObj).c_str()),
QString::fromLatin1(editSubName.c_str()));
Gui::Command::runCommand(Gui::Command::Gui,code.toLatin1().constData());
}
else {
Base::Interpreter().runString("del(tv)");
}
};
auto visAutoFunc = visibilityFunc ? visibilityFunc : defvisfunc;
if (opening_not_closing) {
//crash guards
if (!ViewProvider)
return;
if (!ViewProvider->getObject())
return;
if (!ViewProvider->getObject()->getNameInDocument())
return;
auto editDoc = Gui::Application::Instance->editDocument();
App::DocumentObject *editObj = ViewProvider->getObject();
std::string editSubName;
ViewProviderDocumentObject *editVp = 0;
if (editDoc) {
editDoc->getInEdit(&editVp,&editSubName);
if (editVp)
editObj = editVp->getObject();
}
try {
visAutoFunc(opening_not_closing, ViewProvider, editObj, editSubName);
}
catch (const Base::Exception &e){
e.ReportException();
}
@@ -985,7 +1001,7 @@ void TaskAttacher::visibilityAutomation(bool opening_not_closing)
}
else {
try {
Base::Interpreter().runString("del(tv)");
visAutoFunc(opening_not_closing, nullptr, nullptr, std::string());
}
catch (Base::Exception &e) {
e.ReportException();

View File

@@ -30,6 +30,7 @@
#include <Gui/TaskView/TaskView.h>
#include <Gui/TaskView/TaskDialog.h>
#include <Mod/Part/App/Attacher.h>
#include <boost/function.hpp>
class Ui_TaskAttacher;
@@ -52,8 +53,11 @@ class PartGuiExport TaskAttacher : public Gui::TaskView::TaskBox, public Gui::Se
Q_OBJECT
public:
TaskAttacher(Gui::ViewProviderDocumentObject *ViewProvider,QWidget *parent = 0,
QString picture = QString::fromLatin1(""), QString text = QString::fromLatin1("Attachment"));
typedef boost::function<void (bool, Gui::ViewProviderDocumentObject*, App::DocumentObject *, const std::string&)> VisibilityFunction;
TaskAttacher(Gui::ViewProviderDocumentObject *ViewProvider, QWidget *parent = 0,
QString picture = QString(),
QString text = QString::fromLatin1("Attachment"), VisibilityFunction func = 0);
~TaskAttacher();
bool getFlip(void) const;
@@ -125,6 +129,7 @@ protected:
private:
QWidget* proxy;
Ui_TaskAttacher* ui;
VisibilityFunction visibilityFunc;
// TODO fix documentation here (2015-11-10, Fat-Zer)
int iActiveRef; //what reference is being picked in 3d view now? -1 means no one, 0-3 means a reference is being picked.

View File

@@ -121,7 +121,7 @@
<string/>
</property>
<property name="title">
<string>Attachment Offset:</string>
<string>Attachment Offset (in local coordinates):</string>
</property>
<layout class="QGridLayout" name="gridLayout">
<item row="1" column="0">
@@ -133,7 +133,7 @@
</sizepolicy>
</property>
<property name="text">
<string>X:</string>
<string>In x-direction:</string>
</property>
<property name="buddy">
<cstring>labelOffset</cstring>
@@ -149,7 +149,7 @@
</sizepolicy>
</property>
<property name="text">
<string>Y:</string>
<string>In y-direction:</string>
</property>
</widget>
</item>
@@ -182,7 +182,7 @@ of object being attached.</string>
</sizepolicy>
</property>
<property name="text">
<string>Z:</string>
<string>In z-direction:</string>
</property>
</widget>
</item>
@@ -215,7 +215,7 @@ of object being attached.</string>
</sizepolicy>
</property>
<property name="text">
<string>Roll:</string>
<string>Around x-axis:</string>
</property>
</widget>
</item>
@@ -228,7 +228,7 @@ of object being attached.</string>
</sizepolicy>
</property>
<property name="text">
<string>Pitch:</string>
<string>Around y-axis:</string>
</property>
</widget>
</item>
@@ -241,7 +241,7 @@ of object being attached.</string>
</sizepolicy>
</property>
<property name="text">
<string>Yaw:</string>
<string>Around z-axis:</string>
</property>
</widget>
</item>

View File

@@ -661,7 +661,36 @@ TaskPrimitiveParameters::TaskPrimitiveParameters(ViewProviderPrimitive* Primitiv
primitive = new TaskBoxPrimitives(PrimitiveView);
Content.push_back(primitive);
parameter = new PartGui::TaskAttacher(PrimitiveView);
// handle visibility automation differently to the default method
auto customvisfunc = [] (bool opening_not_closing,
Gui::ViewProviderDocumentObject* vp,
App::DocumentObject *editObj,
const std::string& editSubName) {
if (opening_not_closing) {
QString code = QString::fromLatin1(
"import Show\n"
"tv = Show.TempoVis(App.ActiveDocument, tag= 'PartGui::TaskAttacher')\n"
"tvObj = %1\n"
"dep_features = tv.get_all_dependent(%2, '%3')\n"
"if tvObj.isDerivedFrom('PartDesign::CoordinateSystem'):\n"
"\tvisible_features = [feat for feat in tvObj.InList if feat.isDerivedFrom('PartDesign::FeaturePrimitive')]\n"
"\tdep_features = [feat for feat in dep_features if feat not in visible_features]\n"
"\tdel(visible_features)\n"
"tv.hide(dep_features)\n"
"del(dep_features)\n"
"del(tvObj)"
).arg(
QString::fromLatin1(Gui::Command::getObjectCmd(vp->getObject()).c_str()),
QString::fromLatin1(Gui::Command::getObjectCmd(editObj).c_str()),
QString::fromLatin1(editSubName.c_str()));
Gui::Command::runCommand(Gui::Command::Gui,code.toLatin1().constData());
}
else {
Base::Interpreter().runString("del(tv)");
}
};
parameter = new PartGui::TaskAttacher(PrimitiveView, nullptr, QString(), tr("Attachment"), customvisfunc);
Content.push_back(parameter);
}

View File

@@ -0,0 +1,116 @@
# *********************************************************************************************
# * Copyright (c) 2019/2020 Rene 'Renne' Bartsch, B.Sc. Informatics <rene@bartschnet.de> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ********************************************************************************************/
import FreeCAD
import PathScripts
from PathScripts import PostUtils
import datetime
TOOLTIP='''
This is a postprocessor file for the Path workbench. It is used to take
a pseudo-gcode fragment outputted by a Path object and output real GCode
suitable for the Max Computer GmbH nccad9 Computer Numeric Control.
Supported features:
- 3-axis milling
- manual tool change with tool number as comment
- spindle speed as comment
!!! gCode files must use the suffix .knc !!!'''
MACHINE_NAME = '''Max Computer GmbH nccad9 MCS/KOSY'''
# gCode for changing tools
# M01 <String> ; Displays <String> and waits for user interaction
TOOL_CHANGE = '''G77 ; Move to release position
M10 O6.0 ; Stop spindle
M01 Insert tool TOOL
G76 ; Move to reference point to ensure correct coordinates after tool change
M10 O6.1 ; Start spindel'''
# gCode finishing the program
POSTAMBLE = '''G77 ; Move to release position
M10 O6.0 ; Stop spindle'''
# gCode header with information about CAD-software, post-processor and date/time
HEADER = ''';Exported by FreeCAD
;Post Processor: {}
;CAM file: {}
;Output Time: {}
'''.format(__name__, FreeCAD.ActiveDocument.FileName, str(datetime.datetime.now()))
def export(objectslist, filename, argstring):
gcode = HEADER
for obj in objectslist:
for command in obj.Path.Commands:
# Manipulate tool change commands
if 'M6' == command.Name:
gcode += TOOL_CHANGE.replace('TOOL', str(int(command.Parameters['T'])))
# Convert spindle speed (rpm) command to comment
elif 'M3' == command.Name:
gcode += 'M01 Set spindle speed to ' + str(int(command.Parameters['S'])) + ' rounds per minute'
# Add other commands
else:
gcode += command.Name
# Loop through command parameters
for parameter, value in command.Parameters.items():
# Multiply F parameter value by 10 (FreeCAD = mm/s, nccad = 1/10 mm/s)
if 'F' == parameter:
value *= 10
# Add command parameters and values and round float as nccad9 does not support exponents
gcode += ' ' + parameter + str(round(value, 5))
gcode += '\n'
gcode += POSTAMBLE + '\n'
# Open editor window
if FreeCAD.GuiUp:
dia = PostUtils.GCodeEditorDialog()
dia.editor.setText(gcode)
result = dia.exec_()
if result:
gcode = dia.editor.toPlainText()
# Save to file
if filename != '-':
gfile = open(filename, "w")
gfile.write(gcode)
gfile.close()
return filename

View File

@@ -40,6 +40,7 @@
#include <QTextStream>
#include <QFile>
#include <QLabel>
#include <QTextCodec>
#include <cmath>
#endif
@@ -944,8 +945,9 @@ void QGVPage::postProcessXml(QTemporaryFile& temporaryFile, QString fileName, QS
QTextStream stream( &outFile );
stream.setGenerateByteOrderMark(true);
stream.setCodec("UTF-8");
stream << exportDoc.toString();
stream << exportDoc.toByteArray();
outFile.close();
}