fix paragraph splitting bug (reporters Olivia Zhang & Echo Bell); version-->3.3.31
author robin <robin@reportlab.com>
Tue, 07 Feb 2017 11:19:18 +0000
changeset 4315 7c65c6e52b13
parent 4314 27d3ed39ad3e
child 4316 14c18006410a
fix paragraph splitting bug (reporters Olivia Zhang & Echo Bell); version-->3.3.31
CHANGES.md
src/reportlab/__init__.py
src/reportlab/platypus/paragraph.py
src/reportlab/platypus/paraparser.py
tests/test_platypus_paragraphs.py
--- a/CHANGES.md	Thu Feb 02 15:53:11 2017 +0000
+++ b/CHANGES.md	Tue Feb 07 11:19:18 2017 +0000
@@ -13,6 +13,7 @@
 
 RELEASE 3.4  ??/02/2017
 -----------------------
+    * fix split paragraph rendering bug (reported by Olivia Zhang & Echo Bell)
     * support for Path autoclose & fillMode; version --> 3.3.29
     * add support for different fill policies in renderXX drawPath; version-->3.3.28
     * allow for UTF_16_LE BOM, fix for bug contributed by Michael Poindexter mpoindexter@housecanary.com
@@ -45,7 +46,7 @@
     * attempt to remove quadratic performance hit when longTableOptimize is set
     * allow DATA: scheme in open for read
     * import Table _rowpositions calculation
-    * support small ttfs which don't allow subsets
+    * support small ttfs which do not allow subsets
     * add rl_settings allowTTFSubsetting
     * address issue #76 (deprecated immports) reported by Richard Eames
     * add table cell support for simple background shadings, contributed by Jeffrey Creem jcreem@bitbucket
@@ -58,6 +59,7 @@
     * Dinu Gherman
     * Claude Paroz
     * dbrnz @ bitbucket
+    * Echo Bell
     * Eric Gillet
     * Jeffrey Creem jcreem@bitbucket
     * Johann Du Toit
@@ -66,6 +68,7 @@
     * Kyle McFarlane https://bitbucket.org/kylemacfarlane/
     * Michael Poindexter mpoindexter@housecanary.com
     * Neil Schemenauer
+    * Olivia Zhang
     * Richard Eames
     * Robin Westin
     * Tim Meneely
--- a/src/reportlab/__init__.py	Thu Feb 02 15:53:11 2017 +0000
+++ b/src/reportlab/__init__.py	Tue Feb 07 11:19:18 2017 +0000
@@ -1,9 +1,9 @@
 #Copyright ReportLab Europe Ltd. 2000-2017
 #see license.txt for license details
 __doc__="""The Reportlab PDF generation library."""
-Version = "3.3.30"
+Version = "3.3.31"
 __version__=Version
-__date__='20170202'
+__date__='20170207'
 
 import sys, os
 
--- a/src/reportlab/platypus/paragraph.py	Thu Feb 02 15:53:11 2017 +0000
+++ b/src/reportlab/platypus/paragraph.py	Tue Feb 07 11:19:18 2017 +0000
@@ -7,7 +7,7 @@
 from operator import truth
 from unicodedata import category
 from reportlab.pdfbase.pdfmetrics import stringWidth, getFont, getAscentDescent
-from reportlab.platypus.paraparser import ParaParser
+from reportlab.platypus.paraparser import ParaParser, _PCT
 from reportlab.platypus.flowables import Flowable
 from reportlab.lib.colors import Color
 from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
@@ -235,7 +235,7 @@
                 txfs = tx._fontsize
                 if txfs is None:
                     txfs = xs.style.fontSize
-                w = imgNormV(cbDefn.width,None)
+                w = imgNormV(cbDefn.width,xs.paraWidth)
                 h = imgNormV(cbDefn.height,txfs)
                 iy0,iy1 = imgVRange(h,cbDefn.valign,txfs)
                 cur_x_s = cur_x + nSpaces*ws
@@ -437,10 +437,34 @@
 class _HSWord(list):
     pass
 
+class _SplitList(list):
+    pass
+
+class _SplitListLast(_SplitList):
+    pass
+
+class _HSSplitList(_HSWord):
+    pass
+
+def _processed_frags(frags):
+    try:
+        return isinstance(frags[0][0],(float,int))
+    except:
+        return False
+
 _FK_TEXT = 0
 _FK_IMG = 1
 _FK_APPEND = 2
 _FK_BREAK = 3
+
+def _rescaleFrag(f):
+    w = f[0]
+    if isinstance(w,_PCT):
+        if w._normalizer!=maxWidth:
+            w._normalizer = maxWidth
+            w = w.normalizedValue(maxWidth)
+            f[0] = w
+
 def _getFragWords(frags,maxWidth=None):
     ''' given a Parafrag list return a list of fragwords
         [[size, (f00,w00), ..., (f0n,w0n)],....,[size, (fm0,wm0), ..., (f0n,wmn)]]
@@ -448,88 +472,114 @@
         each sublist represents a word
     '''
     R = []
-    W = []
-    hangingSpace = False
-    n = 0
-    hangingStrip = True
-    for f in frags:
-        text = f.text
-        if text!='':
-            f._fkind = _FK_TEXT
-            if hangingStrip:
-                text = text.lstrip()
-                if not text: continue
-                hangingStrip = False
-            S = split(text)
-            if text[0] in whitespace:
-                if W:
-                    W.insert(0,n)   #end preceding word
-                    R.append(W)
-                    whs = hangingSpace
+    R_append = R.append
+    if _processed_frags(frags):
+        i = 0
+        n = len(frags)
+        while i<n:
+            f = frags[i]
+            if isinstance(f,(_SplitList,_HSSplitList,_SplitListLast)):
+                #we need to re-join these to make a single word
+                W = [0]
+                while True:
+                    W[0] += f[0]
+                    W.extend(f[1:])
+                    if isinstance(f,(_SplitListLast,_HSSplitList)):
+                        break
+                    i += 1
+                    if i==n: break  #exceptional case when a split paragraph was split in the middle of such a sequence
+                    f = frags[i]
+                if isinstance(f,_HSSplitList):
+                    f = _HSWord(W)
+                else:
+                    f = W
+            else:
+                _rescaleFrag(f)
+            R_append(f)
+            i += 1
+    else:
+        W = []
+        hangingSpace = False
+        n = 0
+        hangingStrip = True
+        for f in frags:
+            text = f.text
+            if text!='':
+                f._fkind = _FK_TEXT
+                if hangingStrip:
+                    text = text.lstrip()
+                    if not text: continue
+                    hangingStrip = False
+                S = split(text)
+                if text[0] in whitespace:
+                    if W:
+                        W.insert(0,n)   #end preceding word
+                        R_append(W)
+                        whs = hangingSpace
+                        W = []
+                        hangingSpace = False
+                        n = 0
+                    else:
+                        whs = R and isinstance(R[-1],_HSWord)
+                    if not whs:
+                        S.insert(0,'')
+                    elif not S:
+                        continue
+
+                for w in S[:-1]:
+                    W.append((f,w))
+                    n += stringWidth(w, f.fontName, f.fontSize)
+                    W.insert(0,n)
+                    R_append(_HSWord(W))
                     W = []
-                    hangingSpace = False
                     n = 0
-                else:
-                    whs = R and isinstance(R[-1],_HSWord)
-                if not whs:
-                    S.insert(0,'')
-                elif not S:
-                    continue
 
-            for w in S[:-1]:
+                hangingSpace = False
+                w = S[-1]
                 W.append((f,w))
                 n += stringWidth(w, f.fontName, f.fontSize)
-                W.insert(0,n)
-                R.append(_HSWord(W))
-                W = []
-                n = 0
-
-            hangingSpace = False
-            w = S[-1]
-            W.append((f,w))
-            n += stringWidth(w, f.fontName, f.fontSize)
-            if text and text[-1] in whitespace:
-                W.insert(0,n)
-                R.append(_HSWord(W))
-                W = []
-                n = 0
-        elif hasattr(f,'cbDefn'):
-            cb = f.cbDefn
-            w = getattr(cb,'width',0)
-            if w:
-                if hasattr(w,'normalizedValue'):
-                    w._normalizer = maxWidth
-                    w = w.normalizedValue(maxWidth)
+                if text and text[-1] in whitespace:
+                    W.insert(0,n)
+                    R_append(_HSWord(W))
+                    W = []
+                    n = 0
+            elif hasattr(f,'cbDefn'):
+                cb = f.cbDefn
+                w = getattr(cb,'width',0)
+                if w:
+                    if hasattr(w,'normalizedValue'):
+                        w._normalizer = maxWidth
+                        w = w.normalizedValue(maxWidth)
+                    if W:
+                        W.insert(0,n)
+                        R_append(_HSWord(W) if hangingSpace else W)
+                        W = []
+                        hangingSpace = False
+                        n = 0
+                    f._fkind = _FK_IMG
+                    R_append([w,(f,'')])
+                    hangingStrip = False
+                else:
+                    f._fkind = _FK_APPEND
+                    if not W and R and isinstance(R[-1],_HSWord):
+                        R[-1].append((f,''))
+                    else:
+                        W.append((f,''))
+            elif hasattr(f, 'lineBreak'):
+                #pass the frag through.  The line breaker will scan for it.
                 if W:
                     W.insert(0,n)
-                    R.append(_HSWord(W) if hangingSpace else W)
+                    R_append(W)
                     W = []
-                    hangingSpace = False
                     n = 0
-                f._fkind = _FK_IMG
-                R.append([w,(f,'')])
-                hangingStrip = False
-            else:
-                f._fkind = _FK_APPEND
-                if not W and R and isinstance(R[-1],_HSWord):
-                    R[-1].append((f,''))
-                else:
-                    W.append((f,''))
-        elif hasattr(f, 'lineBreak'):
-            #pass the frag through.  The line breaker will scan for it.
-            if W:
-                W.insert(0,n)
-                R.append(W)
-                W = []
-                n = 0
-                hangingSpace = False
-            f._fkind = _FK_BREAK
-            R.append([0,(f,'')])
-            hangingStrip = True
+                    hangingSpace = False
+                f._fkind = _FK_BREAK
+                R_append([0,(f,'')])
+                hangingStrip = True
 
-    if W:
-        W.insert(0,n)
-        R.append(W)
+        if W:
+            W.insert(0,n)
+            R_append(W)
 
     return R
 
@@ -545,12 +595,6 @@
         else:
             yield f, 0, s
 
-class _SplitList(list):
-    pass
-
-class _HSSplitList(_HSWord):
-    pass
-
 def _splitFragWord(w,maxWidth,maxWidths,lineno):
     '''given a frag word, w, as returned by getFragWords
     split it into frag words that fit in lines of length
@@ -559,7 +603,9 @@
     .....
     maxWidths[lineno+n]
 
-    return the new word list
+    return the new word list which is either
+    _SplitList....._SplitListLast or
+    _SplitList....._HSSplitList if the word is hanging space.
     '''
     R = []
     maxlineno = len(maxWidths)-1
@@ -582,15 +628,15 @@
                 lineno += 1
                 maxWidth = maxWidths[min(maxlineno,lineno)]
                 W = []
-                newLineWidth = wordWidth = cw
+                newLineWidth = cw
+                wordWidth = 0
             fragText = u''
             f = g
-            wordWidth = 0
         wordWidth += cw
         fragText += c
         lineWidth = newLineWidth
     W.append((f,fragText))
-    W = _HSSplitList([wordWidth]+W) if isinstance(w,_HSWord) else _SplitList([wordWidth]+W)
+    W = _HSSplitList([wordWidth]+W) if isinstance(w,_HSWord) else _SplitListLast([wordWidth]+W)
 
     R.append(W)
     return R
@@ -983,7 +1029,7 @@
         w = u.width
         if hasattr(w,'normalizedValue'):
             w._normalizer = maxWidth
-            w = w.normalizedValue(None)
+            w = w.normalizedValue(maxWidth)
         widthUsed += w
         lineBreak = hasattr(u.frag,'lineBreak')
         endLine = (widthUsed>maxWidth + _FUZZ and widthUsed>0) or lineBreak
@@ -1134,7 +1180,7 @@
 
         #AR hack
         self.text = text
-        self.frags = frags
+        self.frags = frags  #either the parse fragments or frag word list
         self.style = style
         self.bulletText = bulletText
         self.debug = 0  #turn this on to see a pretty one with all the margins etc.
@@ -1182,7 +1228,7 @@
         frags = self.frags
         nFrags= len(frags)
         if not nFrags: return 0
-        if nFrags==1:
+        if nFrags==1 and not _processed_frags(frags):
             f = frags[0]
             fS = f.fontSize
             fN = f.fontName
@@ -1190,8 +1236,17 @@
         else:
             return max(w[0] for w in _getFragWords(frags))
 
+    def _split_blParaProcessed(self,blPara,start,stop):
+        if not stop: return []
+        lines = blPara.lines
+        sFW = lines[start].sFW
+        sFWN = lines[stop].sFW if stop!=len(lines) else len(self.frags)
+        return self.frags[sFW:sFWN]
+
     def _get_split_blParaFunc(self):
-        return self.blPara.kind==0 and _split_blParaSimple or _split_blParaHard
+        return (_split_blParaSimple if self.blPara.kind==0 
+                    else (_split_blParaHard if not _processed_frags(self.frags)
+                        else self._split_blParaProcessed))
 
     def split(self,availWidth, availHeight):
         if len(self.frags)<=0: return []
@@ -1229,7 +1284,7 @@
                 l = max(leading,1.2*style.fontSize)
             elif autoLeading=='min':
                 l = 1.2*style.fontSize
-            s = int(availHeight/(l*1.0))
+            s = int(availHeight/float(l))
             height = s*l
 
         allowOrphans = getattr(self,'allowOrphans',getattr(style,'allowOrphans',0))
@@ -1303,6 +1358,7 @@
                             - words=word list
                                 each word is itself a fragment with
                                 various settings
+            in addition frags becomes a frag word list
 
         This structure can be used to easily draw paragraphs with the various alignments.
         You can supply either a single width or a list of widths; the latter will have its
@@ -1330,7 +1386,9 @@
         calcBounds = autoLeading not in ('','off')
         frags = self.frags
         nFrags= len(frags)
-        if nFrags==1 and not (style.endDots or hasattr(frags[0],'cbDefn') or hasattr(frags[0],'backColor')):
+        if (nFrags==1 
+                and not (style.endDots or hasattr(frags[0],'cbDefn') or hasattr(frags[0],'backColor')
+                            or _processed_frags(frags))):
             f = frags[0]
             fontSize = f.fontSize
             fontName = f.fontName
@@ -1386,15 +1444,15 @@
                             textColor=style.textColor, ascent=style.fontSize,descent=-0.2*style.fontSize,
                             lines=[])
         else:
-            if hasattr(self,'blPara') and getattr(self,'_splitpara',0):
-                #NB this is an utter hack that awaits the proper information
-                #preserving splitting algorithm
-                return self.blPara
             njlbv = not style.justifyBreaks
             words = []
+            FW = []
+            aFW = FW.append
             _words = _getFragWords(frags,maxWidth)
+            sFW = 0
             while _words:
                 w = _words.pop(0)
+                aFW(w)
                 f = w[-1][0]
                 fontName = f.fontName
                 fontSize = f.fontSize
@@ -1419,6 +1477,7 @@
                     if wordWidth>max(maxWidths[nmw:nmw+1]):
                         #a long word
                         _words[0:0] = _splitFragWord(w,maxWidth-spaceWidth-currentWidth,maxWidths,lineno)
+                        FW.pop(-1)  #remove this as we are doing this one again
                         self._splitLongWordCount += 1
                         continue
                 endLine = (newWidth>(maxWidth+space*spaceShrinkage) and n>0) or lineBreak
@@ -1497,7 +1556,9 @@
                     if currentWidth>self._width_max: self._width_max = currentWidth
                     #end of line
                     lines.append(FragLine(extraSpace=maxWidth-currentWidth, wordCount=n,
-                                        lineBreak=lineBreak and njlbv, words=words, fontSize=maxSize, ascent=maxAscent, descent=minDescent, maxWidth=maxWidth))
+                                        lineBreak=lineBreak and njlbv, words=words, fontSize=maxSize, ascent=maxAscent, descent=minDescent, maxWidth=maxWidth,
+                                        sFW=sFW))
+                    sFW = len(FW)-1
 
                     #start new line
                     lineno += 1
@@ -1540,14 +1601,13 @@
                         minDescent = min(minDescent,descent)
 
             #deal with any leftovers on the final line
-            if words!=[]:
+            if words:
                 if currentWidth>self._width_max: self._width_max = currentWidth
                 lines.append(ParaLines(extraSpace=(maxWidth - currentWidth),wordCount=n,lineBreak=False,
-                                    words=words, fontSize=maxSize,ascent=maxAscent,descent=minDescent,maxWidth=maxWidth))
+                                    words=words, fontSize=maxSize,ascent=maxAscent,descent=minDescent,maxWidth=maxWidth,sFW=sFW))
+            self.frags = FW
             return ParaLines(kind=1, lines=lines)
 
-        return lines
-
     def breakLinesCJK(self, maxWidths):
         """Initially, the dumbest possible wrapping algorithm.
         Cannot handle font variations."""
@@ -1805,6 +1865,7 @@
                 xs.f = f
                 xs.style = style
                 xs.autoLeading = autoLeading
+                xs.paraWidth = self.width
 
                 tx._fontname,tx._fontsize = None, None
                 line = lines[0]
@@ -1832,9 +1893,17 @@
         frags = getattr(self,'frags',None)
         if frags:
             plains = []
-            for frag in frags:
-                if hasattr(frag, 'text'):
-                    plains.append(frag.text)
+            plains_append = plains.append
+            if _processed_frags(frags):
+                for word in frags:
+                    for style,text in word[1:]:
+                        plains_append(text)
+                    if isinstance(word,_HSWord):
+                        plains_append(' ')
+            else:
+                for frag in frags:
+                    if hasattr(frag, 'text'):
+                        plains_append(frag.text)
             return ''.join(plains)
         elif identify:
             text = getattr(self,'text',None)
@@ -2005,7 +2074,10 @@
         dumpParagraphLines(P)
         S = P.split(6*72,h/2.0)
         print(len(S))
+        dumpParagraphFrags(S[0])
         dumpParagraphLines(S[0])
+        S[1].wrap(6*72, 9.7*72)
+        dumpParagraphFrags(S[1])
         dumpParagraphLines(S[1])
 
 
--- a/src/reportlab/platypus/paraparser.py	Thu Feb 02 15:53:11 2017 +0000
+++ b/src/reportlab/platypus/paraparser.py	Tue Feb 07 11:19:18 2017 +0000
@@ -91,13 +91,20 @@
     else:
         return _num(s,unit,allowRelative)
 
-class _PCT:
-    def __init__(self,v):
-        self._value = v*0.01
+class _PCT(float):
+    def __new__(cls,v):
+        self = float.__new__(cls,v*0.01)
+        self._normalizer = 1.0
+        self._value = v
+        return self
 
     def normalizedValue(self,normalizer):
-        normalizer = normalizer or getattr(self,'_normalizer')
-        return normalizer*self._value
+        if not normalizer:
+            normalizer = self._normalizer
+        r = _PCT(normalizer*self._value)
+        r._value = self._value
+        r._normalizer = normalizer
+        return r
 
 def fontSizeNormalize(frag,attr,default):
     if not hasattr(frag,attr): return default
--- a/tests/test_platypus_paragraphs.py	Thu Feb 02 15:53:11 2017 +0000
+++ b/tests/test_platypus_paragraphs.py	Tue Feb 07 11:19:18 2017 +0000
@@ -699,6 +699,105 @@
         doc = MyDocTemplate(outputfile('test_platypus_paragraphs_AutoNextPageTemplate.pdf'))
         doc.build(story)
 
+    def testParaBrFlowing(self):
+        from reportlab.platypus import BaseDocTemplate, PageTemplate, Frame, PageBegin
+        from reportlab.lib.units import inch
+        class MyDocTemplate(BaseDocTemplate):
+            _invalidInitArgs = ('pageTemplates',)
+
+            def __init__(self, filename, **kw):
+                self.allowSplitting = 0
+                BaseDocTemplate.__init__(self, filename, **kw)
+                self.addPageTemplates(
+                        [
+                        PageTemplate('normal',
+                                [
+                                Frame(inch, 4.845*inch, 3*inch, 3.645*inch, id='first',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red")),
+                                Frame(4.27*inch, 4.845*inch, 3*inch, 3.645*inch, id='second',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red")),
+                                Frame(inch, inch, 3*inch, 3.645*inch, id='third',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red")),
+                                Frame(4.27*inch, inch, 3*inch, 3.645*inch, id='fourth',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red"))
+                                ],
+                                ),
+                        ])
+        styleSheet = getSampleStyleSheet()
+        normal = ParagraphStyle(name='normal',fontName='Helvetica',fontSize=10,leading=12,parent=styleSheet['Normal'])
+        bold = ParagraphStyle(name='bold',fontName='Helvetica-Bold',fontSize=12,leading=14.4,parent=normal)
+        brText="""
+Clearly, the natural general principle that will subsume this case is
+not subject to a parasitic gap construction.  Presumably, most of the
+methodological work in modern linguistics can be defined in such a way
+as to impose the system of base rules exclusive of the lexicon.  In the
+discussion of resumptive pronouns following (81), the fundamental error
+of regarding functional notions as categorial is to be regarded as a
+descriptive <span color="red">fact</span>.<br/>So far, the earlier discussion of deviance is not
+quite equivalent to a parasitic gap construction.  To characterize a
+linguistic level L, a case of semigrammaticalness of a different sort
+may remedy and, at the same time, eliminate irrelevant intervening
+contexts in selectional <span color="red">rules</span>.<br/>
+Summarizing, then, we assume that the descriptive power of the base
+component can be defined in such a way as to impose nondistinctness in
+the sense of distinctive feature theory.  A lot of sophistication has
+been developed about the utilization of machines for complex purposes,
+the notion of level of grammaticalness delimits an abstract underlying
+<span color="red">order</span>.<br/>To provide a constituent structure for T(Z,K), a subset of
+English sentences interesting on quite independent grounds appears to
+correlate rather closely with problems of phonemic and morphological
+analysis.  For one thing, this analysis of a formative as a pair of sets
+of features is rather different from a general convention regarding the
+forms of the grammar.  A lot of sophistication has been developed about
+the utilization of machines for complex purposes, a case of
+semigrammaticalness of a different sort is not to be considered in
+determining an important distinction in language <span color="red">use</span>.<br/>
+We will bring evidence in favor of the following thesis:  a subset of
+English sentences interesting on quite independent grounds delimits a
+descriptive <span color="red">fact</span>.<br/>To characterize a linguistic level L, the notion of
+level of grammaticalness is not to be considered in determining a
+parasitic gap construction.  It must be emphasized, once again, that the
+speaker-hearer's linguistic intuition can be defined in such a way as to
+impose a stipulation to place the constructions into these various
+categories.  On our assumptions, the appearance of parasitic gaps in
+domains relatively inaccessible to ordinary extraction raises serious
+doubts about problems of phonemic and morphological analysis.  For one
+thing, the fundamental error of regarding functional notions as
+categorial is not quite equivalent to a stipulation to place the
+constructions into these various <span color="red">categories</span>.<br/>
+Thus the descriptive power of the base component is unspecified with
+respect to the strong generative capacity of the theory.  Presumably,
+the theory of syntactic features developed earlier appears to correlate
+rather closely with a corpus of utterance tokens upon which conformity
+has been defined by the paired utterance test.  To provide a constituent
+structure for T(Z,K), a case of semigrammaticalness of a different sort
+is not to be considered in determining the ultimate standard that
+determines the accuracy of any proposed grammar.  For any transformation
+which is sufficiently diversified in application to be of any interest,
+a subset of English sentences interesting on quite independent grounds
+raises serious doubts about the requirement that branching is not
+tolerated within the dominance scope of a complex symbol.  We will bring
+evidence in favor of the following thesis:  an important property of
+these three types of EC is not to be considered in determining the
+system of base rules exclusive of the <span color="red">lexicon</span>.<br/>
+With this clarification, the descriptive power of the base component is
+not subject to the requirement that branching is not tolerated within
+the dominance scope of a complex <span color="red">symbol</span>.<br/>In the discussion of
+resumptive pronouns following (81), this selectionally introduced
+contextual feature does not readily tolerate a parasitic gap
+construction.  Another superficial similarity is the interest in
+simulation of behavior, a descriptively adequate grammar does not affect
+the structure of a corpus of utterance tokens upon which conformity has
+been defined by the paired utterance <span color="red">test</span>.<br/>From C1, it follows that the
+speaker-hearer's linguistic intuition is not to be considered in
+determining the traditional practice of grammarians.  Let us continue to
+suppose that the notion of level of grammaticalness is necessary to
+impose an interpretation on the system of base rules exclusive of the
+<span color="red">lexicon</span>.<br/>
+"""
+        story =[]
+        a = story.append
+        a(Paragraph('Paragraph Flowing', bold))
+        a(Paragraph(brText, normal))
+        doc = MyDocTemplate(outputfile('test_platypus_paragraphs_para_br_flowing.pdf'))
+        doc.build(story)
+
 #noruntests
 def makeSuite():
     return makeSuiteForClasses(ParagraphCorners,SplitFrameParagraphTest,FragmentTestCase, ParagraphSplitTestCase, ULTestCase, JustifyTestCase,