Mirror of https://github.com/byteworksinc/ORCA-C.git (synced 2025-08-15 07:27:27 +00:00)
Properly stringize tokens that start with a trigraph.
This did not work correctly before, because such tokens were recorded as
starting with the third character of the trigraph.

Here is an example affected by this:

#define mkstr(a) # a

#include <stdio.h>

int main(void) {
        puts(mkstr(??!));
        puts(mkstr(??!??!));
        puts(mkstr('??<'));
        puts(mkstr(+??!));
        puts(mkstr(+??'));
}
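Each ??-sequence in the example is a trigraph, a three-character spelling of a single punctuation character (??! is |, ??< is {, ??' is ^). Because the token's recorded start used to fall on the trigraph's third character, stringizing it dropped the first two characters of its spelling. The sketch below is purely illustrative (it is not ORCA/C's code, which is the Pascal and assembly in this commit); the stringize helper and its names are invented for the example:

#include <stdio.h>
#include <string.h>

/* Copy the source characters in [start, end) into out, the way a      */
/* stringizer that works from recorded token positions might do it.    */
static void stringize(const char *start, const char *end,
                      char *out, size_t outsize) {
    size_t len = (size_t)(end - start);
    if (len >= outsize)
        len = outsize - 1;
    memcpy(out, start, len);
    out[len] = '\0';
}

int main(void) {
    const char src[] = "\?\?!";    /* the three source characters of ??!       */
                                   /* (\? avoids writing a trigraph here)      */
    char buf[8];

    stringize(src, src + 3, buf, sizeof buf);      /* start at the first '?'   */
    puts(buf);                                     /* prints ??!               */

    stringize(src + 2, src + 3, buf, sizeof buf);  /* start at the third char  */
    puts(buf);                                     /* prints ! (the old bug)   */
    return 0;
}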

Scanner.asm (11 changed lines)
@@ -446,6 +446,7 @@ rkModifiers ds 2
 *
 * Outputs:
 *    ch - character read
+*    currentChPtr - pointer to ch in source file
 *
 ****************************************************************
 *
@@ -493,12 +494,15 @@ pf1 dey
 pf2      sty   lastWasReturn
 ! 1:
 lab1     anop
+! currentChPtr := chPtr;
 ! if chPtr = eofPtr then begin {flag end of file if we're there}
          lda   chPtr
+         sta   currentChPtr
+         ldx   chPtr+2
+         stx   currentChPtr+2
          cmp   eofPtr
          bne   la1
-         lda   chPtr+2
-         cmp   eofPtr+2
+         cpx   eofPtr+2
          beq   la2
 la1      brl   lb5
 la2      anop
@@ -621,7 +625,8 @@ lb4 lda [p1],Y
 ! else begin
 lb5      anop
 ! ch := chr(chPtr^); {fetch the character}
-         move4 chPtr,p1
+         sta   p1
+         stx   p1+2
          lda   [p1]
          and   #$00FF
          sta   ch
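Taken together, the Scanner.asm changes make NextCh record currentChPtr, the address of the character it is about to return, before the end-of-file test, and they load both words of chPtr into A and X. A minimal C model of the new order of operations, with invented names (the real routine is the 65816 code above):

#include <stdio.h>

/* Sketch only: ch_ptr, eof_ptr, current_ch_ptr and ch stand in for the  */
/* assembly's chPtr, eofPtr, currentChPtr and ch; line tracking and the  */
/* real end-of-file handling are omitted.                                */
static const char *ch_ptr, *eof_ptr, *current_ch_ptr;
static char ch;

static void next_ch(void) {
    current_ch_ptr = ch_ptr;     /* currentChPtr := chPtr  (the new step)       */
    if (ch_ptr == eof_ptr) {     /* if chPtr = eofPtr then ...                  */
        ch = '\0';               /* stand-in for flagging end of file           */
        return;
    }
    ch = *ch_ptr++;              /* else ch := chr(chPtr^), then advance        */
}

int main(void) {
    const char src[] = "int x;";
    ch_ptr = src;
    eof_ptr = src + sizeof src - 1;
    next_ch();
    printf("ch = '%c', read from offset %d\n", ch, (int)(current_ch_ptr - src));
    return 0;
}

The replacement of move4 chPtr,p1 with sta p1 / stx p1+2 in the last hunk relies on A and X still holding the two words of chPtr that were just loaded for the end-of-file comparison.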

Scanner.pas (10 changed lines)
@@ -144,6 +144,7 @@ procedure NextCh; extern;
 { }
 { Globals: }
 {    ch - character read }
+{    currentChPtr - pointer to ch in source file }
 
 
 procedure NextToken;
@@ -240,6 +241,7 @@ type
 
 var
    charStrPrefix: charStrPrefixEnum; {prefix of character/string literal}
+   currentChPtr: ptr; {pointer to current character in source file}
    customDefaultName: stringPtr; {name of custom pre-included default file}
    dateStr: longStringPtr; {macro date string}
    doingCommandLine: boolean; {are we processing the cc= command line?}
@@ -2269,6 +2271,7 @@ if gotName then begin {read the file name from the line}
    changedSourceFile := true;
    ReadFile; {read the file}
    chPtr := bofPtr; {set the start, end pointers}
+   currentChPtr := bofPtr;
    eofPtr := pointer(ord4(bofPtr)+ffDCBGS.fileLength);
    firstPtr := chPtr; {first char in line}
    ch := chr(RETURN); {set the initial character}
@@ -4188,6 +4191,7 @@ expandMacros := true; {enable macro expansion}
 reportEOL := false; {report eolsy as a token?}
 lineNumber := 1; {start the line counter}
 chPtr := start; {set the start, end pointers}
+currentChPtr := start;
 eofPtr := endPtr;
 firstPtr := start; {first char in line}
 numErr := 0; {no errors so far}
@@ -4942,8 +4946,8 @@ while charKinds[ord(ch)] in [illegal,ch_white,ch_eol] do begin
       end;
    end; {while}
 tokenLine := lineNumber; {record the position of the token}
-tokenColumn := ord(ord4(chPtr)-ord4(firstPtr));
-tokenStart := pointer(ord4(chPtr)-1);
+tokenColumn := ord(ord4(currentChPtr)-ord4(firstPtr)+1);
+tokenStart := currentChPtr;
 6:
 token.class := reservedSymbol; {default to the most common class}
 case charKinds[ord(ch)] of
@@ -5366,7 +5370,7 @@ case charKinds[ord(ch)] of
 
    otherwise: Error(57);
    end; {case}
-tokenEnd := pointer(ord4(chPtr)-1); {record the end of the token}
+tokenEnd := currentChPtr; {record the end of the token}
 2:
 if skipping then {conditional compilation branch}
    if not (token.kind in [eofsy,eolsy]) then
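The Scanner.pas side declares currentChPtr, initializes it alongside chPtr, and uses it for tokenStart, tokenEnd, and tokenColumn in place of expressions derived from chPtr. The difference matters exactly when the character just read was spelled as a trigraph: chPtr has then advanced past all three source characters, so pointer(ord4(chPtr)-1) lands on the trigraph's third character, while currentChPtr still points at the first '?'. A small illustrative model (not ORCA/C's code; the names and the placement of trigraph handling inside next_ch are assumptions made for the sketch):

#include <stdio.h>

static const char *ch_ptr;          /* next character to be read             */
static const char *current_ch_ptr;  /* where the character just read began   */

static char trigraph(char third) {  /* the nine trigraph replacements        */
    switch (third) {
    case '=':  return '#';   case '(':  return '[';   case '/':  return '\\';
    case ')':  return ']';   case '\'': return '^';   case '<':  return '{';
    case '!':  return '|';   case '>':  return '}';   case '-':  return '~';
    default:   return 0;
    }
}

static char next_ch(void) {
    char c;
    current_ch_ptr = ch_ptr;        /* record where this character's spelling starts */
    c = *ch_ptr++;
    if (c == '?' && ch_ptr[0] == '?') {
        char t = trigraph(ch_ptr[1]);
        if (t != 0) {               /* a trigraph: consume its two remaining chars   */
            ch_ptr += 2;
            c = t;
        }
    }
    return c;
}

int main(void) {
    ch_ptr = "\?\?!=x";             /* the |= operator spelled with a trigraph      */
    char first = next_ch();
    printf("character read: %c\n", first);
    printf("token text from currentChPtr: %s\n", current_ch_ptr);      /* ??!=x */
    printf("token text from chPtr-1 (old scheme): %s\n", ch_ptr - 1);  /* !=x   */
    return 0;
}

Run, the sketch reads the character '|' and shows that the text at current_ch_ptr begins with the full ??! spelling, while the old chPtr-1 scheme would begin at the '!'.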

cc.notes (2 changed lines)
@@ -1784,7 +1784,7 @@ int foo(int[42]);
 
 182. #pragma path directives were not saved in .sym files. This could cause ORCA/C not to search the proper paths for include files that were not represented in the .sym file (e.g. because they were included after a function).
 
-183. The # preprocessor operator would not work correctly on tokens that had been produced by the ## preprocessor operator, or on tokens that were split over two or more lines using line continuations.
+183. The # preprocessor operator would not work correctly on tokens that had been produced by the ## preprocessor operator, tokens that were split over two or more lines using line continuations, or tokens represented using trigraphs.
 
 -- Bugs from C 2.1.0 that have been fixed -----------------------------------
 
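Note 183 now lists three situations in which the # operator used to misbehave. A hypothetical example touching all three (only the trigraph case in the last puts call is what this commit changes; the extra xmkstr level exists so the pasted token is expanded before it is stringized):

#include <stdio.h>

#define mkstr(a)   # a
#define xmkstr(a)  mkstr(a)          /* expand the argument, then stringize it  */
#define paste(a,b) a ## b

int main(void) {
    puts(xmkstr(paste(foo, bar)));   /* a token produced by the ## operator     */
    puts(mkstr(spl\
it));                                /* a token split by a line continuation    */
    puts(mkstr(??!));                /* a token written with a trigraph         */
    return 0;
}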