
Added simple preprocessor to the KickC compiler (only supports #define without parameters). #169

This commit is contained in:
jespergravgaard 2020-04-05 14:32:07 +02:00
parent 375578e7df
commit 30e35bee41
13 changed files with 654 additions and 393 deletions

View File

@@ -1,269 +0,0 @@
package dk.camelot64.kickc.macros;
import dk.camelot64.kickc.model.CompileError;
import org.antlr.v4.runtime.*;
import java.util.*;
/**
* C Macro expander.
* <p>
* The macro expander takes one token source as input and produces a new expanded token source as output
*/
public class CMacroExpander {
/** The channel containing whitespace. */
private final int channelWhitespace;
/** The token type for tokens containing whitespace. */
private final int tokenWhitespace;
/** The token type for #define. */
private final int tokenDefine;
/** The token type for identifiers. */
private final int tokenIdentifier;
/** The token type for parenthesis begin. */
private final int tokenParBegin;
/** The token type for parenthesis end. */
private final int tokenParEnd;
/** The token type for comma. */
private final int tokenComma;
/** The token type for define multi-line. */
private final int tokenDefineMultiline;
public CMacroExpander(int channelWhitespace, int tokenWhitespace, int tokenDefine, int tokenIdentifier, int tokenParBegin, int tokenParEnd, int tokenComma, int tokenDefineMultiline) {
this.channelWhitespace = channelWhitespace;
this.tokenWhitespace = tokenWhitespace;
this.tokenDefine = tokenDefine;
this.tokenIdentifier = tokenIdentifier;
this.tokenParBegin = tokenParBegin;
this.tokenParEnd = tokenParEnd;
this.tokenComma = tokenComma;
this.tokenDefineMultiline = tokenDefineMultiline;
}
public TokenSource expandMacros(TokenSource inputTokenSource) {
List<Token> inputTokens = getTokenList(inputTokenSource);
final TokenIterator tokenIterator = new TokenIterator(inputTokens);
Map<String, List<Token>> macros = new LinkedHashMap<>();
final ArrayList<Token> expandedTokens = new ArrayList<>();
while(tokenIterator.hasNext()) {
Token inputToken = tokenIterator.next();
if(inputToken.getType() == tokenDefine) {
// #define a new macro - find the name
skipWhitespace(tokenIterator);
String macroName = getToken(tokenIterator, tokenIdentifier).getText();
// Examine whether the macro has parameters
skipWhitespace(tokenIterator);
if(tokenIterator.peek().getType() == tokenParBegin) {
// Macro has parameters - find parameter name list
throw new CompileError("Macros with parameters not supported!");
}
// Find body by gobbling tokens until the line ends
final ArrayList<Token> macroBody = new ArrayList<>();
boolean macroRead = true;
while(macroRead) {
final Token bodyToken = tokenIterator.next();
if(bodyToken.getType() == tokenDefineMultiline) {
// Skip the multi-line token, add a newline token and continue reading body on the next line
final CommonToken newlineToken = new CommonToken(bodyToken);
newlineToken.setType(tokenWhitespace);
newlineToken.setChannel(channelWhitespace);
newlineToken.setText("\n");
macroBody.add(newlineToken);
continue;
}
if(bodyToken.getChannel() == channelWhitespace && bodyToken.getText().contains("\n")) {
macroRead = false;
} else {
macroBody.add(bodyToken);
}
}
macros.put(macroName, macroBody);
} else {
if(inputToken.getType() == tokenIdentifier) {
final String macroName = inputToken.getText();
if(macros.containsKey(macroName)) {
// Check for macro recursion
if(inputToken instanceof ExpansionToken) {
if(((ExpansionToken) inputToken).getMacroNames().contains(macroName)) {
// Detected macro recursion in the expansion - add directly to output and move on!
expandedTokens.add(inputToken);
continue;
}
}
// Macro expansion is needed
final List<Token> macroBody = macros.get(macroName);
List<Token> expandedBody = new ArrayList<>();
for(Token bodyToken : macroBody) {
final CommonToken expandedToken = new CommonToken(inputToken);
expandedToken.setText(bodyToken.getText());
expandedToken.setType(bodyToken.getType());
expandedToken.setChannel(bodyToken.getChannel());
Set<String> macroNames = new HashSet<>();
if(inputToken instanceof ExpansionToken) {
// Transfer macro names to the new expansion
macroNames = ((ExpansionToken) inputToken).getMacroNames();
}
macroNames.add(macroName);
expandedBody.add(new ExpansionToken(expandedToken, macroNames));
}
tokenIterator.addFirst(expandedBody);
} else {
expandedTokens.add(inputToken);
}
} else {
expandedTokens.add(inputToken);
}
}
}
return new ListTokenSource(expandedTokens);
}
private Token getToken(TokenIterator tokenIterator, int tokenType) {
if(!tokenIterator.hasNext())
throw new CompileError("File ended unexpectedly. Was expecting token " + tokenType);
final Token token = tokenIterator.next();
if(token.getType() != tokenType)
throw new CompileError("Unexpected token. Was expecting " + tokenType);
return token;
}
/**
* Skip whitespace tokens, positioning the iterator at the next non-whitespace token
*
* @param tokenIterator The token iterator
*/
private void skipWhitespace(TokenIterator tokenIterator) {
while(tokenIterator.hasNext() && tokenIterator.peek().getChannel() == channelWhitespace)
tokenIterator.next();
}
private List<Token> getTokenList(TokenSource inputTokenSource) {
List<Token> inputTokens = new ArrayList<>();
Token inputToken;
do {
inputToken = inputTokenSource.nextToken();
inputTokens.add(inputToken);
} while(inputToken.getType() != Token.EOF);
return inputTokens;
}
/** A token iterator supporting peeking, backed by a deque of tokens.
* Macro expansion works by prepending a new list of tokens containing the body of the macro being expanded. */
static class TokenIterator implements Iterator<Token> {
Deque<Token> tokens;
public TokenIterator(Collection<Token> tokens) {
this.tokens = new LinkedList<>(tokens);
}
/**
* Get the next token without advancing the cursor.
*
* @return The next token. null if there are no more tokens.
*/
public Token peek() {
return tokens.getFirst();
}
@Override
public boolean hasNext() {
return !tokens.isEmpty();
}
@Override
public Token next() {
return tokens.removeFirst();
}
/**
* Add a bunch of tokens to the start of the iterator.
* This is called when a macro is expanded to add the macro body to the start of the input.
* @param tokens The tokens to add
*/
public void addFirst(List<Token> tokens) {
Collections.reverse(tokens);
for(Token token : tokens) {
this.tokens.addFirst(token);
}
}
}
/** A token that is the result of macro expansion.
* Keeps track of which macros were used for the expansion.
* */
public class ExpansionToken implements Token {
/** The underlying token. */
private Token subToken;
/** The names of all macros used for expanding this token. */
private Set<String> macroNames;
public ExpansionToken(Token subToken, Set<String> macroNames) {
this.subToken = subToken;
this.macroNames = macroNames;
}
public Set<String> getMacroNames() {
return macroNames;
}
@Override
public String getText() {
return subToken.getText();
}
@Override
public int getType() {
return subToken.getType();
}
@Override
public int getLine() {
return subToken.getLine();
}
@Override
public int getCharPositionInLine() {
return subToken.getCharPositionInLine();
}
@Override
public int getChannel() {
return subToken.getChannel();
}
@Override
public int getTokenIndex() {
return subToken.getTokenIndex();
}
@Override
public int getStartIndex() {
return subToken.getStartIndex();
}
@Override
public int getStopIndex() {
return subToken.getStopIndex();
}
@Override
public TokenSource getTokenSource() {
return subToken.getTokenSource();
}
@Override
public CharStream getInputStream() {
return subToken.getInputStream();
}
}
}

View File

@@ -1,6 +1,7 @@
package dk.camelot64.kickc.parser;
import dk.camelot64.kickc.SourceLoader;
+import dk.camelot64.kickc.preprocessor.CTokenSourcePreprocessor;
import dk.camelot64.kickc.model.CompileError;
import dk.camelot64.kickc.model.Program;
import org.antlr.v4.runtime.*;
@@ -38,7 +39,7 @@ public class CParser {
private final CommonTokenStream tokenStream;
/** The token source stack handling import files. */
-private CTokenSourceStack cFileTokenStack;
+private CTokenSource cTokenSource;
/** The input files that have been parsed. Maps file name to the lexer. */
private Map<String, CFile> cFiles;
@@ -62,8 +63,9 @@ public class CParser {
public CParser(Program program) {
this.program = program;
this.cFiles = new LinkedHashMap<>();
-this.cFileTokenStack = new CTokenSourceStack();
-this.tokenStream = new CommonTokenStream(cFileTokenStack);
+this.cTokenSource = new CTokenSource();
+final CTokenSourcePreprocessor preprocessor = new CTokenSourcePreprocessor(cTokenSource, CHANNEL_WHITESPACE, KickCLexer.WS, KickCLexer.DEFINE, KickCLexer.NAME, KickCLexer.PAR_BEGIN, KickCLexer.PAR_END, KickCLexer.COMMA, KickCLexer.DEFINE_CONTINUE);
+this.tokenStream = new CommonTokenStream(preprocessor);
this.parser = new KickCParser(tokenStream, this);
this.typedefs = new ArrayList<>();
parser.setBuildParseTree(true);
@@ -131,7 +133,7 @@ public class CParser {
* @return The path of the folder containing the source file currently being tokenized
*/
private Path getCurrentSourceFolderPath() {
-TokenSource currentSource = cFileTokenStack.getCurrentSource();
+TokenSource currentSource = cTokenSource.getCurrentSource();
String sourceName = currentSource.getSourceName();
CFile cFile = cFiles.get(sourceName);
File file = cFile.file;
@@ -190,7 +192,7 @@ public class CParser {
});
CFile cFile = new CFile(file, lexer);
cFiles.put(file.getAbsolutePath(), cFile);
-cFileTokenStack.pushSource(lexer);
+cTokenSource.addSource(lexer);
} catch(IOException e) {
throw new CompileError("Error parsing file " + fileName, e);
}

View File

@@ -0,0 +1,99 @@
package dk.camelot64.kickc.parser;
import org.antlr.v4.runtime.*;
import java.util.ArrayList;
import java.util.Deque;
import java.util.LinkedList;
/**
* An ANTLR4 Token Source that supports pushing sub-sources at the front of the stream.
* This can be used for importing files or for macro expansion.
*/
public class CTokenSource implements TokenSource {
/** Stack of underlying sources */
private Deque<TokenSource> subSources;
public CTokenSource() {
this.subSources = new LinkedList<>();
}
public CTokenSource(TokenSource tokenSource) {
this.subSources = new LinkedList<>();
addSource(tokenSource);
}
/**
* Pushes a token source at the current location.
* The pushed source will immediately be used for tokens, and only when it is exhausted will tokens resume from the current source.
*
* @param source The source to push
*/
public void addSource(TokenSource source) {
subSources.addFirst(source);
}
public TokenSource getCurrentSource() {
return subSources.peekFirst();
}
/**
* Peek the next token without removing it from the source.
*
* @return The next token of the source.
*/
public Token peekToken() {
// Get the next token
final Token token = nextToken();
// And push it back to the front of the stack
final ArrayList<Token> tokens = new ArrayList<>();
tokens.add(token);
addSource(new ListTokenSource(tokens));
return token;
}
@Override
public Token nextToken() {
TokenSource currentSource = getCurrentSource();
Token token = currentSource.nextToken();
if(token.getType() == Token.EOF && subSources.size() > 1) {
// We are at the end of the current sub-source and have more sub-sources to go through - move on to the next one!
subSources.pop();
return nextToken();
} else {
return token;
}
}
@Override
public int getLine() {
return getCurrentSource().getLine();
}
@Override
public int getCharPositionInLine() {
return getCurrentSource().getCharPositionInLine();
}
@Override
public CharStream getInputStream() {
return getCurrentSource().getInputStream();
}
@Override
public String getSourceName() {
return getCurrentSource().getSourceName();
}
@Override
public void setTokenFactory(TokenFactory<?> factory) {
throw new RuntimeException("Not implemented!!");
}
@Override
public TokenFactory<?> getTokenFactory() {
return getCurrentSource().getTokenFactory();
}
}
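A quick usage sketch of the push-front behaviour (hand-built tokens with an arbitrary token type; not part of the commit): a source pushed with addSource() is drained first, and only when it is exhausted does the original source resume. This is the mechanism used both for imported files and for macro bodies.
import dk.camelot64.kickc.parser.CTokenSource;
import org.antlr.v4.runtime.CommonToken;
import org.antlr.v4.runtime.ListTokenSource;
import org.antlr.v4.runtime.Token;
import java.util.Arrays;
class CTokenSourceDemo {
   public static void main(String[] args) {
      // Outer source producing A, B (ListTokenSource fabricates the trailing EOF)
      CTokenSource source = new CTokenSource(new ListTokenSource(Arrays.asList(
            new CommonToken(1, "A"), new CommonToken(1, "B"))));
      System.out.println(source.nextToken().getText()); // A
      // Push an inner source mid-stream, as done when a file is imported or a macro is expanded
      source.addSource(new ListTokenSource(Arrays.asList(
            new CommonToken(1, "X"), new CommonToken(1, "Y"))));
      Token token;
      while((token = source.nextToken()).getType() != Token.EOF)
         System.out.println(token.getText()); // X, Y, then B
   }
}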

View File

@@ -1,113 +0,0 @@
package dk.camelot64.kickc.parser;
import org.antlr.v4.runtime.*;
import java.util.Stack;
/**
* An ANTLR4 Token Source that can keep track of multiple underlying source files.
*/
public class CTokenSourceStack implements TokenSource {
/** Stack of underlying sources */
private Stack<TokenSource> sourceStack;
public CTokenSourceStack() {
this.sourceStack = new Stack<>();
}
/**
* Pushes a token source at the current location.
* The pushed source will immediately be used for tokens and only when it is exhausted will tokens resume from the current source
* @param source The source to push
*/
public void pushSource(TokenSource source) {
sourceStack.push(source);
}
public TokenSource getCurrentSource() {
if(sourceStack.size()>0)
return sourceStack.peek();
else
return new TokenSource() {
@Override
public Token nextToken() {
return null;
}
@Override
public int getLine() {
return 0;
}
@Override
public int getCharPositionInLine() {
return 0;
}
@Override
public CharStream getInputStream() {
return null;
}
@Override
public String getSourceName() {
return "";
}
@Override
public void setTokenFactory(TokenFactory<?> factory) {
}
@Override
public TokenFactory<?> getTokenFactory() {
return null;
}
};
}
@Override
public Token nextToken() {
TokenSource currentSource = getCurrentSource();
Token token = currentSource.nextToken();
if(token.getType()==Token.EOF) {
// Last token of the current source - pop the stack!
sourceStack.pop();
if(!sourceStack.isEmpty()) {
// Recurse to find next token
return nextToken();
}
}
return token;
}
@Override
public int getLine() {
return getCurrentSource().getLine();
}
@Override
public int getCharPositionInLine() {
return getCurrentSource().getCharPositionInLine();
}
@Override
public CharStream getInputStream() {
return getCurrentSource().getInputStream();
}
@Override
public String getSourceName() {
return getCurrentSource().getSourceName();
}
@Override
public void setTokenFactory(TokenFactory<?> factory) {
throw new RuntimeException("Not implemented!!");
}
@Override
public TokenFactory<?> getTokenFactory() {
return getCurrentSource().getTokenFactory();
}
}

View File

@@ -0,0 +1,278 @@
package dk.camelot64.kickc.preprocessor;
import dk.camelot64.kickc.model.CompileError;
import dk.camelot64.kickc.parser.CTokenSource;
import org.antlr.v4.runtime.*;
import java.util.*;
/**
* C preprocessor
* <p>
* The preprocessor takes a token source as input and produces macro expanded tokens as output
*/
public class CTokenSourcePreprocessor implements TokenSource {
/** The token source containing the input */
private CTokenSource input;
/**
* The #defined macros.
* Maps macro name to the tokens of the expansion
*/
private Map<String, List<Token>> defines;
/** The channel containing whitespace. */
private final int channelWhitespace;
/** The token type for tokens containing whitespace. */
private final int tokenWhitespace;
/** The token type for #define. */
private final int tokenDefine;
/** The token type for identifiers. */
private final int tokenIdentifier;
/** The token type for define multi-line. */
private final int tokenDefineMultiline;
/** The token type for parenthesis begin. */
private final int tokenParBegin;
/** The token type for parenthesis end. */
private final int tokenParEnd;
/** The token type for comma. */
private final int tokenComma;
public CTokenSourcePreprocessor(TokenSource input, int channelWhitespace, int tokenWhitespace, int tokenDefine, int tokenIdentifier, int tokenParBegin, int tokenParEnd, int tokenComma, int tokenDefineMultiline) {
if(input instanceof CTokenSource) {
// If possible use the input directly instead of wrapping it
this.input = (CTokenSource) input;
} else {
this.input = new CTokenSource(input);
}
this.defines = new LinkedHashMap<>();
this.channelWhitespace = channelWhitespace;
this.tokenWhitespace = tokenWhitespace;
this.tokenDefine = tokenDefine;
this.tokenIdentifier = tokenIdentifier;
this.tokenParBegin = tokenParBegin;
this.tokenParEnd = tokenParEnd;
this.tokenComma = tokenComma;
this.tokenDefineMultiline = tokenDefineMultiline;
}
@Override
public Token nextToken() {
Token token = input.nextToken();
// Perform preprocessing on tokens as long as it is needed
while(preprocess(token, input)) {
token = input.nextToken();
}
return token;
}
@Override
public int getLine() {
return input.getLine();
}
@Override
public int getCharPositionInLine() {
return input.getCharPositionInLine();
}
@Override
public CharStream getInputStream() {
return input.getInputStream();
}
@Override
public String getSourceName() {
return input.getSourceName();
}
@Override
public void setTokenFactory(TokenFactory<?> factory) {
input.setTokenFactory(factory);
}
@Override
public TokenFactory<?> getTokenFactory() {
return input.getTokenFactory();
}
/**
* Perform any preprocessing needed on a token. If no preprocessing is needed, nothing is done.
*
* This method may gobble more tokens from the source (for instance if a macro is being defined) and it may push tokens at the front of the source (if a macro is being expanded).
*
* @param inputToken The token to process
* @param cTokenSource The token source used for getting more tokens or for pushing macro expansions
* @return true if the input token was preprocessed (and should not be added to the output). False if the token was not a preprocessor token
*/
private boolean preprocess(Token inputToken, CTokenSource cTokenSource) {
boolean wasPreprocessed;
if(inputToken.getType() == tokenDefine) {
// #define a new macro - find the name
skipWhitespace(cTokenSource);
String macroName = nextToken(cTokenSource, tokenIdentifier).getText();
// Examine whether the macro has parameters
skipWhitespace(cTokenSource);
if(cTokenSource.peekToken().getType() == tokenParBegin) {
// Macro has parameters - find parameter name list
throw new CompileError("Macros with parameters not supported!");
}
// Find body by gobbling tokens until the line ends
final ArrayList<Token> macroBody = new ArrayList<>();
boolean macroRead = true;
while(macroRead) {
final Token bodyToken = cTokenSource.nextToken();
if(bodyToken.getType() == tokenDefineMultiline) {
// Skip the multi-line token, add a newline token and continue reading body on the next line
final CommonToken newlineToken = new CommonToken(bodyToken);
newlineToken.setType(tokenWhitespace);
newlineToken.setChannel(channelWhitespace);
newlineToken.setText("\n");
macroBody.add(newlineToken);
continue;
}
if(bodyToken.getChannel() == channelWhitespace && bodyToken.getText().contains("\n")) {
macroRead = false;
} else {
macroBody.add(bodyToken);
}
}
defines.put(macroName, macroBody);
return true;
} else {
if(inputToken.getType() == tokenIdentifier) {
final String macroName = inputToken.getText();
List<Token> macroBody = defines.get(macroName);
if(macroBody != null) {
// Check for macro recursion
if(inputToken instanceof ExpansionToken) {
if(((ExpansionToken) inputToken).getMacroNames().contains(macroName)) {
// Detected macro recursion in the expansion - add directly to output and do not perform expansion!
macroBody = null;
}
}
}
if(macroBody != null) {
// Macro expansion is needed
List<Token> expandedBody = new ArrayList<>();
for(Token bodyToken : macroBody) {
final CommonToken expandedToken = new CommonToken(inputToken);
expandedToken.setText(bodyToken.getText());
expandedToken.setType(bodyToken.getType());
expandedToken.setChannel(bodyToken.getChannel());
Set<String> macroNames = new HashSet<>();
if(inputToken instanceof ExpansionToken) {
// Transfer macro names to the new expansion
macroNames = ((ExpansionToken) inputToken).getMacroNames();
}
macroNames.add(macroName);
expandedBody.add(new ExpansionToken(expandedToken, macroNames));
}
cTokenSource.addSource(new ListTokenSource(expandedBody));
return true;
}
}
}
return false;
}
/**
* Pull the next token from a source and check that it matches the expected type. Any other type produces an error.
*
* @param cTokenSource The token source
* @param tokenType The type to expect
* @return The token
*/
private Token nextToken(CTokenSource cTokenSource, int tokenType) {
final Token token = cTokenSource.nextToken();
if(token.getType() != tokenType)
throw new CompileError("Unexpected token. Was expecting " + tokenType);
return token;
}
/**
* Skip whitespace tokens, positioning the source at the next non-whitespace token
*
* @param cTokenSource The token source
*/
private void skipWhitespace(CTokenSource cTokenSource) {
while(cTokenSource.peekToken().getChannel() == channelWhitespace)
cTokenSource.nextToken();
}
/**
* A token that is the result of macro expansion.
* Keeps track of which macros were used for the expansion to avoid macro recursion.
**/
public static class ExpansionToken implements Token {
/** The underlying token. */
private Token subToken;
/** The names of all macros used for expanding this token. */
private Set<String> macroNames;
ExpansionToken(Token subToken, Set<String> macroNames) {
this.subToken = subToken;
this.macroNames = macroNames;
}
Set<String> getMacroNames() {
return macroNames;
}
@Override
public String getText() {
return subToken.getText();
}
@Override
public int getType() {
return subToken.getType();
}
@Override
public int getLine() {
return subToken.getLine();
}
@Override
public int getCharPositionInLine() {
return subToken.getCharPositionInLine();
}
@Override
public int getChannel() {
return subToken.getChannel();
}
@Override
public int getTokenIndex() {
return subToken.getTokenIndex();
}
@Override
public int getStartIndex() {
return subToken.getStartIndex();
}
@Override
public int getStopIndex() {
return subToken.getStopIndex();
}
@Override
public TokenSource getTokenSource() {
return subToken.getTokenSource();
}
@Override
public CharStream getInputStream() {
return subToken.getInputStream();
}
}
}
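To see the whole flow, here is a minimal end-to-end sketch (hand-built tokens and hypothetical type constants; the compiler itself passes the KickCLexer constants, as the CParser change above shows). The stream for "#define A 'a'" followed by a use of A comes out with the macro body substituted; if the body referenced A itself, the ExpansionToken name set would stop the recursion after one round.
import dk.camelot64.kickc.preprocessor.CTokenSourcePreprocessor;
import org.antlr.v4.runtime.CommonToken;
import org.antlr.v4.runtime.ListTokenSource;
import org.antlr.v4.runtime.Token;
import java.util.Arrays;
class PreprocessorDemo {
   // Hypothetical token types standing in for the generated lexer constants
   static final int WS = 1, DEFINE = 2, NAME = 3, PAR_BEGIN = 4, PAR_END = 5, COMMA = 6, DEFINE_CONTINUE = 7, CHAR_LIT = 8;
   static Token tok(int type, String text, int channel) {
      CommonToken token = new CommonToken(type, text);
      token.setChannel(channel);
      return token;
   }
   public static void main(String[] args) {
      int ws = Token.HIDDEN_CHANNEL;
      // Token stream for: #define A 'a' <newline> A
      ListTokenSource input = new ListTokenSource(Arrays.asList(
            tok(DEFINE, "#define", 0), tok(WS, " ", ws),
            tok(NAME, "A", 0), tok(WS, " ", ws),
            tok(CHAR_LIT, "'a'", 0), tok(WS, "\n", ws),
            tok(NAME, "A", 0)));
      CTokenSourcePreprocessor preprocessor = new CTokenSourcePreprocessor(
            input, ws, WS, DEFINE, NAME, PAR_BEGIN, PAR_END, COMMA, DEFINE_CONTINUE);
      Token token;
      while((token = preprocessor.nextToken()).getType() != Token.EOF)
         System.out.println(token.getText()); // prints 'a' - the #define is consumed and A is expanded
   }
}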

View File

@@ -1,6 +1,6 @@
package dk.camelot64.kickc.parsing.macros;
-import dk.camelot64.kickc.macros.CMacroExpander;
+import dk.camelot64.kickc.preprocessor.CTokenSourcePreprocessor;
import dk.camelot64.kickc.model.CompileError;
import org.antlr.v4.runtime.*;
import org.junit.Test;
@@ -106,8 +106,7 @@ public class TestMacrosParser {
}
});
-final CMacroExpander cMacroExpander = new CMacroExpander(CHANNEL_WHITESPACE, MacrosLexer.WHITESPACE, MacrosLexer.DEFINE, MacrosLexer.IDENTIFIER, MacrosLexer.PAR_BEGIN, MacrosLexer.PAR_END, MacrosLexer.COMMA, MacrosLexer.DEFINE_CONTINUE);
-final TokenSource expandedTokenSource = cMacroExpander.expandMacros(lexer);
+final CTokenSourcePreprocessor expandedTokenSource = new CTokenSourcePreprocessor(lexer, CHANNEL_WHITESPACE, MacrosLexer.WHITESPACE, MacrosLexer.DEFINE, MacrosLexer.IDENTIFIER, MacrosLexer.PAR_BEGIN, MacrosLexer.PAR_END, MacrosLexer.COMMA, MacrosLexer.DEFINE_CONTINUE);
MacrosParser parser = new MacrosParser(new CommonTokenStream(expandedTokenSource));
parser.setBuildParseTree(true);
parser.addErrorListener(new BaseErrorListener() {

View File

@@ -37,6 +37,11 @@ public class TestPrograms {
public TestPrograms() {
}
+@Test
+public void testPreprocessor0() throws IOException, URISyntaxException {
+compileAndCompare("preprocessor-0");
+}
+
@Test
public void testMaCoalesceProblem() throws IOException, URISyntaxException {
compileAndCompare("ma_coalesce_problem");

View File

@@ -1,8 +1,7 @@
// Minimal range based for() loop
-byte* SCREEN1 = $0400;
-byte* SCREEN2 = $0500;
+char* SCREEN1 = 0x0400;
+char* SCREEN2 = 0x0500;
void main() {
for(byte i : 0..255) {

View File

@@ -0,0 +1,10 @@
// Test the preprocessor
// A simple #define
#define A 'a'
char * const SCREEN = 0x0400;
void main() {
*SCREEN = A;
}

View File

@@ -0,0 +1,13 @@
// Test the preprocessor
// A simple #define
.pc = $801 "Basic"
:BasicUpstart(main)
.pc = $80d "Program"
.label SCREEN = $400
main: {
// *SCREEN = A
lda #'a'
sta SCREEN
// }
rts
}

View File

@@ -0,0 +1,17 @@
@begin: scope:[] from
[0] phi()
to:@1
@1: scope:[] from @begin
[1] phi()
[2] call main
to:@end
@end: scope:[] from @1
[3] phi()
(void()) main()
main: scope:[main] from @1
[4] *((const nomodify byte*) SCREEN) ← (byte) 'a'
to:main::@return
main::@return: scope:[main] from main
[5] return
to:@return

View File

@@ -0,0 +1,214 @@
CONTROL FLOW GRAPH SSA
@begin: scope:[] from
to:@1
(void()) main()
main: scope:[main] from @1
*((const nomodify byte*) SCREEN) ← (byte) 'a'
to:main::@return
main::@return: scope:[main] from main
return
to:@return
@1: scope:[] from @begin
call main
to:@2
@2: scope:[] from @1
to:@end
@end: scope:[] from @2
SYMBOL TABLE SSA
(label) @1
(label) @2
(label) @begin
(label) @end
(const nomodify byte*) SCREEN = (byte*)(number) $400
(void()) main()
(label) main::@return
Simplifying constant pointer cast (byte*) 1024
Successful SSA optimization PassNCastSimplification
Adding NOP phi() at start of @begin
Adding NOP phi() at start of @1
Adding NOP phi() at start of @2
Adding NOP phi() at start of @end
CALL GRAPH
Calls in [] to main:2
Created 0 initial phi equivalence classes
Coalesced down to 0 phi equivalence classes
Culled Empty Block (label) @2
Adding NOP phi() at start of @begin
Adding NOP phi() at start of @1
Adding NOP phi() at start of @end
FINAL CONTROL FLOW GRAPH
@begin: scope:[] from
[0] phi()
to:@1
@1: scope:[] from @begin
[1] phi()
[2] call main
to:@end
@end: scope:[] from @1
[3] phi()
(void()) main()
main: scope:[main] from @1
[4] *((const nomodify byte*) SCREEN) ← (byte) 'a'
to:main::@return
main::@return: scope:[main] from main
[5] return
to:@return
VARIABLE REGISTER WEIGHTS
(void()) main()
Initial phi equivalence classes
Complete equivalence classes
INITIAL ASM
Target platform is c64basic / MOS6502X
// File Comments
// Test the preprocessor
// A simple #define
// Upstart
.pc = $801 "Basic"
:BasicUpstart(__bbegin)
.pc = $80d "Program"
// Global Constants & labels
.label SCREEN = $400
// @begin
__bbegin:
// [1] phi from @begin to @1 [phi:@begin->@1]
__b1_from___bbegin:
jmp __b1
// @1
__b1:
// [2] call main
jsr main
// [3] phi from @1 to @end [phi:@1->@end]
__bend_from___b1:
jmp __bend
// @end
__bend:
// main
main: {
// [4] *((const nomodify byte*) SCREEN) ← (byte) 'a' -- _deref_pbuc1=vbuc2
lda #'a'
sta SCREEN
jmp __breturn
// main::@return
__breturn:
// [5] return
rts
}
// File Data
REGISTER UPLIFT POTENTIAL REGISTERS
Statement [4] *((const nomodify byte*) SCREEN) ← (byte) 'a' [ ] ( main:2 [ ] { } ) always clobbers reg byte a
REGISTER UPLIFT SCOPES
Uplift Scope [main]
Uplift Scope []
Uplifting [main] best 27 combination
Uplifting [] best 27 combination
ASSEMBLER BEFORE OPTIMIZATION
// File Comments
// Test the preprocessor
// A simple #define
// Upstart
.pc = $801 "Basic"
:BasicUpstart(__bbegin)
.pc = $80d "Program"
// Global Constants & labels
.label SCREEN = $400
// @begin
__bbegin:
// [1] phi from @begin to @1 [phi:@begin->@1]
__b1_from___bbegin:
jmp __b1
// @1
__b1:
// [2] call main
jsr main
// [3] phi from @1 to @end [phi:@1->@end]
__bend_from___b1:
jmp __bend
// @end
__bend:
// main
main: {
// [4] *((const nomodify byte*) SCREEN) ← (byte) 'a' -- _deref_pbuc1=vbuc2
lda #'a'
sta SCREEN
jmp __breturn
// main::@return
__breturn:
// [5] return
rts
}
// File Data
ASSEMBLER OPTIMIZATIONS
Removing instruction jmp __b1
Removing instruction jmp __bend
Removing instruction jmp __breturn
Succesful ASM optimization Pass5NextJumpElimination
Removing instruction __b1_from___bbegin:
Removing instruction __b1:
Removing instruction __bend_from___b1:
Succesful ASM optimization Pass5RedundantLabelElimination
Removing instruction __bend:
Removing instruction __breturn:
Succesful ASM optimization Pass5UnusedLabelElimination
Updating BasicUpstart to call main directly
Removing instruction jsr main
Succesful ASM optimization Pass5SkipBegin
Removing instruction __bbegin:
Succesful ASM optimization Pass5UnusedLabelElimination
FINAL SYMBOL TABLE
(label) @1
(label) @begin
(label) @end
(const nomodify byte*) SCREEN = (byte*) 1024
(void()) main()
(label) main::@return
FINAL ASSEMBLER
Score: 12
// File Comments
// Test the preprocessor
// A simple #define
// Upstart
.pc = $801 "Basic"
:BasicUpstart(main)
.pc = $80d "Program"
// Global Constants & labels
.label SCREEN = $400
// @begin
// [1] phi from @begin to @1 [phi:@begin->@1]
// @1
// [2] call main
// [3] phi from @1 to @end [phi:@1->@end]
// @end
// main
main: {
// *SCREEN = A
// [4] *((const nomodify byte*) SCREEN) ← (byte) 'a' -- _deref_pbuc1=vbuc2
lda #'a'
sta SCREEN
// main::@return
// }
// [5] return
rts
}
// File Data

View File

@@ -0,0 +1,7 @@
(label) @1
(label) @begin
(label) @end
(const nomodify byte*) SCREEN = (byte*) 1024
(void()) main()
(label) main::@return