mirror of https://github.com/michaelcmartin/Ophis.git
synced 2024-10-12 15:23:38 +00:00

Major formatting fixes:
* No more tabs
* Fix copyright notices to point at right files and name the license right

This commit is contained in:
parent d5ec7bdacd
commit 1df8ad465d
@@ -1,17 +1,17 @@
"""Command line options data.

verbose:
    0: Only report errors
    1: Announce each file as it is read, and data count (default)
    2: As above, but also announce each pass.
    3: As above, but print the IR after each pass.
    4: As above, but print the labels after each pass.

6510 compatibility and deprecation are handled in Ophis.Main."""

-# Copyright 2002 Michael C. Martin.
-# You may use, modify, and distribute this file under the BSD
-# license: See LICENSE.txt for details.
+# Copyright 2002-2012 Michael C. Martin and additional contributors.
+# You may use, modify, and distribute this file under the MIT
+# license: See README for details.

verbose = 1;
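For context, other Ophis modules read this flag directly rather than through an accessor. A minimal sketch of the convention (Python 2, like the module itself; it mirrors the check Frontend.parse_file performs later in this diff):

    import Ophis.CmdLine as Cmd

    def announce(filename):
        # Verbosity 1 and up announces each file as it is read (see the
        # table above); level 0 reports errors only.
        if Cmd.verbose > 0:
            print "Loading " + filename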
@@ -1,13 +1,10 @@
"""Core pragmas

-Provides the core assembler directives. It does not guarantee
-compatibility with older versions of P65-Perl."""
+Provides the core assembler directives."""

-# Copyright 2002 Michael C. Martin.
-# You may use, modify, and distribute this file under the BSD
-# license: See LICENSE.txt for details.
-
-from __future__ import nested_scopes
+# Copyright 2002-2012 Michael C. Martin and additional contributors.
+# You may use, modify, and distribute this file under the MIT
+# license: See README for details.

import Ophis.IR as IR
import Ophis.Frontend as FE
@@ -18,193 +15,193 @@ basecharmap = "".join([chr(x) for x in range(256)])
currentcharmap = basecharmap

def reset():
    global loadedfiles, currentcharmap, basecharmap
    loadedfiles={}
    currentcharmap = basecharmap

def pragmaInclude(ppt, line, result):
    "Includes a source file"
    filename = line.expect("STRING").value
    line.expect("EOL")
    if type(filename)==str: result.append(FE.parse_file(ppt, filename))

def pragmaRequire(ppt, line, result):
    "Includes a source file at most one time"
    filename = line.expect("STRING").value
    line.expect("EOL")
    if type(filename)==str:
        global loadedfiles
        if filename not in loadedfiles:
            loadedfiles[filename]=1
            result.append(FE.parse_file(ppt, filename))

def pragmaIncbin(ppt, line, result):
    "Includes a binary file"
    filename = line.expect("STRING").value
    line.expect("EOL")
    if type(filename)==str:
        try:
            f = file(filename, "rb")
            bytes = f.read()
            f.close()
        except IOError:
            Err.log("Could not read "+filename)
            return
        bytes = [IR.ConstantExpr(ord(x)) for x in bytes]
        result.append(IR.Node(ppt, "Byte", *bytes))

def pragmaCharmap(ppt, line, result):
    "Modify the character map."
    global currentcharmap, basecharmap
    bytes = readData(line)
    if len(bytes) == 0:
        currentcharmap = basecharmap
    else:
        try:
            base = bytes[0].data
            newsubstr = "".join([chr(x.data) for x in bytes[1:]])
            currentcharmap = currentcharmap[:base] + newsubstr + currentcharmap[base+len(newsubstr):]
            if len(currentcharmap) != 256 or base < 0 or base > 255:
                Err.log("Charmap replacement out of range")
                currentcharmap = currentcharmap[:256]
        except ValueError:
            Err.log("Illegal character in .charmap directive")

def pragmaCharmapbin(ppt, line, result):
    "Load a new character map from a file"
    global currentcharmap
    filename = line.expect("STRING").value
    line.expect("EOL")
    if type(filename)==str:
        try:
            f = file(filename, "rb")
            bytes = f.read()
            f.close()
        except IOError:
            Err.log("Could not read "+filename)
            return
        if len(bytes)==256:
            currentcharmap = bytes
        else:
            Err.log("Character map "+filename+" not 256 bytes long")

def pragmaOrg(ppt, line, result):
    "Relocates the PC with no output"
    newPC = FE.parse_expr(line)
    line.expect("EOL")
    result.append(IR.Node(ppt, "SetPC", newPC))

def pragmaAdvance(ppt, line, result):
    "Outputs filler until reaching the target PC"
    newPC = FE.parse_expr(line)
    line.expect("EOL")
    result.append(IR.Node(ppt, "Advance", newPC))

def pragmaCheckpc(ppt, line, result):
    "Enforces that the PC has not exceeded a certain point"
    target = FE.parse_expr(line)
    line.expect("EOL")
    result.append(IR.Node(ppt, "CheckPC", target))

def pragmaAlias(ppt, line, result):
    "Assigns an arbitrary label"
    lbl = line.expect("LABEL").value
    target = FE.parse_expr(line)
    result.append(IR.Node(ppt, "Label", lbl, target))

def pragmaSpace(ppt, line, result):
    "Reserves space in a data segment for a variable"
    lbl = line.expect("LABEL").value
    size = line.expect("NUM").value
    line.expect("EOL")
    result.append(IR.Node(ppt, "Label", lbl, IR.PCExpr()))
    result.append(IR.Node(ppt, "SetPC", IR.SequenceExpr([IR.PCExpr(), "+", IR.ConstantExpr(size)])))

def pragmaText(ppt, line, result):
    "Switches to a text segment"
    next = line.expect("LABEL", "EOL")
    if next.type == "LABEL":
        line.expect("EOL")
        segment = next.value
    else:
        segment = "*text-default*"
    result.append(IR.Node(ppt, "TextSegment", segment))

def pragmaData(ppt, line, result):
    "Switches to a data segment (no output allowed)"
    next = line.expect("LABEL", "EOL")
    if next.type == "LABEL":
        line.expect("EOL")
        segment = next.value
    else:
        segment = "*data-default*"
    result.append(IR.Node(ppt, "DataSegment", segment))

def readData(line):
    "Read raw data from a comma-separated list"
    if line.lookahead(0).type == "STRING":
        data = [IR.ConstantExpr(ord(x)) for x in line.expect("STRING").value.translate(currentcharmap)]
    else:
        data = [FE.parse_expr(line)]
    next = line.expect(',', 'EOL').type
    while next == ',':
        if line.lookahead(0).type == "STRING":
            data.extend([IR.ConstantExpr(ord(x)) for x in line.expect("STRING").value])
        else:
            data.append(FE.parse_expr(line))
        next = line.expect(',', 'EOL').type
    return data

def pragmaByte(ppt, line, result):
    "Raw data, a byte at a time"
    bytes = readData(line)
    result.append(IR.Node(ppt, "Byte", *bytes))

def pragmaWord(ppt, line, result):
    "Raw data, a word at a time, little-endian"
    words = readData(line)
    result.append(IR.Node(ppt, "Word", *words))

def pragmaDword(ppt, line, result):
    "Raw data, a double-word at a time, little-endian"
    dwords = readData(line)
    result.append(IR.Node(ppt, "Dword", *dwords))

def pragmaWordbe(ppt, line, result):
    "Raw data, a word at a time, big-endian"
    words = readData(line)
    result.append(IR.Node(ppt, "WordBE", *words))

def pragmaDwordbe(ppt, line, result):
    "Raw data, a dword at a time, big-endian"
    dwords = readData(line)
    result.append(IR.Node(ppt, "DwordBE", *dwords))

def pragmaScope(ppt, line, result):
    "Create a new lexical scoping block"
    line.expect("EOL")
    result.append(IR.Node(ppt, "ScopeBegin"))

def pragmaScend(ppt, line, result):
    "End the innermost lexical scoping block"
    line.expect("EOL")
    result.append(IR.Node(ppt, "ScopeEnd"))

def pragmaMacro(ppt, line, result):
    "Begin a macro definition"
    lbl = line.expect("LABEL").value
    line.expect("EOL")
    result.append(IR.Node(ppt, "MacroBegin", lbl))

def pragmaMacend(ppt, line, result):
    "End a macro definition"
    line.expect("EOL")
    result.append(IR.Node(ppt, "MacroEnd"))

def pragmaInvoke(ppt, line, result):
    macro = line.expect("LABEL").value
    if line.lookahead(0).type == "EOL":
        args = []
    else:
        args = readData(line)
    result.append(IR.Node(ppt, "MacroInvoke", macro, *args))
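How a directive line reaches these handlers is defined by Frontend.parse_line, shown later in this diff: the directive name is title-cased, prefixed with "pragma", and looked up on each registered pragma module. A hedged sketch of that registration and lookup (the module name Ophis.CorePragmas is assumed from this file's docstring, not shown in the diff):

    import Ophis.Frontend as FE
    import Ophis.CorePragmas as CP   # module name assumed

    FE.pragma_modules.append(CP)
    # A source line such as ".org $C000" becomes the lookup below:
    pragmaFunction = "pragma" + "org".title()   # -> "pragmaOrg"
    assert hasattr(CP, pragmaFunction)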
@@ -1,75 +1,74 @@
-"""Symbol tables and environments for P65.
+"""Symbol tables and environments for Ophis.

Implements the symbol lookup, through nested environments -
any non-temporary variable is stored at the top level."""

-# Copyright 2002 Michael C. Martin.
-# You may use, modify, and distribute this file under the BSD
-# license: See LICENSE.txt for details.
+# Copyright 2002-2012 Michael C. Martin and additional contributors.
+# You may use, modify, and distribute this file under the MIT
+# license: See README for details.

-from __future__ import nested_scopes
import Ophis.Errors as Err

class Environment:
    """Environment class.
    Controls the various scopes and global abstract execution variables."""
    def __init__(self):
        self.dicts = [{}]
        self.stack = [0]
        self.pc = 0
        self.segmentdict = {}
        self.segment = "*text-default*"
        self.scopecount = 0
    def __contains__(self, item):
        if item[0] == '_':
            for dict in [self.dicts[i] for i in self.stack]:
                if item in dict: return 1
            return 0
        return item in self.dicts[0]
    def __getitem__(self, item):
        if item[0] == '_':
            for dict in [self.dicts[i] for i in self.stack]:
                if item in dict: return dict[item]
        else:
            if item in self.dicts[0]: return self.dicts[0][item]
        Err.log("Unknown label '%s'" % item)
        return 0
    def __setitem__(self, item, value):
        if item[0] == '_':
            self.dicts[self.stack[0]][item] = value
        else:
            self.dicts[0][item] = value
    def __str__(self):
        return str(self.dicts)
    def getPC(self):
        return self.pc
    def setPC(self, value):
        self.pc = value
    def incPC(self, amount):
        self.pc += amount
    def getsegment(self):
        return self.segment
    def setsegment(self, segment):
        self.segmentdict[self.segment] = self.pc
        self.segment = segment
        self.pc = self.segmentdict.get(segment, 0)
    def reset(self):
        "Clears out program counter, segment, and scoping information"
        self.pc = 0
        self.segmentdict = {}
        self.segment = "*text-default*"
        self.scopecount = 0
        if len(self.stack) > 1:
            Err.log("Unmatched .scope")
        self.stack = [0]
    def newscope(self):
        "Enters a new scope for temporary labels."
        self.scopecount += 1
        self.stack.insert(0, self.scopecount)
        if len(self.dicts) <= self.scopecount: self.dicts.append({})
    def endscope(self):
        "Leaves a scope."
        if len(self.stack) == 1:
            Err.log("Unmatched .scend")
        self.stack.pop(0)
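The underscore convention above is easiest to see end to end. A minimal sketch, assuming the Environment class is importable as shown (the diff gives only the class, not its module path):

    env = Environment()
    env["main"] = 0xC000     # no leading underscore: stored at top level
    env.newscope()
    env["_loop"] = 0xC003    # leading underscore: lives in the inner scope
    print "_loop" in env     # True while the scope is open
    env.endscope()
    print "_loop" in env     # False: the temporary label went out of scope
    print "main" in env      # True: globals survive scope changes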
@@ -1,24 +1,24 @@
"""Error logging

Keeps track of the number of errors inflicted so far, and
where in the assembly the errors are occurring."""

-# Copyright 2002 Michael C. Martin.
-# You may use, modify, and distribute this file under the BSD
-# license: See LICENSE.txt for details.
+# Copyright 2002-2012 Michael C. Martin and additional contributors.
+# You may use, modify, and distribute this file under the MIT
+# license: See README for details.

count = 0
currentpoint = "<Top Level>"

def log(err):
    """Reports an error at the current program point, and increases
    the global error count."""
    global count
    count = count+1
    print currentpoint+": "+err

def report():
    "Print out the number of errors."
    if count == 0: print "No errors"
    elif count == 1: print "1 error"
    else: print str(count)+" errors"
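A short usage sketch of the two entry points, grounded in the code above: log() prefixes the current program point and bumps the count, report() summarizes.

    import Ophis.Errors as Err

    Err.currentpoint = "demo.p65:12"     # normally set by the passes
    Err.log("Undefined label 'start'")   # prints: demo.p65:12: Undefined label 'start'
    Err.report()                         # prints: 1 error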
@@ -1,333 +1,331 @@
"""Lexer and Parser

Constructs a list of IR nodes from a list of input strings."""

-from __future__ import nested_scopes
import Ophis.Errors as Err
import Ophis.Opcodes as Ops
import Ophis.IR as IR
import Ophis.CmdLine as Cmd
import os

-# Copyright 2002 Michael C. Martin.
-# You may use, modify, and distribute this file under the BSD
-# license: See LICENSE.txt for details.
+# Copyright 2002-2012 Michael C. Martin and additional contributors.
+# You may use, modify, and distribute this file under the MIT
+# license: See README for details.

class Lexeme:
    "Class for lexer tokens. Used by lexer and parser."
    def __init__(self, type="UNKNOWN", value=None):
        self.type = type.upper()
        self.value = value
    def __str__(self):
        if self.value == None:
            return self.type
        else:
            return self.type+":"+str(self.value)
    def __repr__(self):
        return "Lexeme("+`self.type`+", "+`self.value`+")"
    def matches(self, other):
        "1 if Lexemes a and b have the same type."
        return self.type == other.type

bases = {"$":("hexadecimal", 16),
         "%":("binary", 2),
         "0":("octal", 8)}

punctuation = "#,`<>():.+-*/&|^[]"

def lex(point, line):
    """Turns a line of source into a sequence of lexemes."""
    Err.currentpoint = point
    result = []
    def is_opcode(op):
        "Tests whether a string is an opcode or an identifier"
        return op in Ops.opcodes
    def add_token(token):
        "Converts a substring into a single lexeme"
        if token == "":
            return
        if token == "0":
            result.append(Lexeme("NUM", 0))
            return
        firstchar = token[0]
        rest = token[1:]
        if firstchar == '"':
            result.append(Lexeme("STRING", rest))
            return
        elif firstchar in bases:
            try:
                result.append(Lexeme("NUM", long(rest, bases[firstchar][1])))
                return
            except ValueError:
                Err.log('Invalid '+bases[firstchar][0]+' constant: '+rest)
                result.append(Lexeme("NUM", 0))
                return
        elif firstchar.isdigit():
            try:
                result.append(Lexeme("NUM", long(token)))
            except ValueError:
                Err.log('Identifiers may not begin with a number')
                result.append(Lexeme("LABEL", "ERROR"))
            return
        elif firstchar == "'":
            if len(rest) == 1:
                result.append(Lexeme("NUM", ord(rest)))
            else:
                Err.log("Invalid character constant '"+rest+"'")
                result.append(Lexeme("NUM", 0))
            return
        elif firstchar in punctuation:
            if rest != "":
                Err.log("Internal lexer error! '"+token+"' can't happen!")
            result.append(Lexeme(firstchar))
            return
        else:   # Label, opcode, or index register
            id = token.lower()
            if is_opcode(id):
                result.append(Lexeme("OPCODE", id))
            elif id == "x":
                result.append(Lexeme("X"))
            elif id == "y":
                result.append(Lexeme("Y"))
            else:
                result.append(Lexeme("LABEL", id))
            return
        # should never reach here
        Err.log("Internal lexer error: add_token fall-through")
    def add_EOL():
        "Adds an end-of-line lexeme"
        result.append(Lexeme("EOL"))
    # Actual routine begins here
    value = ""
    quotemode = 0
    backslashmode = 0
    for c in line.strip():
        if backslashmode:
            backslashmode = 0
            value = value + c
        elif c == "\\":
            backslashmode = 1
        elif quotemode:
            if c == '"':
                quotemode = 0
            else:
                value = value + c
        elif c == ';':
            add_token(value)
            value = ""
            break
        elif c.isspace():
            add_token(value)
            value = ""
        elif c in punctuation:
            add_token(value)
            add_token(c)
            value = ""
        elif c == '"':
            add_token(value)
            value = '"'
            quotemode = 1
        else:
            value = value + c
    if backslashmode:
        Err.log("Backslashed newline")
    if quotemode:
        Err.log("Unterminated string constant")
    add_token(value)
    add_EOL()
    return result

class ParseLine:
    "Maintains the parse state of a line of code. Enables arbitrary lookahead."
    def __init__(self, lexemes):
        self.lexemes = lexemes
        self.location = 0
    def lookahead(self, i):
        """Returns the token i units ahead in the parse.
        lookahead(0) returns the next token; trying to read off the end of
        the sequence returns the last token in the sequence (usually EOL)."""
        target = self.location+i
        if target >= len(self.lexemes): target = -1
        return self.lexemes[target]
    def pop(self):
        "Returns and removes the next element in the line."
        old = self.location
        if self.location < len(self.lexemes)-1: self.location += 1
        return self.lexemes[old]
    def expect(self, *tokens):
        """Reads a token from the ParseLine line and returns it if it's of a type
        in the sequence tokens. Otherwise, it logs an error."""
        token = self.pop()
        if token.type not in tokens:
            Err.log('Expected: "'+'", "'.join(tokens)+'"')
        return token

pragma_modules = []

def parse_expr(line):
    "Parses an Ophis arithmetic expression."
    def atom():
        "Parses lowest-priority expression components."
        next = line.lookahead(0).type
        if next == "NUM":
            return IR.ConstantExpr(line.expect("NUM").value)
        elif next == "LABEL":
            return IR.LabelExpr(line.expect("LABEL").value)
        elif next == "^":
            line.expect("^")
            return IR.PCExpr()
        elif next == "[":
            line.expect("[")
            result = parse_expr(line)
            line.expect("]")
            return result
        elif next == "+":
            offset = 0
            while next == "+":
                offset += 1
                line.expect("+")
                next = line.lookahead(0).type
            return IR.LabelExpr("*"+str(templabelcount+offset))
        elif next == "-":
            offset = 1
            while next == "-":
                offset -= 1
                line.expect("-")
                next = line.lookahead(0).type
            return IR.LabelExpr("*"+str(templabelcount+offset))
        elif next == ">":
            line.expect(">")
            return IR.HighByteExpr(atom())
        elif next == "<":
            line.expect("<")
            return IR.LowByteExpr(atom())
        else:
            Err.log('Expected: expression')
    def precedence_read(constructor, reader, separators):
        """Handles precedence. The reader argument is a function that returns
        expressions that bind more tightly than these; separators is a list
        of strings naming the operators at this precedence level. The
        constructor argument is a class, indicating what node type holds
        objects of this precedence level.

        Returns a list of Expr objects with separator strings between them."""
        result = [reader()]  # first object
        nextop = line.lookahead(0).type
        while (nextop in separators):
            line.expect(nextop)
            result.append(nextop)
            result.append(reader())
            nextop = line.lookahead(0).type
        if len(result) == 1: return result[0]
        return constructor(result)
    def term():
        "Parses * and /"
        return precedence_read(IR.SequenceExpr, atom, ["*", "/"])
    def arith():
        "Parses + and -"
        return precedence_read(IR.SequenceExpr, term, ["+", "-"])
    def bits():
        "Parses &, |, and ^"
        return precedence_read(IR.SequenceExpr, arith, ["&", "|", "^"])
    return bits()

def parse_line(ppt, lexemelist):
    "Turn a line of source into an IR Node."
    Err.currentpoint = ppt
    result = []
    line = ParseLine(lexemelist)
    def aux():
        "Accumulates all IR nodes defined by this line."
        if line.lookahead(0).type == "EOL":
            pass
        elif line.lookahead(1).type == ":":
            newlabel=line.expect("LABEL").value
            line.expect(":")
            result.append(IR.Node(ppt, "Label", newlabel, IR.PCExpr()))
            aux()
        elif line.lookahead(0).type == "*":
            global templabelcount
            templabelcount = templabelcount + 1
            result.append(IR.Node(ppt, "Label", "*"+str(templabelcount), IR.PCExpr()))
            line.expect("*")
            aux()
        elif line.lookahead(0).type == "." or line.lookahead(0).type == "`":
            which = line.expect(".", "`").type
            if (which == "."): pragma = line.expect("LABEL").value
            else: pragma = "invoke"
            pragmaFunction = "pragma"+pragma.title()
            for mod in pragma_modules:
                if hasattr(mod, pragmaFunction):
                    getattr(mod, pragmaFunction)(ppt, line, result)
                    break
            else:
                Err.log("Unknown pragma "+pragma)
        else:   # Instruction
            opcode = line.expect("OPCODE").value
            if line.lookahead(0).type == "#":
                mode = "Immediate"
                line.expect("#")
                arg = parse_expr(line)
                line.expect("EOL")
            elif line.lookahead(0).type == "(":
                line.expect("(")
                arg = parse_expr(line)
                if line.lookahead(0).type == ",":
                    mode = "PointerX"
                    line.expect(",")
                    line.expect("X")
                    line.expect(")")
                    line.expect("EOL")
                else:
                    line.expect(")")
                    tok = line.expect(",", "EOL").type
                    if tok == "EOL":
                        mode = "Pointer"
                    else:
                        mode = "PointerY"
                        line.expect("Y")
                        line.expect("EOL")
            elif line.lookahead(0).type == "EOL":
                mode = "Implied"
                arg = None
            else:
                arg = parse_expr(line)
                tok = line.expect("EOL", ",").type
                if tok == ",":
                    tok = line.expect("X", "Y").type
                    if tok == "X": mode = "MemoryX"
                    else: mode = "MemoryY"
                    line.expect("EOL")
                else: mode = "Memory"
            result.append(IR.Node(ppt, mode, opcode, arg))
    aux()
    result = [node for node in result if node is not IR.NullNode]
    if len(result) == 0: return IR.NullNode
    if len(result) == 1: return result[0]
    return IR.SequenceNode(ppt, result)

def parse_file(ppt, filename):
    "Loads a .P65 source file, and returns an IR list."
    Err.currentpoint = ppt
    if Cmd.verbose > 0: print "Loading "+filename
    try:
        f = file(filename)
        linelist = f.readlines()
        f.close()
        pptlist = ["%s:%d" % (filename, i+1) for i in range(len(linelist))]
        lexlist = map(lex, pptlist, linelist)
        IRlist = map(parse_line, pptlist, lexlist)
        IRlist = [node for node in IRlist if node is not IR.NullNode]
        return IR.SequenceNode(ppt, IRlist)
    except IOError:
        Err.log("Could not read "+filename)
        return IR.NullNode

def parse(filename):
    "Top level parsing routine, taking a source file name and returning an IR list."
    global templabelcount
    templabelcount = 0
    return parse_file("<Top Level>", filename)
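A quick sketch of the lexer on a single line of source, to make the tokenization above concrete (Python 2; the file name and source line are made up):

    import Ophis.Frontend as FE

    for lexeme in FE.lex("demo.p65:1", "lda #$10  ; load a constant"):
        print lexeme
    # Prints, one per line: OPCODE:lda  #  NUM:16  EOL
    # "$" selects base 16, so $10 lexes as the number 16; the ";" comment
    # and everything after it are discarded.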
src/Ophis/IR.py | 321
@@ -1,161 +1,160 @@
"""P65 Intermediate Representation

Classes for representing the Intermediate nodes upon which the
assembler passes operate."""

# Copyright 2002 Michael C. Martin.
# You may use, modify, and distribute this file under the BSD
# license: See LICENSE.txt for details.

from __future__ import nested_scopes
import Ophis.Errors as Err

class Node:
    """The default IR Node
    Instances of Node always have the three fields ppt (Program Point),
    nodetype (a string), and data (a list)."""
    def __init__(self, ppt, nodetype, *data):
        self.ppt = ppt
        self.nodetype = nodetype
        self.data = list(data)
    def accept(self, asmpass, env=None):
        """Implements the Visitor pattern for an assembler pass.
        Calls the routine 'asmpass.visitTYPE(self, env)' where
        TYPE is the value of self.nodetype."""
        Err.currentpoint = self.ppt
        routine = getattr(asmpass, "visit"+self.nodetype, asmpass.visitUnknown)
        routine(self, env)
    def __str__(self):
        if self.nodetype != "SEQUENCE":
            return str(self.ppt)+": "+self.nodetype+" - "+" ".join(map(str, self.data))
        else:
            return "\n".join(map(str, self.data))
    def __repr__(self):
        args = [self.ppt, self.nodetype] + self.data
        return "Node(" + ", ".join(map(repr, args)) + ")"

NullNode = Node("<none>", "None")

def SequenceNode(ppt, nodelist):
    return Node(ppt, "SEQUENCE", *nodelist)

class Expr:
    """Base class for P65 expressions
    All expressions have a field called "data" and a boolean field
    called "hardcoded". An expression is hardcoded if it has no
    symbolic values in it."""
    def __init__(self, data):
        self.data = data
        self.hardcoded = 0
    def __str__(self):
        return "<UNKNOWN: "+`self.data`+">"
    def valid(self, env=None, PCvalid=0):
        """Returns true if the expression can be successfully
        evaluated in the specified environment."""
        return 0
    def value(self, env=None):
        "Evaluates this expression in the given environment."
        return None

class ConstantExpr(Expr):
    "Represents a numeric constant"
    def __init__(self, data):
        self.data = data
        self.hardcoded = 1
    def __str__(self):
        return str(self.data)
    def valid(self, env=None, PCvalid=0):
        return 1
    def value(self, env=None):
        return self.data

class LabelExpr(Expr):
    "Represents a symbolic constant"
    def __init__(self, data):
        self.data = data
        self.hardcoded = 0
    def __str__(self):
        return self.data
    def valid(self, env=None, PCvalid=0):
        return (env is not None) and self.data in env
    def value(self, env=None):
        return env[self.data]

class PCExpr(Expr):
    "Represents the current program counter: ^"
    def __init__(self):
        self.hardcoded = 0
    def __str__(self):
        return "^"
    def valid(self, env=None, PCvalid=0):
        return env is not None and PCvalid
    def value(self, env=None):
        return env.getPC()

class HighByteExpr(Expr):
    "Represents the expression >{data}"
    def __init__(self, data):
        self.data = data
        self.hardcoded = data.hardcoded
    def __str__(self):
        return ">"+str(self.data)
    def valid(self, env=None, PCvalid=0):
        return self.data.valid(env, PCvalid)
    def value