//
//  FlowControl.hpp
//  Clock Signal
//
//  Created by Thomas Harte on 08/11/2023.
//  Copyright © 2023 Thomas Harte. All rights reserved.
//

#pragma once

#include "Resolver.hpp"
#include "Stack.hpp"
#include "../AccessType.hpp"

#include <cassert>
#include <type_traits>

namespace InstructionSet::x86::Primitive {

template <typename IntT, typename ContextT>
void jump(
	const bool condition,
	const IntT displacement,
	ContextT &context
) {
	/*
		IF condition
			THEN
				EIP ← EIP + SignExtend(DEST);
				IF OperandSize = 16
					THEN
						EIP ← EIP AND 0000FFFFH;
				FI;
		FI;
	*/

	// TODO: proper behaviour in 32-bit.
	if(condition) {
		context.flow_controller.template jump<uint16_t>(uint16_t(context.registers.ip() + displacement));
	}
}

template <typename IntT, typename OffsetT, typename ContextT>
void loop(
	modify_t<IntT> counter,
	const OffsetT displacement,
	ContextT &context
) {
	--counter;
	if(counter) {
		context.flow_controller.template jump<uint16_t>(context.registers.ip() + displacement);
	}
}

template <typename IntT, typename OffsetT, typename ContextT>
void loope(
	modify_t<IntT> counter,
	const OffsetT displacement,
	ContextT &context
) {
	// Loop while the counter remains non-zero and the zero flag is set.
	--counter;
	if(counter && context.flags.template flag<Flag::Zero>()) {
		context.flow_controller.template jump<uint16_t>(context.registers.ip() + displacement);
	}
}

template <typename IntT, typename OffsetT, typename ContextT>
void loopne(
	modify_t<IntT> counter,
	const OffsetT displacement,
	ContextT &context
) {
	// Loop while the counter remains non-zero and the zero flag is clear.
	--counter;
	if(counter && !context.flags.template flag<Flag::Zero>()) {
		context.flow_controller.template jump<uint16_t>(context.registers.ip() + displacement);
	}
}

template <typename IntT, typename AddressT, typename ContextT>
void call_relative(
	typename std::make_signed<IntT>::type offset,
	ContextT &context
) {
	if constexpr (std::is_same_v<AddressT, uint16_t>) {
		push<uint16_t, false>(context.registers.ip(), context);
		context.flow_controller.template jump<AddressT>(AddressT(context.registers.ip() + offset));
	} else {
		assert(false);
	}
}

template <typename IntT, typename AddressT, typename ContextT>
void call_absolute(
	read_t<IntT> target,
	ContextT &context
) {
	push<uint16_t, false>(context.registers.ip(), context);
	context.flow_controller.template jump<AddressT>(AddressT(target));
}

template <typename IntT, typename ContextT>
void jump_absolute(
	read_t<IntT> target,
	ContextT &context
) {
	context.flow_controller.template jump<uint16_t>(target);
}

template <typename InstructionT, typename ContextT>
void call_far(
	InstructionT &instruction,
	ContextT &context
) {
	// TODO: eliminate 16-bit assumption below.
	const Source source_segment = instruction.data_segment();
	context.memory.preauthorise_stack_write(sizeof(uint16_t) * 2);

	uint16_t source_address;
	const auto pointer = instruction.destination();
	switch(pointer.source()) {
		default:
		case Source::Immediate:
			push<uint16_t, true>(context.registers.cs(), context);
			push<uint16_t, true>(context.registers.ip(), context);
			context.flow_controller.template jump<uint16_t>(instruction.segment(), instruction.offset());
		return;

		case Source::Indirect:
			source_address = uint16_t(
				address<uint16_t, Source::Indirect, AccessType::Read>(instruction, pointer, context)
			);
		break;
		case Source::IndirectNoBase:
			source_address = uint16_t(
				address<uint16_t, Source::IndirectNoBase, AccessType::Read>(instruction, pointer, context)
			);
		break;
		case Source::DirectAddress:
			source_address = uint16_t(
				address<uint16_t, Source::DirectAddress, AccessType::Read>(instruction, pointer, context)
			);
		break;
	}

	context.memory.preauthorise_read(source_segment, source_address, sizeof(uint16_t) * 2);
	const auto offset = context.memory.template access<uint16_t, AccessType::PreauthorisedRead>(source_segment, source_address);
	source_address += 2;
	const auto segment = context.memory.template access<uint16_t, AccessType::PreauthorisedRead>(source_segment, source_address);

	// At least on an 8086, the stack writes occur after the target address read.
	push<uint16_t, true>(context.registers.cs(), context);
	push<uint16_t, true>(context.registers.ip(), context);

	context.flow_controller.template jump<uint16_t>(segment, offset);
}
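// Like call_far above, jump_far takes its target either directly from the instruction when the
// destination is Source::Immediate, or as an m16:16 far pointer read from memory: a 16-bit offset
// followed by a 16-bit segment at the resolved effective address.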
template <typename InstructionT, typename ContextT>
void jump_far(
	InstructionT &instruction,
	ContextT &context
) {
	// TODO: eliminate 16-bit assumption below.
	uint16_t source_address = 0;
	const auto pointer = instruction.destination();
	switch(pointer.source()) {
		default:
		case Source::Immediate:
			context.flow_controller.template jump<uint16_t>(instruction.segment(), instruction.offset());
		return;

		case Source::Indirect:
			source_address = uint16_t(
				address<uint16_t, Source::Indirect, AccessType::Read>(instruction, pointer, context)
			);
		break;
		case Source::IndirectNoBase:
			source_address = uint16_t(
				address<uint16_t, Source::IndirectNoBase, AccessType::Read>(instruction, pointer, context)
			);
		break;
		case Source::DirectAddress:
			source_address = uint16_t(
				address<uint16_t, Source::DirectAddress, AccessType::Read>(instruction, pointer, context)
			);
		break;
	}

	const Source source_segment = instruction.data_segment();
	context.memory.preauthorise_read(source_segment, source_address, sizeof(uint16_t) * 2);

	const auto offset = context.memory.template access<uint16_t, AccessType::PreauthorisedRead>(source_segment, source_address);
	source_address += 2;
	const auto segment = context.memory.template access<uint16_t, AccessType::PreauthorisedRead>(source_segment, source_address);
	context.flow_controller.template jump<uint16_t>(segment, offset);
}

template <typename ContextT>
void iret(
	ContextT &context
) {
	// TODO: all modes other than 16-bit real mode.
	context.memory.preauthorise_stack_read(sizeof(uint16_t) * 3);
	const auto ip = pop<uint16_t, true>(context);
	const auto cs = pop<uint16_t, true>(context);
	context.flags.set(pop<uint16_t, true>(context));
	context.flow_controller.template jump<uint16_t>(cs, ip);
}

template <typename InstructionT, typename ContextT>
void ret_near(
	const InstructionT instruction,
	ContextT &context
) {
	const auto ip = pop<uint16_t, false>(context);
	context.registers.sp() += instruction.operand();
	context.flow_controller.template jump<uint16_t>(ip);
}

template <typename InstructionT, typename ContextT>
void ret_far(
	const InstructionT instruction,
	ContextT &context
) {
	context.memory.preauthorise_stack_read(sizeof(uint16_t) * 2);
	const auto ip = pop<uint16_t, true>(context);
	const auto cs = pop<uint16_t, true>(context);
	context.registers.sp() += instruction.operand();
	context.flow_controller.template jump<uint16_t>(cs, ip);
}

template <typename ContextT>
void into(
	ContextT &context
) {
	if(context.flags.template flag<Flag::Overflow>()) {
		interrupt(Interrupt::Overflow, context);
	}
}

template <typename IntT, typename InstructionT, typename ContextT>
void bound(
	const InstructionT &instruction,
	read_t<IntT> destination,
	read_t<IntT> source,
	ContextT &context
) {
	using sIntT = typename std::make_signed<IntT>::type;

	// Read the signed lower and upper bounds from consecutive words at the source address.
	const auto source_segment = instruction.data_segment();
	context.memory.preauthorise_read(source_segment, source, 2*sizeof(IntT));
	const auto lower_bound =
		sIntT(context.memory.template access<IntT, AccessType::PreauthorisedRead>(source_segment, source));
	source += 2;
	const auto upper_bound =
		sIntT(context.memory.template access<IntT, AccessType::PreauthorisedRead>(source_segment, source));

	if(sIntT(destination) < lower_bound || sIntT(destination) > upper_bound) {
		interrupt(Interrupt::BoundRangeExceeded, context);
	}
}

}