use crate::{ion::data_structures::u64_key, Allocation, PReg};
use smallvec::{smallvec, SmallVec};
use std::fmt::Debug;
/// A list of moves in the form (source, destination, payload), stored
/// inline for up to 16 entries before spilling to the heap.
pub type MoveVec<T> = SmallVec<[(Allocation, Allocation, T); 16]>;
/// A sequentialized list of moves that may require an external scratch
/// allocation. When a scratch is needed, `Allocation::none()` is used as
/// a placeholder inside the moves; `with_scratch()` substitutes a real
/// allocation for it.
#[derive(Clone, Debug)]
pub enum MoveVecWithScratch<T> {
    /// Moves that are complete as-is; no scratch placeholder present.
    NoScratch(MoveVec<T>),
    /// Moves containing `Allocation::none()` placeholders that must be
    /// replaced with a concrete scratch allocation before use.
    Scratch(MoveVec<T>),
}
/// A set of moves that semantically happen in parallel (all sources are
/// read before any destination is written), to be sequentialized by
/// `resolve()`.
pub struct ParallelMoves<T: Clone + Copy + Default> {
    // The accumulated (src, dst, payload) moves, in insertion order
    // until `resolve()` sorts them.
    parallel_moves: MoveVec<T>,
}
impl<T: Clone + Copy + Default> ParallelMoves<T> {
    /// Create an empty parallel-move set.
    pub fn new() -> Self {
        Self {
            parallel_moves: smallvec![],
        }
    }

    /// Add a move from `from` to `to`, carrying payload `t`.
    pub fn add(&mut self, from: Allocation, to: Allocation, t: T) {
        self.parallel_moves.push((from, to, t));
    }

    /// Returns true if any move's destination is also some move's
    /// source, i.e. if a naive sequential ordering could clobber a value
    /// before it is read.
    ///
    /// Requires `parallel_moves` to be sorted by source — `resolve()`
    /// sorts by `u64_key(src, dst)` before calling this — because the
    /// lookup binary-searches on the src field.
    fn sources_overlap_dests(&self) -> bool {
        for &(_, dst, _) in &self.parallel_moves {
            if self
                .parallel_moves
                .binary_search_by_key(&dst, |&(src, _, _)| src)
                .is_ok()
            {
                return true;
            }
        }
        false
    }

    /// Sequentialize the parallel moves: produce an ordering in which
    /// each move reads its source before any other move overwrites that
    /// location. Dependency cycles are broken with a scratch location;
    /// if one is needed, the result is `MoveVecWithScratch::Scratch`
    /// with `Allocation::none()` placeholders standing in for the
    /// scratch (to be filled in by `with_scratch()`).
    pub fn resolve(mut self) -> MoveVecWithScratch<T> {
        // Trivial case: zero or one move needs no ordering.
        if self.parallel_moves.len() <= 1 {
            return MoveVecWithScratch::NoScratch(self.parallel_moves);
        }

        // Sort by (src, dst) so `sources_overlap_dests()` can
        // binary-search on the src field.
        self.parallel_moves
            .sort_by_key(|&(src, dst, _)| u64_key(src.bits(), dst.bits()));

        // Fast path: if no source is also a destination, any sequential
        // order preserves parallel semantics; return as-is.
        if !self.sources_overlap_dests() {
            return MoveVecWithScratch::NoScratch(self.parallel_moves);
        }

        // Re-sort by destination so the dependency lookup below can
        // binary-search on dst.
        self.parallel_moves.sort_by_key(|&(_, dst, _)| dst);

        // In debug builds, verify the invariant that destinations are
        // unique: each location is written by at most one move.
        if cfg!(debug_assertions) {
            let mut last_dst = None;
            for &(_, dst, _) in &self.parallel_moves {
                if last_dst.is_some() {
                    debug_assert!(last_dst.unwrap() != dst);
                }
                last_dst = Some(dst);
            }
        }

        // must_come_before[i] = Some(j) means move i must execute before
        // move j, because move j writes the location move i reads.
        // Destinations are unique, so each move has at most one such
        // successor; the links form simple chains and cycles.
        let mut must_come_before: SmallVec<[Option<usize>; 16]> =
            smallvec![None; self.parallel_moves.len()];
        for (i, &(src, _, _)) in self.parallel_moves.iter().enumerate() {
            if let Ok(move_to_dst_idx) = self
                .parallel_moves
                .binary_search_by_key(&src, |&(_, dst, _)| dst)
            {
                must_come_before[i] = Some(move_to_dst_idx);
            }
        }

        // Iterative depth-first search along `must_come_before` links.
        // Moves are pushed onto `ret` in reverse execution order, and
        // `ret` is reversed at the end: a move pushed earlier executes
        // later.
        let mut ret: MoveVec<T> = smallvec![];
        let mut stack: SmallVec<[usize; 16]> = smallvec![];
        let mut visited: SmallVec<[bool; 16]> = smallvec![false; self.parallel_moves.len()];
        let mut onstack: SmallVec<[bool; 16]> = smallvec![false; self.parallel_moves.len()];
        // Set when a cycle forces us to emit scratch placeholders.
        let mut scratch_used = false;

        stack.push(0);
        onstack[0] = true;
        loop {
            // Current chain exhausted: start a new DFS from the next
            // unvisited move, or stop when none remain.
            if stack.is_empty() {
                if let Some(next) = visited.iter().position(|&flag| !flag) {
                    stack.push(next);
                    onstack[next] = true;
                } else {
                    break;
                }
            }

            let top = *stack.last().unwrap();
            visited[top] = true;
            match must_come_before[top] {
                None => {
                    // No move overwrites `top`'s source: emit it, then
                    // unwind the chain beneath it. After the final
                    // reverse, the chain runs bottom-of-stack first, so
                    // each move reads its source before its successor
                    // clobbers it.
                    ret.push(self.parallel_moves[top]);
                    onstack[top] = false;
                    stack.pop();
                    while let Some(top) = stack.pop() {
                        ret.push(self.parallel_moves[top]);
                        onstack[top] = false;
                    }
                }
                Some(next) if visited[next] && !onstack[next] => {
                    // Successor was already emitted in an earlier
                    // iteration, so (after the reverse) it executes
                    // later than anything we emit now; safe to emit and
                    // unwind as above.
                    ret.push(self.parallel_moves[top]);
                    onstack[top] = false;
                    stack.pop();
                    while let Some(top) = stack.pop() {
                        ret.push(self.parallel_moves[top]);
                        onstack[top] = false;
                    }
                }
                Some(next) if !visited[next] && !onstack[next] => {
                    // Unvisited successor: extend the chain and keep
                    // searching from it.
                    stack.push(next);
                    onstack[next] = true;
                    continue;
                }
                Some(next) => {
                    // `next` is already on the stack: a dependency
                    // cycle. Break it with a scratch location: the first
                    // move popped reads from the scratch placeholder
                    // instead of its real source, and a scratch-fill
                    // move emitted last here (hence executing *first*
                    // after the reverse) copies that source into the
                    // scratch.
                    let mut last_dst = None;
                    let mut scratch_src = None;
                    while let Some(move_idx) = stack.pop() {
                        onstack[move_idx] = false;
                        let (mut src, dst, dst_t) = self.parallel_moves[move_idx];
                        if last_dst.is_none() {
                            // First popped move: remember its real
                            // source and read from scratch instead.
                            scratch_src = Some(src);
                            src = Allocation::none();
                            scratch_used = true;
                        } else {
                            // Chain invariant: each popped move's source
                            // is the previously popped move's
                            // destination.
                            debug_assert_eq!(last_dst.unwrap(), src);
                        }
                        ret.push((src, dst, dst_t));
                        last_dst = Some(dst);
                        // Stop once the entire cycle has been popped;
                        // any moves below `next` on the stack are
                        // handled in later iterations.
                        if move_idx == next {
                            break;
                        }
                    }
                    // Emit the scratch-fill move (destination is the
                    // `none` placeholder) from the saved real source.
                    if let Some(src) = scratch_src {
                        ret.push((src, Allocation::none(), T::default()));
                    }
                }
            }
        }

        // `ret` was built in reverse execution order.
        ret.reverse();

        if scratch_used {
            MoveVecWithScratch::Scratch(ret)
        } else {
            MoveVecWithScratch::NoScratch(ret)
        }
    }
}
impl<T> MoveVecWithScratch<T> {
pub fn with_scratch(self, scratch: Allocation) -> MoveVec<T> {
match self {
MoveVecWithScratch::NoScratch(moves) => moves,
MoveVecWithScratch::Scratch(mut moves) => {
for (src, dst, _) in &mut moves {
debug_assert!(
*src != scratch && *dst != scratch,
"Scratch register should not also be an actual source or dest of moves"
);
debug_assert!(
!(src.is_none() && dst.is_none()),
"Move resolution should not have produced a scratch-to-scratch move"
);
if src.is_none() {
*src = scratch;
}
if dst.is_none() {
*dst = scratch;
}
}
moves
}
}
}
pub fn without_scratch(self) -> Option<MoveVec<T>> {
match self {
MoveVecWithScratch::NoScratch(moves) => Some(moves),
MoveVecWithScratch::Scratch(..) => None,
}
}
pub fn needs_scratch(&self) -> bool {
match self {
MoveVecWithScratch::NoScratch(..) => false,
MoveVecWithScratch::Scratch(..) => true,
}
}
pub fn stack_to_stack(&self, is_stack_alloc: impl Fn(Allocation) -> bool) -> bool {
match self {
MoveVecWithScratch::NoScratch(moves) | MoveVecWithScratch::Scratch(moves) => moves
.iter()
.any(|&(src, dst, _)| is_stack_alloc(src) && is_stack_alloc(dst)),
}
}
}
/// Final-stage move lowering: rewrites a resolved move list so that any
/// scratch placeholder gets a concrete allocation, and stack-to-stack
/// moves are expanded to go through a register.
pub struct MoveAndScratchResolver<GetReg, GetStackSlot, IsStackAlloc>
where
    GetReg: FnMut() -> Option<Allocation>,
    GetStackSlot: FnMut() -> Allocation,
    IsStackAlloc: Fn(Allocation) -> bool,
{
    /// Register used for stack-to-stack copies; chosen lazily on first
    /// need in `compute()`.
    stack_stack_scratch_reg: Option<Allocation>,
    /// Stackslot used to save/restore the victim register when no free
    /// register is available for stack-to-stack copies; `None` when a
    /// free register was found.
    stack_stack_scratch_reg_save: Option<Allocation>,
    /// Callback returning a free register at this point, if one exists.
    find_free_reg: GetReg,
    /// Callback allocating a fresh stackslot.
    get_stackslot: GetStackSlot,
    /// Predicate: does this allocation refer to stack memory?
    is_stack_alloc: IsStackAlloc,
    /// Register to commandeer (with save/restore around each use) when
    /// `find_free_reg` yields nothing.
    victim: PReg,
}
impl<GetReg, GetStackSlot, IsStackAlloc> MoveAndScratchResolver<GetReg, GetStackSlot, IsStackAlloc>
where
    GetReg: FnMut() -> Option<Allocation>,
    GetStackSlot: FnMut() -> Allocation,
    IsStackAlloc: Fn(Allocation) -> bool,
{
    /// Create a resolver from the environment callbacks and the victim
    /// register to fall back on when no register is free.
    pub fn new(
        find_free_reg: GetReg,
        get_stackslot: GetStackSlot,
        is_stack_alloc: IsStackAlloc,
        victim: PReg,
    ) -> Self {
        Self {
            stack_stack_scratch_reg: None,
            stack_stack_scratch_reg_save: None,
            find_free_reg,
            get_stackslot,
            is_stack_alloc,
            victim,
        }
    }

    /// Lower `moves` into their final executable form: substitute a
    /// concrete allocation for the scratch placeholder (if any), and
    /// expand every stack-to-stack move into moves through a register.
    pub fn compute<T: Debug + Copy>(mut self, moves: MoveVecWithScratch<T>) -> MoveVec<T> {
        // Fast path: no scratch placeholder and no stack-to-stack move
        // means nothing to rewrite.
        if !moves.needs_scratch() && !moves.stack_to_stack(&self.is_stack_alloc) {
            return moves.without_scratch().unwrap();
        }

        let mut result = smallvec![];

        // Choose the cycle-breaking scratch: a free register if one
        // exists, otherwise a fresh stackslot. (If it ends up being a
        // stackslot, moves involving it may themselves be
        // stack-to-stack; the loop below expands those too.)
        let scratch = (self.find_free_reg)().unwrap_or_else(|| (self.get_stackslot)());
        trace!("scratch resolver: scratch alloc {:?}", scratch);
        let moves = moves.with_scratch(scratch);

        for &(src, dst, data) in &moves {
            if (self.is_stack_alloc)(src) && (self.is_stack_alloc)(dst) {
                trace!("scratch resolver: stack to stack: {:?} -> {:?}", src, dst);

                // Lazily pick the register used for stack-to-stack
                // copies: a free one if available, otherwise commandeer
                // `victim` and allocate a stackslot to save/restore it.
                if self.stack_stack_scratch_reg.is_none() {
                    if let Some(reg) = (self.find_free_reg)() {
                        trace!(
                            "scratch resolver: have free stack-to-stack scratch preg: {:?}",
                            reg
                        );
                        self.stack_stack_scratch_reg = Some(reg);
                    } else {
                        self.stack_stack_scratch_reg = Some(Allocation::reg(self.victim));
                        self.stack_stack_scratch_reg_save = Some((self.get_stackslot)());
                        trace!("scratch resolver: stack-to-stack using victim {:?} with save stackslot {:?}",
                            self.stack_stack_scratch_reg,
                            self.stack_stack_scratch_reg_save);
                    }
                }

                if self.stack_stack_scratch_reg_save.is_none() {
                    // Free register available: copy through it directly
                    // (src -> reg, reg -> dst).
                    result.push((src, self.stack_stack_scratch_reg.unwrap(), data));
                    result.push((self.stack_stack_scratch_reg.unwrap(), dst, data));
                } else {
                    // Victim register in use: save its value to the
                    // save slot, copy through it, then restore it.
                    result.push((
                        self.stack_stack_scratch_reg.unwrap(),
                        self.stack_stack_scratch_reg_save.unwrap(),
                        data,
                    ));
                    result.push((src, self.stack_stack_scratch_reg.unwrap(), data));
                    result.push((self.stack_stack_scratch_reg.unwrap(), dst, data));
                    result.push((
                        self.stack_stack_scratch_reg_save.unwrap(),
                        self.stack_stack_scratch_reg.unwrap(),
                        data,
                    ));
                }
            } else {
                // At least one side is a register: emit unchanged.
                result.push((src, dst, data));
            }
        }
        trace!("scratch resolver: got {:?}", result);
        result
    }
}