Unverified commit ac9203df authored by Enkelmann, committed by GitHub

better Subpiece handling for IntervalDomain (#178)

parent 11782c04
@@ -259,6 +259,113 @@ impl IntervalDomain {
min.checked_sle(&self.interval.start).unwrap()
&& max.checked_sge(&self.interval.end).unwrap()
}
    /// Truncate the bitvectors in the interval domain
    /// by removing the least significant bytes below `low_byte` from them.
    ///
    /// The widening delay is also right shifted by the corresponding number of bits.
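    ///
    /// For example (an illustrative sketch): for a 2-byte interval domain with interval
    /// `[0x0102, 0x0304]`, calling `subpiece_higher(ByteSize::new(1))` keeps only the
    /// high bytes and yields the 1-byte interval `[0x01, 0x03]`.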
fn subpiece_higher(self, low_byte: ByteSize) -> Self {
let old_size = self.bytesize();
let interval = self.interval.subpiece_higher(low_byte);
let mut lower_bound = None;
if let Some(bound) = self.widening_lower_bound {
let bound = bound.subpiece(low_byte, old_size - low_byte);
if bound.checked_slt(&interval.start).unwrap() {
lower_bound = Some(bound);
}
}
let mut upper_bound = None;
if let Some(bound) = self.widening_upper_bound {
let bound = bound.subpiece(low_byte, old_size - low_byte);
if bound.checked_sgt(&interval.end).unwrap() {
upper_bound = Some(bound);
}
}
IntervalDomain {
interval,
widening_lower_bound: lower_bound,
widening_upper_bound: upper_bound,
widening_delay: self.widening_delay >> low_byte.as_bit_length(),
}
}
    /// Truncate the bitvectors in the interval domain to `size`,
    /// i.e. the most significant bytes above `size` are removed from all values.
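    ///
    /// For example (an illustrative sketch): truncating a 2-byte interval domain with
    /// interval `[0x0005, 0x0007]` to one byte yields the interval `[0x05, 0x07]`;
    /// widening bounds are only kept if they lie close enough to the interval to
    /// survive the truncation (see the distance checks below).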
fn subpiece_lower(self, size: ByteSize) -> Self {
let max_length = Bitvector::unsigned_max_value(size.into())
.into_zero_extend(self.bytesize())
.unwrap();
let truncated_interval = self.interval.clone().subpiece_lower(size);
let mut lower_bound = None;
if let Some(bound) = self.widening_lower_bound {
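            // Keep the lower bound only if its distance to the interval start fits into
            // the truncated value range; otherwise the truncated bound would wrap around.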
if (self.interval.start - &bound)
.checked_ult(&max_length)
.unwrap()
{
let truncated_bound = bound.subpiece(ByteSize::new(0), size);
if truncated_bound
.checked_slt(&truncated_interval.start)
.unwrap()
{
lower_bound = Some(truncated_bound);
}
}
}
let mut upper_bound = None;
if let Some(bound) = self.widening_upper_bound {
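            // Analogously, keep the upper bound only if its distance from the interval
            // end fits into the truncated value range.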
if (bound.clone() - &self.interval.end)
.checked_ult(&max_length)
.unwrap()
{
let truncated_bound = bound.subpiece(ByteSize::new(0), size);
if truncated_bound
.checked_sgt(&truncated_interval.end)
.unwrap()
{
upper_bound = Some(truncated_bound);
}
}
}
IntervalDomain {
interval: truncated_interval,
widening_lower_bound: lower_bound,
widening_upper_bound: upper_bound,
widening_delay: self.widening_delay,
}
}
    /// Piece two interval domains together, where `self` contains the most significant bytes
    /// and `other` contains the least significant bytes of the resulting values.
    ///
    /// The result retains the widening bounds of `other` if `self` contains only one value;
    /// otherwise the result has no widening bounds.
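    ///
    /// For example (an illustrative sketch): piecing the single 1-byte value `0x01` with a
    /// 1-byte domain `[0x02, 0x05]` that has widening bounds `0x00` and `0x10` yields the
    /// 2-byte domain `[0x0102, 0x0105]` with widening bounds `0x0100` and `0x0110`.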
fn piece(&self, other: &IntervalDomain) -> IntervalDomain {
let pieced_interval = self.interval.piece(&other.interval);
let mut lower_bound = None;
let mut upper_bound = None;
let mut widening_delay = 0;
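        // Widening bounds of `other` can only be transferred if `self` is a single value,
        // since only then can the pieced bounds be compared against the pieced interval.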
if let Ok(upper_piece) = self.try_to_bitvec() {
if let Some(bound) = &other.widening_lower_bound {
let pieced_bound = upper_piece.bin_op(BinOpType::Piece, bound).unwrap();
if pieced_bound.checked_slt(&pieced_interval.start).unwrap() {
lower_bound = Some(pieced_bound);
}
}
if let Some(bound) = &other.widening_upper_bound {
let pieced_bound = upper_piece.bin_op(BinOpType::Piece, bound).unwrap();
if pieced_bound.checked_sgt(&pieced_interval.end).unwrap() {
upper_bound = Some(pieced_bound);
}
}
widening_delay = other.widening_delay;
}
IntervalDomain {
            interval: pieced_interval,
widening_lower_bound: lower_bound,
widening_upper_bound: upper_bound,
widening_delay,
}
}
}
impl SpecializeByConditional for IntervalDomain {
@@ -404,7 +511,7 @@ impl RegisterDomain for IntervalDomain {
fn bin_op(&self, op: BinOpType, rhs: &Self) -> Self {
use BinOpType::*;
match op {
-            Piece | IntEqual | IntNotEqual | IntLess | IntSLess | IntLessEqual | IntSLessEqual
+            IntEqual | IntNotEqual | IntLess | IntSLess | IntLessEqual | IntSLessEqual
| IntCarry | IntSCarry | IntSBorrow | IntAnd | IntOr | IntXOr | IntRight
| IntSRight | IntDiv | IntSDiv | IntRem | IntSRem | BoolAnd | BoolOr | BoolXOr
| FloatEqual | FloatNotEqual | FloatLess | FloatLessEqual | FloatAdd | FloatSub
@@ -427,6 +534,7 @@ impl RegisterDomain for IntervalDomain {
widening_delay: std::cmp::max(self.widening_delay, rhs.widening_delay),
}
}
Piece => self.piece(rhs),
IntAdd => self.add(rhs),
IntSub => self.sub(rhs),
IntMult => self.signed_mul(rhs),
@@ -482,35 +590,15 @@
}
/// Take a sub-bitvector of the values in the interval domain.
///
    /// If `low_byte` is not zero, the result will generally not be exact.
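    ///
    /// For example (an illustrative sketch): for an 8-byte interval domain containing
    /// `[0x0102, 0x0105]`, `subpiece(ByteSize::new(0), ByteSize::new(1))` yields the
    /// 1-byte interval `[0x02, 0x05]`, while `subpiece(ByteSize::new(1), ByteSize::new(1))`
    /// keeps the second-lowest byte and yields the single value `0x01`.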
fn subpiece(&self, low_byte: ByteSize, size: ByteSize) -> Self {
-        let new_interval = self.interval.clone().subpiece(low_byte, size);
-        let (mut new_lower_bound, mut new_upper_bound) = (None, None);
-        if low_byte == ByteSize::new(0) {
-            if let (Some(lower_bound), Some(upper_bound)) =
-                (&self.widening_lower_bound, &self.widening_upper_bound)
-            {
-                let new_min = Bitvector::signed_min_value(size.into())
-                    .into_sign_extend(self.bytesize())
-                    .unwrap();
-                let new_max = Bitvector::signed_max_value(size.into())
-                    .into_sign_extend(self.bytesize())
-                    .unwrap();
-                if lower_bound.checked_sge(&new_min).unwrap()
-                    && upper_bound.checked_sle(&new_max).unwrap()
-                {
-                    new_lower_bound = Some(lower_bound.clone().into_truncate(size).unwrap());
-                    new_upper_bound = Some(upper_bound.clone().into_truncate(size).unwrap());
-                }
-            }
-        }
-        IntervalDomain {
-            interval: new_interval,
-            widening_lower_bound: new_lower_bound,
-            widening_upper_bound: new_upper_bound,
-            widening_delay: self.widening_delay,
-        }
+        let mut interval_domain = self.clone();
+        if low_byte != ByteSize::new(0) {
+            interval_domain = interval_domain.subpiece_higher(low_byte);
+        }
+        if interval_domain.bytesize() > size {
+            interval_domain = interval_domain.subpiece_lower(size);
+        }
+        interval_domain
    }
}
/// Compute the result of a cast operation on the interval domain.
......
@@ -97,31 +97,68 @@ impl Interval {
}
}
    /// Truncate the bitvectors in the interval
    /// by removing the least significant bytes below `low_byte` from them.
pub fn subpiece_higher(self, low_byte: ByteSize) -> Self {
Interval {
start: self.start.subpiece(low_byte, self.bytesize() - low_byte),
end: self.end.subpiece(low_byte, self.bytesize() - low_byte),
}
}
    /// Truncate the bitvectors in the interval to `size`,
    /// i.e. the most significant bytes above `size` are removed from all values.
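    ///
    /// For example (an illustrative sketch): truncating the 2-byte interval `[-10, 10]`
    /// to one byte yields `[-10, 10]` as 1-byte values, whereas `[-300, 300]` does not
    /// fit into one byte and yields `Top`.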
pub fn subpiece_lower(self, size: ByteSize) -> Self {
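        // If the interval length fits into the truncated value range (and the truncated
        // endpoints do not wrap), truncation is exact; otherwise return `Top`.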
let length = self.length();
if !length.is_zero()
&& length
.checked_ule(
&Bitvector::unsigned_max_value(size.into())
.into_zero_extend(self.bytesize())
.unwrap(),
)
.unwrap()
{
let start = self.start.into_truncate(size).unwrap();
let end = self.end.into_truncate(size).unwrap();
if start.checked_sle(&end).unwrap() {
return Interval { start, end };
}
}
Self::new_top(size)
}
/// Take a subpiece of the bitvectors.
-    ///
-    /// The function only tries to be exact if the interval contains exactly one value
-    /// or if the `low_byte` is zero.
-    pub fn subpiece(self, low_byte: ByteSize, size: ByteSize) -> Self {
-        if self.start == self.end {
-            self.start.subpiece(low_byte, size).into()
-        } else if low_byte == ByteSize::new(0) {
-            let new_min = Bitvector::signed_min_value(size.into())
-                .into_sign_extend(self.bytesize())
-                .unwrap();
-            let new_max = Bitvector::signed_max_value(size.into())
-                .into_sign_extend(self.bytesize())
-                .unwrap();
-            if self.start.checked_sge(&new_min).unwrap() && self.end.checked_sle(&new_max).unwrap()
-            {
-                Interval {
-                    start: self.start.into_truncate(size).unwrap(),
-                    end: self.end.into_truncate(size).unwrap(),
-                }
-            } else {
-                Interval::new_top(size)
-            }
-        } else {
-            Interval::new_top(size)
-        }
-    }
+    pub fn subpiece(mut self, low_byte: ByteSize, size: ByteSize) -> Self {
+        if low_byte != ByteSize::new(0) {
+            self = self.subpiece_higher(low_byte);
+        }
+        if self.bytesize() > size {
+            self = self.subpiece_lower(size);
+        }
+        self
+    }
    /// Piece two intervals together, where `self` contains the most significant bytes
    /// and `other` contains the least significant bytes of the resulting values.
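    ///
    /// For example (an illustrative sketch): piecing the 1-byte interval `[0x01, 0x02]`
    /// with the 1-byte interval `[0x03, 0x05]` yields the 2-byte interval `[0x0103, 0x0205]`.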
pub fn piece(&self, other: &Interval) -> Self {
if other.start.sign_bit().to_bool() && !other.end.sign_bit().to_bool() {
// The `other` interval contains both -1 and 0.
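            // Thus the unsigned low bytes wrap around, and the pieced interval has to range
            // from `self.start` with zeroed low bytes to `self.end` with all-ones low bytes.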
Interval {
start: self
.start
.bin_op(BinOpType::Piece, &Bitvector::zero(other.start.width()))
.unwrap(),
end: self
.end
.bin_op(BinOpType::Piece, &(-Bitvector::one(other.end.width())))
.unwrap(),
}
        } else {
Interval {
start: self.start.bin_op(BinOpType::Piece, &other.start).unwrap(),
end: self.end.bin_op(BinOpType::Piece, &other.end).unwrap(),
}
}
}
......
@@ -187,7 +187,7 @@ fn subpiece() {
let subpieced_val = val.subpiece(ByteSize::new(0), ByteSize::new(1));
assert_eq!(
subpieced_val,
-        IntervalDomain::mock_i8_with_bounds(None, -3, 5, None)
+        IntervalDomain::mock_i8_with_bounds(None, -3, 5, Some(10))
);
let val = IntervalDomain::mock_with_bounds(Some(-30), 2, 5, Some(10));
let subpieced_val = val.subpiece(ByteSize::new(0), ByteSize::new(1));
@@ -199,20 +199,64 @@ fn subpiece() {
let subpieced_val = val.subpiece(ByteSize::new(0), ByteSize::new(1));
assert_eq!(
subpieced_val,
-        IntervalDomain::mock_i8_with_bounds(None, 2, 5, None)
+        IntervalDomain::mock_i8_with_bounds(None, 2, 5, Some(10))
);
let val = IntervalDomain::mock_with_bounds(Some(-30), 2, 567, Some(777));
let subpieced_val = val.subpiece(ByteSize::new(0), ByteSize::new(1));
assert_eq!(subpieced_val, IntervalDomain::new_top(ByteSize::new(1)));
-        let val = IntervalDomain::mock_with_bounds(Some(-30), 2, 3, Some(777));
+        let val = IntervalDomain::mock_with_bounds(Some(-30), 2, 3, Some(1024));
let subpieced_val = val.subpiece(ByteSize::new(1), ByteSize::new(1));
-        assert_eq!(subpieced_val, IntervalDomain::new_top(ByteSize::new(1)));
+        assert_eq!(
+            subpieced_val,
+            IntervalDomain::mock_i8_with_bounds(Some(-1), 0, 0, Some(4))
+        );
-        let val = IntervalDomain::mock_with_bounds(Some(-30), 512, 512, Some(777));
+        let val = IntervalDomain::mock_with_bounds(Some(-30), 512, 512, Some(1025));
let subpieced_val = val.subpiece(ByteSize::new(1), ByteSize::new(1));
assert_eq!(
subpieced_val,
-        IntervalDomain::mock_i8_with_bounds(None, 2, 2, None)
+        IntervalDomain::mock_i8_with_bounds(Some(-1), 2, 2, Some(4))
);
let val = IntervalDomain::mock_with_bounds(Some(-30), 120, 130, Some(1024));
let subpieced_val = val.subpiece(ByteSize::new(0), ByteSize::new(1));
assert_eq!(subpieced_val, IntervalDomain::new_top(ByteSize::new(1)));
let val = IntervalDomain::mock_with_bounds(Some(-30), 250, 260, Some(1024));
let subpieced_val = val.subpiece(ByteSize::new(0), ByteSize::new(1));
assert_eq!(subpieced_val, IntervalDomain::mock_i8(-6, 4));
let val = IntervalDomain::mock_with_bounds(Some(110), 376, 376, Some(390));
let subpieced_val = val.subpiece(ByteSize::new(0), ByteSize::new(1));
assert_eq!(
subpieced_val,
IntervalDomain::mock_i8_with_bounds(None, 120, 120, None)
);
}
#[test]
fn piece() {
let higher_half = IntervalDomain::mock_i8_with_bounds(None, 0, 0, None);
let lower_half = IntervalDomain::mock_i8_with_bounds(Some(0), 3, 5, Some(10));
let result = higher_half.piece(&lower_half).sign_extend(ByteSize::new(8));
assert_eq!(
result,
IntervalDomain::mock_with_bounds(Some(0), 3, 5, Some(10))
);
let higher_half = IntervalDomain::mock_i8_with_bounds(None, 0, 0, None);
let lower_half = IntervalDomain::mock_i8_with_bounds(Some(-1), 3, 5, Some(10));
let result = higher_half.piece(&lower_half).sign_extend(ByteSize::new(8));
assert_eq!(
result,
IntervalDomain::mock_with_bounds(None, 3, 5, Some(10))
);
let higher_half = IntervalDomain::mock_i8_with_bounds(Some(-30), 1, 2, Some(30));
let lower_half = IntervalDomain::mock_i8_with_bounds(Some(0), 3, 5, Some(10));
let result = higher_half.piece(&lower_half).sign_extend(ByteSize::new(8));
assert_eq!(
result,
IntervalDomain::mock_with_bounds(None, 259, 517, None)
);
let higher_half = IntervalDomain::mock_i8_with_bounds(None, 0, 1, None);
let lower_half = IntervalDomain::mock_i8_with_bounds(Some(-10), -5, 5, Some(10));
let result = higher_half.piece(&lower_half).sign_extend(ByteSize::new(8));
assert_eq!(result, IntervalDomain::mock_with_bounds(None, 0, 511, None));
}
#[test]
......
@@ -198,16 +198,15 @@ impl<'a, T: Context<'a>> GeneralFPContext for GeneralizedContext<'a, T> {
_ => panic!("Malformed Control flow graph"),
};
let call_term = &call_block.term.jmps[0];
-                match self.context.update_callsite(
-                    interprocedural_flow.as_ref(),
-                    call_stub.as_ref(),
-                    caller_sub,
-                    call_term,
-                    return_term,
-                ) {
-                    Some(val) => Some(NodeValue::Value(val)),
-                    None => None,
-                }
+                self.context
+                    .update_callsite(
+                        interprocedural_flow.as_ref(),
+                        call_stub.as_ref(),
+                        caller_sub,
+                        call_term,
+                        return_term,
+                    )
+                    .map(NodeValue::Value)
}
},
Edge::ExternCallStub(call) => self
......
@@ -187,15 +187,14 @@ impl<'a, T: Context<'a>> GeneralFPContext for GeneralizedContext<'a, T> {
_ => panic!("Malformed Control flow graph"),
};
let return_from_jmp = &return_from_block.term.jmps[0];
-                match self.context.update_return(
-                    interprocedural_flow.as_ref(),
-                    call_stub.as_ref(),
-                    call_term,
-                    return_from_jmp,
-                ) {
-                    Some(val) => Some(NodeValue::Value(val)),
-                    None => None,
-                }
+                self.context
+                    .update_return(
+                        interprocedural_flow.as_ref(),
+                        call_stub.as_ref(),
+                        call_term,
+                        return_from_jmp,
+                    )
+                    .map(NodeValue::Value)
}
},
Edge::ExternCallStub(call) => self
......
@@ -202,11 +202,9 @@ fn get_entry_sub_to_entry_node_map(
entry_sub_to_entry_blocks_map
.into_iter()
.filter_map(|((sub_tid, name), block_tid)| {
-            if let Some(start_node_index) = tid_to_graph_indices_map.get(&(block_tid, sub_tid)) {
-                Some((name, *start_node_index))
-            } else {
-                None
-            }
+            tid_to_graph_indices_map
+                .get(&(block_tid, sub_tid))
+                .map(|start_node_index| (name, *start_node_index))
})
.collect()
}
......