Unverified Commit 4bdf6504 authored by Enkelmann, committed by GitHub

add a widening delay to IntervalDomain (#166)

parent 2ba249af
......@@ -26,6 +26,9 @@ pub struct IntervalDomain {
widening_upper_bound: Option<Bitvector>,
/// A lower bound for widening operations.
widening_lower_bound: Option<Bitvector>,
/// A delay counter to prevent unnecessary widenings.
/// See the [`IntervalDomain::signed_merge_and_widen`] method for its usage in the widening strategy.
widening_delay: u64,
}
impl From<Interval> for IntervalDomain {
......@@ -35,6 +38,7 @@ impl From<Interval> for IntervalDomain {
interval,
widening_lower_bound: None,
widening_upper_bound: None,
widening_delay: 0,
}
}
}
......@@ -49,6 +53,7 @@ impl IntervalDomain {
interval: Interval::new(start, end),
widening_upper_bound: None,
widening_lower_bound: None,
widening_delay: 0,
}
}
......@@ -99,22 +104,37 @@ impl IntervalDomain {
merged_domain.update_widening_lower_bound(&other.widening_lower_bound);
merged_domain.update_widening_upper_bound(&self.widening_upper_bound);
merged_domain.update_widening_upper_bound(&other.widening_upper_bound);
merged_domain.widening_delay = std::cmp::max(self.widening_delay, other.widening_delay);
merged_domain
}
/// Merge as signed intervals and perform widening if necessary.
///
/// No widening is performed for very small intervals (currently set to interval lengths not greater than 2)
/// or if the merged interval (as value set) equals one of the input intervals.
/// In all other cases widening is performed after merging the underlying intervals.
/// ## Widening Strategy
///
/// ### Widening Strategy
/// ### The widening delay
///
/// Each interval has a `widening_delay` counter,
/// which denotes the length of the interval after the last time that widening was performed.
/// For operations with more than one input,
/// the widening delay is set to the maximum of the input widening delays.
/// The only exception to this is the [`IntervalDomain::intersect()`] method,
/// which may lower the value of the widening delay.
///
/// ### When to widen
///
/// If the merged interval equals one of the input intervals as value sets, no widening is performed.
/// Otherwise, widening is performed if and only if the length of the merged interval is greater than `widening_delay + 2`.
///
/// ### How to widen
///
/// If no suitable widening bounds exist, widen to the `Top` value.
/// If exactly one widening bound exists, widen up to the bound,
/// but do not perform widening in the other direction of the interval.
/// If widening bounds for both directions exist, widen up to the bounds in both directions.
///
/// After that the `widening_delay` is set to the length of the resulting interval.
pub fn signed_merge_and_widen(&self, other: &IntervalDomain) -> IntervalDomain {
let mut merged_domain = self.signed_merge(other);
if merged_domain.equal_as_value_sets(self) || merged_domain.equal_as_value_sets(other) {
......@@ -122,8 +142,11 @@ impl IntervalDomain {
return merged_domain;
}
if let Ok(length) = merged_domain.interval.length().try_to_u64() {
if length <= 2 {
// Do not widen for very small intervals or for already unconstrained intervals (case length() returning zero).
if length <= merged_domain.widening_delay + 2 {
// Do not widen for already unconstrained intervals (case length() returning zero)
// or if the interval length is not larger than `widening_delay + 2`.
// FIXME: `widening_delay + 2` may overflow.
// But such a large delay is probably incorrect anyway, so this should not cause unnecessary widenings.
return merged_domain;
}
}
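For intuition, here is the widening strategy documented above in concrete numbers. This is a minimal sketch using the `mock_with_bounds` test helper that appears further down in this diff; the exact values are illustrative.

// A loop counter [0, 2] merged with [1, 3], where a comparison supplied the upper bound 100.
let a = IntervalDomain::mock_with_bounds(None, 0, 2, Some(100));
let b = IntervalDomain::mock_with_bounds(None, 1, 3, Some(100));
let widened = a.signed_merge_and_widen(&b);
// The merged interval [0, 3] is not equal to either input as a value set and its length 4
// exceeds `widening_delay (0) + 2`, so widening is performed. Only an upper widening bound
// exists, so only the upper end is widened: the result is [0, 100] with `widening_delay == 101`.
// Further merges whose result has length at most 103 will therefore not trigger another widening.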
......@@ -143,6 +166,8 @@ impl IntervalDomain {
has_been_widened = true;
}
if has_been_widened {
merged_domain.widening_delay =
merged_domain.interval.length().try_to_u64().unwrap_or(0);
merged_domain
} else {
// No widening bounds could be used for widening, so we have to widen to the `Top` value.
......@@ -162,7 +187,7 @@ impl IntervalDomain {
}
_ => None,
};
let mut upper_bound = match self.widening_upper_bound {
let upper_bound = match self.widening_upper_bound {
Some(bound)
if (bound.sign_bit().to_bool() == self.interval.end.sign_bit().to_bool())
&& (self.interval.start.sign_bit().to_bool()
......@@ -172,38 +197,18 @@ impl IntervalDomain {
}
_ => None,
};
let old_width = self.interval.start.width();
let new_interval = self.interval.zero_extend(width);
if upper_bound.is_none() {
let max_val = Bitvector::unsigned_max_value(old_width)
.into_zero_extend(width)
.unwrap();
if new_interval.end.checked_ult(&max_val).unwrap() {
upper_bound = Some(max_val);
}
}
IntervalDomain {
interval: new_interval,
widening_lower_bound: lower_bound,
widening_upper_bound: upper_bound,
widening_delay: self.widening_delay,
}
}
/// Sign-extend the values in the interval to the given width.
pub fn sign_extend(mut self, width: ByteSize) -> Self {
pub fn sign_extend(self, width: ByteSize) -> Self {
assert!(self.bytesize() <= width);
if self.widening_lower_bound.is_none() {
let min_val = Bitvector::signed_min_value(self.interval.start.width());
if min_val.checked_slt(&self.interval.start).unwrap() {
self.widening_lower_bound = Some(min_val);
}
}
if self.widening_upper_bound.is_none() {
let max_val = Bitvector::signed_max_value(self.interval.end.width());
if max_val.checked_sgt(&self.interval.end).unwrap() {
self.widening_upper_bound = Some(max_val);
}
}
IntervalDomain {
interval: Interval {
start: self.interval.start.clone().into_sign_extend(width).unwrap(),
......@@ -215,6 +220,7 @@ impl IntervalDomain {
widening_upper_bound: self
.widening_upper_bound
.map(|bitvec| bitvec.into_sign_extend(width).unwrap()),
widening_delay: self.widening_delay,
}
}
......@@ -227,8 +233,32 @@ impl IntervalDomain {
intersected_domain.update_widening_lower_bound(&other.widening_lower_bound);
intersected_domain.update_widening_upper_bound(&self.widening_upper_bound);
intersected_domain.update_widening_upper_bound(&other.widening_upper_bound);
intersected_domain.widening_delay =
std::cmp::max(self.widening_delay, other.widening_delay);
if let Ok(interval_length) = intersected_domain.interval.length().try_to_u64() {
intersected_domain.widening_delay =
std::cmp::min(intersected_domain.widening_delay, interval_length);
}
Ok(intersected_domain)
}
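As an example of the delay capping above: intersecting a freshly widened interval with a tight bound lowers the delay again, so that widening is not suppressed for longer than necessary. A sketch using the `mock` and `as_freshly_widened` test helpers defined further down; the values are illustrative.

let widened = IntervalDomain::mock(0, 100).as_freshly_widened(); // widening_delay == 101
let bound = IntervalDomain::mock(0, 5);
let tightened = widened.intersect(&bound).unwrap();
// The intersected interval is [0, 5], so the widening delay is capped
// at the interval length: min(101, 6) == 6.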
/// Check whether all values in the interval are representable by bitvectors of the given `size`.
/// Does not check whether this is also true for the widening hints.
pub fn fits_into_size(&self, size: ByteSize) -> bool {
if size >= self.bytesize() {
return true;
}
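// Compare the interval against the signed value range representable in `size` bytes,
// sign-extended to the width of the interval.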
let min = Bitvector::signed_min_value(size.into())
.into_sign_extend(self.bytesize())
.unwrap();
let max = Bitvector::signed_max_value(size.into())
.into_sign_extend(self.bytesize())
.unwrap();
min.checked_sle(&self.interval.start).unwrap()
&& max.checked_sge(&self.interval.end).unwrap()
}
}
impl SpecializeByConditional for IntervalDomain {
......@@ -354,6 +384,7 @@ impl SizedDomain for IntervalDomain {
},
widening_lower_bound: None,
widening_upper_bound: None,
widening_delay: 0,
}
}
}
......@@ -393,6 +424,7 @@ impl RegisterDomain for IntervalDomain {
interval: new_interval,
widening_lower_bound: None,
widening_upper_bound: None,
widening_delay: std::cmp::max(self.widening_delay, rhs.widening_delay),
}
}
IntAdd => self.add(rhs),
......@@ -422,12 +454,14 @@ impl RegisterDomain for IntervalDomain {
interval,
widening_lower_bound: new_lower_bound,
widening_upper_bound: new_upper_bound,
widening_delay: self.widening_delay,
}
}
IntNegate => IntervalDomain {
interval: self.interval.clone().bitwise_not(),
widening_lower_bound: None,
widening_upper_bound: None,
widening_delay: self.widening_delay,
},
BoolNegate => {
if self.interval.start == self.interval.end {
......@@ -475,6 +509,7 @@ impl RegisterDomain for IntervalDomain {
interval: new_interval,
widening_lower_bound: new_lower_bound,
widening_upper_bound: new_upper_bound,
widening_delay: self.widening_delay,
}
}
......@@ -541,6 +576,7 @@ impl From<Bitvector> for IntervalDomain {
interval: bitvec.into(),
widening_lower_bound: None,
widening_upper_bound: None,
widening_delay: 0,
}
}
}
......@@ -574,7 +610,7 @@ impl Display for IntervalDomain {
} else if self.interval.start == self.interval.end {
write!(
f,
"{:016x}:i{}",
"0x{:016x}:i{}",
apint::Int::from(self.interval.start.clone()),
self.bytesize().as_bit_length()
)
......@@ -583,7 +619,7 @@ impl Display for IntervalDomain {
let end_int = apint::Int::from(self.interval.end.clone());
write!(
f,
"[{:016x}, {:016x}]:i{}",
"[0x{:016x}, 0x{:016x}]:i{}",
start_int,
end_int,
self.bytesize().as_bit_length()
......
......@@ -8,6 +8,7 @@ impl IntervalDomain {
if interval.is_top() {
interval
} else {
interval.widening_delay = std::cmp::max(self.widening_delay, rhs.widening_delay);
interval.update_widening_lower_bound(
&self
.widening_lower_bound
......@@ -41,6 +42,7 @@ impl IntervalDomain {
if interval.is_top() {
interval
} else {
interval.widening_delay = std::cmp::max(self.widening_delay, rhs.widening_delay);
interval.update_widening_lower_bound(
&self
.widening_lower_bound
......@@ -131,6 +133,7 @@ impl IntervalDomain {
interval,
widening_lower_bound: lower_bound,
widening_upper_bound: upper_bound,
widening_delay: std::cmp::max(self.widening_delay, rhs.widening_delay),
}
}
}
......
......@@ -34,6 +34,13 @@ impl IntervalDomain {
domain.update_widening_upper_bound(&upper_bound.map(|b| Bitvector::from_i8(b)));
domain
}
/// Set the widening delay to the interval length
/// to simulate that `self` was just widened.
pub fn as_freshly_widened(mut self) -> Self {
self.widening_delay = self.interval.length().try_to_u64().unwrap();
self
}
}
#[test]
......@@ -43,7 +50,7 @@ fn signed_merge() {
let b = IntervalDomain::mock_with_bounds(None, 2, 5, None);
assert_eq!(
a.merge(&b),
IntervalDomain::mock_with_bounds(None, 0, 10, None)
IntervalDomain::mock_with_bounds(None, 0, 10, None).as_freshly_widened()
);
let a = IntervalDomain::mock_with_bounds(Some(-3), 1, 1, None);
let b = IntervalDomain::mock_with_bounds(None, 2, 2, Some(5));
......@@ -55,7 +62,7 @@ fn signed_merge() {
let b = IntervalDomain::mock_with_bounds(None, 3, 3, Some(5));
assert_eq!(
a.merge(&b),
IntervalDomain::mock_with_bounds(None, -3, 5, None)
IntervalDomain::mock_with_bounds(None, -3, 5, None).as_freshly_widened()
);
let a = IntervalDomain::mock_with_bounds(None, 1, 5, None);
let b = IntervalDomain::mock_with_bounds(None, -1, -1, Some(5));
......@@ -78,10 +85,16 @@ fn signed_merge() {
assert_eq!(var, IntervalDomain::mock_with_bounds(None, 0, 1, Some(99)));
let update = IntervalDomain::mock_with_bounds(None, 1, 2, Some(99));
var = var.merge(&update);
assert_eq!(var, IntervalDomain::mock_with_bounds(None, 0, 99, None));
assert_eq!(
var,
IntervalDomain::mock_with_bounds(None, 0, 99, None).as_freshly_widened()
);
let update = IntervalDomain::mock_with_bounds(None, 1, 99, None);
var = var.merge(&update);
assert_eq!(var, IntervalDomain::mock_with_bounds(None, 0, 99, None));
assert_eq!(
var,
IntervalDomain::mock_with_bounds(None, 0, 99, None).as_freshly_widened()
);
// Widening process corresponding to a loop counter variable with bound in the wrong direction
let mut var = IntervalDomain::mock(0, 0);
......@@ -90,10 +103,15 @@ fn signed_merge() {
assert_eq!(var, IntervalDomain::mock_with_bounds(Some(-3), 0, 1, None));
let update = IntervalDomain::mock_with_bounds(Some(-3), 1, 2, None);
var = var.merge(&update);
assert_eq!(var, IntervalDomain::mock_with_bounds(None, -3, 2, None));
assert_eq!(
var,
IntervalDomain::mock_with_bounds(None, -3, 2, None).as_freshly_widened()
);
let update = IntervalDomain::mock_with_bounds(Some(-3), -2, 3, None);
var = var.merge(&update);
assert_eq!(var, IntervalDomain::new_top(ByteSize::new(8)));
let mut expected_result = IntervalDomain::mock_with_bounds(None, -3, 3, None);
expected_result.widening_delay = 6;
assert_eq!(var, expected_result);
}
#[test]
......@@ -127,7 +145,7 @@ fn cast_zero_and_signed_extend() {
let extended_val = val.cast(CastOpType::IntZExt, ByteSize::new(8));
assert_eq!(
extended_val,
IntervalDomain::mock_with_bounds(Some(236), 246, 251, Some(255))
IntervalDomain::mock_with_bounds(Some(236), 246, 251, None)
);
// Sign extend
......@@ -153,7 +171,7 @@ fn cast_zero_and_signed_extend() {
let extended_val = val.cast(CastOpType::IntSExt, ByteSize::new(8));
assert_eq!(
extended_val,
IntervalDomain::mock_with_bounds(Some(-128), -10, -5, Some(127))
IntervalDomain::mock_with_bounds(None, -10, -5, None)
);
let val = IntervalDomain::mock_i8_with_bounds(Some(-20), -10, -5, Some(3));
let extended_val = val.cast(CastOpType::IntSExt, ByteSize::new(8));
......@@ -490,6 +508,13 @@ fn add_not_equal_bounds() {
let interval = IntervalDomain::mock(5, 6);
let x = interval.add_not_equal_bound(&Bitvector::from_i64(5));
assert_eq!(x.unwrap(), IntervalDomain::mock(6, 6));
let interval = IntervalDomain::mock_with_bounds(None, 5, 6, Some(100));
let x = interval.add_not_equal_bound(&Bitvector::from_i64(10));
assert_eq!(
x.unwrap(),
IntervalDomain::mock_with_bounds(None, 5, 6, Some(9))
);
}
#[test]
......@@ -505,6 +530,14 @@ fn intersection() {
}
#[test]
fn fits_into_size() {
let interval = IntervalDomain::mock_with_bounds(Some(-300), -10, 10, Some(100));
assert!(interval.fits_into_size(ByteSize::new(1)));
let interval = IntervalDomain::mock_with_bounds(Some(-300), -128, 128, None);
assert!(!interval.fits_into_size(ByteSize::new(1)));
}
#[test]
fn float_nan_bytesize() {
let top_value = IntervalDomain::new_top(ByteSize::new(8));
let result = top_value.un_op(UnOpType::FloatNaN);
......
......@@ -351,15 +351,19 @@ impl State {
Ok(())
} else if let Expression::BinOp { op, lhs, rhs } = expression {
self.specialize_by_binop_expression_result(op, lhs, rhs, result)
} else if let Ok(result_bitvec) = result.try_to_bitvec() {
} else {
match expression {
Expression::Var(_) => panic!(),
Expression::Const(input_bitvec) => {
if let Ok(result_bitvec) = result.try_to_bitvec() {
if *input_bitvec == result_bitvec {
Ok(())
} else {
Err(anyhow!("Unsatisfiable state"))
}
} else {
Ok(())
}
}
Expression::BinOp { .. } => {
panic!() // Already handled above
......@@ -385,12 +389,26 @@ impl State {
description: _,
size: _,
} => Ok(()),
Expression::Subpiece { .. } => Ok(()),
Expression::Subpiece {
low_byte,
size,
arg,
} => {
if *low_byte == ByteSize::new(0) {
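// If the argument value fits (as a signed value) into the subpiece size, the subpiece
// is lossless: sign-extending the known result recovers the full argument value,
// which can then be used to specialize `arg`.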
if let Data::Value(arg_value) = self.eval(arg) {
if arg_value.fits_into_size(*size) {
let intermediate_result =
result.cast(CastOpType::IntSExt, arg.bytesize());
return self
.specialize_by_expression_result(arg, intermediate_result);
}
}
}
} else {
Ok(())
}
}
}
}
/// Try to restrict the input variables of the given binary operation
/// so that it only evaluates to the given `result_bitvec`.
......@@ -404,25 +422,25 @@ impl State {
match op {
BinOpType::IntAdd => {
if let Ok(bitvec) = self.eval(lhs).try_to_bitvec() {
let intermediate_result = result.clone() - bitvec.into();
self.specialize_by_expression_result(rhs, intermediate_result)?;
}
if let Ok(bitvec) = self.eval(rhs).try_to_bitvec() {
let intermediate_result = result - bitvec.into();
return self.specialize_by_expression_result(rhs, intermediate_result);
} else if let Ok(bitvec) = self.eval(rhs).try_to_bitvec() {
let intermediate_result = result - bitvec.into();
return self.specialize_by_expression_result(lhs, intermediate_result);
} else {
return Ok(());
self.specialize_by_expression_result(lhs, intermediate_result)?;
}
return Ok(());
}
BinOpType::IntSub => {
if let Ok(bitvec) = self.eval(lhs).try_to_bitvec() {
let intermediate_result: Data = Data::from(bitvec) - result;
return self.specialize_by_expression_result(rhs, intermediate_result);
} else if let Ok(bitvec) = self.eval(rhs).try_to_bitvec() {
let intermediate_result: Data = Data::from(bitvec) - result.clone();
self.specialize_by_expression_result(rhs, intermediate_result)?;
}
if let Ok(bitvec) = self.eval(rhs).try_to_bitvec() {
let intermediate_result = result + bitvec.into();
return self.specialize_by_expression_result(lhs, intermediate_result);
} else {
return Ok(());
self.specialize_by_expression_result(lhs, intermediate_result)?;
}
return Ok(());
}
_ => (),
}
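To illustrate the `IntAdd` case above, here is a sketch in the style of the tests at the end of this diff (the `register` helper comes from the test module; register names and values are illustrative): if `RAX + 8` is known to evaluate to `16`, then `RAX` itself can be restricted to `8`.

let mut state = State::new(&register("RSP"), Tid::new("func_tid"));
let rax_register = Variable {
    name: "RAX".to_string(),
    size: ByteSize::new(8),
    is_temp: false,
};
let addition = Expression::BinOp {
    op: BinOpType::IntAdd,
    lhs: Box::new(Expression::Var(rax_register.clone())),
    rhs: Box::new(Expression::Const(Bitvector::from_i64(8))),
};
// The right-hand side is the constant 8, so the left-hand side gets specialized to 16 - 8 = 8.
let x = state.specialize_by_expression_result(&addition, Bitvector::from_i64(16).into());
assert!(x.is_ok());
assert_eq!(
    state.get_register(&rax_register),
    Bitvector::from_i64(8).into()
);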
......@@ -430,12 +448,18 @@ impl State {
match op {
BinOpType::IntXOr | BinOpType::BoolXOr => {
if let Ok(bitvec) = self.eval(lhs).try_to_bitvec() {
self.specialize_by_expression_result(rhs, (result_bitvec ^ &bitvec).into())
} else if let Ok(bitvec) = self.eval(rhs).try_to_bitvec() {
self.specialize_by_expression_result(lhs, (result_bitvec ^ &bitvec).into())
} else {
Ok(())
self.specialize_by_expression_result(
rhs,
(result_bitvec.clone() ^ &bitvec).into(),
)?;
}
if let Ok(bitvec) = self.eval(rhs).try_to_bitvec() {
self.specialize_by_expression_result(
lhs,
(result_bitvec ^ &bitvec).into(),
)?;
}
Ok(())
}
BinOpType::IntOr | BinOpType::BoolOr => {
if result_bitvec.is_zero() {
......@@ -482,24 +506,24 @@ impl State {
(BinOpType::IntEqual, true) | (BinOpType::IntNotEqual, false) => {
// lhs == rhs
if let Ok(bitvec) = self.eval(lhs).try_to_bitvec() {
self.specialize_by_expression_result(rhs, bitvec.into())
} else if let Ok(bitvec) = self.eval(rhs).try_to_bitvec() {
self.specialize_by_expression_result(lhs, bitvec.into())
} else {
Ok(())
self.specialize_by_expression_result(rhs, bitvec.into())?;
}
if let Ok(bitvec) = self.eval(rhs).try_to_bitvec() {
self.specialize_by_expression_result(lhs, bitvec.into())?;
}
Ok(())
}
(BinOpType::IntEqual, false) | (BinOpType::IntNotEqual, true) => {
// lhs != rhs
if let Ok(bitvec) = self.eval(lhs).try_to_bitvec() {
let new_result = self.eval(rhs).add_not_equal_bound(&bitvec)?;
self.specialize_by_expression_result(rhs, new_result)
} else if let Ok(bitvec) = self.eval(rhs).try_to_bitvec() {
self.specialize_by_expression_result(rhs, new_result)?;
}
if let Ok(bitvec) = self.eval(rhs).try_to_bitvec() {
let new_result = self.eval(lhs).add_not_equal_bound(&bitvec)?;
self.specialize_by_expression_result(lhs, new_result)
} else {
Ok(())
self.specialize_by_expression_result(lhs, new_result)?;
}
Ok(())
}
_ => panic!(),
}
......@@ -563,11 +587,11 @@ impl State {
}
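// Strict `<` with a known left-hand side: the right-hand side must be at least `lhs_bound + 1`.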
lhs_bound += &Bitvector::one(lhs_bound.width());
let new_result = self.eval(rhs).add_signed_greater_equal_bound(&lhs_bound)?;
return self.specialize_by_expression_result(rhs, new_result);
self.specialize_by_expression_result(rhs, new_result)?;
}
IntSLessEqual => {
let new_result = self.eval(rhs).add_signed_greater_equal_bound(&lhs_bound)?;
return self.specialize_by_expression_result(rhs, new_result);
self.specialize_by_expression_result(rhs, new_result)?;
}
IntLess => {
if lhs_bound == Bitvector::unsigned_max_value(lhs_bound.width()) {
......@@ -577,17 +601,18 @@ impl State {
let new_result = self
.eval(rhs)
.add_unsigned_greater_equal_bound(&lhs_bound)?;
return self.specialize_by_expression_result(rhs, new_result);
self.specialize_by_expression_result(rhs, new_result)?;
}
IntLessEqual => {
let new_result = self
.eval(rhs)
.add_unsigned_greater_equal_bound(&lhs_bound)?;
return self.specialize_by_expression_result(rhs, new_result);
self.specialize_by_expression_result(rhs, new_result)?;
}
_ => panic!(),
}
} else if let Ok(mut rhs_bound) = self.eval(rhs).try_to_bitvec() {
}
if let Ok(mut rhs_bound) = self.eval(rhs).try_to_bitvec() {
match op {
IntSLess => {
if rhs_bound == Bitvector::signed_min_value(rhs_bound.width()) {
......@@ -595,11 +620,11 @@ impl State {
}
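// Strict `<` with a known right-hand side: the left-hand side can be at most `rhs_bound - 1`.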
rhs_bound -= &Bitvector::one(rhs_bound.width());
let new_result = self.eval(lhs).add_signed_less_equal_bound(&rhs_bound)?;
return self.specialize_by_expression_result(lhs, new_result);
self.specialize_by_expression_result(lhs, new_result)?;
}
IntSLessEqual => {
let new_result = self.eval(lhs).add_signed_less_equal_bound(&rhs_bound)?;
return self.specialize_by_expression_result(lhs, new_result);
self.specialize_by_expression_result(lhs, new_result)?;
}
IntLess => {
if rhs_bound == Bitvector::zero(rhs_bound.width()) {
......@@ -607,11 +632,11 @@ impl State {
}
rhs_bound -= &Bitvector::one(rhs_bound.width());
let new_result = self.eval(lhs).add_unsigned_less_equal_bound(&rhs_bound)?;
return self.specialize_by_expression_result(lhs, new_result);
self.specialize_by_expression_result(lhs, new_result)?;
}
IntLessEqual => {
let new_result = self.eval(lhs).add_unsigned_less_equal_bound(&rhs_bound)?;
return self.specialize_by_expression_result(lhs, new_result);
self.specialize_by_expression_result(lhs, new_result)?;
}
_ => panic!(),
}
......
......@@ -542,6 +542,30 @@ fn specialize_by_expression_results() {
state.get_register(&eax_register),
Bitvector::from_i32(-7).into()
);
// Expr = Subpiece(Var(RAX))
let mut state = State::new(&register("RSP"), Tid::new("func_tid"));
let rax_register = Variable {
name: "RAX".to_string(),
size: ByteSize::new(8),
is_temp: false,
};
let x = state.specialize_by_expression_result(
&Expression::Var(rax_register.clone()).subpiece(ByteSize::new(0), ByteSize::new(1)),
Bitvector::from_i8(5).into(),
);
assert!(x.is_ok());
assert!(state.get_register(&rax_register).is_top());
state.set_register(&rax_register, IntervalDomain::mock(3, 10).into());
let x = state.specialize_by_expression_result(
&Expression::Var(rax_register.clone()).subpiece(ByteSize::new(0), ByteSize::new(1)),
Bitvector::from_i8(5).into(),
);
assert!(x.is_ok());
assert_eq!(
state.get_register(&rax_register),
IntervalDomain::mock(5, 5).into()
);
}
/// Test expression specialization for binary operations
......