Unverified Commit 5434967e by Enkelmann Committed by GitHub

implement an abstract interval domain (#152)

parent 784569d5
@@ -61,165 +61,12 @@ impl RegisterDomain for BitvectorDomain {
_ => assert_eq!(self.bytesize(), rhs.bytesize()),
}
match (self, rhs) {
(BitvectorDomain::Value(lhs_bitvec), BitvectorDomain::Value(rhs_bitvec)) => {
match lhs_bitvec.bin_op(op, rhs_bitvec) {
Ok(val) => BitvectorDomain::Value(val),
Err(_) => BitvectorDomain::new_top(self.bin_op_bytesize(op, rhs)),
}
}
(BitvectorDomain::Value(lhs_bitvec), BitvectorDomain::Value(rhs_bitvec)) => match op {
Piece => {
let new_bitwidth = (self.bytesize() + rhs.bytesize()).as_bit_length();
let upper_bits = lhs_bitvec
.clone()
.into_zero_extend(new_bitwidth)
.unwrap()
.into_checked_shl(rhs.bytesize().as_bit_length())
.unwrap();
let lower_bits = rhs_bitvec.clone().into_zero_extend(new_bitwidth).unwrap();
BitvectorDomain::Value(upper_bits | &lower_bits)
}
IntAdd => BitvectorDomain::Value(lhs_bitvec + rhs_bitvec),
IntSub => BitvectorDomain::Value(lhs_bitvec - rhs_bitvec),
IntCarry => {
let result = lhs_bitvec + rhs_bitvec;
if result.checked_ult(lhs_bitvec).unwrap()
|| result.checked_ult(rhs_bitvec).unwrap()
{
Bitvector::from_u8(1).into()
} else {
Bitvector::from_u8(0).into()
}
}
IntSCarry => {
let result = apint::Int::from(lhs_bitvec + rhs_bitvec);
let lhs_bitvec = apint::Int::from(lhs_bitvec.clone());
let rhs_bitvec = apint::Int::from(rhs_bitvec.clone());
if (result.is_negative()
&& lhs_bitvec.is_positive()
&& rhs_bitvec.is_positive())
|| (!result.is_negative()
&& lhs_bitvec.is_negative()
&& rhs_bitvec.is_negative())
{
Bitvector::from_u8(1).into()
} else {
Bitvector::from_u8(0).into()
}
}
IntSBorrow => {
let result = apint::Int::from(lhs_bitvec - rhs_bitvec);
let lhs_bitvec = apint::Int::from(lhs_bitvec.clone());
let rhs_bitvec = apint::Int::from(rhs_bitvec.clone());
if (result.is_negative()
&& !lhs_bitvec.is_positive()
&& rhs_bitvec.is_negative())
|| (result.is_positive()
&& lhs_bitvec.is_negative()
&& rhs_bitvec.is_positive())
{
Bitvector::from_u8(1).into()
} else {
Bitvector::from_u8(0).into()
}
}
IntMult => {
// FIXME: Multiplication for bitvectors larger than 8 bytes is not yet implemented in the `apint` crate (version 0.2).
if u64::from(self.bytesize()) > 8 {
BitvectorDomain::Top(self.bytesize())
} else {
BitvectorDomain::Value(lhs_bitvec * rhs_bitvec)
}
}
IntDiv => {
// FIXME: Division for bitvectors larger than 8 bytes is not yet implemented in the `apint` crate (version 0.2).
if u64::from(self.bytesize()) > 8 {
BitvectorDomain::Top(self.bytesize())
} else {
BitvectorDomain::Value(
lhs_bitvec.clone().into_checked_udiv(rhs_bitvec).unwrap(),
)
}
}
IntSDiv => {
// FIXME: Division for bitvectors larger than 8 bytes is not yet implemented in the `apint` crate (version 0.2).
if u64::from(self.bytesize()) > 8 {
BitvectorDomain::Top(self.bytesize())
} else {
BitvectorDomain::Value(
lhs_bitvec.clone().into_checked_sdiv(rhs_bitvec).unwrap(),
)
}
}
IntRem => BitvectorDomain::Value(
lhs_bitvec.clone().into_checked_urem(rhs_bitvec).unwrap(),
),
IntSRem => BitvectorDomain::Value(
lhs_bitvec.clone().into_checked_srem(rhs_bitvec).unwrap(),
),
IntLeft => {
let shift_amount = rhs_bitvec.try_to_u64().unwrap() as usize;
if shift_amount < lhs_bitvec.width().to_usize() {
BitvectorDomain::Value(
lhs_bitvec.clone().into_checked_shl(shift_amount).unwrap(),
)
} else {
BitvectorDomain::Value(Bitvector::zero(lhs_bitvec.width()))
}
}
IntRight => {
let shift_amount = rhs_bitvec.try_to_u64().unwrap() as usize;
if shift_amount < lhs_bitvec.width().to_usize() {
BitvectorDomain::Value(
lhs_bitvec.clone().into_checked_lshr(shift_amount).unwrap(),
)
} else {
BitvectorDomain::Value(Bitvector::zero(lhs_bitvec.width()))
}
}
IntSRight => {
let shift_amount = rhs_bitvec.try_to_u64().unwrap() as usize;
if shift_amount < lhs_bitvec.width().to_usize() {
BitvectorDomain::Value(
lhs_bitvec.clone().into_checked_ashr(shift_amount).unwrap(),
)
} else {
let signed_bitvec = apint::Int::from(lhs_bitvec.clone());
if signed_bitvec.is_negative() {
let minus_one = Bitvector::zero(lhs_bitvec.width())
- &Bitvector::one(lhs_bitvec.width());
BitvectorDomain::Value(minus_one)
} else {
BitvectorDomain::Value(Bitvector::zero(lhs_bitvec.width()))
}
}
}
IntAnd | BoolAnd => BitvectorDomain::Value(lhs_bitvec & rhs_bitvec),
IntOr | BoolOr => BitvectorDomain::Value(lhs_bitvec | rhs_bitvec),
IntXOr | BoolXOr => BitvectorDomain::Value(lhs_bitvec ^ rhs_bitvec),
IntEqual => {
assert_eq!(lhs_bitvec.width(), rhs_bitvec.width());
BitvectorDomain::Value(Bitvector::from((lhs_bitvec == rhs_bitvec) as u8))
}
IntNotEqual => {
assert_eq!(lhs_bitvec.width(), rhs_bitvec.width());
BitvectorDomain::Value(Bitvector::from((lhs_bitvec != rhs_bitvec) as u8))
}
IntLess => BitvectorDomain::Value(Bitvector::from(
lhs_bitvec.checked_ult(rhs_bitvec).unwrap() as u8,
)),
IntLessEqual => BitvectorDomain::Value(Bitvector::from(
lhs_bitvec.checked_ule(rhs_bitvec).unwrap() as u8,
)),
IntSLess => BitvectorDomain::Value(Bitvector::from(
lhs_bitvec.checked_slt(rhs_bitvec).unwrap() as u8,
)),
IntSLessEqual => BitvectorDomain::Value(Bitvector::from(
lhs_bitvec.checked_sle(rhs_bitvec).unwrap() as u8,
)),
FloatEqual | FloatNotEqual | FloatLess | FloatLessEqual => {
// TODO: Implement floating point comparison operators!
BitvectorDomain::new_top(ByteSize::new(1))
}
FloatAdd | FloatSub | FloatMult | FloatDiv => {
// TODO: Implement floating point arithmetic operators!
BitvectorDomain::new_top(self.bytesize())
}
},
}
_ => BitvectorDomain::new_top(self.bin_op_bytesize(op, rhs)),
}
}
@@ -228,18 +75,9 @@ impl RegisterDomain for BitvectorDomain {
fn un_op(&self, op: UnOpType) -> Self {
use UnOpType::*;
if let BitvectorDomain::Value(bitvec) = self {
match op {
Int2Comp => BitvectorDomain::Value(-bitvec),
IntNegate => BitvectorDomain::Value(bitvec.clone().into_bitnot()),
BoolNegate => {
if bitvec.is_zero() {
BitvectorDomain::Value(Bitvector::from_u8(1))
} else {
BitvectorDomain::Value(Bitvector::from_u8(0))
}
}
FloatNegate | FloatAbs | FloatSqrt | FloatCeil | FloatFloor | FloatRound
| FloatNaN => BitvectorDomain::new_top(self.bytesize()),
}
match bitvec.un_op(op) {
Ok(val) => BitvectorDomain::Value(val),
Err(_) => BitvectorDomain::new_top(self.bytesize()),
}
} else {
match op {
@@ -252,14 +90,7 @@ impl RegisterDomain for BitvectorDomain {
/// Extract a sub-bitvector out of a bitvector
fn subpiece(&self, low_byte: ByteSize, size: ByteSize) -> Self {
if let BitvectorDomain::Value(bitvec) = self {
BitvectorDomain::Value(
bitvec
.clone()
.into_checked_lshr(low_byte.as_bit_length())
.unwrap()
.into_truncate(size.as_bit_length())
.unwrap(),
)
BitvectorDomain::Value(bitvec.subpiece(low_byte, size))
} else {
BitvectorDomain::new_top(size)
}
@@ -268,26 +99,9 @@ impl RegisterDomain for BitvectorDomain {
/// Perform a size-changing cast on a bitvector.
fn cast(&self, kind: CastOpType, width: ByteSize) -> Self {
if let BitvectorDomain::Value(bitvec) = self {
use CastOpType::*;
match kind {
IntZExt => BitvectorDomain::Value(
bitvec
.clone()
.into_zero_extend(apint::BitWidth::from(width))
.unwrap(),
),
IntSExt => BitvectorDomain::Value(
bitvec
.clone()
.into_sign_extend(apint::BitWidth::from(width))
.unwrap(),
),
PopCount => BitvectorDomain::Value(
Bitvector::from_u64(bitvec.count_ones() as u64)
.into_truncate(apint::BitWidth::from(width))
.unwrap(),
),
Int2Float | Float2Float | Trunc => BitvectorDomain::new_top(width),
}
match bitvec.cast(kind, width) {
Ok(val) => BitvectorDomain::Value(val),
Err(_) => BitvectorDomain::new_top(width),
}
} else {
BitvectorDomain::new_top(width)
......
use std::fmt::Display;
use crate::intermediate_representation::*;
use crate::prelude::*;
use super::{AbstractDomain, HasTop, RegisterDomain, SizedDomain};
mod simple_interval;
use simple_interval::*;
mod bin_ops;
/// An abstract domain representing values in an interval range.
///
/// The interval bounds are signed integers,
/// i.e. the domain loses precision if tasked to represent large unsigned integers.
///
/// The domain also contains widening hints to facilitate fast and exact widening for simple loop counter variables.
/// See the [`IntervalDomain::signed_merge_and_widen`] method for details on the widening strategy.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Clone)]
pub struct IntervalDomain {
/// The underlying interval.
interval: Interval,
/// An upper bound for widening operations.
widening_upper_bound: Option<Bitvector>,
/// A lower bound for widening operations.
widening_lower_bound: Option<Bitvector>,
}
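// For illustration: `IntervalDomain::mock_i8_with_bounds(Some(-10), 0, 5, Some(30))`
// (a helper from the test module further below) represents the signed 8-bit values 0..=5
// and additionally remembers that widening may first jump down to -10 or up to 30
// before the domain has to be widened to `Top`.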
impl From<Interval> for IntervalDomain {
/// Generate an interval domain without widening hints.
fn from(interval: Interval) -> IntervalDomain {
IntervalDomain {
interval,
widening_lower_bound: None,
widening_upper_bound: None,
}
}
}
impl IntervalDomain {
/// Create a new interval domain with the given bounds.
///
/// Both `start` and `end` are inclusive, i.e. contained in the interval.
/// The widening hints are set to `None`.
pub fn new(start: Bitvector, end: Bitvector) -> Self {
IntervalDomain {
interval: Interval::new(start, end),
widening_upper_bound: None,
widening_lower_bound: None,
}
}
/// Returns true if the two intervals represent the same value sets.
/// This function ignores differences in the widening hints of the two intervals.
pub fn equal_as_value_sets(&self, other: &IntervalDomain) -> bool {
self.interval == other.interval
}
/// If `bound` lies strictly below the interval of `self`
/// and is more restrictive (i.e. greater) than the current widening lower bound,
/// then set the widening lower bound to `bound`.
/// Otherwise keep the old widening lower bound.
pub fn update_widening_lower_bound(&mut self, bound: &Option<Bitvector>) {
if let Some(bound_value) = bound {
if bound_value.checked_slt(&self.interval.start).unwrap() {
if let Some(ref previous_bound) = self.widening_lower_bound {
if bound_value.checked_sgt(previous_bound).unwrap() {
self.widening_lower_bound = Some(bound_value.clone());
}
} else {
self.widening_lower_bound = Some(bound_value.clone());
}
}
}
}
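// Example sketch: for an interval starting at 0 with current widening lower bound -10,
// `update_widening_lower_bound(&Some(Bitvector::from_i64(-3)))` replaces the bound with -3,
// since -3 still lies below the interval but is tighter than -10.
// A candidate bound of 2 would be ignored, because it does not lie below the interval.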
/// If `bound` lies strictly above the interval of `self`
/// and is more restrictive (i.e. smaller) than the current widening upper bound,
/// then set the widening upper bound to `bound`.
/// Otherwise keep the old widening upper bound.
pub fn update_widening_upper_bound(&mut self, bound: &Option<Bitvector>) {
if let Some(bound_value) = bound {
if bound_value.checked_sgt(&self.interval.end).unwrap() {
if let Some(ref previous_bound) = self.widening_upper_bound {
if bound_value.checked_slt(previous_bound).unwrap() {
self.widening_upper_bound = Some(bound_value.clone());
}
} else {
self.widening_upper_bound = Some(bound_value.clone());
}
}
}
}
/// Merge as signed intervals without performing widening.
pub fn signed_merge(&self, other: &IntervalDomain) -> IntervalDomain {
let mut merged_domain: IntervalDomain = self.interval.signed_merge(&other.interval).into();
merged_domain.update_widening_lower_bound(&self.widening_lower_bound);
merged_domain.update_widening_lower_bound(&other.widening_lower_bound);
merged_domain.update_widening_upper_bound(&self.widening_upper_bound);
merged_domain.update_widening_upper_bound(&other.widening_upper_bound);
merged_domain
}
/// Merge as signed intervals and perform widening if necessary.
///
/// No widening is performed for very small intervals (currently set to interval lengths not greater than 2)
/// or if the merged interval (as value set) equals one of the input intervals.
/// In all other cases widening is performed after merging the underlying intervals.
///
/// ### Widening Strategy
///
/// If no suitable widening bounds exist, widen to the `Top` value.
/// If exactly one widening bound exists, widen up to the bound,
/// but do not perform widening in the other direction of the interval.
/// If widening bounds for both directions exist, widen up to the bounds in both directions.
pub fn signed_merge_and_widen(&self, other: &IntervalDomain) -> IntervalDomain {
let mut merged_domain = self.signed_merge(other);
if merged_domain.equal_as_value_sets(self) || merged_domain.equal_as_value_sets(other) {
// Do not widen if the merged value set is already contained in either `self` or `other`.
return merged_domain;
}
if let Ok(length) = merged_domain.interval.length().try_to_u64() {
if length <= 2 {
// Do not widen very small intervals or already unconstrained intervals (where `length()` returns zero).
return merged_domain;
}
}
let mut has_been_widened = false;
if self.interval.start != other.interval.start
&& merged_domain.widening_lower_bound.is_some()
{
// widen to the lower bound
merged_domain.interval.start = merged_domain.widening_lower_bound.unwrap();
merged_domain.widening_lower_bound = None;
has_been_widened = true;
}
if self.interval.end != other.interval.end && merged_domain.widening_upper_bound.is_some() {
// widen to the upper bound
merged_domain.interval.end = merged_domain.widening_upper_bound.unwrap();
merged_domain.widening_upper_bound = None;
has_been_widened = true;
}
if has_been_widened {
merged_domain
} else {
// No widening bounds could be used for widening, so we have to widen to the `Top` value.
IntervalDomain::new_top(merged_domain.bytesize())
}
}
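// Illustrative widening sequence for a simple loop counter (mirroring the `signed_merge` test below):
// let mut var = IntervalDomain::mock(0, 0);
// var = var.merge(&IntervalDomain::mock_with_bounds(None, 1, 1, Some(99)));
// // -> interval [0, 1] with widening upper bound 99 (no widening, the interval is small)
// var = var.merge(&IntervalDomain::mock_with_bounds(None, 1, 2, Some(99)));
// // -> widened in one step to [0, 99]; the upper widening hint is consumed
// var = var.merge(&IntervalDomain::mock_with_bounds(None, 1, 99, None));
// // -> stays at [0, 99], the fixpoint is reached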
/// If the interval contains exactly one value, return the value.
pub fn try_to_bitvec(&self) -> Result<Bitvector, ()> {
if self.interval.start == self.interval.end {
Ok(self.interval.start.clone())
} else {
Err(())
}
}
/// Zero-extend the values in the interval to the given width.
pub fn zero_extend(self, width: ByteSize) -> IntervalDomain {
let lower_bound = match self.widening_lower_bound {
Some(bound)
if (bound.sign_bit().to_bool() == self.interval.start.sign_bit().to_bool())
&& (self.interval.start.sign_bit().to_bool()
== self.interval.end.sign_bit().to_bool()) =>
{
Some(bound.into_zero_extend(width).unwrap())
}
_ => None,
};
let mut upper_bound = match self.widening_upper_bound {
Some(bound)
if (bound.sign_bit().to_bool() == self.interval.end.sign_bit().to_bool())
&& (self.interval.start.sign_bit().to_bool()
== self.interval.end.sign_bit().to_bool()) =>
{
Some(bound.into_zero_extend(width).unwrap())
}
_ => None,
};
let old_width = self.interval.start.width();
let new_interval = self.interval.zero_extend(width);
if upper_bound.is_none() {
let max_val = Bitvector::unsigned_max_value(old_width)
.into_zero_extend(width)
.unwrap();
if new_interval.end.checked_ult(&max_val).unwrap() {
upper_bound = Some(max_val);
}
}
IntervalDomain {
interval: new_interval,
widening_lower_bound: lower_bound,
widening_upper_bound: upper_bound,
}
}
/// Sign-extend the values in the interval to the given width.
pub fn sign_extend(mut self, width: ByteSize) -> Self {
assert!(self.bytesize() <= width);
if self.widening_lower_bound.is_none() {
let min_val = Bitvector::signed_min_value(self.interval.start.width());
if min_val.checked_slt(&self.interval.start).unwrap() {
self.widening_lower_bound = Some(min_val);
}
}
if self.widening_upper_bound.is_none() {
let max_val = Bitvector::signed_max_value(self.interval.end.width());
if max_val.checked_sgt(&self.interval.end).unwrap() {
self.widening_upper_bound = Some(max_val);
}
}
IntervalDomain {
interval: Interval {
start: self.interval.start.clone().into_sign_extend(width).unwrap(),
end: self.interval.end.clone().into_sign_extend(width).unwrap(),
},
widening_lower_bound: self
.widening_lower_bound
.map(|bitvec| bitvec.into_sign_extend(width).unwrap()),
widening_upper_bound: self
.widening_upper_bound
.map(|bitvec| bitvec.into_sign_extend(width).unwrap()),
}
}
}
impl AbstractDomain for IntervalDomain {
/// Merge two interval domains and perform widening if necessary.
/// See [`IntervalDomain::signed_merge_and_widen`] for the widening strategy.
fn merge(&self, other: &IntervalDomain) -> IntervalDomain {
self.signed_merge_and_widen(other)
}
/// Return `true` if the interval spans all possible values.
fn is_top(&self) -> bool {
self.interval.is_top()
}
}
impl SizedDomain for IntervalDomain {
/// Return the size in bytes of the represented values.
fn bytesize(&self) -> ByteSize {
self.interval.start.width().into()
}
/// Return a new `Top` value with the given bytesize.
fn new_top(bytesize: ByteSize) -> Self {
IntervalDomain {
interval: Interval {
start: Bitvector::signed_min_value(bytesize.into()),
end: Bitvector::signed_max_value(bytesize.into()),
},
widening_lower_bound: None,
widening_upper_bound: None,
}
}
}
impl HasTop for IntervalDomain {
/// Return a new interval with the same byte size as `self` and representing the `Top` value of the domain.
fn top(&self) -> Self {
Self::new_top(self.bytesize())
}
}
impl RegisterDomain for IntervalDomain {
/// Compute the result of a binary operation between two interval domains.
///
/// For binary operations that are not explicitly implemented
/// the result is only exact if both intervals contain exactly one value.
fn bin_op(&self, op: BinOpType, rhs: &Self) -> Self {
use BinOpType::*;
match op {
Piece | IntEqual | IntNotEqual | IntLess | IntSLess | IntLessEqual | IntSLessEqual
| IntCarry | IntSCarry | IntSBorrow | IntAnd | IntOr | IntXOr | IntRight
| IntSRight | IntDiv | IntSDiv | IntRem | IntSRem | BoolAnd | BoolOr | BoolXOr
| FloatEqual | FloatNotEqual | FloatLess | FloatLessEqual | FloatAdd | FloatSub
| FloatMult | FloatDiv => {
let new_interval = if self.interval.start == self.interval.end
&& rhs.interval.start == rhs.interval.end
{
if let Ok(bitvec) = self.interval.start.bin_op(op, &rhs.interval.start) {
bitvec.into()
} else {
Interval::new_top(self.bin_op_bytesize(op, rhs))
}
} else {
Interval::new_top(self.bin_op_bytesize(op, rhs))
};
IntervalDomain {
interval: new_interval,
widening_lower_bound: None,
widening_upper_bound: None,
}
}
IntAdd => self.add(rhs),
IntSub => self.sub(rhs),
IntMult => self.signed_mul(rhs),
IntLeft => self.shift_left(rhs),
}
}
/// Compute the result of a unary operation on the interval domain.
fn un_op(&self, op: UnOpType) -> Self {
use UnOpType::*;
match op {
Int2Comp => {
let interval = self.interval.clone().int_2_comp();
let mut new_upper_bound = None;
if let Some(bound) = self.widening_lower_bound.clone() {
if bound
.checked_sgt(&Bitvector::signed_min_value(self.bytesize().into()))
.unwrap()
{
new_upper_bound = Some(-bound);
}
};
let new_lower_bound = self.widening_upper_bound.clone().map(|bound| -bound);
IntervalDomain {
interval,
widening_lower_bound: new_lower_bound,
widening_upper_bound: new_upper_bound,
}
}
IntNegate => IntervalDomain {
interval: self.interval.clone().bitwise_not(),
widening_lower_bound: None,
widening_upper_bound: None,
},
BoolNegate => {
if self.interval.start == self.interval.end {
if self.interval.start == Bitvector::zero(ByteSize::new(1).into()) {
Bitvector::one(ByteSize::new(1).into()).into()
} else {
Bitvector::zero(ByteSize::new(1).into()).into()
}
} else {
IntervalDomain::new_top(self.bytesize())
}
}
FloatAbs | FloatCeil | FloatFloor | FloatNaN | FloatNegate | FloatRound | FloatSqrt => {
IntervalDomain::new_top(self.bytesize())
}
}
}
/// Take a sub-bitvector of the values in the interval domain.
///
/// If `low_byte` is not zero, the result will generally not be exact.
fn subpiece(&self, low_byte: ByteSize, size: ByteSize) -> Self {
let new_interval = self.interval.clone().subpiece(low_byte, size);
let (mut new_lower_bound, mut new_upper_bound) = (None, None);
if low_byte == ByteSize::new(0) {
if let (Some(lower_bound), Some(upper_bound)) =
(&self.widening_lower_bound, &self.widening_upper_bound)
{
let new_min = Bitvector::signed_min_value(size.into())
.into_sign_extend(self.bytesize())
.unwrap();
let new_max = Bitvector::signed_max_value(size.into())
.into_sign_extend(self.bytesize())
.unwrap();
if lower_bound.checked_sge(&new_min).unwrap()
&& upper_bound.checked_sle(&new_max).unwrap()
{
new_lower_bound = Some(lower_bound.clone().into_truncate(size).unwrap());
new_upper_bound = Some(upper_bound.clone().into_truncate(size).unwrap());
}
}
}
IntervalDomain {
interval: new_interval,
widening_lower_bound: new_lower_bound,
widening_upper_bound: new_upper_bound,
}
}
/// Compute the result of a cast operation on the interval domain.
fn cast(&self, kind: CastOpType, width: ByteSize) -> Self {
use CastOpType::*;
match kind {
IntZExt => {
assert!(self.bytesize() <= width);
if self.bytesize() == width {
return self.clone();
}
self.clone().zero_extend(width)
}
IntSExt => {
assert!(self.bytesize() <= width);
self.clone().sign_extend(width)
}
Float2Float | Int2Float | Trunc => IntervalDomain::new_top(width),
PopCount => {
if let Ok(bitvec) = self.try_to_bitvec() {
bitvec.cast(kind, width).unwrap().into()
} else {
IntervalDomain::new(
Bitvector::zero(width.into()),
Bitvector::from_u64(self.bytesize().as_bit_length() as u64)
.into_truncate(width)
.unwrap(),
)
}
}
}
}
}
impl From<Bitvector> for IntervalDomain {
/// Create an interval containing only `bitvec`.
fn from(bitvec: Bitvector) -> Self {
IntervalDomain {
interval: bitvec.into(),
widening_lower_bound: None,
widening_upper_bound: None,
}
}
}
impl Display for IntervalDomain {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if self.is_top() {
write!(f, "Top:i{}", self.bytesize().as_bit_length())
} else if self.interval.start == self.interval.end {
write!(
f,
"{:016x}:i{}",
apint::Int::from(self.interval.start.clone()),
self.bytesize().as_bit_length()
)
} else {
let start_int = apint::Int::from(self.interval.start.clone());
let end_int = apint::Int::from(self.interval.end.clone());
write!(
f,
"[{:016x}, {:016x}]:i{}",
start_int,
end_int,
self.bytesize().as_bit_length()
)
}
}
}
#[cfg(test)]
mod tests;
use super::*;
impl IntervalDomain {
/// Compute the interval of possible results
/// if one adds a value from `self` to a value from `rhs`.
pub fn add(&self, rhs: &Self) -> Self {
let interval = self.interval.add(&rhs.interval);
if interval.is_top() {
interval.into()
} else {
let new_lower_bound = if let (Some(self_bound), Some(rhs_bound)) =
(&self.widening_lower_bound, &rhs.widening_lower_bound)
{
if self_bound.signed_add_overflow_check(rhs_bound) {
None
} else {
Some(self_bound.clone().into_checked_add(rhs_bound).unwrap())
}
} else {
None
};
let new_upper_bound = if let (Some(self_bound), Some(rhs_bound)) =
(&self.widening_upper_bound, &rhs.widening_upper_bound)
{
if self_bound.signed_add_overflow_check(rhs_bound) {
None
} else {
Some(self_bound.clone().into_checked_add(rhs_bound).unwrap())
}
} else {
None
};
IntervalDomain {
interval,
widening_upper_bound: new_upper_bound,
widening_lower_bound: new_lower_bound,
}
}
}
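// Example (see the `add` test below): adding [3, 7] with upper widening hint 10
// to [-3, 0] with hints -20 and 10 yields [0, 7] with upper hint 20;
// the lower hint is dropped because only one of the operands provides one.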
/// Compute the interval of possible results
/// if one subtracts a value in `rhs` from a value in `self`.
pub fn sub(&self, rhs: &Self) -> Self {
let interval = self.interval.sub(&rhs.interval);
if interval.is_top() {
interval.into()
} else {
let new_lower_bound = if let (Some(self_bound), Some(rhs_bound)) =
(&self.widening_lower_bound, &rhs.widening_upper_bound)
{
if self_bound.signed_sub_overflow_check(rhs_bound) {
None
} else {
Some(self_bound.clone().into_checked_sub(rhs_bound).unwrap())
}
} else {
None
};
let new_upper_bound = if let (Some(self_bound), Some(rhs_bound)) =
(&self.widening_upper_bound, &rhs.widening_lower_bound)
{
if self_bound.signed_sub_overflow_check(rhs_bound) {
None
} else {
Some(self_bound.clone().into_checked_sub(rhs_bound).unwrap())
}
} else {
None
};
IntervalDomain {
interval,
widening_upper_bound: new_upper_bound,
widening_lower_bound: new_lower_bound,
}
}
}
/// Compute the interval of possible results
/// if one multiplies a value in `self` with a value in `rhs`.
pub fn signed_mul(&self, rhs: &Self) -> Self {
let interval = self.interval.signed_mul(&rhs.interval);
if interval.is_top() {
interval.into()
} else {
let mut possible_bounds = Vec::new();
if let (Some(bound1), Some(bound2)) =
(&self.widening_lower_bound, &rhs.widening_lower_bound)
{
if let (result, false) = bound1.signed_mult_with_overflow_flag(bound2).unwrap() {
possible_bounds.push(result);
}
}
if let (Some(bound1), Some(bound2)) =
(&self.widening_lower_bound, &rhs.widening_upper_bound)
{
if let (result, false) = bound1.signed_mult_with_overflow_flag(bound2).unwrap() {
possible_bounds.push(result);
}
}
if let (Some(bound1), Some(bound2)) =
(&self.widening_upper_bound, &rhs.widening_lower_bound)
{
if let (result, false) = bound1.signed_mult_with_overflow_flag(bound2).unwrap() {
possible_bounds.push(result);
}
}
if let (Some(bound1), Some(bound2)) =
(&self.widening_upper_bound, &rhs.widening_upper_bound)
{
if let (result, false) = bound1.signed_mult_with_overflow_flag(bound2).unwrap() {
possible_bounds.push(result);
}
}
let mut lower_bound: Option<Bitvector> = None;
for bound in possible_bounds.iter() {
if bound.checked_slt(&interval.start).unwrap() {
match lower_bound {
Some(prev_bound) if prev_bound.checked_slt(bound).unwrap() => {
lower_bound = Some(bound.clone())
}
None => lower_bound = Some(bound.clone()),
_ => (),
}
}
}
let mut upper_bound: Option<Bitvector> = None;
for bound in possible_bounds.iter() {
if bound.checked_sgt(&interval.end).unwrap() {
match upper_bound {
Some(prev_bound) if prev_bound.checked_sgt(bound).unwrap() => {
upper_bound = Some(bound.clone())
}
None => upper_bound = Some(bound.clone()),
_ => (),
}
}
}
IntervalDomain {
interval,
widening_lower_bound: lower_bound,
widening_upper_bound: upper_bound,
}
}
}
/// Compute the resulting interval after a left shift operation.
/// The result is only exact if the `rhs` interval contains exactly one value.
pub fn shift_left(&self, rhs: &Self) -> Self {
if rhs.interval.start == rhs.interval.end {
let multiplicator = Bitvector::one(self.bytesize().into())
.into_checked_shl(rhs.interval.start.try_to_u64().unwrap() as usize)
.unwrap();
self.signed_mul(&multiplicator.into())
} else {
Self::new_top(self.bytesize())
}
}
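// Example (see the `shift_left` test below): shifting [3, 4] left by the constant 2
// is computed as multiplication with 4 and yields [12, 16];
// a non-constant shift amount returns `Top`.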
}
use crate::intermediate_representation::*;
use crate::prelude::*;
/// An interval of values with a fixed byte size.
///
/// The interval bounds are interpreted as signed integers,
/// i.e. `self.start` is not allowed to be greater than `self.end`
/// as signed integers.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Clone)]
pub struct Interval {
/// The start of the interval. The bound is included in the represented interval.
pub start: Bitvector,
/// The end of the interval. The bound is included in the represented interval.
pub end: Bitvector,
}
impl Interval {
/// Construct a new interval.
///
/// Both `start` and `end` of the interval are inclusive,
/// i.e. contained in the represented interval.
pub fn new(start: Bitvector, end: Bitvector) -> Interval {
assert_eq!(start.width(), end.width());
Interval { start, end }
}
/// Construct a new unconstrained interval.
pub fn new_top(bytesize: ByteSize) -> Interval {
Interval {
start: Bitvector::signed_min_value(bytesize.into()),
end: Bitvector::signed_max_value(bytesize.into()),
}
}
/// Returns true if all values representable by bitvectors of the corresponding length are contained in the interval.
pub fn is_top(&self) -> bool {
(self.start.clone() - &Bitvector::one(self.start.width())) == self.end
}
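// Example: for 8-bit intervals the unconstrained interval is [-128, 127],
// and -128 - 1 wraps around to 127, so the check above holds exactly for `Top`.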
/// Get the size in bytes of values contained in the interval.
pub fn bytesize(&self) -> ByteSize {
self.start.width().into()
}
/// Merge two intervals interpreting both as intervals of signed integers.
pub fn signed_merge(&self, other: &Interval) -> Interval {
if self.start.checked_sgt(&self.end).unwrap()
|| other.start.checked_sgt(&other.end).unwrap()
{
// One of the intervals wraps around
return Interval::new_top(self.bytesize());
}
let start = signed_min(&self.start, &other.start);
let end = signed_max(&self.end, &other.end);
Interval { start, end }
}
/// Return the number of contained values of the interval as an unsigned bitvector.
/// If the interval is unconstrained, return zero
/// (since the maximal number of elements is not representable in a bitvector of the same byte size).
pub fn length(&self) -> Bitvector {
self.end.clone() - &self.start + &Bitvector::one(self.start.width())
}
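// Example: the 8-bit interval [3, 7] has length 7 - 3 + 1 = 5,
// while the unconstrained interval [-128, 127] yields 127 - (-128) + 1 = 256, which wraps to 0.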
/// Compute the interval that results from zero-extending the values in the interval to the given byte size.
pub fn zero_extend(self, width: ByteSize) -> Interval {
assert!(self.bytesize() <= width);
if self.bytesize() == width {
return self;
}
if self.start.sign_bit().to_bool() == self.end.sign_bit().to_bool() {
// Both start and end have the same sign
Interval {
start: self.start.into_zero_extend(width).unwrap(),
end: self.end.into_zero_extend(width).unwrap(),
}
} else {
// The interval either contains both -1 and 0 or wraps around
Interval {
start: Bitvector::zero(width.into()),
end: Bitvector::unsigned_max_value(self.end.width())
.into_zero_extend(width)
.unwrap(),
}
}
}
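// Example (see the `cast_zero_and_signed_extend` test below): zero-extending the 8-bit
// interval [-9, -5] to 8 bytes gives [247, 251], since both bounds are negative,
// while zero-extending [-10, 5] gives [0, 255], since the interval contains the sign change.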
/// Take a subpiece of the bitvectors.
///
/// The function only tries to be exact if the interval contains exactly one value
/// or if the `low_byte` is zero.
pub fn subpiece(self, low_byte: ByteSize, size: ByteSize) -> Self {
if self.start == self.end {
self.start.subpiece(low_byte, size).into()
} else if low_byte == ByteSize::new(0) {
let new_min = Bitvector::signed_min_value(size.into())
.into_sign_extend(self.bytesize())
.unwrap();
let new_max = Bitvector::signed_max_value(size.into())
.into_sign_extend(self.bytesize())
.unwrap();
if self.start.checked_sge(&new_min).unwrap() && self.end.checked_sle(&new_max).unwrap()
{
Interval {
start: self.start.into_truncate(size).unwrap(),
end: self.end.into_truncate(size).unwrap(),
}
} else {
Interval::new_top(size)
}
} else {
Interval::new_top(size)
}
}
/// Take the 2's complement of values in the interval.
pub fn int_2_comp(self) -> Self {
if self
.start
.checked_sgt(&Bitvector::signed_min_value(self.bytesize().into()))
.unwrap()
{
Interval {
start: -self.end,
end: -self.start,
}
} else {
Interval::new_top(self.bytesize())
}
}
/// Compute the bitwise negation of values in the interval.
/// Only exact if there is exactly one value in the interval.
pub fn bitwise_not(self) -> Self {
if self.start == self.end {
self.start.into_bitnot().into()
} else {
Interval::new_top(self.bytesize())
}
}
/// Compute the interval of possible results
/// if one adds a value from `self` to a value from `rhs`.
pub fn add(&self, rhs: &Interval) -> Interval {
if self.start.signed_add_overflow_check(&rhs.start)
|| self.end.signed_add_overflow_check(&rhs.end)
{
Interval::new_top(self.bytesize())
} else {
Interval {
start: self.start.clone().into_checked_add(&rhs.start).unwrap(),
end: self.end.clone().into_checked_add(&rhs.end).unwrap(),
}
}
}
/// Compute the interval of possible results
/// if one subtracts a value in `rhs` from a value in `self`.
pub fn sub(&self, rhs: &Interval) -> Interval {
if self.start.signed_sub_overflow_check(&rhs.end)
|| self.end.signed_sub_overflow_check(&rhs.start)
{
Interval::new_top(self.bytesize())
} else {
Interval {
start: self.start.clone().into_checked_sub(&rhs.end).unwrap(),
end: self.end.clone().into_checked_sub(&rhs.start).unwrap(),
}
}
}
/// Compute the interval of possible results
/// if one multiplies a value in `self` with a value in `rhs`.
pub fn signed_mul(&self, rhs: &Interval) -> Interval {
if self.bytesize().as_bit_length() > 64 {
return Interval::new_top(self.bytesize());
}
let val1 = self
.start
.signed_mult_with_overflow_flag(&rhs.start)
.unwrap();
let val2 = self.start.signed_mult_with_overflow_flag(&rhs.end).unwrap();
let val3 = self.end.signed_mult_with_overflow_flag(&rhs.start).unwrap();
let val4 = self.end.signed_mult_with_overflow_flag(&rhs.end).unwrap();
if val1.1 || val2.1 || val3.1 || val4.1 {
// (signed) overflow during multiplication
return Interval::new_top(self.bytesize());
}
let min = signed_min(&val1.0, &signed_min(&val2.0, &signed_min(&val3.0, &val4.0)));
let max = signed_max(&val1.0, &signed_max(&val2.0, &signed_max(&val3.0, &val4.0)));
Interval {
start: min,
end: max,
}
}
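// Example (see the `multiplication` test below): for [-3, 1] * [-5, 7] the four corner
// products are 15, -21, -5 and 7, so the resulting interval is [-21, 15].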
}
impl From<Bitvector> for Interval {
/// Create an interval that only contains the given bitvector.
fn from(bitvec: Bitvector) -> Self {
Interval {
start: bitvec.clone(),
end: bitvec,
}
}
}
/// Helper function returning the (signed) minimum of two bitvectors.
fn signed_min(v1: &Bitvector, v2: &Bitvector) -> Bitvector {
if v1.checked_sle(v2).unwrap() {
v1.clone()
} else {
v2.clone()
}
}
/// Helper function returning the (signed) maximum of two bitvectors.
fn signed_max(v1: &Bitvector, v2: &Bitvector) -> Bitvector {
if v1.checked_sge(v2).unwrap() {
v1.clone()
} else {
v2.clone()
}
}
use super::*;
impl IntervalDomain {
/// Return a new interval domain of 8-byte integers.
pub fn mock(start: i64, end: i64) -> IntervalDomain {
IntervalDomain::new(Bitvector::from_i64(start), Bitvector::from_i64(end))
}
/// Return a new interval domain of 1-byte integers.
pub fn mock_i8(start: i8, end: i8) -> IntervalDomain {
IntervalDomain::new(Bitvector::from_i8(start), Bitvector::from_i8(end))
}
pub fn mock_with_bounds(
lower_bound: Option<i64>,
start: i64,
end: i64,
upper_bound: Option<i64>,
) -> IntervalDomain {
let mut domain = IntervalDomain::mock(start, end);
domain.update_widening_lower_bound(&lower_bound.map(|b| Bitvector::from_i64(b)));
domain.update_widening_upper_bound(&upper_bound.map(|b| Bitvector::from_i64(b)));
domain
}
pub fn mock_i8_with_bounds(
lower_bound: Option<i8>,
start: i8,
end: i8,
upper_bound: Option<i8>,
) -> IntervalDomain {
let mut domain = IntervalDomain::mock_i8(start, end);
domain.update_widening_lower_bound(&lower_bound.map(|b| Bitvector::from_i8(b)));
domain.update_widening_upper_bound(&upper_bound.map(|b| Bitvector::from_i8(b)));
domain
}
}
#[test]
fn signed_merge() {
// simple widening examples
let a = IntervalDomain::mock_with_bounds(None, 0, 3, Some(10));
let b = IntervalDomain::mock_with_bounds(None, 2, 5, None);
assert_eq!(
a.merge(&b),
IntervalDomain::mock_with_bounds(None, 0, 10, None)
);
let a = IntervalDomain::mock_with_bounds(Some(-3), 1, 1, None);
let b = IntervalDomain::mock_with_bounds(None, 2, 2, Some(5));
assert_eq!(
a.merge(&b),
IntervalDomain::mock_with_bounds(Some(-3), 1, 2, Some(5))
);
let a = IntervalDomain::mock_with_bounds(Some(-3), 1, 1, None);
let b = IntervalDomain::mock_with_bounds(None, 3, 3, Some(5));
assert_eq!(
a.merge(&b),
IntervalDomain::mock_with_bounds(None, -3, 5, None)
);
let a = IntervalDomain::mock_with_bounds(None, 1, 5, None);
let b = IntervalDomain::mock_with_bounds(None, -1, -1, Some(5));
assert_eq!(a.merge(&b), IntervalDomain::new_top(ByteSize::new(8)));
let a = IntervalDomain::mock_with_bounds(None, 1, 5, None);
let b = IntervalDomain::mock_with_bounds(None, 3, 3, Some(10));
assert_eq!(
a.merge(&b),
IntervalDomain::mock_with_bounds(None, 1, 5, Some(10))
);
let a = IntervalDomain::mock_with_bounds(None, 20, -5, None);
let b = IntervalDomain::mock_with_bounds(None, 0, 0, Some(50));
assert_eq!(a.merge(&a), IntervalDomain::new_top(ByteSize::new(8))); // Interval wraps and is thus merged to `Top`, even though a = a
assert_eq!(a.merge(&b), IntervalDomain::new_top(ByteSize::new(8)));
// Widening process corresponding to a very simple loop counter variable
let mut var = IntervalDomain::mock(0, 0);
let update = IntervalDomain::mock_with_bounds(None, 1, 1, Some(99));
var = var.merge(&update);
assert_eq!(var, IntervalDomain::mock_with_bounds(None, 0, 1, Some(99)));
let update = IntervalDomain::mock_with_bounds(None, 1, 2, Some(99));
var = var.merge(&update);
assert_eq!(var, IntervalDomain::mock_with_bounds(None, 0, 99, None));
let update = IntervalDomain::mock_with_bounds(None, 1, 99, None);
var = var.merge(&update);
assert_eq!(var, IntervalDomain::mock_with_bounds(None, 0, 99, None));
// Widening process corresponding to a loop counter variable with bound in the wrong direction
let mut var = IntervalDomain::mock(0, 0);
let update = IntervalDomain::mock_with_bounds(Some(-3), 1, 1, None);
var = var.merge(&update);
assert_eq!(var, IntervalDomain::mock_with_bounds(Some(-3), 0, 1, None));
let update = IntervalDomain::mock_with_bounds(Some(-3), 1, 2, None);
var = var.merge(&update);
assert_eq!(var, IntervalDomain::mock_with_bounds(None, -3, 2, None));
let update = IntervalDomain::mock_with_bounds(Some(-3), -2, 3, None);
var = var.merge(&update);
assert_eq!(var, IntervalDomain::new_top(ByteSize::new(8)));
}
#[test]
fn cast_zero_and_signed_extend() {
// Zero extend
let val = IntervalDomain::mock_i8_with_bounds(Some(1), 3, 5, Some(30));
let extended_val = val.cast(CastOpType::IntZExt, ByteSize::new(8));
assert_eq!(
extended_val,
IntervalDomain::mock_with_bounds(Some(1), 3, 5, Some(30))
);
let val = IntervalDomain::mock_i8_with_bounds(Some(-10), 0, 5, Some(30));
let extended_val = val.cast(CastOpType::IntZExt, ByteSize::new(8));
assert_eq!(
extended_val,
IntervalDomain::mock_with_bounds(None, 0, 5, Some(30))
);
let val = IntervalDomain::mock_i8_with_bounds(Some(-15), -10, 5, None);
let extended_val = val.cast(CastOpType::IntZExt, ByteSize::new(8));
assert_eq!(
extended_val,
IntervalDomain::mock_with_bounds(None, 0, 255, None)
);
let val = IntervalDomain::mock_i8_with_bounds(Some(-14), -9, -5, Some(-2));
let extended_val = val.cast(CastOpType::IntZExt, ByteSize::new(8));
assert_eq!(
extended_val,
IntervalDomain::mock_with_bounds(Some(242), 247, 251, Some(254))
);
let val = IntervalDomain::mock_i8_with_bounds(Some(-20), -10, -5, Some(3));
let extended_val = val.cast(CastOpType::IntZExt, ByteSize::new(8));
assert_eq!(
extended_val,
IntervalDomain::mock_with_bounds(Some(236), 246, 251, Some(255))
);
// Sign extend
let val = IntervalDomain::mock_i8_with_bounds(Some(1), 3, 5, Some(30));
let extended_val = val.cast(CastOpType::IntSExt, ByteSize::new(8));
assert_eq!(
extended_val,
IntervalDomain::mock_with_bounds(Some(1), 3, 5, Some(30))
);
let val = IntervalDomain::mock_i8_with_bounds(Some(-10), 0, 5, Some(30));
let extended_val = val.cast(CastOpType::IntSExt, ByteSize::new(8));
assert_eq!(
extended_val,
IntervalDomain::mock_with_bounds(Some(-10), 0, 5, Some(30))
);
let val = IntervalDomain::mock_i8_with_bounds(Some(-15), -10, 127, None);
let extended_val = val.cast(CastOpType::IntSExt, ByteSize::new(8));
assert_eq!(
extended_val,
IntervalDomain::mock_with_bounds(Some(-15), -10, 127, None)
);
let val = IntervalDomain::mock_i8_with_bounds(None, -10, -5, None);
let extended_val = val.cast(CastOpType::IntSExt, ByteSize::new(8));
assert_eq!(
extended_val,
IntervalDomain::mock_with_bounds(Some(-128), -10, -5, Some(127))
);
let val = IntervalDomain::mock_i8_with_bounds(Some(-20), -10, -5, Some(3));
let extended_val = val.cast(CastOpType::IntSExt, ByteSize::new(8));
assert_eq!(
extended_val,
IntervalDomain::mock_with_bounds(Some(-20), -10, -5, Some(3))
);
}
#[test]
fn subpiece() {
let val = IntervalDomain::mock_with_bounds(None, -3, 5, Some(10));
let subpieced_val = val.subpiece(ByteSize::new(0), ByteSize::new(1));
assert_eq!(
subpieced_val,
IntervalDomain::mock_i8_with_bounds(None, -3, 5, None)
);
let val = IntervalDomain::mock_with_bounds(Some(-30), 2, 5, Some(10));
let subpieced_val = val.subpiece(ByteSize::new(0), ByteSize::new(1));
assert_eq!(
subpieced_val,
IntervalDomain::mock_i8_with_bounds(Some(-30), 2, 5, Some(10))
);
let val = IntervalDomain::mock_with_bounds(Some(-500), 2, 5, Some(10));
let subpieced_val = val.subpiece(ByteSize::new(0), ByteSize::new(1));
assert_eq!(
subpieced_val,
IntervalDomain::mock_i8_with_bounds(None, 2, 5, None)
);
let val = IntervalDomain::mock_with_bounds(Some(-30), 2, 567, Some(777));
let subpieced_val = val.subpiece(ByteSize::new(0), ByteSize::new(1));
assert_eq!(subpieced_val, IntervalDomain::new_top(ByteSize::new(1)));
let val = IntervalDomain::mock_with_bounds(Some(-30), 2, 3, Some(777));
let subpieced_val = val.subpiece(ByteSize::new(1), ByteSize::new(1));
assert_eq!(subpieced_val, IntervalDomain::new_top(ByteSize::new(1)));
let val = IntervalDomain::mock_with_bounds(Some(-30), 512, 512, Some(777));
let subpieced_val = val.subpiece(ByteSize::new(1), ByteSize::new(1));
assert_eq!(
subpieced_val,
IntervalDomain::mock_i8_with_bounds(None, 2, 2, None)
);
}
#[test]
fn un_op() {
// Int2Comp
let mut val = IntervalDomain::mock_with_bounds(None, -3, 5, Some(10));
val = val.un_op(UnOpType::Int2Comp);
assert_eq!(
val,
IntervalDomain::mock_with_bounds(Some(-10), -5, 3, None)
);
let mut val = IntervalDomain::mock_i8_with_bounds(Some(-128), -3, 5, Some(127));
val = val.un_op(UnOpType::Int2Comp);
assert_eq!(
val,
IntervalDomain::mock_i8_with_bounds(Some(-127), -5, 3, None)
);
// IntNegate
let mut val = IntervalDomain::mock_with_bounds(None, -3, 5, Some(10));
val = val.un_op(UnOpType::IntNegate);
assert_eq!(val, IntervalDomain::new_top(ByteSize::new(8)));
let mut val = IntervalDomain::mock_with_bounds(None, -4, -4, Some(10));
val = val.un_op(UnOpType::IntNegate);
assert_eq!(val, IntervalDomain::mock(3, 3));
}
#[test]
fn add() {
let lhs = IntervalDomain::mock_with_bounds(None, 3, 7, Some(10));
let rhs = IntervalDomain::mock_with_bounds(Some(-20), -3, 0, Some(10));
let result = lhs.bin_op(BinOpType::IntAdd, &rhs);
assert_eq!(
result,
IntervalDomain::mock_with_bounds(None, 0, 7, Some(20))
);
let lhs = IntervalDomain::mock_i8_with_bounds(Some(-121), -120, -120, Some(10));
let rhs = IntervalDomain::mock_i8_with_bounds(Some(-10), -9, 0, Some(10));
let result = lhs.bin_op(BinOpType::IntAdd, &rhs);
assert_eq!(result, IntervalDomain::new_top(ByteSize::new(1)));
let lhs = IntervalDomain::mock_i8_with_bounds(Some(-100), 2, 4, Some(100));
let rhs = IntervalDomain::mock_i8_with_bounds(Some(-50), 10, 20, Some(50));
let result = lhs.bin_op(BinOpType::IntAdd, &rhs);
assert_eq!(
result,
IntervalDomain::mock_i8_with_bounds(None, 12, 24, None)
);
}
#[test]
fn sub() {
let lhs = IntervalDomain::mock_with_bounds(None, 3, 7, Some(10));
let rhs = IntervalDomain::mock_with_bounds(Some(-20), -3, 0, Some(10));
let result = lhs.bin_op(BinOpType::IntSub, &rhs);
assert_eq!(
result,
IntervalDomain::mock_with_bounds(None, 3, 10, Some(30))
);
let lhs = IntervalDomain::mock_i8_with_bounds(Some(-121), -120, -120, Some(10));
let rhs = IntervalDomain::mock_i8_with_bounds(Some(-10), -9, 9, Some(10));
let result = lhs.bin_op(BinOpType::IntSub, &rhs);
assert_eq!(result, IntervalDomain::new_top(ByteSize::new(1)));
let lhs = IntervalDomain::mock_i8_with_bounds(Some(-100), 2, 4, Some(100));
let rhs = IntervalDomain::mock_i8_with_bounds(Some(-50), 10, 20, Some(50));
let result = lhs.bin_op(BinOpType::IntSub, &rhs);
assert_eq!(
result,
IntervalDomain::mock_i8_with_bounds(None, -18, -6, None)
);
}
#[test]
fn multiplication() {
let lhs = IntervalDomain::mock_with_bounds(None, 3, 7, Some(10));
let rhs = IntervalDomain::mock_with_bounds(Some(-20), -3, 0, Some(10));
let result = lhs.bin_op(BinOpType::IntMult, &rhs);
assert_eq!(
result,
IntervalDomain::mock_with_bounds(Some(-200), -21, 0, Some(100))
);
let lhs = IntervalDomain::mock_with_bounds(Some(-4), -3, 1, Some(2));
let rhs = IntervalDomain::mock_with_bounds(Some(-6), -5, 7, Some(8));
let result = lhs.bin_op(BinOpType::IntMult, &rhs);
assert_eq!(
result,
IntervalDomain::mock_with_bounds(Some(-32), -21, 15, Some(16))
);
let lhs = IntervalDomain::mock_i8_with_bounds(None, 3, 7, Some(50));
let rhs = IntervalDomain::mock_i8_with_bounds(Some(-30), -3, 0, Some(50));
let result = lhs.bin_op(BinOpType::IntMult, &rhs);
assert_eq!(
result,
IntervalDomain::mock_i8_with_bounds(None, -21, 0, None)
);
}
#[test]
fn shift_left() {
let lhs = IntervalDomain::mock_i8_with_bounds(None, 3, 3, Some(50));
let rhs = IntervalDomain::mock_i8_with_bounds(Some(1), 2, 3, Some(4));
let result = lhs.bin_op(BinOpType::IntLeft, &rhs);
assert_eq!(result, IntervalDomain::new_top(ByteSize::new(1)));
let lhs = IntervalDomain::mock_i8_with_bounds(None, 3, 4, Some(5));
let rhs = IntervalDomain::mock_i8_with_bounds(Some(1), 2, 2, Some(4));
let result = lhs.bin_op(BinOpType::IntLeft, &rhs);
assert_eq!(
result,
IntervalDomain::mock_i8_with_bounds(None, 12, 16, None)
);
let lhs = IntervalDomain::mock_i8_with_bounds(Some(2), 3, 4, Some(64));
let rhs = IntervalDomain::mock_i8_with_bounds(Some(0), 1, 1, Some(4));
let result = lhs.bin_op(BinOpType::IntLeft, &rhs);
assert_eq!(
result,
IntervalDomain::mock_i8_with_bounds(None, 6, 8, None)
);
}
@@ -18,6 +18,9 @@ pub use data::*;
mod mem_region;
pub use mem_region::*;
mod interval;
pub use interval::*;
/// The main trait describing an abstract domain.
///
/// Each abstract domain is partially ordered and has a maximal element (which can be generated by `top()`).
......
use super::*;
/// A bitvector is a fixed-length vector of bits
/// with the semantics of a CPU register,
/// i.e. it supports two's complement modulo arithmetic.
///
/// Bitvector is just an alias for the [`apint::ApInt`] type.
pub type Bitvector = apint::ApInt;
/// A trait to extend the bitvector type with useful helper functions
/// that are not contained in the [`apint`] crate.
/// See the implementation of the trait on the [`Bitvector`] type for more information.
pub trait BitvectorExtended: Sized {
fn cast(&self, kind: CastOpType, width: ByteSize) -> Result<Self, Error>;
fn subpiece(&self, low_byte: ByteSize, size: ByteSize) -> Self;
fn un_op(&self, op: UnOpType) -> Result<Self, Error>;
fn bin_op(&self, op: BinOpType, rhs: &Self) -> Result<Self, Error>;
fn signed_add_overflow_check(&self, rhs: &Self) -> bool;
fn signed_sub_overflow_check(&self, rhs: &Self) -> bool;
fn signed_mult_with_overflow_flag(&self, rhs: &Self) -> Result<(Self, bool), Error>;
}
impl BitvectorExtended for Bitvector {
/// Perform a cast operation on the bitvector.
/// Returns an error for non-implemented cast operations (currently all float-related casts).
fn cast(&self, kind: CastOpType, width: ByteSize) -> Result<Self, Error> {
match kind {
CastOpType::IntZExt => Ok(self.clone().into_zero_extend(width).unwrap()),
CastOpType::IntSExt => Ok(self.clone().into_sign_extend(width).unwrap()),
CastOpType::Int2Float | CastOpType::Float2Float | CastOpType::Trunc => {
Err(anyhow!("Float operations not yet implemented"))
}
CastOpType::PopCount => Ok(Bitvector::from_u64(self.count_ones() as u64)
.into_truncate(width)
.unwrap()),
}
}
/// Extract a subpiece of the given bitvector.
fn subpiece(&self, low_byte: ByteSize, size: ByteSize) -> Self {
self.clone()
.into_checked_lshr(low_byte.as_bit_length())
.unwrap()
.into_truncate(size.as_bit_length())
.unwrap()
}
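// Example: for the 4-byte value 0x12345678, `subpiece(ByteSize::new(1), ByteSize::new(2))`
// first shifts right by 8 bits (giving 0x00123456) and then truncates to 16 bits, yielding 0x3456.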
/// Perform a unary operation on the given bitvector.
/// Returns an error for non-implemented operations (currently all float-related operations).
fn un_op(&self, op: UnOpType) -> Result<Self, Error> {
use UnOpType::*;
match op {
Int2Comp => Ok(-self.clone()),
IntNegate => Ok(self.clone().into_bitnot()),
BoolNegate => {
if self.is_zero() {
Ok(Bitvector::from_u8(1))
} else {
Ok(Bitvector::from_u8(0))
}
}
FloatNegate | FloatAbs | FloatSqrt | FloatCeil | FloatFloor | FloatRound | FloatNaN => {
Err(anyhow!("Float operations not yet implemented"))
}
}
}
/// Perform a binary operation on the given bitvectors.
/// Returns an error for non-implemented operations (currently all float-related operations).
fn bin_op(&self, op: BinOpType, rhs: &Self) -> Result<Self, Error> {
use BinOpType::*;
match op {
Piece => {
let new_bitwidth = self.width().to_usize() + rhs.width().to_usize();
let upper_bits = self
.clone()
.into_zero_extend(new_bitwidth)
.unwrap()
.into_checked_shl(rhs.width().to_usize())
.unwrap();
let lower_bits = rhs.clone().into_zero_extend(new_bitwidth).unwrap();
Ok(upper_bits | &lower_bits)
}
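// Example: `Piece` concatenates its operands with `self` as the most significant part,
// e.g. piecing together the one-byte values 0x12 and 0x34 yields the two-byte value 0x1234.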
IntAdd => Ok(self + rhs),
IntSub => Ok(self - rhs),
IntCarry => {
let result = self + rhs;
if result.checked_ult(self).unwrap() || result.checked_ult(rhs).unwrap() {
Ok(Bitvector::from_u8(1))
} else {
Ok(Bitvector::from_u8(0))
}
}
IntSCarry => {
let result = apint::Int::from(self + rhs);
let signed_self = apint::Int::from(self.clone());
let signed_rhs = apint::Int::from(rhs.clone());
if (result.is_negative() && signed_self.is_positive() && signed_rhs.is_positive())
|| (!result.is_negative()
&& signed_self.is_negative()
&& signed_rhs.is_negative())
{
Ok(Bitvector::from_u8(1))
} else {
Ok(Bitvector::from_u8(0))
}
}
IntSBorrow => {
let result = apint::Int::from(self - rhs);
let signed_self = apint::Int::from(self.clone());
let signed_rhs = apint::Int::from(rhs.clone());
if (result.is_negative() && !signed_self.is_positive() && signed_rhs.is_negative())
|| (result.is_positive()
&& signed_self.is_negative()
&& signed_rhs.is_positive())
{
Ok(Bitvector::from_u8(1))
} else {
Ok(Bitvector::from_u8(0))
}
}
IntMult => {
// FIXME: Multiplication for bitvectors larger than 8 bytes is not yet implemented in the `apint` crate (version 0.2).
if self.width().to_usize() > 64 {
Err(anyhow!("Multiplication and division of integers larger than 8 bytes not yet implemented."))
} else {
Ok(self * rhs)
}
}
IntDiv => {
// FIXME: Division for bitvectors larger than 8 bytes is not yet implemented in the `apint` crate (version 0.2).
if self.width().to_usize() > 64 {
Err(anyhow!("Multiplication and division of integers larger than 8 bytes not yet implemented."))
} else {
Ok(self.clone().into_checked_udiv(rhs).unwrap())
}
}
IntSDiv => {
// FIXME: Division for bitvectors larger than 8 bytes is not yet implemented in the `apint` crate (version 0.2).
if self.width().to_usize() > 64 {
Err(anyhow!("Multiplication and division of integers larger than 8 bytes not yet implemented."))
} else {
Ok(self.clone().into_checked_sdiv(rhs).unwrap())
}
}
IntRem => Ok(self.clone().into_checked_urem(rhs).unwrap()),
IntSRem => Ok(self.clone().into_checked_srem(rhs).unwrap()),
IntLeft => {
let shift_amount = rhs.try_to_u64().unwrap() as usize;
if shift_amount < self.width().to_usize() {
Ok(self.clone().into_checked_shl(shift_amount).unwrap())
} else {
Ok(Bitvector::zero(self.width()))
}
}
IntRight => {
let shift_amount = rhs.try_to_u64().unwrap() as usize;
if shift_amount < self.width().to_usize() {
Ok(self.clone().into_checked_lshr(shift_amount).unwrap())
} else {
Ok(Bitvector::zero(self.width()))
}
}
IntSRight => {
let shift_amount = rhs.try_to_u64().unwrap() as usize;
if shift_amount < self.width().to_usize() {
Ok(self.clone().into_checked_ashr(shift_amount).unwrap())
} else {
let signed_bitvec = apint::Int::from(self.clone());
if signed_bitvec.is_negative() {
let minus_one =
Bitvector::zero(self.width()) - &Bitvector::one(self.width());
Ok(minus_one)
} else {
Ok(Bitvector::zero(self.width()))
}
}
}
IntAnd | BoolAnd => Ok(self & rhs),
IntOr | BoolOr => Ok(self | rhs),
IntXOr | BoolXOr => Ok(self ^ rhs),
IntEqual => {
assert_eq!(self.width(), rhs.width());
Ok(Bitvector::from((self == rhs) as u8))
}
IntNotEqual => {
assert_eq!(self.width(), rhs.width());
Ok(Bitvector::from((self != rhs) as u8))
}
IntLess => Ok(Bitvector::from(self.checked_ult(rhs).unwrap() as u8)),
IntLessEqual => Ok(Bitvector::from(self.checked_ule(rhs).unwrap() as u8)),
IntSLess => Ok(Bitvector::from(self.checked_slt(rhs).unwrap() as u8)),
IntSLessEqual => Ok(Bitvector::from(self.checked_sle(rhs).unwrap() as u8)),
FloatEqual | FloatNotEqual | FloatLess | FloatLessEqual => {
// TODO: Implement floating point comparison operators!
Err(anyhow!("Float operations not yet implemented"))
}
FloatAdd | FloatSub | FloatMult | FloatDiv => {
// TODO: Implement floating point arithmetic operators!
Err(anyhow!("Float operations not yet implemented"))
}
}
}
/// Returns `true` if adding `self` to `rhs` would result in a signed integer overflow or underflow.
fn signed_add_overflow_check(&self, rhs: &Self) -> bool {
let result = self.clone().into_checked_add(rhs).unwrap();
if rhs.sign_bit().to_bool() {
self.checked_sle(&result).unwrap()
} else {
self.checked_sgt(&result).unwrap()
}
}
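// Example: for the signed 8-bit addition 100 + 50 the wrapped result is -106;
// since the right-hand side is positive and 100 > -106, the overflow check returns true.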
/// Returns `true` if subtracting `rhs` from `self` would result in a signed integer overflow or underflow.
fn signed_sub_overflow_check(&self, rhs: &Self) -> bool {
let result = self.clone().into_checked_sub(rhs).unwrap();
if rhs.sign_bit().to_bool() {
self.checked_sge(&result).unwrap()
} else {
self.checked_slt(&result).unwrap()
}
}
/// Return the result of multiplying `self` with `rhs`
/// and a flag that is set to `true` if the multiplication resulted in a signed integer overflow or underflow.
///
/// Returns an error for bitvectors larger than 8 bytes,
/// since multiplication for them is not yet implemented in the [`apint`] crate.
fn signed_mult_with_overflow_flag(&self, rhs: &Self) -> Result<(Self, bool), Error> {
if self.is_zero() {
Ok((Bitvector::zero(self.width()), false))
} else if self.width().to_usize() > 64 {
// FIXME: Multiplication for bitvectors larger than 8 bytes is not yet implemented in the `apint` crate (version 0.2).
Err(anyhow!(
"Multiplication and division of integers larger than 8 bytes not yet implemented."
))
} else {
let result = self.clone().into_checked_mul(rhs).unwrap();
if result.clone().into_checked_sdiv(self).unwrap() != *rhs {
Ok((result, true))
} else {
Ok((result, false))
}
}
}
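// Example: for signed 8-bit values, 16 * 16 wraps to 0; dividing the wrapped result by 16
// gives 0 != 16, so the overflow flag is set. For 5 * 6 = 30 the division check gives back 6,
// which equals `rhs`, so the flag stays false.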
}
@@ -10,6 +10,8 @@
use crate::prelude::*;
use derive_more::*;
mod bitvector;
pub use bitvector::*;
mod variable;
pub use variable::*;
mod expression;
@@ -17,13 +19,6 @@ pub use expression::*;
mod term;
pub use term::*;
/// A bitvector is a fixed-length vector of bits
/// with the semantics of a CPU register,
/// i.e. it supports two's complement modulo arithmetic.
///
/// Bitvector is just an alias for the [`apint::ApInt`] type.
pub type Bitvector = apint::ApInt;
/// An unsigned number of bytes.
///
/// Used to represent sizes of values in registers or in memory.
......