cwe_checker — Commit 80455c63 (unverified)
Authored Jul 05, 2021 by Enkelmann, committed via GitHub on Jul 05, 2021

add strides to the interval domain (#189)

parent 1c369052

Showing 12 changed files with 773 additions and 89 deletions (+773 −89)
src/cwe_checker_lib/Cargo.toml                                              +1    −0
src/cwe_checker_lib/src/abstract_domain/bitvector.rs                        +1    −1
src/cwe_checker_lib/src/abstract_domain/interval.rs                         +115  −34
src/cwe_checker_lib/src/abstract_domain/interval/bin_ops.rs                 +4    −8
src/cwe_checker_lib/src/abstract_domain/interval/simple_interval.rs         +334  −33
src/cwe_checker_lib/src/abstract_domain/interval/simple_interval/tests.rs   +219  −0
src/cwe_checker_lib/src/abstract_domain/interval/tests.rs                   +70   −3
src/cwe_checker_lib/src/analysis/pointer_inference/object.rs                +1    −1
src/cwe_checker_lib/src/analysis/pointer_inference/object_list.rs           +1    −1
src/cwe_checker_lib/src/intermediate_representation/bitvector.rs            +24   −0
src/cwe_checker_lib/src/pcode/term.rs                                       +2    −3
src/cwe_checker_lib/src/utils/symbol_utils.rs                               +1    −5
src/cwe_checker_lib/Cargo.toml

@@ -17,6 +17,7 @@ crossbeam-channel = "0.4"
 derive_more = "0.99"
 directories = "3.0"
 goblin = "0.2"
+gcd = "2.0"

 [lib]
 name = "cwe_checker_lib"
src/cwe_checker_lib/src/abstract_domain/bitvector.rs

@@ -159,7 +159,7 @@ impl TryToInterval for BitvectorDomain {
     /// If the domain represents an absolute value, return it as an interval of length one.
     fn try_to_interval(&self) -> Result<Interval, Error> {
         match self {
-            BitvectorDomain::Value(val) => Ok(Interval::new(val.clone(), val.clone())),
+            BitvectorDomain::Value(val) => Ok(val.clone().into()),
            BitvectorDomain::Top(_) => Err(anyhow!("Value is Top")),
        }
    }
src/cwe_checker_lib/src/abstract_domain/interval.rs

@@ -11,13 +11,18 @@ pub use simple_interval::*;
 mod bin_ops;

-/// An abstract domain representing values in an interval range.
+/// An abstract domain representing values in an interval range with strides and widening hints.
 ///
 /// The interval bounds are signed integers,
 /// i.e. the domain looses precision if tasked to represent large unsigned integers.
+/// The interval has a stride,
+/// i.e. all values represented by the interval are contained in the same residue class modulo the stride
+/// as the interval bounds.
 ///
 /// The domain also contains widening hints to faciliate fast and exact widening for simple loop counter variables.
 /// See the [`IntervalDomain::signed_merge_and_widen`] method for details on the widening strategy.
+/// Note that the widening hints may not respect the stride,
+/// i.e. they may be contained in different residue classes than the interval bounds.
 #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Clone)]
 pub struct IntervalDomain {
     /// The underlying interval.
@@ -47,10 +52,10 @@ impl IntervalDomain {
     /// Create a new interval domain with the given bounds.
     ///
     /// Both `start` and `end` are inclusive, i.e. contained in the interval.
-    /// The widening hints are set to `None`.
+    /// The widening hints are set to `None` and the stride is set to 1 if `start != end`.
     pub fn new(start: Bitvector, end: Bitvector) -> Self {
         IntervalDomain {
-            interval: Interval::new(start, end),
+            interval: Interval::new(start, end, 1),
             widening_upper_bound: None,
             widening_lower_bound: None,
             widening_delay: 0,
@@ -68,13 +73,17 @@ impl IntervalDomain {
     /// Otherwise keep the old lower bound.
     pub fn update_widening_lower_bound(&mut self, bound: &Option<Bitvector>) {
         if let Some(bound_value) = bound {
+            let bound_value = match bound_value.clone().round_up_to_stride_of(&self.interval) {
+                Some(bound) => bound,
+                None => return,
+            };
             if bound_value.checked_slt(&self.interval.start).unwrap() {
                 if let Some(ref previous_bound) = self.widening_lower_bound {
                     if bound_value.checked_sgt(previous_bound).unwrap() {
-                        self.widening_lower_bound = Some(bound_value.clone());
+                        self.widening_lower_bound = Some(bound_value);
                     }
                 } else {
-                    self.widening_lower_bound = Some(bound_value.clone());
+                    self.widening_lower_bound = Some(bound_value);
                 }
             }
         }
@@ -85,13 +94,17 @@ impl IntervalDomain {
     /// Otherwise keep the old upper bound.
     pub fn update_widening_upper_bound(&mut self, bound: &Option<Bitvector>) {
         if let Some(bound_value) = bound {
+            let bound_value = match bound_value.clone().round_down_to_stride_of(&self.interval) {
+                Some(bound) => bound,
+                None => return,
+            };
             if bound_value.checked_sgt(&self.interval.end).unwrap() {
                 if let Some(ref previous_bound) = self.widening_upper_bound {
                     if bound_value.checked_slt(previous_bound).unwrap() {
-                        self.widening_upper_bound = Some(bound_value.clone());
+                        self.widening_upper_bound = Some(bound_value);
                     }
                 } else {
-                    self.widening_upper_bound = Some(bound_value.clone());
+                    self.widening_upper_bound = Some(bound_value);
                 }
             }
         }
@@ -125,7 +138,8 @@ impl IntervalDomain {
     /// ### When to widen
     ///
     /// If the merged interval equals one of the input intervals as value sets, do not perform widening.
-    /// Else widening is performed if and only if the length of the interval is greater than `widening_delay + 2`.
+    /// Else widening is performed if and only if
+    /// the length of the interval is greater than the widening delay plus the stride of the interval.
     ///
     /// ### How to widen
     ///
@@ -137,16 +151,24 @@ impl IntervalDomain {
     /// After that the `widening_delay` is set to the length of the resulting interval.
     pub fn signed_merge_and_widen(&self, other: &IntervalDomain) -> IntervalDomain {
         let mut merged_domain = self.signed_merge(other);
-        if merged_domain.equal_as_value_sets(self) || merged_domain.equal_as_value_sets(other) {
-            // Do not widen if the value set itself is already contained in either `self` or `other`.
+        if merged_domain.equal_as_value_sets(self)
+            || merged_domain.equal_as_value_sets(other)
+            || merged_domain.is_top()
+        {
+            // Do not widen if the value set itself is already contained in either `self` or `other`
+            // or if the domain is already unconstrained.
             return merged_domain;
         }
-        if let Ok(length) = merged_domain.interval.length().try_to_u64() {
-            if length <= merged_domain.widening_delay + 2 {
-                // Do not widen for already unconstrained intervals (case length() returning zero)
-                // or if the interval length is not larger than `widening_delay + 2`.
-                // FIXME: `widening_delay + 2` may overflow.
-                // But such a large delay is probably incorrect anyway, so this should not cause unnecessary widenings.
+        if let Ok(length) =
+            (merged_domain.interval.end.clone() - &merged_domain.interval.start).try_to_u64()
+        {
+            let widening_threshold = std::cmp::max(
+                merged_domain.widening_delay + 1,
+                merged_domain.widening_delay + merged_domain.interval.stride,
+            );
+            if length <= widening_threshold {
+                // Do not widen below the widening threshold.
+                // NOTE: The widening threshold may overflow. In this case we do perform widening.
                 return merged_domain;
             }
         }
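For a concrete sense of the new threshold (the numbers here are illustrative only, not taken from the diff): with `widening_delay = 2` and `interval.stride = 4`, the threshold is `max(2 + 1, 2 + 4) = 6`, so a merged interval whose `end - start` is at most 6 is returned without widening; only longer intervals are widened towards their widening hints.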
@@ -156,18 +178,22 @@ impl IntervalDomain {
         {
             // widen to the lower bound
             merged_domain.interval.start = merged_domain.widening_lower_bound.unwrap();
+            merged_domain.interval.adjust_start_to_value_in_stride();
             merged_domain.widening_lower_bound = None;
             has_been_widened = true;
         }
         if self.interval.end != other.interval.end && merged_domain.widening_upper_bound.is_some() {
             // widen to the upper bound
             merged_domain.interval.end = merged_domain.widening_upper_bound.unwrap();
+            merged_domain.interval.adjust_end_to_value_in_stride();
             merged_domain.widening_upper_bound = None;
             has_been_widened = true;
         }
         if has_been_widened {
-            merged_domain.widening_delay = merged_domain.interval.length().try_to_u64().unwrap_or(0);
+            merged_domain.widening_delay = (merged_domain.interval.end.clone()
+                - &merged_domain.interval.start)
+                .try_to_u64()
+                .unwrap_or(0);
             merged_domain
         } else {
             // No widening bounds could be used for widening, so we have to widen to the `Top` value.
@@ -213,6 +239,7 @@ impl IntervalDomain {
             interval: Interval {
                 start: self.interval.start.clone().into_sign_extend(width).unwrap(),
                 end: self.interval.end.clone().into_sign_extend(width).unwrap(),
+                stride: self.interval.stride,
             },
             widening_lower_bound: self.widening_lower_bound
@@ -236,7 +263,10 @@ impl IntervalDomain {
         intersected_domain.widening_delay =
             std::cmp::max(self.widening_delay, other.widening_delay);
-        if let Ok(interval_length) = intersected_domain.interval.length().try_to_u64() {
+        if let Ok(interval_length) = (intersected_domain.interval.end.clone()
+            - &intersected_domain.interval.start)
+            .try_to_u64()
+        {
             intersected_domain.widening_delay =
                 std::cmp::min(intersected_domain.widening_delay, interval_length);
         }
@@ -370,22 +400,27 @@ impl SpecializeByConditional for IntervalDomain {
 impl SpecializeByConditional for IntervalDomain {
     fn add_signed_less_equal_bound(mut self, bound: &Bitvector) -> Result<Self, Error> {
+        let bound = match bound.clone().round_down_to_stride_of(&self.interval) {
+            Some(bound) => bound,
+            None => return Err(anyhow!("Empty interval")),
+        };
         if let Some(old_upper_bound) = &self.widening_upper_bound {
-            if old_upper_bound.checked_sle(bound).unwrap() {
+            if old_upper_bound.checked_sle(&bound).unwrap() {
                 return Ok(self);
-            } else if self.interval.end.checked_slt(bound).unwrap() {
-                self.widening_upper_bound = Some(bound.clone());
+            } else if self.interval.end.checked_slt(&bound).unwrap() {
+                self.widening_upper_bound = Some(bound);
+                return Ok(self);
             } else {
                 self.widening_upper_bound = None;
             }
-        } else if self.interval.end.checked_slt(bound).unwrap() {
-            self.widening_upper_bound = Some(bound.clone());
+        } else if self.interval.end.checked_slt(&bound).unwrap() {
+            self.widening_upper_bound = Some(bound);
+            return Ok(self);
         }
         // we already know that the bound is less equal to `self.interval.end`
-        if self.interval.start.checked_sle(bound).unwrap() {
-            self.interval.end = bound.clone();
+        if self.interval.start.checked_sle(&bound).unwrap() {
+            self.interval.end = bound;
+            self.interval.adjust_end_to_value_in_stride();
             Ok(self)
         } else {
             Err(anyhow!("Empty interval"))
@@ -393,22 +428,27 @@ impl SpecializeByConditional for IntervalDomain {
     }

     fn add_signed_greater_equal_bound(mut self, bound: &Bitvector) -> Result<Self, Error> {
+        let bound = match bound.clone().round_up_to_stride_of(&self.interval) {
+            Some(bound) => bound,
+            None => return Err(anyhow!("Empty interval")),
+        };
         if let Some(old_lower_bound) = &self.widening_lower_bound {
-            if old_lower_bound.checked_sge(bound).unwrap() {
+            if old_lower_bound.checked_sge(&bound).unwrap() {
                 return Ok(self);
-            } else if self.interval.start.checked_sgt(bound).unwrap() {
-                self.widening_lower_bound = Some(bound.clone());
+            } else if self.interval.start.checked_sgt(&bound).unwrap() {
+                self.widening_lower_bound = Some(bound);
+                return Ok(self);
             } else {
                 self.widening_lower_bound = None;
             }
-        } else if self.interval.start.checked_sgt(bound).unwrap() {
-            self.widening_lower_bound = Some(bound.clone());
+        } else if self.interval.start.checked_sgt(&bound).unwrap() {
+            self.widening_lower_bound = Some(bound);
+            return Ok(self);
         }
         // we already know that the bound is greater equal to `self.interval.start`
-        if self.interval.end.checked_sge(bound).unwrap() {
-            self.interval.start = bound.clone();
+        if self.interval.end.checked_sge(&bound).unwrap() {
+            self.interval.start = bound;
+            self.interval.adjust_start_to_value_in_stride();
             Ok(self)
         } else {
             Err(anyhow!("Empty interval"))
@@ -451,11 +491,13 @@ impl SpecializeByConditional for IntervalDomain {
             self.add_signed_greater_equal_bound(&(bound + &Bitvector::one(bound.width())))
         } else if self.interval.start == *bound {
             self.interval.start += &Bitvector::one(bound.width());
+            self.interval.adjust_start_to_value_in_stride();
             Ok(self)
         } else if self.interval.end.checked_slt(bound).unwrap() {
             self.add_signed_less_equal_bound(&(bound - &Bitvector::one(bound.width())))
         } else if self.interval.end == *bound {
             self.interval.end -= &Bitvector::one(bound.width());
+            self.interval.adjust_end_to_value_in_stride();
             Ok(self)
         } else {
             Ok(self)
@@ -488,6 +530,7 @@ impl SizedDomain for IntervalDomain {
             interval: Interval {
                 start: Bitvector::signed_min_value(bytesize.into()),
                 end: Bitvector::signed_max_value(bytesize.into()),
+                stride: 1,
             },
             widening_lower_bound: None,
             widening_upper_bound: None,
@@ -707,8 +750,9 @@ impl Display for IntervalDomain {
             let end_int = apint::Int::from(self.interval.end.clone());
             write!(
                 f,
-                "[0x{:016x}, 0x{:016x}]:i{}",
+                "[0x{:016x}, <stride {:x}>, 0x{:016x}]:i{}",
                 start_int,
+                self.interval.stride,
                 end_int,
                 self.bytesize().as_bit_length()
             )
@@ -716,5 +760,42 @@ impl Display for IntervalDomain {
     }
 }

+/// Trait for adjusting a bitvector to the stride of an interval.
+trait StrideRounding: Sized {
+    /// Round `self` up to the nearest value that adheres to the stride of `interval`.
+    fn round_up_to_stride_of(self, interval: &Interval) -> Option<Self>;
+
+    /// Round `self` down to the nearest value that adheres to the stride of `interval`.
+    fn round_down_to_stride_of(self, interval: &Interval) -> Option<Self>;
+}
+
+impl StrideRounding for Bitvector {
+    /// Round `self` up to the nearest value that adheres to the stride of `interval`.
+    /// Returns `None` if rounding would result in an integer overflow.
+    fn round_up_to_stride_of(self, interval: &Interval) -> Option<Self> {
+        if interval.stride == 0 || interval.bytesize() > ByteSize::new(8) {
+            return Some(self);
+        }
+        let diff = interval.start.try_to_i128().unwrap() - self.try_to_i128().unwrap();
+        let diff = diff % interval.stride as i128;
+        let diff = (diff + interval.stride as i128) % interval.stride as i128;
+        let diff = Bitvector::from_u64(diff as u64).into_resize_unsigned(interval.bytesize());
+        self.signed_add_overflow_checked(&diff)
+    }
+
+    /// Round `self` down to the nearest value that adheres to the stride of `interval`.
+    /// Returns `None` if rounding would result in an integer overflow.
+    fn round_down_to_stride_of(self, interval: &Interval) -> Option<Self> {
+        if interval.stride == 0 || interval.bytesize() > ByteSize::new(8) {
+            return Some(self);
+        }
+        let diff = self.try_to_i128().unwrap() - interval.end.try_to_i128().unwrap();
+        let diff = diff % interval.stride as i128;
+        let diff = (diff + interval.stride as i128) % interval.stride as i128;
+        let diff = Bitvector::from_u64(diff as u64).into_resize_unsigned(interval.bytesize());
+        self.signed_sub_overflow_checked(&diff)
+    }
+}
+
 #[cfg(test)]
 mod tests;
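The rounding direction is relative to the residue class of the interval bounds; the values exercised by the new `stride_rounding` unit test further down in this diff illustrate it. For `Interval::mock(3, 13).with_stride(10)` (residue class 3 modulo 10), rounding 5 up yields 13 and rounding it down yields 3, while -277 already lies in the residue class and is returned unchanged. For the one-byte interval `Interval::mock_i8(100, 110).with_stride(10)`, rounding -123 down would require -130, which overflows `i8`, so `round_down_to_stride_of` returns `None`.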
src/cwe_checker_lib/src/abstract_domain/interval/bin_ops.rs

@@ -5,9 +5,7 @@ impl IntervalDomain {
     /// if one adds a value from `self` to a value from `rhs`.
     pub fn add(&self, rhs: &Self) -> Self {
         let mut interval: IntervalDomain = self.interval.add(&rhs.interval).into();
-        if interval.is_top() {
-            interval
-        } else {
+        if !interval.is_top() {
             interval.widening_delay = std::cmp::max(self.widening_delay, rhs.widening_delay);
             interval.update_widening_lower_bound(&self

@@ -31,17 +29,15 @@ impl IntervalDomain {
                 .as_ref()
                 .and_then(|bound| bound.signed_add_overflow_checked(&self.interval.end)),
         );
-            interval
         }
+        interval
     }

     /// Compute the interval of possible results
     /// if one subtracts a value in `rhs` from a value in `self`.
     pub fn sub(&self, rhs: &Self) -> Self {
         let mut interval: IntervalDomain = self.interval.sub(&rhs.interval).into();
-        if interval.is_top() {
-            interval
-        } else {
+        if !interval.is_top() {
             interval.widening_delay = std::cmp::max(self.widening_delay, rhs.widening_delay);
             interval.update_widening_lower_bound(&self

@@ -65,8 +61,8 @@ impl IntervalDomain {
             .as_ref()
             .and_then(|bound| self.interval.end.signed_sub_overflow_checked(bound)),
         );
-            interval
         }
+        interval
     }

     /// Compute the interval of possible results
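The interval-level `add` and `sub` called here now propagate strides (see the `Interval::add`/`Interval::sub` hunks further below); the new `add_and_sub` test in `simple_interval/tests.rs` pins down the expected behaviour. A minimal sketch using the mock helpers from that test module:

    let left = Interval::mock(3, 15).with_stride(12);
    let right = Interval::mock(-2, 18).with_stride(10);
    // the bounds add up to [1, 33]; the result stride is gcd(12, 10) = 2
    assert_eq!(left.add(&right), Interval::mock(1, 33).with_stride(2));
    assert_eq!(left.sub(&right), Interval::mock(-15, 17).with_stride(2));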
src/cwe_checker_lib/src/abstract_domain/interval/simple_interval.rs

 use crate::intermediate_representation::*;
 use crate::prelude::*;
+use gcd::Gcd;

-/// An interval of values with a fixed byte size.
+/// A strided interval of values with a fixed byte size.
 ///
 /// The interval bounds are interpreted as signed integers,
 /// i.e. `self.start` is not allowed to be greater than `self.end`
 /// as signed integers.
+///
+/// The values represented by the interval are `start, start + stride, start + 2*stride, ... , end`.
+/// The following invariants have to hold for a correct interval instance:
+/// - `end - start % stride == 0`
+/// - if `start == end`, then the stride should always be set to zero.
 #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Clone)]
 pub struct Interval {
     /// The start of the interval. The bound is included in the represented interval.
     pub start: Bitvector,
     /// The end of the interval. The bound is included in the represented interval.
     pub end: Bitvector,
+    /// The stride.
+    pub stride: u64,
 }

 impl Interval {
@@ -19,9 +27,15 @@ impl Interval {
     ///
     /// Both `start` and `end` of the interval are inclusive,
     /// i.e. contained in the represented interval.
-    pub fn new(start: Bitvector, end: Bitvector) -> Interval {
+    ///
+    /// The function automatically rounds down `end` (if necessary)
+    /// so that it is contained in the same residue class as the start value modulo the stride.
+    /// If the stride is 0 then `end` will be set to `start`.
+    pub fn new(start: Bitvector, end: Bitvector, stride: u64) -> Interval {
         assert_eq!(start.width(), end.width());
-        Interval { start, end }
+        let mut interval = Interval { start, end, stride };
+        interval.adjust_end_to_value_in_stride();
+        interval
     }

     /// Construct a new unconstrained interval.
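A small illustration of the rounding that `new` performs; the literals here are hypothetical and chosen only to show the residue-class adjustment:

    // 21 - 0 is not divisible by the stride 4, so `end` is rounded down to 20:
    let interval = Interval::new(Bitvector::from_i64(0), Bitvector::from_i64(21), 4);
    assert_eq!(interval, Interval::new(Bitvector::from_i64(0), Bitvector::from_i64(20), 4));
    // the represented value set is {0, 4, 8, 12, 16, 20}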
@@ -29,12 +43,13 @@ impl Interval {
         Interval {
             start: Bitvector::signed_min_value(bytesize.into()),
             end: Bitvector::signed_max_value(bytesize.into()),
+            stride: 1,
         }
     }

     /// Returns true if all values representable by bitvectors of the corresponding length are contained in the interval.
     pub fn is_top(&self) -> bool {
-        (self.start.clone() - &Bitvector::one(self.start.width())) == self.end
+        (self.start.clone() - &Bitvector::one(self.start.width())) == self.end && self.stride == 1
     }

     /// Get the size in bytes of values contained in the interval.
@@ -52,7 +67,17 @@ impl Interval {
         }
         let start = signed_min(&self.start, &other.start);
         let end = signed_max(&self.end, &other.end);
-        Interval { start, end }
+        let start_diff = if self.start.checked_sgt(&other.start).unwrap() {
+            self.start.clone() - &other.start
+        } else {
+            other.start.clone() - &self.start
+        };
+        let stride = if let Ok(start_diff) = start_diff.try_to_u64() {
+            self.stride.gcd(other.stride).gcd(start_diff)
+        } else {
+            1
+        };
+        Interval { start, end, stride }
     }

     /// Compute the intersection of two intervals as intervals of signed integers.
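The merged stride is the gcd of both strides and of the difference between the two start values; the first case of the new `signed_merge` test below shows why the start difference matters:

    let a = Interval::mock(1, 13).with_stride(6);  // {1, 7, 13}
    let b = Interval::mock(4, 10).with_stride(3);  // {4, 7, 10}
    // gcd(6, 3) = 3, and the start difference 4 - 1 = 3 is also a multiple of 3,
    // so all merged values stay in the residue class 1 modulo 3:
    assert_eq!(a.signed_merge(&b), Interval::mock(1, 13).with_stride(3));
    // With b = Interval::mock(3, 9).with_stride(3) the start difference is 2,
    // so the merge falls back to stride gcd(6, 3, 2) = 1.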
@@ -60,18 +85,123 @@ impl Interval {
     pub fn signed_intersect(&self, other: &Interval) -> Result<Interval, Error> {
         let start = signed_max(&self.start, &other.start);
         let end = signed_min(&self.end, &other.end);
-        if start.checked_sle(&end).unwrap() {
-            Ok(Interval { start, end })
+        if self.stride == 0 && other.stride == 0 {
+            if start == end {
+                return Ok(Interval {
+                    start,
+                    end,
+                    stride: 0,
+                });
+            } else {
+                return Err(anyhow!("Empty interval"));
+            }
+        }
+        if self.start.bytesize() > ByteSize::new(8) {
+            // We ignore the stride for bytesizes larger than 8 bytes
+            let stride = !(start == end) as u64; // 0 if start == end, else 1
+            if start.checked_sle(&end).unwrap() {
+                return Ok(Interval { start, end, stride });
+            } else {
+                return Err(anyhow!("Empty interval"));
+            }
+        }
+        if let Some((stride, remainder)) = compute_intersection_residue_class(self, other)? {
+            Interval { start, end, stride }.adjust_to_stride_and_remainder(stride, remainder)
         } else {
             Err(anyhow!("Empty interval"))
         }
     }

-    /// Return the number of contained values of the interval as an unsigned bitvector.
-    /// If the interval is unconstrained, return zero
-    /// (since the maximal number of elements is not representable in a bitvector of the same byte size).
-    pub fn length(&self) -> Bitvector {
-        self.end.clone() - &self.start + &Bitvector::one(self.start.width())
-    }
+    /// If `self.start == self.end` set the stride to 0 and to 1 if `self.start < self.end`.
+    fn set_stride_to_unknown(&mut self) {
+        if self.start == self.end {
+            self.stride = 0;
+        } else {
+            self.stride = 1;
+        }
+    }
+
+    /// Round down `self.end` to the nearest value such that `self.end - self.start` is again divisible by the stride.
+    /// If afterwards `self.start == self.end` holds then set the stride to 0.
+    pub fn adjust_end_to_value_in_stride(&mut self) {
+        if self.stride == 0 {
+            self.end = self.start.clone();
+            return;
+        }
+        if self.stride == 1 && self.start != self.end {
+            return;
+        }
+        if let (Ok(start), Ok(end)) = (self.start.try_to_i64(), self.end.try_to_i64()) {
+            let diff = (end - start) as u64 % self.stride;
+            let diff = Bitvector::from_u64(diff).into_resize_unsigned(self.end.bytesize());
+            self.end.checked_sub_assign(&diff).unwrap();
+            if self.start == self.end {
+                self.stride = 0;
+            }
+        } else {
+            self.set_stride_to_unknown();
+        }
+    }
+
+    /// Round up `self.start` to the nearest value such that `self.end - self.start` is again divisible by the stride.
+    /// If afterwards `self.start == self.end` holds then set the stride to 0.
+    pub fn adjust_start_to_value_in_stride(&mut self) {
+        if self.stride == 0 {
+            self.start = self.end.clone();
+            return;
+        }
+        if self.stride == 1 && self.start != self.end {
+            return;
+        }
+        if let (Ok(start), Ok(end)) = (self.start.try_to_i64(), self.end.try_to_i64()) {
+            let diff = (end - start) as u64 % self.stride;
+            let diff = Bitvector::from_u64(diff).into_resize_unsigned(self.end.bytesize());
+            self.start.checked_add_assign(&diff).unwrap();
+            if self.start == self.end {
+                self.stride = 0;
+            }
+        } else {
+            self.set_stride_to_unknown();
+        }
+    }
+
+    /// Change the given interval such that it only contains values with the given remainder modulo the given stride.
+    /// This may round up the start of the interval and may round down the end of the interval.
+    /// If the resulting interval is empty then an error is returned.
+    /// This function ignores and replaces the previous stride of the interval.
+    ///
+    /// For intervals with bytesize greater than 8 this function just returns the unmodified interval.
+    pub fn adjust_to_stride_and_remainder(
+        self,
+        stride: u64,
+        remainder: u64,
+    ) -> Result<Self, Error> {
+        if self.bytesize() > ByteSize::new(8) {
+            return Ok(self);
+        }
+        let (mut start, mut end) = (
+            self.start.try_to_i128().unwrap(),
+            self.end.try_to_i128().unwrap(),
+        );
+        let diff = (remainder as i128 - start) % stride as i128;
+        let diff = (diff + stride as i128) % stride as i128;
+        start += diff;
+        let diff = (end - remainder as i128) % stride as i128;
+        let diff = (diff + stride as i128) % stride as i128;
+        end -= diff;
+        if start > i64::MAX as i128 || end < i64::MIN as i128 || start > end {
+            return Err(anyhow!("Empty interval"));
+        }
+        let start = Bitvector::from_i64(start as i64)
+            .into_truncate(self.start.bytesize())
+            .unwrap();
+        let end = Bitvector::from_i64(end as i64)
+            .into_truncate(self.end.bytesize())
+            .unwrap();
+        let stride = if start == end { 0 } else { stride };
+        Ok(Interval { start, end, stride })
+    }

     /// Compute the interval represented if the byte size of the value is zero-extended.
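The effect of `adjust_to_stride_and_remainder` on both interval ends can be read off the new unit test for it in `simple_interval/tests.rs`:

    let val = Interval::mock(-17, 23).with_stride(5);
    // start is rounded up to the next value congruent to 2 modulo 4, end is rounded down:
    assert_eq!(
        val.adjust_to_stride_and_remainder(4, 2).unwrap(),
        Interval::mock(-14, 22).with_stride(4)
    );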
@@ -85,14 +215,30 @@ impl Interval {
             Interval {
                 start: self.start.into_zero_extend(width).unwrap(),
                 end: self.end.into_zero_extend(width).unwrap(),
+                stride: self.stride,
             }
         } else {
             // The interval either contains both -1 and 0 or wraps around
-            Interval {
-                start: Bitvector::zero(width.into()),
-                end: Bitvector::unsigned_max_value(self.end.width())
-                    .into_zero_extend(width)
-                    .unwrap(),
+            if let Ok(start) = self.start.try_to_i128() {
+                let stride = 1 << self.stride.trailing_zeros();
+                let remainder = (start % stride + stride) % stride;
+                Interval {
+                    start: Bitvector::zero(width.into()),
+                    end: Bitvector::unsigned_max_value(self.end.width())
+                        .into_zero_extend(width)
+                        .unwrap(),
+                    stride: stride as u64,
+                }
+                .adjust_to_stride_and_remainder(stride as u64, remainder as u64)
+                .unwrap()
+            } else {
+                Interval {
+                    start: Bitvector::zero(width.into()),
+                    end: Bitvector::unsigned_max_value(self.end.width())
+                        .into_zero_extend(width)
+                        .unwrap(),
+                    stride: 1,
+                }
             }
         }
     }
@@ -100,29 +246,36 @@ impl Interval {
     /// Truncate the bitvectors in the interval
     /// by removing the least significant bytes lower than the `low_byte` from them.
     pub fn subpiece_higher(self, low_byte: ByteSize) -> Self {
+        let start = self.start.subpiece(low_byte, self.bytesize() - low_byte);
+        let end = self.end.subpiece(low_byte, self.bytesize() - low_byte);
+        let stride = !(start == end) as u64;
         Interval {
             start: self.start.subpiece(low_byte, self.bytesize() - low_byte),
             end: self.end.subpiece(low_byte, self.bytesize() - low_byte),
+            stride,
         }
     }

     /// Truncate the bitvectors in the interval to `size`,
     /// i.e. the most significant bytes (higher than `size`) are removed from all values.
     pub fn subpiece_lower(self, size: ByteSize) -> Self {
-        let length = self.length();
-        if !length.is_zero()
-            && length
-                .checked_ule(
-                    &Bitvector::unsigned_max_value(size.into())
-                        .into_zero_extend(self.bytesize())
-                        .unwrap(),
-                )
-                .unwrap()
+        let length = self.end.clone() - &self.start;
+        if length
+            .checked_ule(
+                &Bitvector::unsigned_max_value(size.into())
+                    .into_zero_extend(self.bytesize())
+                    .unwrap(),
+            )
+            .unwrap()
         {
             let start = self.start.into_truncate(size).unwrap();
             let end = self.end.into_truncate(size).unwrap();
             if start.checked_sle(&end).unwrap() {
-                return Interval { start, end };
+                return Interval {
+                    start,
+                    end,
+                    stride: self.stride,
+                };
             }
         }
         Self::new_top(size)
@@ -143,8 +296,8 @@ impl Interval {
     /// and `other` contains the least significant bytes of the resulting values.
     pub fn piece(&self, other: &Interval) -> Self {
         if other.start.sign_bit().to_bool() && !other.end.sign_bit().to_bool() {
-            // The `other` interval contains both -1 and 0.
-            Interval {
+            // The `other` interval contains both negative and positive values.
+            let interval = Interval {
                 start: self
                     .start
                     .bin_op(BinOpType::Piece, &Bitvector::zero(other.start.width()))

@@ -153,11 +306,28 @@ impl Interval {
                     .end
                     .bin_op(BinOpType::Piece, &(-Bitvector::one(other.end.width())))
                     .unwrap(),
+                stride: 1,
+            };
+            if other.bytesize() > ByteSize::new(8) {
+                interval
+            } else {
+                let stride = 1u64 << other.stride.trailing_zeros();
+                let remainder = other.start.try_to_i128().unwrap() % stride as i128;
+                let remainder = ((remainder + stride as i128) % stride as i128) as u64;
+                interval
+                    .adjust_to_stride_and_remainder(stride, remainder)
+                    .unwrap()
             }
         } else {
+            let stride = match (self.stride, other.stride) {
+                (0, _) => other.stride,
+                (_, 0) => self.stride << other.bytesize().as_bit_length(),
+                _ => 1u64 << other.stride.trailing_zeros(),
+            };
             Interval {
                 start: self.start.bin_op(BinOpType::Piece, &other.start).unwrap(),
                 end: self.end.bin_op(BinOpType::Piece, &other.end).unwrap(),
+                stride,
             }
         }
     }
@@ -172,6 +342,7 @@ impl Interval {
             Interval {
                 start: -self.end,
                 end: -self.start,
+                stride: self.stride,
             }
         } else {
             Interval::new_top(self.bytesize())
@@ -195,7 +366,11 @@ impl Interval {
             self.start.signed_add_overflow_checked(&rhs.start),
             self.end.signed_add_overflow_checked(&rhs.end),
         ) {
-            Interval { start, end }
+            Interval {
+                start,
+                end,
+                stride: self.stride.gcd(rhs.stride),
+            }
         } else {
             Interval::new_top(self.bytesize())
         }
@@ -208,7 +383,11 @@ impl Interval {
             self.start.signed_sub_overflow_checked(&rhs.end),
             self.end.signed_sub_overflow_checked(&rhs.start),
         ) {
-            Interval { start, end }
+            Interval {
+                start,
+                end,
+                stride: self.stride.gcd(rhs.stride),
+            }
         } else {
             Interval::new_top(self.bytesize())
         }
@@ -236,13 +415,23 @@ impl Interval {
         Interval {
             start: min,
             end: max,
+            stride: self.stride.gcd(rhs.stride),
         }
     }

-    /// Return `true` if `bitvec` is contained in the interval.
+    /// Return `true` if `bitvec` is contained in the strided interval.
     /// Panics if the interval and `bitvec` have different bytesizes.
     pub fn contains(&self, bitvec: &Bitvector) -> bool {
-        self.start.checked_sle(bitvec).unwrap() && self.end.checked_sge(bitvec).unwrap()
+        if self.start == *bitvec {
+            return true;
+        }
+        self.start.checked_sle(bitvec).unwrap()
+            && self.end.checked_sge(bitvec).unwrap()
+            && {
+                if let Ok(diff) = (bitvec - &self.start).try_to_u64() {
+                    self.stride > 0 && diff % self.stride == 0
+                } else {
+                    true
+                }
+            }
     }
 }
@@ -252,6 +441,7 @@ impl From<Bitvector> for Interval {
         Interval {
             start: bitvec.clone(),
             end: bitvec,
+            stride: 0,
         }
     }
 }
@@ -273,3 +463,114 @@ fn signed_max(v1: &Bitvector, v2: &Bitvector) -> Bitvector {
         v2.clone()
     }
 }
+
+/// The extended Euclidean algorithm.
+///
+/// Returns a triple `(gcd, x, y)` such that `gcd` is the greatest common divisor of `a` and `b`
+/// and the following equation holds:
+/// ```txt
+/// gcd = x*a + y*b
+/// ```
+fn extended_gcd(a: i128, b: i128) -> (i128, i128, i128) {
+    if a == 0 {
+        (b, 0, 1)
+    } else {
+        let (g, left_inverse, right_inverse) = extended_gcd(b % a, a);
+        (g, right_inverse - (b / a) * left_inverse, left_inverse)
+    }
+}
+
+/// Compute the stride and the residue class of the intersection of the given intervals using the chinese remainder theorem.
+/// The inputs are required to have byte sizes not larger than 8 (= 64bit).
+///
+/// If the intersection is empty, then `Ok(None)` is returned.
+/// If an error occured during the computation (e.g. because of an integer overflow), then an error is returned.
+/// Note that this also includes the case where the computed stride is larger than [`u64::MAX`].
+fn compute_intersection_residue_class(
+    interval_left: &Interval,
+    interval_right: &Interval,
+) -> Result<Option<(u64, u64)>, Error> {
+    match (interval_left.stride, interval_right.stride) {
+        (0, 0) => {
+            // both intervals contain exactly one value
+            if interval_left.start == interval_right.start {
+                return Ok(Some((0, 0)));
+            } else {
+                return Ok(None);
+            }
+        }
+        (0, _) => {
+            if interval_right.contains(&interval_left.start) {
+                let stride = interval_right.stride as i128;
+                let remainder = interval_right.start.try_to_i128()? % stride;
+                let remainder = (remainder + stride) % stride;
+                return Ok(Some((stride as u64, remainder as u64)));
+            } else {
+                return Ok(None);
+            }
+        }
+        (_, 0) => {
+            if interval_left.contains(&interval_right.start) {
+                let stride = interval_left.stride as i128;
+                let remainder = interval_left.start.try_to_i128()? % stride;
+                let remainder = (remainder + stride) % stride;
+                return Ok(Some((stride as u64, remainder as u64)));
+            } else {
+                return Ok(None);
+            }
+        }
+        _ => (),
+    }
+    // We compute everything in i128 to reduce the likelihood of integer overflows.
+    let (stride_left, stride_right) = (
+        interval_left.stride as i128,
+        interval_right.stride as i128,
+    );
+    let (base_left, base_right) = (
+        interval_left.start.try_to_i64().unwrap() as i128,
+        interval_right.start.try_to_i64().unwrap() as i128,
+    );
+    // The result of the extended euclidean algorithm satisfies
+    // `gcd = left_inverse * stride_left + right_inverse * stride_right`.
+    // For us most important is the equation system
+    // ```
+    // left_inverse * stride_left = 0 (modulo stride_left)
+    // left_inverse * stride_left = gcd (modulo stride_right)
+    // right_inverse * stride_right = gcd (modulo stride_left)
+    // right_inverse * stride_right = 0 (modulo stride_right)
+    // ```
+    let (gcd, left_inverse, right_inverse) = extended_gcd(stride_left, stride_right);
+    if base_left as i128 % gcd != base_right as i128 % gcd {
+        // The residue classes do not intersect, thus the intersection is empty.
+        Ok(None)
+    } else {
+        let lcm = (stride_left / gcd) * stride_right;
+        // The residue class of the intersection is computed such that the following equations hold:
+        // ```
+        // residue_class = base_right (modulo stride_right)
+        // residue_class = base_left (modulo stride_left)
+        // ```
+        // The `% lcm` operations are there to reduce the risk of integer overflows
+        let residue_class = ((base_right % lcm) / gcd * (left_inverse * stride_left)) % lcm // = base_right / gcd * gcd (modulo stride_right)
+            + ((base_left % lcm) / gcd * (right_inverse * stride_right)) % lcm // = base_left / gcd * gcd (modulo stride_left)
+            + base_left % gcd; // = base_left % gcd = base_right % gcd
+        // Ensure that the residue class is not negative
+        let residue_class = (residue_class + lcm) % lcm;
+        // Since we cannot rule out integer overflows for all possible inputs,
+        // we need to check the correctness of the result.
+        if lcm <= u64::MAX as i128
+            && lcm % stride_left == 0
+            && lcm % stride_right == 0
+            && (base_left - residue_class) % stride_left == 0
+            && (base_right - residue_class) % stride_right == 0
+        {
+            Ok(Some((lcm as u64, residue_class as u64)))
+        } else {
+            Err(anyhow!(
+                "Integer overflow during chinese remainder theorem computation."
+            ))
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests;
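Worked through on the first case of the new `test_compute_intersection_residue_class` test: the left interval has stride 20 and start 21, the right one stride 15 and start 11. The strides have gcd 5, and 21 and 11 are both congruent to 1 modulo 5, so the residue classes intersect. The combined stride is lcm(20, 15) = 60, and the residue class 41 satisfies both congruences, since 41 ≡ 21 (mod 20) and 41 ≡ 11 (mod 15); the function therefore returns `Ok(Some((60, 41)))`.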
src/cwe_checker_lib/src/abstract_domain/interval/simple_interval/tests.rs  (new file, 0 → 100644)

use super::*;

impl Interval {
    pub fn mock(start: i64, end: i64) -> Interval {
        Interval::new(Bitvector::from_i64(start), Bitvector::from_i64(end), 1)
    }

    pub fn mock_i8(start: i8, end: i8) -> Interval {
        Interval::new(Bitvector::from_i8(start), Bitvector::from_i8(end), 1)
    }

    pub fn with_stride(mut self, stride: u64) -> Interval {
        self.stride = stride;
        self
    }
}

#[test]
fn signed_merge() {
    // Strides and Merge
    let a = Interval::mock(1, 13).with_stride(6);
    let b = Interval::mock(4, 10).with_stride(3);
    assert_eq!(a.signed_merge(&b), Interval::mock(1, 13).with_stride(3));

    let a = Interval::mock(1, 13).with_stride(6);
    let b = Interval::mock(3, 9).with_stride(3);
    assert_eq!(a.signed_merge(&b), Interval::mock(1, 13).with_stride(1));

    let a = Interval::mock(0, 24).with_stride(12);
    let b = Interval::mock(2, 42).with_stride(4);
    assert_eq!(a.signed_merge(&b), Interval::mock(0, 42).with_stride(2));
}

#[test]
fn adjust_start_or_end_to_value_in_stride() {
    let mut val = Interval::mock(-3, 10).with_stride(7);
    val.adjust_start_to_value_in_stride();
    assert_eq!(val, Interval::mock(3, 10).with_stride(7));

    let mut val = Interval::mock(-3, 10).with_stride(7);
    val.adjust_end_to_value_in_stride();
    assert_eq!(val, Interval::mock(-3, 4).with_stride(7));

    let mut val = Interval::mock(-3, 2).with_stride(7);
    val.adjust_start_to_value_in_stride();
    assert_eq!(val, Interval::mock(2, 2).with_stride(0));

    let mut val = Interval::mock(-3, 2).with_stride(7);
    val.adjust_end_to_value_in_stride();
    assert_eq!(val, Interval::mock(-3, -3).with_stride(0));
}

#[test]
fn adjust_to_stride_and_remainder() {
    let val = Interval::mock(-17, 23).with_stride(5);
    assert_eq!(
        val.adjust_to_stride_and_remainder(4, 2).unwrap(),
        Interval::mock(-14, 22).with_stride(4)
    );
    let val = Interval::mock(7, 24);
    assert!(val.adjust_to_stride_and_remainder(50, 5).is_err());
    let val = Interval::mock(5, 5).with_stride(1);
    assert_eq!(
        val.adjust_to_stride_and_remainder(1, 0).unwrap(),
        Interval::mock(5, 5)
    );
}

#[test]
fn zero_extend() {
    // Interval with only non-negative values
    let val = Interval::mock_i8(11, 51).with_stride(10);
    assert_eq!(
        val.zero_extend(ByteSize::new(8)),
        Interval::mock(11, 51).with_stride(10)
    );
    // Interval with only negative values
    let val = Interval::mock_i8(-50, -10).with_stride(10);
    assert_eq!(
        val.zero_extend(ByteSize::new(8)),
        Interval::mock(206, 246).with_stride(10)
    );
    // Interval with both positive and negative values
    let val = Interval::mock_i8(-3, 21).with_stride(12);
    assert_eq!(val.clone().zero_extend(ByteSize::new(1)), val);
    assert_eq!(
        val.zero_extend(ByteSize::new(8)),
        Interval::mock(1, 253).with_stride(4)
    );
}

#[test]
fn subpiece_higher() {
    let val = Interval::mock(3, 21).with_stride(6);
    assert_eq!(
        val.subpiece_higher(ByteSize::new(7)),
        Interval::from(Bitvector::from_i8(0))
    )
}

#[test]
fn subpiece_lower() {
    let val = Interval::mock(-15, 25).with_stride(10);
    assert_eq!(
        val.subpiece_lower(ByteSize::new(1)),
        Interval::mock_i8(-15, 25).with_stride(10)
    );
    let val = Interval::mock(-256, 25).with_stride(10);
    assert_eq!(
        val.subpiece_lower(ByteSize::new(1)),
        Interval::new_top(ByteSize::new(1))
    );
}

#[test]
fn piece() {
    let left = Interval::mock_i8(1, 4).with_stride(3);
    let right = Interval::mock_i8(-2, 2).with_stride(2);
    assert_eq!(
        left.piece(&right),
        Interval {
            start: Bitvector::from_i16(256),
            end: Bitvector::from_i16(1278),
            stride: 2,
        }
    );
    let left = Interval::mock_i8(1, 4).with_stride(3);
    let right = Interval::mock_i8(3, 15).with_stride(6);
    assert_eq!(
        left.piece(&right),
        Interval {
            start: Bitvector::from_i16(259),
            end: Bitvector::from_i16(1039),
            stride: 2,
        }
    );
}

#[test]
fn add_and_sub() {
    let left = Interval::mock(3, 15).with_stride(12);
    let right = Interval::mock(-2, 18).with_stride(10);
    assert_eq!(left.add(&right), Interval::mock(1, 33).with_stride(2));
    assert_eq!(left.sub(&right), Interval::mock(-15, 17).with_stride(2));
}

#[test]
fn contains() {
    let interval = Interval::mock(2, 10).with_stride(4);
    let elem = Bitvector::from_i64(4);
    assert!(!interval.contains(&elem));
    let elem = Bitvector::from_i64(6);
    assert!(interval.contains(&elem));
    let elem = Bitvector::from_i64(14);
    assert!(!interval.contains(&elem));
}

#[test]
fn test_extended_gcd() {
    assert_eq!(extended_gcd(48, 21), (3, -3, 7));
}

#[test]
fn test_compute_intersection_residue_class() {
    let left = Interval::mock(21, 421).with_stride(20);
    let right = Interval::mock(11, 191).with_stride(15);
    assert_eq!(
        compute_intersection_residue_class(&left, &right).unwrap(),
        Some((60, 41))
    );
    let left = Interval::mock(21, 421).with_stride(20);
    let right = Interval::mock(14, 194).with_stride(15);
    assert_eq!(
        compute_intersection_residue_class(&left, &right).unwrap(),
        None
    );
    let left = Interval::mock(0, 2 << 60).with_stride(2 << 60);
    let right = Interval::mock(2, (2 << 60) + 1).with_stride((2 << 60) - 1);
    assert!(compute_intersection_residue_class(&left, &right).is_err());
    let left = Interval::mock(3, 3);
    let right = Interval::mock(0, 15).with_stride(3);
    assert_eq!(
        compute_intersection_residue_class(&left, &right).unwrap(),
        Some((3, 0))
    );
    let left = Interval::mock(3, 23).with_stride(5);
    let right = Interval::mock(8, 8);
    assert_eq!(
        compute_intersection_residue_class(&left, &right).unwrap(),
        Some((5, 3))
    );
    let left = Interval::mock(3, 3);
    let right = Interval::mock(0, 15).with_stride(5);
    assert_eq!(
        compute_intersection_residue_class(&left, &right).unwrap(),
        None
    );
}

#[test]
fn signed_intersect() {
    let left = Interval::mock(21, 421).with_stride(20);
    let right = Interval::mock(11, 191).with_stride(15);
    assert_eq!(
        left.signed_intersect(&right).unwrap(),
        Interval::mock(41, 161).with_stride(60)
    );
    let left = Interval::mock(21, 421).with_stride(20);
    let right = Interval::mock(14, 194).with_stride(15);
    assert!(left.signed_intersect(&right).is_err());
    let left = Interval::mock(0, 2 << 60).with_stride(2 << 60);
    let right = Interval::mock(2, (2 << 60) + 1).with_stride((2 << 60) - 1);
    assert!(left.signed_intersect(&right).is_err());
}
src/cwe_checker_lib/src/abstract_domain/interval/tests.rs

@@ -38,7 +38,17 @@ impl IntervalDomain {
     /// Set the widening delay to the interval length
     /// to simulate that `self` was just widened.
     pub fn as_freshly_widened(mut self) -> Self {
-        self.widening_delay = self.interval.length().try_to_u64().unwrap();
+        self.widening_delay = (self.interval.end.clone() - &self.interval.start)
+            .try_to_u64()
+            .unwrap();
         self
     }
+
+    /// Set the stride of the interval.
+    /// also rounds down the interval end to be contained in the same residue class as the interval start.
+    pub fn with_stride(mut self, stride: u64) -> Self {
+        self.interval.stride = stride;
+        self.interval.adjust_end_to_value_in_stride();
+        self
+    }
 }

@@ -62,7 +72,7 @@ fn signed_merge() {
     let b = IntervalDomain::mock_with_bounds(None, 3, 3, Some(5));
     assert_eq!(
         a.merge(&b),
-        IntervalDomain::mock_with_bounds(None, -3, 5, None).as_freshly_widened()
+        IntervalDomain::mock_with_bounds(Some(-3), 1, 3, Some(5)).with_stride(2)
     );
     let a = IntervalDomain::mock_with_bounds(None, 1, 5, None);
     let b = IntervalDomain::mock_with_bounds(None, -1, -1, Some(5));

@@ -110,7 +120,7 @@ fn signed_merge() {
     let update = IntervalDomain::mock_with_bounds(Some(-3), -2, 3, None);
     var = var.merge(&update);
     let mut expected_result = IntervalDomain::mock_with_bounds(None, -3, 3, None);
-    expected_result.widening_delay = 6;
+    expected_result.widening_delay = 5; // The last update does not cause widening
     assert_eq!(var, expected_result);
 }

@@ -411,6 +421,14 @@ fn add_signed_bounds() {
         .clone()
         .add_signed_less_equal_bound(&Bitvector::from_i64(-20));
     assert!(x.is_err());
+    let x = IntervalDomain::mock(0, 10)
+        .with_stride(10)
+        .add_signed_less_equal_bound(&Bitvector::from_i64(15));
+    assert_eq!(x.unwrap(), IntervalDomain::mock(0, 10).with_stride(10));
+    let x = IntervalDomain::mock(0, 10)
+        .with_stride(10)
+        .add_signed_less_equal_bound(&Bitvector::from_i64(5));
+    assert_eq!(x.unwrap(), IntervalDomain::mock(0, 0));

     //signed_greater_equal
     let x = interval

@@ -431,6 +449,14 @@ fn add_signed_bounds() {
         x.unwrap(),
         IntervalDomain::mock_with_bounds(Some(-20), -10, 10, Some(100))
     );
+    let x = IntervalDomain::mock(0, 10)
+        .with_stride(10)
+        .add_signed_greater_equal_bound(&Bitvector::from_i64(-5));
+    assert_eq!(x.unwrap(), IntervalDomain::mock(0, 10).with_stride(10));
+    let x = IntervalDomain::mock(0, 10)
+        .with_stride(10)
+        .add_signed_greater_equal_bound(&Bitvector::from_i64(5));
+    assert_eq!(x.unwrap(), IntervalDomain::mock(10, 10));
 }

 #[test]

@@ -588,3 +614,44 @@ fn float_nan_bytesize() {
     assert!(result.is_top());
     assert_eq!(result.bytesize(), ByteSize::new(1));
 }
+
+#[test]
+fn stride_rounding() {
+    let interval = Interval::mock(3, 13).with_stride(10);
+    assert_eq!(
+        Bitvector::from_i64(5).round_up_to_stride_of(&interval).unwrap(),
+        Bitvector::from_i64(13)
+    );
+    assert_eq!(
+        Bitvector::from_i64(5).round_down_to_stride_of(&interval).unwrap(),
+        Bitvector::from_i64(3)
+    );
+    assert_eq!(
+        Bitvector::from_i64(-277).round_up_to_stride_of(&interval).unwrap(),
+        Bitvector::from_i64(-277)
+    );
+    assert_eq!(
+        Bitvector::from_i64(-277).round_down_to_stride_of(&interval).unwrap(),
+        Bitvector::from_i64(-277)
+    );
+    let interval = Interval::mock_i8(100, 110).with_stride(10);
+    assert_eq!(
+        Bitvector::from_i8(-123).round_up_to_stride_of(&interval).unwrap(),
+        Bitvector::from_i8(-120)
+    );
+    assert_eq!(Bitvector::from_i8(-123).round_down_to_stride_of(&interval), None);
+}
src/cwe_checker_lib/src/analysis/pointer_inference/object.rs

@@ -461,7 +461,7 @@ mod tests {
         object.merge_value(new_data(23), &bv(-12));
         assert_eq!(
             object.get_value(Bitvector::from_i64(-12), ByteSize::new(8)),
-            Data::Value(ValueDomain::new_top(ByteSize::new(8)))
+            Data::Value(IntervalDomain::mock(4, 23).with_stride(19))
         );
         let mut other_object = new_abstract_object();
src/cwe_checker_lib/src/analysis/pointer_inference/object_list.rs

@@ -536,7 +536,7 @@ mod tests {
             merged
                 .get_value(&Data::Pointer(pointer.clone()), ByteSize::new(8))
                 .unwrap(),
-            Data::Value(ValueDomain::new_top(ByteSize::new(8)))
+            Data::Value(IntervalDomain::mock(3, 42).with_stride(39))
         );
         assert_eq!(
             merged
src/cwe_checker_lib/src/intermediate_representation/bitvector.rs

@@ -10,6 +10,12 @@ pub type Bitvector = apint::ApInt;
 /// A trait to extend the bitvector type with useful helper functions
 /// that are not contained in the [`apint`] crate.
 pub trait BitvectorExtended: Sized {
+    /// Resize `self` to the target byte size by either zero extending or truncating `self`.
+    fn into_resize_unsigned(self, size: ByteSize) -> Self;
+
+    /// Resize `self` to the target byte size by either sign extending or truncating `self`.
+    fn into_resize_signed(self, size: ByteSize) -> Self;
+
     /// Perform a cast operation on the bitvector.
     /// Returns an error for non-implemented cast operations (currently all float-related casts).
     fn cast(&self, kind: CastOpType, width: ByteSize) -> Result<Self, Error>;

@@ -269,6 +275,24 @@ impl BitvectorExtended for Bitvector {
     fn bytesize(&self) -> ByteSize {
         self.width().into()
     }

+    /// Resize `self` to the target byte size by either zero extending or truncating `self`.
+    fn into_resize_unsigned(self, size: ByteSize) -> Self {
+        if self.width() < size.into() {
+            self.into_zero_extend(size).unwrap()
+        } else {
+            self.into_truncate(size).unwrap()
+        }
+    }
+
+    /// Resize `self` to the target byte size by either sign extending or truncating `self`.
+    fn into_resize_signed(self, size: ByteSize) -> Self {
+        if self.width() < size.into() {
+            self.into_sign_extend(size).unwrap()
+        } else {
+            self.into_truncate(size).unwrap()
+        }
+    }
 }

 #[cfg(test)]
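A quick sketch of how the unsigned resize helper behaves; the concrete numbers and variable names are illustrative, not part of the diff:

    // truncate when shrinking, zero-extend when growing
    let shrunk = Bitvector::from_i64(0x12c).into_resize_unsigned(ByteSize::new(1));
    assert_eq!(shrunk, Bitvector::from_i8(0x2c));
    let grown = Bitvector::from_i8(0x2c).into_resize_unsigned(ByteSize::new(8));
    assert_eq!(grown, Bitvector::from_i64(0x2c));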
src/cwe_checker_lib/src/pcode/term.rs

@@ -834,12 +834,11 @@ impl Project {
         for sub in self.program.term.subs.iter_mut() {
             if !sub.term.blocks.is_empty()
                 && sub.tid.address != sub.term.blocks[0].tid.address
-                && sub
+                && !sub
                     .term
                     .blocks
                     .iter()
-                    .find(|block| block.tid.address == sub.tid.address)
-                    .is_none()
+                    .any(|block| block.tid.address == sub.tid.address)
             {
                 log_messages.push(LogMessage::new_error(format!(
                     "Starting block of function {} ({}) not found.",
src/cwe_checker_lib/src/utils/symbol_utils.rs

@@ -30,11 +30,7 @@ pub fn get_calls_to_symbols<'a, 'b>(
         for jmp in blk.term.jmps.iter() {
             if let Jmp::Call { target: dst, .. } = &jmp.term {
                 if symbols.contains_key(dst) {
-                    calls.push((
-                        sub.term.name.as_str(),
-                        &jmp.tid,
-                        symbols.get(dst).clone().unwrap(),
-                    ));
+                    calls.push((sub.term.name.as_str(), &jmp.tid, symbols.get(dst).unwrap()));
                 }
             }
         }