old htb folders
This commit is contained in:
2023-08-29 21:53:22 +02:00
parent 62ab804867
commit 82b0759f1e
21891 changed files with 6277643 additions and 0 deletions

View File

@@ -0,0 +1,115 @@
#!/usr/bin/env python
# pylint: disable=F0401,W0401,W0603,
__version__ = "9.2.35"

# Hard gate: this codebase relies on Python 3 semantics. Under Python 2,
# `bytes` and `str` are the same object, so this identity check detects it.
if bytes is str:
    raise Exception("This module is designed for python 3 only. Please install an older version to use python 2.")

import os
import sys
import socket
import logging

# Package-level logger; the NullHandler keeps the library silent unless the
# embedding application configures logging itself.
l = logging.getLogger("claripy")
l.addHandler(logging.NullHandler())
from .errors import *
from . import operations
from . import ops as _all_operations
# This is here for later, because we'll fuck the namespace in a few lines
from . import backends as _backends_module
from .backends import Backend
from .backend_object import BackendObject
#
# backend objects
#
from . import bv
from . import fp
from . import vsa
from .fp import FSORT_DOUBLE, FSORT_FLOAT
from .annotation import *
#
# Operations
#
from .ast.base import *
from .ast.bv import *
from .ast.fp import *
from .ast.bool import *
from .ast.strings import *
from . import ast
# The names imported from .ast.* above are runtime placeholders (see
# ast/__init__.py). Remove them from this namespace and let ast._import()
# rebind the real classes now that every submodule exists -- this breaks the
# circular imports between the AST modules and the rest of the package.
del BV
del Bool
del FP
del Base
ast._import()
def BV(name, size, explicit_name=None):  # pylint:disable=function-redefined
    """Deprecated alias for :func:`BVS`; warns loudly and delegates."""
    msg = "DEPRECATION WARNING: claripy.BV is deprecated and will soon be removed. Please use claripy.BVS, instead."
    l.critical(msg)
    print(msg)
    return BVS(name, size, explicit_name=explicit_name)
#
# Initialize the backends
#
from . import backend_manager as _backend_manager

# The concrete backend is registered as both the default eval backend and a
# "quick" backend; VSA is neither.
_backend_manager.backends._register_backend(_backends_module.BackendConcrete(), "concrete", True, True)
_backend_manager.backends._register_backend(_backends_module.BackendVSA(), "vsa", False, False)

# Use a remote Z3 backend when this is not a worker process and REMOTE is
# set in the environment; otherwise use the in-process Z3 backend.
if not os.environ.get("WORKER", False) and os.environ.get("REMOTE", False):
    try:
        _backend_z3 = _backends_module.backendremote.BackendRemote()
    except OSError:
        raise ImportError("can't connect to backend")
else:
    _backend_z3 = _backends_module.BackendZ3()
_backend_manager.backends._register_backend(_backend_z3, "z3", False, False)

# Public handle to the backend registry.
backends = _backend_manager.backends
def downsize():
    """
    Clear all temporary data associated with any backend.

    Delegates to the backend registry, which asks each registered backend to
    drop its caches.
    """
    backends.downsize()
#
# Frontends
#
from .frontend import Frontend as _Frontend
from . import frontends
from . import frontend_mixins
from .solvers import *
#
# Convenient button
#
def reset():
    """Refresh module-level caching state: shrink the backend caches and
    drop the cached concrete bitvector values."""
    from .ast import bv  # pylint:disable=redefined-outer-name

    downsize()
    bv._bvv_cache.clear()
from .debug import set_debug

View File

@@ -0,0 +1,65 @@
class Annotation:
    """
    Annotations let callers attach extra, backend-visible metadata to ASTs,
    supporting claripy's role as an arithmetic instrumentation engine: they
    are a channel for passing extra information to the claripy backends.
    """

    @property
    def eliminatable(self):  # pylint:disable=no-self-use
        """
        Whether a simplification is allowed to drop this annotation entirely.

        :return: True if eliminatable, False otherwise
        """
        return True

    @property
    def relocatable(self):  # pylint:disable=no-self-use
        """
        Whether a simplification may move this annotation onto the
        simplified AST.

        :return: True if it can be relocated, false otherwise.
        """
        return False

    def relocate(self, src, dst):  # pylint:disable=no-self-use,unused-argument
        """
        Hook invoked when simplification must move an annotation.

        Consider ``y = x + zero`` where ``zero`` carries this annotation:

          1. if ``eliminatable`` is True, the simplifiers drop the annotation
             along with ``zero`` and ``y is x`` holds;
          2. elif ``relocatable`` is False, the simplifier aborts and ``y``
             is never simplified;
          3. elif ``relocatable`` is True, the simplifier computes the
             simplified result ``x`` and calls ``relocate(zero, x)`` to move
             the annotation away from the AST about to be eliminated.

        :param src: the old AST that was eliminated in the simplification
        :param dst: the new AST (the result of a simplification)
        :return: the annotation that will be applied to `dst`
        """
        return self
#
# Some built-in annotations
#
class SimplificationAvoidanceAnnotation(Annotation):
    """An annotation that blocks simplification: it can neither be dropped
    nor relocated, so any AST carrying it is left untouched by simplifiers."""

    @property
    def eliminatable(self):
        return False

    @property
    def relocatable(self):
        return False

View File

@@ -0,0 +1,41 @@
# pylint:disable=redefined-outer-name
from typing import TYPE_CHECKING

# Mypy is severely confused by this delayed import trickery, but works if we just pretend that the import
# happens here already
if TYPE_CHECKING:
    from .bits import Bits
    from .bv import BV
    from .vs import VS
    from .fp import FP
    from .bool import Bool, true, false
    from .int import Int
    from .base import Base
    from .strings import String
    from .. import ops as all_operations
else:
    # At runtime these start out as inert placeholders so claripy's __init__
    # can import this package before the AST submodules exist; _import()
    # below swaps in the real classes once everything is loaded.
    Bits = lambda *args, **kwargs: None
    BV = lambda *args, **kwargs: None
    VS = lambda *args, **kwargs: None
    FP = lambda *args, **kwargs: None
    Bool = lambda *args, **kwargs: None
    Int = lambda *args, **kwargs: None
    Base = lambda *args, **kwargs: None
    true = lambda *args, **kwargs: None
    false = lambda *args, **kwargs: None
    String = lambda *args, **kwargs: None
    all_operations = None
def _import():
    """Replace the placeholder globals above with the real AST classes.

    Called exactly once, from claripy/__init__.py, after all submodules are
    importable; this two-phase dance breaks the circular imports between the
    AST modules and the rest of the package.
    """
    global Bits, BV, VS, FP, Bool, Int, Base, String, true, false, all_operations

    from .bits import Bits
    from .bv import BV
    from .vs import VS
    from .fp import FP
    from .bool import Bool, true, false
    from .int import Int
    from .base import Base
    from .strings import String
    from .. import ops as all_operations

View File

@@ -0,0 +1,45 @@
from ..ast.base import Base
class Bits(Base):
    """
    Common base for AST types whose values are a fixed-width series of bits
    (currently bitvectors and IEEE floats).

    :ivar length: The length of this value in bits.
    """

    def make_like(self, op, args, **kwargs):
        # Propagate this AST's bit width unless the caller overrides it.
        kwargs.setdefault("length", self.length)
        return Base.make_like(self, op, args, **kwargs)

    def size(self):
        """
        :returns: The bit length of this AST
        """
        return self.length

    def _type_name(self):
        # e.g. "BV32" -- the class name suffixed with the width.
        return f"{self.__class__.__name__}{self.length}"

    def raw_to_bv(self):
        """
        Reinterpret this value's bit pattern as a bitvector.
        """
        raise NotImplementedError

    def raw_to_fp(self):
        """
        Reinterpret this value's bit pattern as an IEEE float.
        """
        raise NotImplementedError

    @staticmethod
    def _check_replaceability(old, new):
        # Substituting one AST for another is only sound if the widths match.
        if old.size() != new.size():
            raise ClaripyOperationError("replacements must have matching sizes")

    # len(ast) is the width in bits.
    __len__ = size
from ..errors import ClaripyOperationError

View File

@@ -0,0 +1,274 @@
import logging
from ..ast.base import Base, _make_name, ASTCacheKey
l = logging.getLogger("claripy.ast.bool")

# Cache of BoolV results keyed on the python bool value, so `true` and
# `false` are singletons.
_boolv_cache = {}


# This is a hilarious hack to get around some sort of bug in z3's python bindings, where
# under some circumstances stuff gets destructed out of order
def cleanup():
    global _boolv_cache  # pylint:disable=global-variable-not-assigned
    del _boolv_cache


import atexit

atexit.register(cleanup)
class Bool(Base):
    """AST node whose value is a boolean."""

    __slots__ = ()

    @staticmethod
    def _from_bool(like, val):  # pylint:disable=unused-argument
        # Coercion hook used by If() and friends: wrap a python bool.
        return BoolV(val)

    def is_true(self):
        """
        Check whether this AST is trivially True. A False result only means
        the quick backends could not prove it; the AST *might* still be True
        (i.e., if it were simplified via Z3), but that is hard to tell quickly.
        """
        return is_true(self)

    def is_false(self):
        """
        Check whether this AST is trivially False. A False result only means
        the quick backends could not prove it; the AST *might* still be False
        (i.e., if it were simplified via Z3), but that is hard to tell quickly.
        """
        return is_false(self)
def BoolS(name, explicit_name=None):
    """
    Create a boolean symbol (i.e., a variable).

    :param name: The name of the symbol.
    :param explicit_name: If False, an identifier is appended to the name to
                          ensure uniqueness.
    :return: A Bool object representing this symbol.
    """
    explicit = explicit_name if explicit_name is not None else False
    n = _make_name(name, -1, explicit)
    return Bool("BoolS", (n,), variables={n}, symbolic=True)
def BoolV(val):
    """Return the (cached) concrete boolean AST for ``val``."""
    cached = _boolv_cache.get(val)
    if cached is None:
        cached = Bool("BoolV", (val,))
        _boolv_cache[val] = cached
    return cached
#
# some standard ASTs
#
# Cached singletons for the two concrete booleans.
true = BoolV(True)
false = BoolV(False)

from .. import operations

# Bound operations: == and != compare two boolean ASTs; intersection is the
# VSA-style meet of two boolean values.
Bool.__eq__ = operations.op("__eq__", (Bool, Bool), Bool)
Bool.__ne__ = operations.op("__ne__", (Bool, Bool), Bool)
Bool.intersection = operations.op("intersection", (Bool, Bool), Bool)
#
# Unbound operations
#
def If(*args):
    """
    ``If(cond, iftrue, iffalse)``: build an if-then-else expression.

    The coercion here is strange enough that it is implemented manually:
    python bools become BoolV, and the two branches are coerced to a common
    AST type via the ``_from_<typename>`` hooks on the target class.
    """
    if len(args) != 3:
        raise ClaripyOperationError("invalid number of args passed to If")

    args = list(args)

    if isinstance(args[0], bool):
        args[0] = BoolV(args[0])

    # Pick the result's AST type from whichever branch is already an AST.
    ty = None
    if isinstance(args[1], Base):
        ty = type(args[1])
    elif isinstance(args[2], Base):
        ty = type(args[2])
    else:
        raise ClaripyTypeError("true/false clause of If must have bearable types")

    if isinstance(args[1], Bits) and isinstance(args[2], Bits) and args[1].length != args[2].length:
        raise ClaripyTypeError("sized arguments to If must have the same length")

    # Coerce each branch to `ty`, passing the *other* branch as the "like"
    # template (e.g. so the bit width can be copied from it).
    if not isinstance(args[1], ty):
        if hasattr(ty, "_from_" + type(args[1]).__name__):
            convert = getattr(ty, "_from_" + type(args[1]).__name__)
            args[1] = convert(args[2], args[1])
        else:
            raise ClaripyTypeError(f"can't convert {type(args[1])} to {ty}")
    if not isinstance(args[2], ty):
        if hasattr(ty, "_from_" + type(args[2]).__name__):
            convert = getattr(ty, "_from_" + type(args[2]).__name__)
            args[2] = convert(args[1], args[2])
        else:
            raise ClaripyTypeError(f"can't convert {type(args[2])} to {ty}")

    # Constant-fold a trivially true/false condition, keeping the condition's
    # annotations on the surviving branch.
    if is_true(args[0]):
        return args[1].append_annotations(args[0].annotations)
    elif is_false(args[0]):
        return args[2].append_annotations(args[0].annotations)

    # Collapse nested Ifs that test the same (or the negated) condition.
    if isinstance(args[1], Base) and args[1].op == "If" and args[1].args[0] is args[0]:
        return If(args[0], args[1].args[1], args[2])
    if isinstance(args[1], Base) and args[1].op == "If" and args[1].args[0] is Not(args[0]):
        return If(args[0], args[1].args[2], args[2])
    if isinstance(args[2], Base) and args[2].op == "If" and args[2].args[0] is args[0]:
        return If(args[0], args[1], args[2].args[2])
    if isinstance(args[2], Base) and args[2].op == "If" and args[2].args[0] is Not(args[0]):
        return If(args[0], args[1], args[2].args[1])

    # Identical branches, or branches that reduce to the condition itself.
    if args[1] is args[2]:
        return args[1]
    if args[1] is true and args[2] is false:
        return args[0]
    if args[1] is false and args[2] is true:
        return ~args[0]

    if issubclass(ty, Bits):
        return ty("If", tuple(args), length=args[1].length)
    else:
        return ty("If", tuple(args))
# Unbound boolean connectives.
And = operations.op("And", Bool, Bool, bound=False)
Or = operations.op("Or", Bool, Bool, bound=False)
Not = operations.op("Not", (Bool,), Bool, bound=False)

# Operator sugar: ~, & and | on Bool ASTs map to Not/And/Or.
Bool.__invert__ = Not
Bool.__and__ = And
Bool.__rand__ = And
Bool.__or__ = Or
Bool.__ror__ = Or
def is_true(e, exact=None):  # pylint:disable=unused-argument
    """Ask each quick backend whether ``e`` is concretely True; if none can
    decide, log it and conservatively answer False."""
    for backend in backends._quick_backends:
        try:
            return backend.is_true(e)
        except BackendError:
            continue
    l.debug("Unable to tell the truth-value of this expression")
    return False
def is_false(e, exact=None):  # pylint:disable=unused-argument
    """Ask each quick backend whether ``e`` is concretely False; if none can
    decide, log it and conservatively answer False."""
    for backend in backends._quick_backends:
        try:
            return backend.is_false(e)
        except BackendError:
            continue
    l.debug("Unable to tell the truth-value of this expression")
    return False
# For large tables, ite_dict that uses a binary search tree instead of a "linear" search tree.
# This improves Z3 search capability (eliminating branches) and decreases recursion depth:
# linear search trees make Z3 error out on tables larger than a couple hundred elements.)
def ite_dict(i, d, default):
    """
    Build a switch-like expression over ``i`` as a tree of If nodes.

    For large tables a binary search tree is produced rather than a linear
    chain: this improves Z3 search (eliminating branches) and keeps the
    recursion depth manageable.

    :param i: The variable which may take on multiple values affecting the final result
    :param d: A dict mapping possible values for i to values which the result could be
    :param default: A default value that the expression should take on if `i` matches none of the keys of `d`
    :return: An expression encoding the result of the above
    """
    if type(i) is ASTCacheKey:
        i = i.ast

    # Small tables: a linear chain of Ifs is fine.
    if len(d) < 4:
        return ite_cases([(i == c, v) for c, v in d.items()], default)

    # Large tables: split on the median key and recurse on each half.
    pivot = sorted(d)[len(d) // 2]
    low = {k: v for k, v in d.items() if k <= pivot}
    high = {k: v for k, v in d.items() if k > pivot}
    return If(i <= pivot, ite_dict(i, low, default), ite_dict(i, high, default))
def ite_cases(cases, default):
    """
    Fold a series of (condition, value) alternatives into a chain of Ifs.

    :param cases: A list of tuples (c, v). `c` is the condition under which `v` should be the result of the expression
    :param default: A default value that the expression should take on if none of the `c` conditions are satisfied
    :return: An expression encoding the result of the above
    """
    result = default
    for condition, value in reversed(list(cases)):
        # Skip cases whose value is already what the accumulated tail yields.
        if is_true(value == result):
            continue
        result = If(condition, value, result)
    return result
def reverse_ite_cases(ast):
    """
    Given an expression created by ``ite_cases``, yield the
    (condition, value) cases that generated it.

    :param ast:
    :return:
    """
    pending = [(true, ast)]
    while pending:
        cond, node = pending.pop(0)
        if node.op == "If":
            pending.append((And(cond, node.args[0]), node.args[1]))
            pending.append((And(cond, Not(node.args[0])), node.args[2]))
        else:
            yield cond, node
def constraint_to_si(expr):
    """
    Convert a constraint to SI (strided-interval) form if possible.

    :param expr: The constraint expression to convert.
    :return: A tuple ``(satisfiable, replace_list)`` where ``replace_list``
             maps original ASTs to their BVS replacements.
    """
    # (Removed dead initial assignments of satisfiable/replace_list that were
    # immediately overwritten by this call.)
    satisfiable, replace_list = backends.vsa.constraint_to_si(expr)

    # The VSA backend may hand back raw strided intervals; normalize every
    # replacement into an AST-level BVS.
    for idx, (ori, new) in enumerate(replace_list):
        if not isinstance(new, Base):
            new = BVS(
                new.name, new._bits, min=new._lower_bound, max=new._upper_bound, stride=new._stride, explicit_name=True
            )
            replace_list[idx] = (ori, new)
    return satisfiable, replace_list
from ..backend_manager import backends
from ..errors import ClaripyOperationError, ClaripyTypeError, BackendError
from .bits import Bits
from .bv import BVS

View File

@@ -0,0 +1,628 @@
import logging
import numbers
from .bits import Bits
from ..ast.base import _make_name
from .bool import If
from ..utils import deprecated
l = logging.getLogger("claripy.ast.bv")

# Cache of concrete BVV ASTs keyed on (value, size).
_bvv_cache = {}


# This is a hilarious hack to get around some sort of bug in z3's python bindings, where
# under some circumstances stuff gets destructed out of order
def cleanup():
    global _bvv_cache  # pylint:disable=global-variable-not-assigned
    del _bvv_cache


import atexit

atexit.register(cleanup)
class BV(Bits):
    """
    A class representing an AST of operations culminating in a bitvector.

    Do not instantiate this class directly, instead use BVS or BVV to construct a symbol or value, and then use
    operations to construct more complicated expressions.

    Individual sub-bits and bit-ranges can be extracted from a bitvector using index and slice notation.
    Bits are indexed weirdly. For a 32-bit AST:

        a[31] is the *LEFT* most bit, so it'd be the 0 in
            01111111111111111111111111111111
        a[0] is the *RIGHT* most bit, so it'd be the 0 in
            11111111111111111111111111111110
        a[31:30] are the two leftmost bits, so they'd be the 0s in:
            00111111111111111111111111111111
        a[1:0] are the two rightmost bits, so they'd be the 0s in:
            11111111111111111111111111111100
    """

    __slots__ = ()

    def chop(self, bits=1):
        """
        Chops a BV into consecutive sub-slices. Obviously, the length of this BV must be a multiple of bits.

        :returns: A list of smaller bitvectors, each ``bits`` in length. The first one will be the left-most (i.e.
                  most significant) bits.
        """
        s = len(self)
        if s % bits != 0:
            raise ValueError("expression length (%d) should be a multiple of 'bits' (%d)" % (len(self), bits))
        if s == bits:
            return [self]
        else:
            # Extract slices from the low end upward, then reverse so the
            # most-significant chunk comes first.
            return list(reversed([self[(n + 1) * bits - 1 : n * bits] for n in range(0, s // bits)]))

    def __getitem__(self, rng):
        """Bit extraction: ``a[hi:lo]`` or ``a[bit]``; see the class docstring
        for the (MSB-at-high-index) indexing convention."""
        if type(rng) is slice:
            # Default bounds: the whole vector. Negative indices wrap.
            left = rng.start if rng.start is not None else len(self) - 1
            right = rng.stop if rng.stop is not None else 0
            if left < 0:
                left = len(self) + left
            if right < 0:
                right = len(self) + right
            return Extract(left, right, self)
        else:
            return Extract(int(rng), int(rng), self)

    def get_byte(self, index):
        """
        Extracts a byte from a BV, where the index refers to the byte in a big-endian order

        :param index: the byte to extract
        :return: An 8-bit BV
        """
        # Convert the big-endian byte index into a little-endian byte
        # position (rounding the width up to whole bytes).
        pos = (self.size() + 7) // 8 - 1 - index
        if pos < 0:
            raise ValueError(
                "Incorrect index %d. Your index must be between %d and %d." % (index, 0, self.size() // 8 - 1)
            )
        r = self[min(pos * 8 + 7, self.size() - 1) : pos * 8]
        # Pad a partial leading byte up to 8 bits.
        if r.size() % 8 != 0:  # pylint:disable=no-member
            r = r.zero_extend(8 - r.size() % 8)  # pylint:disable=no-member
        return r

    def get_bytes(self, index, size):
        """
        Extracts several bytes from a bitvector, where the index refers to the byte in a big-endian order

        :param index: the byte index at which to start extracting
        :param size: the number of bytes to extract
        :return: A BV of size ``size * 8``
        """
        pos = (self.size() + 7) // 8 - 1 - index
        if pos < 0:
            raise ValueError(
                "Incorrect index %d. Your index must be between %d and %d." % (index, 0, self.size() // 8 - 1)
            )
        if size == 0:
            return BVV(0, 0)
        r = self[min(pos * 8 + 7, self.size() - 1) : (pos - size + 1) * 8]
        # Pad a partial leading byte up to a whole byte.
        if r.size() % 8 != 0:  # pylint:disable=no-member
            r = r.zero_extend(8 - r.size() % 8)  # pylint:disable=no-member
        return r

    def zero_extend(self, n):
        """
        Zero-extends the bitvector by n bits. So:

            a = BVV(0b1111, 4)
            b = a.zero_extend(4)
            b is BVV(0b00001111)
        """
        return ZeroExt(n, self)

    def sign_extend(self, n):
        """
        Sign-extends the bitvector by n bits. So:

            a = BVV(0b1111, 4)
            b = a.sign_extend(4)
            b is BVV(0b11111111)
        """
        return SignExt(n, self)

    def concat(self, *args):
        """
        Concatenates this bitvector with the bitvectors provided.
        This bitvector will be on the far-left, i.e. the most significant bits.
        """
        return Concat(self, *args)

    # The _from_* statics below are coercion hooks used by If() and similar
    # code to convert a python value into a BV "like" an existing AST.

    @staticmethod
    def _from_int(like, value):
        return BVV(value, like.length)

    @staticmethod
    def _from_Bool(like, value):
        # A boolean becomes a 1-or-0 bitvector of the template's width.
        return If(value, BVV(1, like.length), BVV(0, like.length))

    @staticmethod
    def _from_bytes(like, value):  # pylint:disable=unused-argument
        return BVV(value)

    @staticmethod
    def _from_str(like, value):  # pylint:disable=unused-argument
        l.warning("BVV value is being coerced from a unicode string, encoding as utf-8")
        value = value.encode("utf-8")
        return BVV(value)

    @staticmethod
    def _from_BVV(like, value):  # pylint:disable=unused-argument
        return BVV(value.value, value.size())

    def val_to_fp(self, sort, signed=True, rm=None):
        """
        Interpret this bitvector as an integer, and return the floating-point representation of that integer.

        :param sort: The sort of floating point value to return
        :param signed: Optional: whether this value is a signed integer
        :param rm: Optional: the rounding mode to use
        :return: An FP AST whose value is the same as this BV
        """
        # `fp` is the claripy.ast.fp module (imported at the bottom of this
        # file); `fp.fp` is the claripy.fp module holding RM and FSort.
        if rm is None:
            rm = fp.fp.RM.default()
        if sort is None:
            sort = fp.fp.FSort.from_size(self.length)
        op = fp.fpToFP if signed else fp.fpToFPUnsigned
        return op(rm, self, sort)

    def raw_to_fp(self):
        """
        Interpret the bits of this bitvector as an IEEE754 floating point number.
        The inverse of this function is raw_to_bv.

        :return: An FP AST whose bit-pattern is the same as this BV
        """
        sort = fp.fp.FSort.from_size(self.length)
        return fp.fpToFP(self, sort)

    def raw_to_bv(self):
        """
        A counterpart to FP.raw_to_bv - does nothing and returns itself.
        """
        return self

    def to_bv(self):
        return self.raw_to_bv()
def BVS(
    name,
    size,
    min=None,  # pylint:disable=redefined-builtin
    max=None,
    stride=None,
    uninitialized=False,
    explicit_name=None,
    discrete_set=False,
    discrete_set_max_card=None,
    **kwargs,
):
    """
    Create a bit-vector symbol (i.e., a variable).

    To bound an ordinary symbol used for symbolic execution, add constraints
    manually instead -- ``min``/``max``/``stride`` are exclusively for
    value-set analysis. **Do not use ``min`` and ``max`` for symbolic
    execution.**

    :param name: The name of the symbol.
    :param size: The size (in bits) of the bit-vector.
    :param min: The minimum value of the symbol, used only for value-set analysis.
    :param max: The maximum value of the symbol, used only for value-set analysis.
    :param stride: The stride of the symbol, used only for value-set analysis.
    :param uninitialized: Whether this value should be counted as an "uninitialized" value in the course of an
                          analysis.
    :param bool explicit_name: If False, an identifier is appended to the name to ensure uniqueness.
    :param bool discrete_set: If True, a DiscreteStridedIntervalSet will be used instead of a normal StridedInterval.
    :param int discrete_set_max_card: The maximum cardinality of the discrete set; ignored unless discrete_set is set.
    :returns: a BV object representing this symbol.
    """
    if stride == 0 and max != min:
        raise ClaripyValueError("BVSes of stride 0 should have max == min")

    if type(name) is bytes:
        name = name.decode()
    if type(name) is not str:
        raise TypeError("Name value for BVS must be a str, got %r" % type(name))

    explicit = explicit_name if explicit_name is not None else False
    n = _make_name(name, size, explicit)

    if not discrete_set:
        discrete_set_max_card = None

    return BV(
        "BVS",
        (n, min, max, stride, uninitialized, discrete_set, discrete_set_max_card),
        variables={n},
        length=size,
        symbolic=True,
        eager_backends=None,
        uninitialized=uninitialized,
        encoded_name=n.encode(),
        **kwargs,
    )
def BVV(value, size=None, **kwargs):
    """
    Creates a bit-vector value (i.e., a concrete value).

    :param value: The value. Either an integer or a bytestring. If it's the latter, it will be interpreted as the
                  bytes of a big-endian constant.
    :param size: The size (in bits) of the bit-vector. Optional if you provide a string, required for an integer.
    :returns: A BV object representing this value.
    """
    if type(value) in (bytes, bytearray, memoryview, str):
        if type(value) is str:
            l.warning("BVV value is a unicode string, encoding as utf-8")
            value = value.encode("utf-8")
        if size is None:
            size = len(value) * 8
        elif type(size) is not int:
            raise TypeError("Bitvector size must be either absent (implicit) or an integer")
        elif size != len(value) * 8:
            raise ClaripyValueError("string/size mismatch for BVV creation")
        value = int.from_bytes(value, "big")
    elif size is None or (type(value) is not int and value is not None):
        raise TypeError("BVV() takes either an integer value and a size or a string of bytes")

    # ensure the 0 <= value < (1 << size)
    # FIXME hack to handle None which is used for an Empty Strided Interval (ESI)
    if value is not None:
        value &= (1 << size) - 1

    if not kwargs:
        try:
            return _bvv_cache[(value, size)]
        except KeyError:
            pass

    result = BV("BVV", (value, size), length=size, **kwargs)
    # BUGFIX: only cache the plain (no-kwargs) construction. Previously a BVV
    # built with extra kwargs (e.g. annotations) was stored in the cache
    # unconditionally, so a later plain BVV(value, size) call could return
    # the kwargs-tainted object.
    if not kwargs:
        _bvv_cache[(value, size)] = result
    return result
def SI(
    name=None,
    bits=0,
    lower_bound=None,
    upper_bound=None,
    stride=None,
    to_conv=None,
    explicit_name=None,
    discrete_set=False,
    discrete_set_max_card=None,
):
    """Create a strided-interval symbol (VSA). When ``to_conv`` is given, it
    is first converted into a strided interval and its bounds/stride are used
    for the resulting BVS."""
    if name is None:
        name = "unnamed"

    if to_conv is not None:
        converted = vsa.CreateStridedInterval(
            name=name, bits=bits, lower_bound=lower_bound, upper_bound=upper_bound, stride=stride, to_conv=to_conv
        )
        return BVS(
            name,
            converted._bits,
            min=converted._lower_bound,
            max=converted._upper_bound,
            stride=converted._stride,
            explicit_name=explicit_name,
        )

    return BVS(
        name,
        bits,
        min=lower_bound,
        max=upper_bound,
        stride=stride,
        explicit_name=explicit_name,
        discrete_set=discrete_set,
        discrete_set_max_card=discrete_set_max_card,
    )
def TSI(bits, name=None, uninitialized=False, explicit_name=None):
    """Create a "top" strided interval: an unconstrained BVS of ``bits`` bits."""
    symbol_name = "unnamed" if name is None else name
    return BVS(symbol_name, bits, uninitialized=uninitialized, explicit_name=explicit_name)
def ESI(bits, **kwargs):
    """Create an empty strided interval: a BVV whose value is None (see the
    ESI hack in BVV)."""
    return BVV(None, bits, **kwargs)
def ValueSet(bits, region=None, region_base_addr=None, value=None, name=None, val=None):
    """
    Create a region-annotated value set (VSA).

    :param bits: width of the resulting bitvector
    :param region: region identifier attached via RegionAnnotation
    :param region_base_addr: base address of the region (defaults to 0)
    :param value: offset value -- a plain number or a vsa.StridedInterval
    :param name: name for the underlying BVS (defaults to "ValueSet")
    :param val: deprecated alias for `value`, kept for backward compatibility
    """
    # Backward compatibility
    if value is None and val is not None:
        value = val
    if region_base_addr is None:
        region_base_addr = 0

    v = region_base_addr + value
    # Backward compatibility
    if isinstance(v, numbers.Number):
        min_v, max_v = v, v
        stride = 0
    elif isinstance(v, vsa.StridedInterval):
        min_v, max_v = v.lower_bound, v.upper_bound
        stride = v.stride
    else:
        raise ClaripyValueError("ValueSet() does not take `value` of type %s" % type(value))

    if name is None:
        name = "ValueSet"
    # NOTE(review): `v` (and hence min_v/max_v) already includes
    # region_base_addr, so adding it again below looks like it double-counts
    # the base address -- confirm this is intentional before changing it.
    bvs = BVS(name, bits, min=region_base_addr + min_v, max=region_base_addr + max_v, stride=stride)

    # Annotate the bvs and return the new AST
    vs = bvs.annotate(vsa.RegionAnnotation(region, region_base_addr, value))
    return vs


# Short alias for ValueSet.
VS = ValueSet
def DSIS(
    name=None, bits=0, lower_bound=None, upper_bound=None, stride=None, explicit_name=None, to_conv=None, max_card=None
):
    """Create a discrete strided-interval set symbol; delegates to SI with
    ``discrete_set=True``. When ``to_conv`` is given, its bounds/stride are
    taken from the converted strided interval."""
    if to_conv is None:
        return SI(
            name=name,
            bits=bits,
            lower_bound=lower_bound,
            upper_bound=upper_bound,
            stride=stride,
            explicit_name=explicit_name,
            discrete_set=True,
            discrete_set_max_card=max_card,
        )

    converted = vsa.CreateStridedInterval(bits=to_conv.size(), to_conv=to_conv)
    return SI(
        name=name,
        bits=converted._bits,
        lower_bound=converted._lower_bound,
        upper_bound=converted._upper_bound,
        stride=converted._stride,
        explicit_name=explicit_name,
        discrete_set=True,
        discrete_set_max_card=max_card,
    )
#
# Unbound operations
#
from .bool import Bool
from .. import operations

# ---------------------------------------------------------------------------
# Unbound (module-level) operations.
# ---------------------------------------------------------------------------

# Unsigned comparisons map onto the python comparison op names; signed
# comparisons get their own S-prefixed op names.
ULT = operations.op("__lt__", (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
ULE = operations.op("__le__", (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
UGT = operations.op("__gt__", (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
UGE = operations.op("__ge__", (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
SLT = operations.op("SLT", (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
SLE = operations.op("SLE", (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
SGT = operations.op("SGT", (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
SGE = operations.op("SGE", (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)

# Signed division / modulo.
SDiv = operations.op(
    "SDiv",
    (BV, BV),
    BV,
    extra_check=operations.length_same_check,
    bound=False,
    calc_length=operations.basic_length_calc,
)
SMod = operations.op(
    "SMod",
    (BV, BV),
    BV,
    extra_check=operations.length_same_check,
    bound=False,
    calc_length=operations.basic_length_calc,
)

# Bit manipulation: logical shift right, extension, extraction, concat,
# rotation, byte reversal, and the VSA lattice operations.
LShR = operations.op(
    "LShR",
    (BV, BV),
    BV,
    extra_check=operations.length_same_check,
    calc_length=operations.basic_length_calc,
    bound=False,
)
SignExt = operations.op(
    "SignExt", (int, BV), BV, extra_check=operations.extend_check, calc_length=operations.ext_length_calc, bound=False
)
ZeroExt = operations.op(
    "ZeroExt", (int, BV), BV, extra_check=operations.extend_check, calc_length=operations.ext_length_calc, bound=False
)
Extract = operations.op(
    "Extract",
    (int, int, BV),
    BV,
    extra_check=operations.extract_check,
    calc_length=operations.extract_length_calc,
    bound=False,
)
Concat = operations.op("Concat", BV, BV, calc_length=operations.concat_length_calc, bound=False)
RotateLeft = operations.op(
    "RotateLeft",
    (BV, BV),
    BV,
    extra_check=operations.length_same_check,
    calc_length=operations.basic_length_calc,
    bound=False,
)
RotateRight = operations.op(
    "RotateRight",
    (BV, BV),
    BV,
    extra_check=operations.length_same_check,
    calc_length=operations.basic_length_calc,
    bound=False,
)
Reverse = operations.op("Reverse", (BV,), BV, calc_length=operations.basic_length_calc, bound=False)
union = operations.op(
    "union",
    (BV, BV),
    BV,
    extra_check=operations.length_same_check,
    calc_length=operations.basic_length_calc,
    bound=False,
)
widen = operations.op(
    "widen",
    (BV, BV),
    BV,
    extra_check=operations.length_same_check,
    calc_length=operations.basic_length_calc,
    bound=False,
)
intersection = operations.op(
    "intersection",
    (BV, BV),
    BV,
    extra_check=operations.length_same_check,
    calc_length=operations.basic_length_calc,
    bound=False,
)

# ---------------------------------------------------------------------------
# Bound operations (attached to the BV class as dunder/named methods).
# ---------------------------------------------------------------------------

BV.__add__ = operations.op(
    "__add__", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
BV.__radd__ = operations.reversed_op(BV.__add__)
BV.__floordiv__ = operations.op(
    "__floordiv__", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
BV.__rfloordiv__ = operations.reversed_op(BV.__floordiv__)
# True division on bitvectors is integer division; kept as a deprecated alias.
BV.__truediv__ = deprecated("BV.__floordiv__", "BV.__truediv__")(BV.__floordiv__)
BV.__rtruediv__ = deprecated("BV.__rfloordiv__", "BV.__rtruediv__")(BV.__rfloordiv__)
BV.__mul__ = operations.op(
    "__mul__", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
BV.__rmul__ = operations.reversed_op(BV.__mul__)
BV.__sub__ = operations.op(
    "__sub__", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
BV.__rsub__ = operations.reversed_op(BV.__sub__)
BV.__pow__ = operations.op(
    "__pow__", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
BV.__rpow__ = operations.reversed_op(BV.__pow__)
BV.__mod__ = operations.op(
    "__mod__", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
BV.__rmod__ = operations.reversed_op(BV.__mod__)
# NOTE(review): BV.SDiv / BV.SMod pass bound=False here unlike the other
# attached operations -- confirm this is intentional.
BV.SDiv = operations.op(
    "SDiv",
    (BV, BV),
    BV,
    extra_check=operations.length_same_check,
    bound=False,
    calc_length=operations.basic_length_calc,
)
BV.SMod = operations.op(
    "SMod",
    (BV, BV),
    BV,
    extra_check=operations.length_same_check,
    bound=False,
    calc_length=operations.basic_length_calc,
)
BV.__neg__ = operations.op("__neg__", (BV,), BV, calc_length=operations.basic_length_calc)
BV.__pos__ = lambda x: x
BV.__abs__ = operations.op("__abs__", (BV,), BV, calc_length=operations.basic_length_calc)
# Python's rich comparisons are the *unsigned* comparisons; the named
# SLT/SGT/... attributes are the signed ones.
BV.__eq__ = operations.op("__eq__", (BV, BV), Bool, extra_check=operations.length_same_check)
BV.__ne__ = operations.op("__ne__", (BV, BV), Bool, extra_check=operations.length_same_check)
BV.__ge__ = operations.op("__ge__", (BV, BV), Bool, extra_check=operations.length_same_check)
BV.__le__ = operations.op("__le__", (BV, BV), Bool, extra_check=operations.length_same_check)
BV.__gt__ = operations.op("__gt__", (BV, BV), Bool, extra_check=operations.length_same_check)
BV.__lt__ = operations.op("__lt__", (BV, BV), Bool, extra_check=operations.length_same_check)
BV.SLT = operations.op("SLT", (BV, BV), Bool, extra_check=operations.length_same_check)
BV.SGT = operations.op("SGT", (BV, BV), Bool, extra_check=operations.length_same_check)
BV.SLE = operations.op("SLE", (BV, BV), Bool, extra_check=operations.length_same_check)
BV.SGE = operations.op("SGE", (BV, BV), Bool, extra_check=operations.length_same_check)
BV.ULT = operations.op("__lt__", (BV, BV), Bool, extra_check=operations.length_same_check)
BV.UGT = operations.op("__gt__", (BV, BV), Bool, extra_check=operations.length_same_check)
BV.ULE = operations.op("__le__", (BV, BV), Bool, extra_check=operations.length_same_check)
BV.UGE = operations.op("__ge__", (BV, BV), Bool, extra_check=operations.length_same_check)
BV.__invert__ = operations.op("__invert__", (BV,), BV, calc_length=operations.basic_length_calc)
BV.__or__ = operations.op(
    "__or__", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
BV.__ror__ = operations.reversed_op(BV.__or__)
BV.__and__ = operations.op(
    "__and__", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
BV.__rand__ = operations.reversed_op(BV.__and__)
BV.__xor__ = operations.op(
    "__xor__", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
BV.__rxor__ = operations.reversed_op(BV.__xor__)
BV.__lshift__ = operations.op(
    "__lshift__", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
BV.__rlshift__ = operations.reversed_op(BV.__lshift__)
# >> is the *arithmetic* shift right; LShR is the logical one.
BV.__rshift__ = operations.op(
    "__rshift__", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
BV.__rrshift__ = operations.reversed_op(BV.__rshift__)
BV.LShR = operations.op(
    "LShR", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
BV.Extract = staticmethod(
    operations.op(
        "Extract",
        (int, int, BV),
        BV,
        extra_check=operations.extract_check,
        calc_length=operations.extract_length_calc,
        bound=False,
    )
)
BV.Concat = staticmethod(operations.op("Concat", BV, BV, calc_length=operations.concat_length_calc, bound=False))
BV.reversed = property(operations.op("Reverse", (BV,), BV, calc_length=operations.basic_length_calc))
BV.union = operations.op(
    "union", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
BV.widen = operations.op(
    "widen", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
BV.intersection = operations.op(
    "intersection", (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc
)
from . import fp
from .. import vsa
from ..errors import ClaripyValueError

View File

@@ -0,0 +1,226 @@
import struct
from .bits import Bits
from ..ast.base import _make_name
from ..fp import FSORT_FLOAT
class FP(Bits):
    """
    An AST whose operations resolve to an IEEE754 floating point number.

    Do not instantiate this class directly; build values with FPV and symbols
    with FPS, then combine them with the floating-point operations.

    :ivar length: The bit length of this value.
    :ivar sort:   The sort of this value, usually FSORT_FLOAT or FSORT_DOUBLE.
    """

    __slots__ = ()

    def to_fp(self, sort, rm=None):
        """
        Convert this float to a different sort.

        :param sort: The target sort.
        :param rm:   Optional rounding mode; defaults to fp.RM.default().
        :return:     An FP AST of the requested sort.
        """
        rounding = fp.RM.default() if rm is None else rm
        return fpToFP(rounding, self, sort)

    def raw_to_fp(self):
        """Counterpart to BV.raw_to_fp: an FP is already an FP, so return self."""
        return self

    def raw_to_bv(self):
        """
        Reinterpret this float's IEEE754 bit pattern as a bitvector.

        The inverse of this function is to_bv.

        :return: A BV AST carrying the same bit pattern as this FP.
        """
        return fpToIEEEBV(self)

    def to_bv(self):
        return self.raw_to_bv()

    def val_to_bv(self, size, signed=True, rm=None):
        """
        Round this floating point value to an integer, as a bitvector.

        :param size:   Width of the resulting bitvector.
        :param signed: Whether the target integer is signed.
        :param rm:     Optional rounding mode; defaults to fp.RM.default().
        :return:       A bitvector holding the rounded value.
        """
        rounding = fp.RM.default() if rm is None else rm
        if signed:
            return fpToSBV(rounding, self, size)
        return fpToUBV(rounding, self, size)

    @property
    def sort(self):
        # The sort is fully determined by the bit width.
        return fp.FSort.from_size(self.length)

    @staticmethod
    def _from_float(like, value):
        # Coerce any numeric/string input to float, keeping this AST's sort.
        return FPV(float(value), like.sort)

    # Ints and strings are coerced through the same float path.
    _from_int = _from_float
    _from_str = _from_float
def FPS(name, sort, explicit_name=None):
    """
    Create a floating-point symbol.

    :param name:          The name of the symbol.
    :param sort:          The sort of the floating point (e.g. FSORT_FLOAT).
    :param explicit_name: If False, an identifier is appended to the name to ensure uniqueness.
    :return:              An FP AST.
    """
    explicit = False if explicit_name is None else explicit_name
    full_name = _make_name(name, sort.length, explicit, prefix="FP_")
    return FP("FPS", (full_name, sort), variables={full_name}, symbolic=True, length=sort.length)
def FPV(value, sort):
    """
    Create a concrete floating-point value.

    :param value: The value of the floating point (an int or a float).
    :param sort:  The sort of the floating point (an fp.FSort).
    :raises TypeError: If value is not numeric or sort is not an FSort.
    :return: An FP AST.
    """
    # Deliberate exact-type checks (not isinstance): bool must not slip
    # through as an int, and FP subtypes are not expected here.
    if type(value) is int:
        value = float(value)
    elif type(value) is not float:
        # fix: error messages previously misspelled "instantiate"
        raise TypeError("Must instantiate FPV with a numerical value")

    if type(sort) is not fp.FSort:
        raise TypeError("Must instantiate FPV with a FSort")

    if sort == FSORT_FLOAT:
        # Python floats are double-precision; round-trip through a packed
        # 4-byte representation to get the nearest single-precision value.
        value = struct.unpack("f", struct.pack("f", value))[0]

    return FP("FPV", (value, sort), length=sort.length)
#
# unbound floating point conversions
#
from .. import operations
from .. import fp
from .bv import BV
from .bool import Bool
def _fp_length_calc(a1, a2, a3=None):
if isinstance(a1, fp.RM) and a3 is None:
raise Exception()
if a3 is None:
return a2.length
else:
return a3.length
# Unbound builders converting between bitvectors and floats (bound=False
# keeps them from being attached to the AST classes as methods).
fpToFP = operations.op("fpToFP", object, FP, bound=False, calc_length=_fp_length_calc)
fpToFPUnsigned = operations.op("fpToFPUnsigned", (fp.RM, BV, fp.FSort), FP, bound=False, calc_length=_fp_length_calc)
# fpFP assembles a float from sign / exponent / significand bitvector parts.
fpFP = operations.op("fpFP", (BV, BV, BV), FP, bound=False, calc_length=lambda a, b, c: a.length + b.length + c.length)
fpToIEEEBV = operations.op("fpToIEEEBV", (FP,), BV, bound=False, calc_length=lambda fp: fp.length)
# Note: the lambdas below intentionally keep the original parameter name
# `len` (shadows the builtin) to avoid touching code tokens.
fpToSBV = operations.op("fpToSBV", (fp.RM, FP, int), BV, bound=False, calc_length=lambda _rm, _fp, len: len)
fpToUBV = operations.op("fpToUBV", (fp.RM, FP, int), BV, bound=False, calc_length=lambda _rm, _fp, len: len)
#
# unbound float point comparisons
#
def _fp_cmp_check(a, b):
return a.length == b.length, "FP lengths must be the same"
# Unbound floating-point comparisons; _fp_cmp_check requires equal operand widths.
fpEQ = operations.op("fpEQ", (FP, FP), Bool, bound=False, extra_check=_fp_cmp_check)
fpGT = operations.op("fpGT", (FP, FP), Bool, bound=False, extra_check=_fp_cmp_check)
fpGEQ = operations.op("fpGEQ", (FP, FP), Bool, bound=False, extra_check=_fp_cmp_check)
fpLT = operations.op("fpLT", (FP, FP), Bool, bound=False, extra_check=_fp_cmp_check)
fpLEQ = operations.op("fpLEQ", (FP, FP), Bool, bound=False, extra_check=_fp_cmp_check)
# Unary classification predicates need no width check.
fpIsNaN = operations.op("fpIsNaN", (FP,), Bool, bound=False)
fpIsInf = operations.op("fpIsInf", (FP,), Bool, bound=False)
#
# unbound floating point arithmetic
#
def _fp_binop_check(rm, a, b): # pylint:disable=unused-argument
return a.length == b.length, "Lengths must be equal"
def _fp_binop_length(rm, a, b):  # pylint:disable=unused-argument
    # Result width of a binary FP op matches its (equal-width, per
    # _fp_binop_check) operands; the rounding mode is irrelevant here.
    return a.length
# Unbound floating-point arithmetic.  Unary ops keep the operand's width;
# binary ops take a rounding mode first and require equal operand widths.
fpAbs = operations.op("fpAbs", (FP,), FP, bound=False, calc_length=lambda x: x.length)
fpNeg = operations.op("fpNeg", (FP,), FP, bound=False, calc_length=lambda x: x.length)
fpSub = operations.op(
    "fpSub", (fp.RM, FP, FP), FP, bound=False, extra_check=_fp_binop_check, calc_length=_fp_binop_length
)
fpAdd = operations.op(
    "fpAdd", (fp.RM, FP, FP), FP, bound=False, extra_check=_fp_binop_check, calc_length=_fp_binop_length
)
fpMul = operations.op(
    "fpMul", (fp.RM, FP, FP), FP, bound=False, extra_check=_fp_binop_check, calc_length=_fp_binop_length
)
fpDiv = operations.op(
    "fpDiv", (fp.RM, FP, FP), FP, bound=False, extra_check=_fp_binop_check, calc_length=_fp_binop_length
)
# Sqrt is unary but still takes a rounding mode, hence the two-arg lambda.
fpSqrt = operations.op(
    "fpSqrt",
    (
        fp.RM,
        FP,
    ),
    FP,
    bound=False,
    calc_length=lambda _, x: x.length,
)
#
# bound fp operations
#
# Rich-comparison dunders map onto the fp* comparison ops; note that __ne__
# uses a dedicated "fpNE" operation rather than negating fpEQ.
FP.__eq__ = operations.op("fpEQ", (FP, FP), Bool, extra_check=_fp_cmp_check)
FP.__ne__ = operations.op("fpNE", (FP, FP), Bool, extra_check=_fp_cmp_check)
FP.__ge__ = operations.op("fpGEQ", (FP, FP), Bool, extra_check=_fp_cmp_check)
FP.__le__ = operations.op("fpLEQ", (FP, FP), Bool, extra_check=_fp_cmp_check)
FP.__gt__ = operations.op("fpGT", (FP, FP), Bool, extra_check=_fp_cmp_check)
FP.__lt__ = operations.op("fpLT", (FP, FP), Bool, extra_check=_fp_cmp_check)
# Arithmetic dunders reuse the unbound builders directly.
# NOTE(review): those builders expect a leading rounding-mode argument --
# presumably the operations layer supplies a default; confirm in operations.op.
FP.__abs__ = fpAbs
FP.__neg__ = fpNeg
FP.__add__ = fpAdd
FP.__sub__ = fpSub
FP.__mul__ = fpMul
FP.__truediv__ = fpDiv
FP.Sqrt = fpSqrt
# Reflected variants delegate with swapped operands.
FP.__radd__ = operations.reversed_op(FP.__add__)
FP.__rsub__ = operations.reversed_op(FP.__sub__)
FP.__rmul__ = operations.reversed_op(FP.__mul__)
FP.__rtruediv__ = operations.reversed_op(FP.__truediv__)
FP.isNaN = fpIsNaN
FP.isInf = fpIsInf

View File

@@ -0,0 +1,5 @@
from ..ast.base import Base
class Int(Base):
    """
    AST class for plain integer expressions.  It defines no behavior of its
    own beyond what Base provides; it exists so integer arguments can
    participate in the AST type system.
    """

    __slots__ = ()

View File

@@ -0,0 +1,210 @@
from typing import Optional
from .bits import Bits
from ..ast.base import _make_name
from .. import operations
from .bool import Bool
from .bv import BV, BVS, BVV
class String(Bits):
    """
    Base class that represent the AST of a String object and implements all the operation useful to create and modify the AST.

    Do not instantiate this class directly, instead use StringS or StringV to construct a symbol or value, and then use
    operations to construct more complicated expressions.
    """

    __slots__ = ("string_length",)

    # Identifier used by composite solver in order to identify if a certain constraints contains
    # variables of type string... In this case cvc4 would handle the solving part.
    #
    # TODO: Find a smarter way to do this!
    STRING_TYPE_IDENTIFIER = "STRING_"
    GENERATED_BVS_IDENTIFIER = "BVS_"
    MAX_LENGTH = 10000

    def __init__(self, *args, length: int, **kwargs):
        """
        :param length: The string byte length

        NOTE(review): StringS/StringV below pass ``length=8 * size`` (bits)
        to this constructor while this docstring says bytes, yet __getitem__
        treats ``string_length`` as a byte count -- confirm which unit is
        actually intended.
        """
        super().__init__(*args, length=length, **kwargs)
        # Cached copy of the constructor's length argument (see unit caveat above).
        self.string_length: int = length

    def __getitem__(self, rng):
        """
        This is a big endian indexer that takes its arguments as bits but returns bytes.

        Returns the range of bits in self defined by: [rng.start, rng.end], indexed from the end
        The bit range may not internally divide any bytes; i.e. low and (high+1) must both be divisible by 8

        Examples:
            self[7:0] -- returns the last byte of self
            self[15:0] -- returns the last two bytes of self
            self[8:0] -- Error! [8:0] is 9 bits, it asks for individual bits of the second to last byte!
            self[8:1] -- Error! [8:1] asks for 1 bit from the second to last byte and 7 from the last byte!
        """
        if type(rng) is slice:
            # Missing bounds default to bit 0 / the start of the second-to-last byte.
            bits_low = rng.start if rng.start is not None else 0
            bits_high = rng.stop if rng.stop is not None else 8 * (self.string_length - 1)
            if bits_high % 8 != 0 or (bits_low + 1) % 8 != 0:
                raise ValueError("Bit indicies must not internally divide bytes!")

            # high / low form a reverse-indexed byte index
            high = bits_high // 8
            low = bits_low // 8
            # Negative indices wrap around, Python-style.
            if high < 0:
                high = self.string_length + high
            if low < 0:
                low = self.string_length + low

            # StrSubstr takes a front-indexed byte index as a starting point, and a length
            start_idx = self.string_length - 1 - low
            if high > low:
                # NOTE(review): with reverse indexing a forward range has
                # high <= low; high > low produces a zero-length substring
                # here -- confirm this is the intended degenerate behavior.
                return StrSubstr(start_idx, 0, self)
            return StrSubstr(start_idx, 1 + low - high, self)
        else:
            raise ValueError("Only slices allowed for string extraction")

    @staticmethod
    # TODO: Figure out what to convert here
    #
    # The use case that i found useful here is during the concretization
    # of a symbolic address in memory.
    # When angr has to do this it creates a claripy.If constraints in this form
    # If(condition, <value_read_if_condition_true>, <value_read_if_condition_false>).
    # In some case <value_read_if_condition_false> can be MEM_UNINITIALIZED expressed as BVV
    #
    # What should we return in this case?
    def _from_BV(like, value):  # pylint: disable=unused-argument
        # Currently a pass-through: the BV is returned unconverted (see TODO above).
        return value

    @staticmethod
    def _from_str(like, value):  # pylint: disable=unused-argument
        # Plain Python strings are lifted to concrete String ASTs.
        return StringV(value)

    def strReplace(self, str_to_replace, replacement):
        """
        Replace the first occurence of str_to_replace with replacement

        :param claripy.ast.String str_to_replace: pattern that has to be replaced
        :param claripy.ast.String replacement: replacement pattern
        """
        return StrReplace(self, str_to_replace, replacement)

    def toInt(self, bitlength):
        """
        Convert the string to a bitvector holding the integer
        representation of the string

        :param bitlength: size of the biitvector holding the result
        """
        return StrToInt(self, bitlength)

    def indexOf(self, pattern, start_idx, bitlength):
        """
        Return the start index of the pattern inside the input string in a
        Bitvector representation, otherwise it returns -1 (always using a BitVector)

        :param bitlength: size of the biitvector holding the result
        """
        return StrIndexOf(self, pattern, start_idx, bitlength)

    def raw_to_bv(self):
        """
        A counterpart to FP.raw_to_bv - does nothing and returns itself.
        """
        if self.symbolic:
            # Re-label the symbol so it is no longer recognized as a string
            # variable (see STRING_TYPE_IDENTIFIER), keeping the same width.
            return BVS(
                next(iter(self.variables)).replace(self.STRING_TYPE_IDENTIFIER, self.GENERATED_BVS_IDENTIFIER),
                self.length,
            )
        else:
            # NOTE(review): ord() accepts only a single character -- a
            # concrete string longer than one byte raises TypeError here;
            # confirm whether multi-byte concrete strings ever reach this path.
            return BVV(ord(self.args[0]), self.length)

    def raw_to_fp(self):
        # Reuse the BV view and reinterpret its bits as a float.
        return self.raw_to_bv().raw_to_fp()
def StringS(name, size, uninitialized=False, explicit_name=False, **kwargs):
    """
    Create a new symbolic string (analogous to z3.String())

    :param name: The name of the symbolic string (i. e. the name of the variable)
    :param size: The size in bytes of the string (i. e. the length of the string)
    :param uninitialized: Whether this value should be counted as an "uninitialized" value in the course of an
                          analysis.
    :param bool explicit_name: If False, an identifier is appended to the name to ensure uniqueness.

    :returns: The String object representing the symbolic string
    """
    explicit = False if explicit_name is None else explicit_name
    symbol_name = _make_name(String.STRING_TYPE_IDENTIFIER + name, size, explicit)
    return String(
        "StringS",
        (symbol_name, uninitialized),
        length=8 * size,
        symbolic=True,
        eager_backends=None,
        uninitialized=uninitialized,
        variables={symbol_name},
        **kwargs,
    )
def StringV(value, length: Optional[int] = None, **kwargs):
    """
    Create a new Concrete string (analogous to z3.StringVal())

    :param value: The constant value of the concrete string
    :param length: The byte length of the string; defaults to len(value)

    :returns: The String object representing the concrete string
    """
    byte_length = len(value) if length is None else length
    if byte_length < len(value):
        raise ValueError("Can't make a concrete string value longer than the specified length!")
    return String("StringV", (value, byte_length), length=8 * byte_length, **kwargs)
# Unbound string operation builders (bound=False keeps them off the class);
# the calc_length helpers size the resulting String/BV appropriately.
StrConcat = operations.op("StrConcat", String, String, calc_length=operations.str_concat_length_calc, bound=False)
# StrSubstr(start_idx, count, str): front-indexed byte start plus a byte count.
StrSubstr = operations.op("StrSubstr", (BV, BV, String), String, calc_length=operations.substr_length_calc, bound=False)
StrLen = operations.op("StrLen", (String, int), BV, calc_length=operations.strlen_bv_size_calc, bound=False)
StrReplace = operations.op(
    "StrReplace",
    (String, String, String),
    String,
    extra_check=operations.str_replace_check,
    calc_length=operations.str_replace_length_calc,
    bound=False,
)
# Boolean predicates over strings.
StrContains = operations.op("StrContains", (String, String), Bool, bound=False)
StrPrefixOf = operations.op("StrPrefixOf", (String, String), Bool, bound=False)
StrSuffixOf = operations.op("StrSuffixOf", (String, String), Bool, bound=False)
# String <-> integer conversions and searches return bitvectors of caller-chosen width.
StrIndexOf = operations.op(
    "StrIndexOf", (String, String, BV, int), BV, calc_length=operations.strindexof_bv_size_calc, bound=False
)
StrToInt = operations.op("StrToInt", (String, int), BV, calc_length=operations.strtoint_bv_size_calc, bound=False)
IntToStr = operations.op("IntToStr", (BV,), String, calc_length=operations.int_to_str_length_calc, bound=False)
StrIsDigit = operations.op("StrIsDigit", (String,), Bool, bound=False)

# Equality / inequality check
String.__eq__ = operations.op("__eq__", (String, String), Bool)
String.__ne__ = operations.op("__ne__", (String, String), Bool)

# String manipulation
String.__add__ = StrConcat
# The unbound builders are also exposed as staticmethods on the class.
String.StrSubstr = staticmethod(StrSubstr)
String.StrConcat = staticmethod(StrConcat)
String.StrLen = staticmethod(StrLen)
String.StrReplace = staticmethod(StrReplace)
String.StrContains = staticmethod(StrContains)
String.StrPrefixOf = staticmethod(StrPrefixOf)
String.StrSuffixOf = staticmethod(StrSuffixOf)
String.StrIndexOf = staticmethod(StrIndexOf)
String.StrToInt = staticmethod(StrToInt)
String.StrIsDigit = staticmethod(StrIsDigit)
String.IntToStr = staticmethod(IntToStr)

View File

@@ -0,0 +1,5 @@
from .bits import Bits
class VS(Bits):
    """
    AST class for value-set (VSA) expressions.  It defines no behavior of its
    own beyond what Bits provides.
    """

    __slots__ = ()

View File

@@ -0,0 +1,30 @@
class BackendManager:
    """
    Registry of claripy backends, indexed both by short name and by class
    name.  Registered backends are also reachable as attributes of the
    manager (e.g. ``backends.concrete``).
    """

    def __init__(self):
        self._eager_backends = []
        self._quick_backends = []
        self._all_backends = []
        self._backends_by_type = {}
        self._backends_by_name = {}

    def _register_backend(self, b, name, eager, quick):
        """Register backend ``b`` under ``name``, optionally as eager and/or quick."""
        self._backends_by_name[name] = b
        self._backends_by_type[type(b).__name__] = b
        self._all_backends.append(b)
        if eager:
            self._eager_backends.append(b)
        if quick:
            self._quick_backends.append(b)

    def __getattr__(self, a):
        # Expose registered backends as attributes; anything else is a
        # normal missing attribute.
        try:
            return self._backends_by_name[a]
        except KeyError:
            raise AttributeError(a) from None

    def downsize(self):
        """Ask every registered backend to drop its caches."""
        for backend in self._all_backends:
            backend.downsize()


backends = BackendManager()

View File

@@ -0,0 +1,17 @@
class BackendObject:
    """
    Base class for custom backend objects.

    Deriving from this class tells claripy that instances may be used
    directly in operations; backend objects that do *not* derive from it
    must be wrapped in a type-I claripy.ast.Base.
    """

    __slots__ = ()

    def to_claripy(self):
        """Return something claripy can directly reason about: the object itself."""
        return self

View File

@@ -0,0 +1,854 @@
import ctypes
import weakref
import operator
import threading
import numbers
import logging
l = logging.getLogger("claripy.backend")
class Backend:
"""
Backends are Claripy's workhorses. Claripy exposes ASTs (claripy.ast.Base objects)
to the world, but when actual computation has to be done, it pushes those
ASTs into objects that can be handled by the backends themselves. This
provides a unified interface to the outside world while allowing Claripy to
support different types of computation. For example, BackendConcrete
provides computation support for concrete bitvectors and booleans,
BackendVSA introduces VSA constructs such as StridedIntervals (and details
what happens when operations are performed on them), and BackendZ3 provides
support for symbolic variables and constraint solving.
There are a set of functions that a backend is expected to implement. For
all of these functions, the "public" version is expected to be able to deal
with claripy.ast.Base objects, while the "private" version should only deal with
objects specific to the backend itself. This is distinguished with Python
idioms: a public function will be named func() while a private function
will be _func(). All functions should return objects that are usable by the
backend in its private methods. If this can't be done (i.e., some
functionality is being attempted that the backend can't handle), the
backend should raise a BackendError. In this case, Claripy will move on to
the next backend in its list.
All backends must implement a convert() function. This function receives a
claripy.ast.Base and should return an object that the backend can handle in its
private methods. Backends should also implement a _convert() method, which
will receive anything that is *not* a claripy.ast.Base object (i.e., an integer or
an object from a different backend). If convert() or _convert() receives
something that the backend can't translate to a format that is usable
internally, the backend should raise BackendError, and thus won't be used
for that object.
Claripy contract with its backends is as follows: backends should be able
to can handle, in their private functions, any object that they return from
their private *or* public functions. Likewise, Claripy will never pass an
object to any backend private function that did not originate as a return
value from a private or public function of that backend. One exception to
this is _convert(), as Claripy can try to stuff anything it feels like into
_convert() to see if the backend can handle that type of object.
"""
__slots__ = (
"_op_raw",
"_op_expr",
"_cache_objects",
"_solver_required",
"_tls",
"_true_cache",
"_false_cache",
)
    def __init__(self, solver_required=None):
        """Set up per-backend op dispatch tables and result caches."""
        self._op_raw = {}  # op name -> raw callable over converted backend objects
        self._op_expr = {}  # op name -> callable over whole ASTs
        self._cache_objects = True  # enables the per-thread converted-object cache
        # NOTE(review): any non-None value (including False) marks the solver
        # as required -- confirm callers only ever pass None or a truthy value.
        self._solver_required = solver_required is not None
        self._tls = threading.local()  # thread-local home for _object_cache
        self._true_cache = weakref.WeakKeyDictionary()  # AST cache key -> is_true result
        self._false_cache = weakref.WeakKeyDictionary()  # AST cache key -> is_false result
    @property
    def is_smt_backend(self):
        """Whether this backend is an SMT-solver backend; the base default is False."""
        return False
    @property
    def _object_cache(self):
        """
        Per-thread cache mapping AST cache keys to converted backend objects.

        Lazily created on first access in each thread (EAFP on the TLS
        attribute); weak keys let cached entries die with their ASTs.
        """
        try:
            return self._tls.object_cache
        except AttributeError:
            self._tls.object_cache = weakref.WeakKeyDictionary()
            return self._tls.object_cache
    def _make_raw_ops(self, op_list, op_dict=None, op_module=None):
        """
        Fill up `self._op_raw` from either a dict or a module of operations.

        :param op_list:   Names of the operations to register.
        :param op_dict:   Optional mapping of operation name -> callable.
        :param op_module: Module to pull operations from when op_dict is None.
        """
        for o in op_list:
            if op_dict is not None:
                if o in op_dict:
                    self._op_raw[o] = op_dict[o]
                else:
                    l.warning("Operation %s not in op_dict.", o)
            else:
                if hasattr(op_module, o):
                    self._op_raw[o] = getattr(op_module, o)
                else:
                    # Quieter than the op_dict branch: ops missing from a
                    # module are apparently expected sometimes.
                    l.debug("Operation %s not in op_module %s.", o, op_module)
def _make_expr_ops(self, op_list, op_dict=None, op_class=None):
"""
Fill up `self._op_expr` dict.
:param op_list: A list of operation names.
:param op_dict: A dictionary of operation methods.
:param op_class: Where the operation method comes from.
:return:
"""
for o in op_list:
if op_dict is not None:
if o in op_dict:
self._op_expr[o] = op_dict[o]
else:
l.warning("Operation %s not in op_dict.", o)
else:
if hasattr(op_class, o):
self._op_expr[o] = getattr(op_class, o)
else:
l.warning("Operation %s not in op_class %s.", o, op_class)
#
# These functions handle converting expressions to formats that the backend
# can understand.
#
    def _convert(self, r):  # pylint:disable=W0613,R0201
        """
        Converts `r` to something usable by this backend.

        The base implementation is the identity; subclasses override this to
        translate non-AST values or raise BackendError for unsupported input.
        """
        return r
def downsize(self):
"""
Clears all caches associated with this backend.
"""
self._object_cache.clear()
self._true_cache.clear()
self._false_cache.clear()
def handles(self, expr):
"""
Checks whether this backend can handle the expression.
:param expr: The expression.
:return: True if the backend can handle this expression, False if not.
"""
try:
self.convert(expr)
return True
except BackendError:
return False
    def convert(self, expr):  # pylint:disable=R0201
        """
        Resolves a claripy.ast.Base into something usable by the backend.

        Implemented as an explicit iterative post-order walk (three work
        lists) instead of recursion, so deep ASTs cannot overflow the Python
        stack.

        :param expr: The expression.
        :param save: Save the result in the expression's object cache
        :return: A backend object.
        """
        ast_queue = [[expr]]  # stack of "children still to convert" lists
        arg_queue = []  # converted results, accumulated in post-order
        op_queue = []  # ASTs whose children are currently being processed
        try:
            while ast_queue:
                args_list = ast_queue[-1]

                if args_list:
                    ast = args_list.pop(0)

                    # Leaves: non-AST values go straight through _convert().
                    if type(ast) in {bool, int, str, float} or not isinstance(ast, Base):
                        converted = self._convert(ast)
                        arg_queue.append(converted)
                        continue

                    # This backend already failed on this AST; fail fast.
                    if self in ast._errored:
                        raise BackendError(
                            "%s can't handle operation %s (%s) due to a failed "
                            "conversion on a child node" % (self, ast.op, ast.__class__.__name__)
                        )

                    if self._cache_objects:
                        cached_obj = self._object_cache.get(ast._cache_key, None)
                        if cached_obj is not None:
                            arg_queue.append(cached_obj)
                            continue

                    op_queue.append(ast)
                    if ast.op in self._op_expr:
                        # Whole-expression handler: children need no conversion,
                        # push a falsy sentinel so the next pass pops this node.
                        ast_queue.append(None)
                    else:
                        ast_queue.append(list(ast.args))
                else:
                    ast_queue.pop()

                    if op_queue:
                        ast = op_queue.pop()

                        op = self._op_expr.get(ast.op, None)
                        if op is not None:
                            r = op(ast)
                        else:
                            # Pop this node's converted children off the result stack.
                            args = arg_queue[-len(ast.args) :]
                            del arg_queue[-len(ast.args) :]
                            try:
                                r = self._call(ast.op, args)
                            except BackendUnsupportedError:
                                r = self.default_op(ast)

                        for a in ast.annotations:
                            r = self.apply_annotation(r, a)

                        if self._cache_objects:
                            self._object_cache[ast._cache_key] = r

                        arg_queue.append(r)

        except (RuntimeError, ctypes.ArgumentError) as e:
            raise ClaripyRecursionError("Recursion limit reached. Sorry about that.") from e
        except BackendError:
            # Mark every in-flight AST as un-convertible by this backend so
            # future attempts can fail fast (see the check above).
            for ast in op_queue:
                ast._errored.add(self)
            if isinstance(expr, Base):
                expr._errored.add(self)
            raise

        # Note: Uncomment the following assertions if you are touching the above implementation
        # assert len(op_queue) == 0, "op_queue is not empty"
        # assert len(ast_queue) == 0, "ast_queue is not empty"
        # assert len(arg_queue) == 1, ("arg_queue has unexpected length", len(arg_queue))

        return arg_queue.pop()
def convert_list(self, args):
return [a if isinstance(a, numbers.Number) else self.convert(a) for a in args]
#
# These functions provide support for applying operations to expressions.
#
    def call(self, op, args):
        """
        Calls operation `op` on args `args` with this backend.

        Arguments are first converted (ASTs resolved to backend objects,
        plain numbers passed through) before dispatching to _call().

        :return: A backend object representing the result.
        """
        converted = self.convert_list(args)
        return self._call(op, converted)
    def _call(self, op, args):
        """_call

        Dispatch `op` over already-converted backend objects: a registered
        raw handler wins; otherwise dunder ops fall back to the Python
        `operator` module.

        :param op: The operation name.
        :param args: Converted (backend-native) arguments.
        :return: A backend object.
        :raises BackendUnsupportedError: if neither a raw handler nor the
            Python operator can compute the result.
        """
        if op in self._op_raw:
            # the raw ops don't get the model, cause, for example, Z3 stuff can't take it
            obj = self._op_raw[op](*args)
        elif not op.startswith("__"):
            # Non-dunder ops have no generic fallback.
            l.debug("backend has no operation %s", op)
            raise BackendUnsupportedError
        else:
            obj = NotImplemented

            # first, try the operation with the first guy
            try:
                obj = getattr(operator, op)(*args)
            except (TypeError, ValueError):
                pass

        # Either path may produce NotImplemented; treat it as unsupported.
        if obj is NotImplemented:
            l.debug("received NotImplemented in %s.call() for operation %s", self, op)
            raise BackendUnsupportedError

        return obj
#
# Abstraction and resolution.
#
    def _abstract(self, e):  # pylint:disable=W0613,R0201
        """
        Abstracts the BackendObject e to an AST.

        The base implementation always raises; concrete backends override it.

        :param e: The backend object.
        :return: An AST.
        """
        raise BackendError("backend %s doesn't implement abstract()" % self.__class__.__name__)
#
# These functions simplify expressions.
#
    def simplify(self, e):
        """Simplify `e` natively, re-abstract it to an AST, and mark it fully simplified."""
        o = self._abstract(self._simplify(self.convert(e)))
        o._simplified = Base.FULL_SIMPLIFY
        return o
    def _simplify(self, e):  # pylint:disable=R0201,unused-argument
        """Native simplification hook; the base backend cannot simplify."""
        raise BackendError("backend %s can't simplify" % self.__class__.__name__)
#
# Some other helpers
#
    def is_true(self, e, extra_constraints=(), solver=None, model_callback=None):  # pylint:disable=unused-argument
        """
        Should return True if `e` can be easily found to be True.

        :param e: The AST.
        :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
        :param solver: A solver, for backends that require it.
        :param model_callback: a function that will be executed with recovered models (if any)
        :returns: A boolean.
        """
        # if self._solver_required and solver is None:
        #     raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)

        if not isinstance(e, Base):
            # Not an AST: nothing to cache against, evaluate directly.
            return self._is_true(
                self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback
            )

        try:
            return self._true_cache[e.cache_key]
        except KeyError:
            t = self._is_true(
                self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback
            )
            if len(extra_constraints) == 0:  # Only update cache when we have no extra constraints
                self._true_cache[e.cache_key] = t
                if t is True:
                    # A definitely-true expression is definitely not false.
                    self._false_cache[e.cache_key] = False
            return t
    def is_false(self, e, extra_constraints=(), solver=None, model_callback=None):  # pylint:disable=unused-argument
        """
        Should return True if e can be easily found to be False.

        :param e: The AST
        :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
        :param solver: A solver, for backends that require it
        :param model_callback: a function that will be executed with recovered models (if any)
        :return: A boolean.
        """
        # if self._solver_required and solver is None:
        #     raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)

        if not isinstance(e, Base):
            # Not an AST: nothing to cache against, evaluate directly.
            return self._is_false(
                self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback
            )

        try:
            return self._false_cache[e.cache_key]
        except KeyError:
            f = self._is_false(
                self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback
            )
            if len(extra_constraints) == 0:  # Only update cache when we have no extra constraints
                self._false_cache[e.cache_key] = f
                if f is True:
                    # A definitely-false expression is definitely not true.
                    self._true_cache[e.cache_key] = False
            return f
    def _is_false(
        self, e, extra_constraints=(), solver=None, model_callback=None
    ):  # pylint:disable=no-self-use,unused-argument
        """
        The native version of is_false.  Base implementation always raises;
        concrete backends override it.

        :param e: The backend object.
        :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
        :param solver: A solver, for backends that require it.
        :param model_callback: a function that will be executed with recovered models (if any)
        :return: A boolean.
        """
        raise BackendError("backend doesn't support _is_false")
    def _is_true(
        self, e, extra_constraints=(), solver=None, model_callback=None
    ):  # pylint:disable=no-self-use,unused-argument
        """
        The native version of is_true.  Base implementation always raises;
        concrete backends override it.

        :param e: The backend object.
        :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
        :param solver: A solver, for backends that require it.
        :param model_callback: a function that will be executed with recovered models (if any)
        :return: A boolean.
        """
        raise BackendError("backend doesn't support _is_true")
    def has_true(self, e, extra_constraints=(), solver=None, model_callback=None):  # pylint:disable=unused-argument
        """
        Should return True if `e` can possibly be True.

        :param e: The AST.
        :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
        :param solver: A solver, for backends that require it.
        :param model_callback: a function that will be executed with recovered models (if any)
        :return: A boolean
        """
        # if self._solver_required and solver is None:
        #     raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)

        return self._has_true(
            self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback
        )
    def has_false(self, e, extra_constraints=(), solver=None, model_callback=None):  # pylint:disable=unused-argument
        """
        Should return True if `e` can possibly be False.

        :param e: The AST.
        :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
        :param solver: A solver, for backends that require it.
        :param model_callback: a function that will be executed with recovered models (if any)
        :return: A boolean.
        """
        # if self._solver_required and solver is None:
        #     raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)

        return self._has_false(
            self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback
        )
    def _has_false(
        self, e, extra_constraints=(), solver=None, model_callback=None
    ):  # pylint:disable=no-self-use,unused-argument
        """
        The native version of :func:`has_false`.  Base implementation always
        raises; concrete backends override it.

        :param e: The backend object.
        :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
        :param solver: A solver, for backends that require it.
        :param model_callback: a function that will be executed with recovered models (if any)
        :return: A boolean.
        """
        raise BackendError("backend doesn't support _has_false")
    def _has_true(
        self, e, extra_constraints=(), solver=None, model_callback=None
    ):  # pylint:disable=no-self-use,unused-argument
        """
        The native version of :func:`has_true`.  Base implementation always
        raises; concrete backends override it.

        :param e: The backend object.
        :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
        :param solver: A solver, for backends that require it.
        :param model_callback: a function that will be executed with recovered models (if any)
        :return: A boolean.
        """
        raise BackendError("backend doesn't support _has_true")
#
# These functions are straight-up solver functions
#
    def solver(self, timeout=None):  # pylint:disable=no-self-use,unused-argument
        """
        This function should return an instance of whatever object handles
        solving for this backend. For example, in Z3, this would be z3.Solver().

        :param timeout: Optional solver timeout; interpretation is backend-specific.
        """
        raise BackendError("backend doesn't support solving")
    def add(self, s, c, track=False):
        """
        This function adds constraints to the backend solver.

        Note the argument order: the solver comes first, then the constraints.

        :param c: A sequence of ASTs
        :param s: A backend solver object
        :param bool track: True to enable constraint tracking, which is used in unsat_core()
        """
        return self._add(s, self.convert_list(c), track=track)
    def _add(self, s, c, track=False):  # pylint:disable=no-self-use,unused-argument
        """
        This function adds constraints to the backend solver.  Base
        implementation always raises; concrete backends override it.

        :param c: sequence of converted backend objects
        :param s: backend solver object
        :param bool track: True to enable constraint tracking, which is used in unsat_core().
        """
        raise BackendError("backend doesn't support solving")
    def unsat_core(self, s):
        """
        This function returns the unsat core from the backend solver,
        re-abstracted into ASTs.

        :param s: A backend solver object.
        :return: The unsat core.
        """
        return [self._abstract(core) for core in self._unsat_core(s)]
    def _unsat_core(self, s):  # pylint:disable=no-self-use,unused-argument
        """
        This function returns the unsat core from the backend solver.  Base
        implementation always raises; concrete backends override it.

        :param s: A backend solver object.
        :return: The unsat core.
        """
        raise BackendError("backend doesn't support unsat_core")
#
# These functions provide evaluation support.
#
    def eval(self, expr, n, extra_constraints=(), solver=None, model_callback=None):
        """
        This function returns up to `n` possible solutions for expression `expr`.

        Both the expression and the extra constraints are converted before
        being handed to the native _eval().

        :param expr: expression (an AST) to evaluate
        :param n: number of results to return
        :param solver: a solver object, native to the backend, to assist in
                       the evaluation (for example, a z3.Solver)
        :param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve
        :param model_callback: a function that will be executed with recovered models (if any)
        :return: A sequence of up to n results (backend objects)
        """
        if self._solver_required and solver is None:
            raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)

        return self._eval(
            self.convert(expr),
            n,
            extra_constraints=self.convert_list(extra_constraints),
            solver=solver,
            model_callback=model_callback,
        )
    def _eval(
        self, expr, n, extra_constraints=(), solver=None, model_callback=None
    ):  # pylint:disable=unused-argument,no-self-use
        """
        This function returns up to `n` possible solutions for expression `expr`.
        Base implementation always raises; concrete backends override it.

        :param expr: An expression (backend object) to evaluate.
        :param n: A number of results to return.
        :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
        :param solver: A solver object, native to the backend, to assist in the evaluation (for example, a
                       z3.Solver).
        :param model_callback: a function that will be executed with recovered models (if any)
        :return: A sequence of up to n results (backend objects).
        """
        raise BackendError("backend doesn't support eval()")
def batch_eval(self, exprs, n, extra_constraints=(), solver=None, model_callback=None):
    """
    Evaluate one or multiple expressions at once.

    :param exprs: A list of expressions (ASTs) to evaluate.
    :param n: Number of different solutions to return.
    :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
    :param solver: A solver object, native to the backend, to assist in the evaluation.
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: A list of up to n tuples, where each tuple is a solution for all expressions.
    """
    if solver is None and self._solver_required:
        raise BackendError("%s requires a solver for batch evaluation" % self.__class__.__name__)
    converted = list(map(self.convert, exprs))
    return self._batch_eval(
        converted,
        n,
        extra_constraints=self.convert_list(extra_constraints),
        solver=solver,
        model_callback=model_callback,
    )
def _batch_eval(
    self, exprs, n, extra_constraints=(), solver=None, model_callback=None
):  # pylint:disable=unused-argument,no-self-use
    """
    Backend-native version of ``batch_eval()``. Solving backends override this.

    :param exprs: A list of expressions (backend objects) to evaluate.
    :param n: Number of different solutions to return.
    :param extra_constraints: Extra constraints (backend objects) to add to the solver for this solve.
    :param solver: A solver object, native to the backend.
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: A list of up to n tuples, where each tuple is a solution for all expressions.
    """
    raise BackendError("backend doesn't support batch_eval()")
def min(self, expr, extra_constraints=(), signed=False, solver=None, model_callback=None):
    """
    Return the minimum value of `expr`.

    :param expr: expression (an AST) to evaluate
    :param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve
    :param signed: whether to solve for the minimum signed integer instead of the unsigned min
    :param solver: a solver object, native to the backend (for example, a z3.Solver)
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: the minimum possible value of expr (backend object)
    """
    if solver is None and self._solver_required:
        raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)
    converted_expr = self.convert(expr)
    converted_csts = self.convert_list(extra_constraints)
    return self._min(
        converted_expr, extra_constraints=converted_csts, signed=signed, solver=solver, model_callback=model_callback
    )
def _min(
    self, expr, extra_constraints=(), signed=False, solver=None, model_callback=None
):  # pylint:disable=unused-argument,no-self-use
    """
    Backend-native version of ``min()``. Solving backends override this.

    :param expr: expression (backend object) to evaluate
    :param extra_constraints: extra constraints (backend objects) to add to the solver for this solve
    :param signed: whether to solve for the minimum signed integer instead of the unsigned min
    :param solver: a solver object, native to the backend (for example, a z3.Solver)
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: the minimum possible value of expr (backend object)
    """
    raise BackendError("backend doesn't support min()")
def max(self, expr, extra_constraints=(), signed=False, solver=None, model_callback=None):
    """
    Return the maximum value of `expr`.

    :param expr: expression (an AST) to evaluate
    :param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve
    :param signed: whether to solve for the maximum signed integer instead of the unsigned max
    :param solver: a solver object, native to the backend (for example, a z3.Solver)
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: the maximum possible value of expr (backend object)
    """
    if solver is None and self._solver_required:
        raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)
    converted_expr = self.convert(expr)
    converted_csts = self.convert_list(extra_constraints)
    return self._max(
        converted_expr, extra_constraints=converted_csts, signed=signed, solver=solver, model_callback=model_callback
    )
def _max(
    self, expr, extra_constraints=(), signed=False, solver=None, model_callback=None
):  # pylint:disable=unused-argument,no-self-use
    """
    Backend-native version of ``max()``. Solving backends override this.

    :param expr: expression (backend object) to evaluate
    :param extra_constraints: extra constraints (backend objects) to add to the solver for this solve
    :param signed: whether to solve for the maximum signed integer instead of the unsigned max
    :param solver: a solver object, native to the backend (for example, a z3.Solver)
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: the maximum possible value of expr (backend object)
    """
    raise BackendError("backend doesn't support max()")
def check_satisfiability(self, extra_constraints=(), solver=None, model_callback=None):
    """
    Run a constraint check and report the solver's state.

    :param extra_constraints: Extra constraints (as ASTs) to add for this solve
    :param solver: The backend solver object.
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: 'SAT', 'UNSAT', or 'UNKNOWN'
    """
    converted = self.convert_list(extra_constraints)
    return self._check_satisfiability(extra_constraints=converted, solver=solver, model_callback=model_callback)
def _check_satisfiability(self, extra_constraints=(), solver=None, model_callback=None):
    """
    Backend-native constraint check returning the solver's state.

    The base implementation reduces the three-valued answer to the boolean
    ``satisfiable()``; backends that can report 'UNKNOWN' override this.

    :param extra_constraints: Extra constraints (backend objects) to add for this solve
    :param solver: The backend solver object.
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: 'SAT', 'UNSAT', or 'UNKNOWN'
    """
    if self.satisfiable(extra_constraints=extra_constraints, solver=solver, model_callback=model_callback):
        return "SAT"
    return "UNSAT"
def satisfiable(self, extra_constraints=(), solver=None, model_callback=None):
    """
    Run a constraint check and report whether the solver is in a sat state.

    :param extra_constraints: Extra constraints (as ASTs) to add for this solve
    :param solver: The backend solver object.
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: True if sat, otherwise false
    """
    converted = self.convert_list(extra_constraints)
    return self._satisfiable(extra_constraints=converted, solver=solver, model_callback=model_callback)
def _satisfiable(
    self, extra_constraints=(), solver=None, model_callback=None
):  # pylint:disable=no-self-use,unused-argument
    """
    Backend-native satisfiability check. Solving backends override this.

    :param extra_constraints: Extra constraints (backend objects) to add for this solve
    :param solver: The backend solver object
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: True if sat, otherwise false
    """
    raise BackendError("backend doesn't support solving")
def solution(self, expr, v, extra_constraints=(), solver=None, model_callback=None):
    """
    Return True if `v` is a solution of `expr` with the extra constraints, False otherwise.

    :param expr: An expression (an AST) to evaluate
    :param v: The proposed solution (an AST)
    :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
    :param solver: A solver object, native to the backend (for example, a z3.Solver).
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: True if `v` is a solution of `expr`, False otherwise
    """
    if solver is None and self._solver_required:
        raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)
    converted_expr = self.convert(expr)
    converted_v = self.convert(v)
    converted_csts = self.convert_list(extra_constraints)
    return self._solution(
        converted_expr, converted_v, extra_constraints=converted_csts, solver=solver, model_callback=model_callback
    )
def _solution(
    self, expr, v, extra_constraints=(), solver=None, model_callback=None
):  # pylint:disable=unused-argument,no-self-use
    """
    Backend-native version of ``solution()``. Solving backends override this.

    :param expr: expression (backend object) to evaluate
    :param v: the proposed solution (backend object)
    :param extra_constraints: extra constraints (backend objects) to add to the solver for this solve
    :param solver: a solver object, native to the backend (for example, a z3.Solver)
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: True if v is a solution of expr, False otherwise
    """
    raise BackendError("backend doesn't support solution()")
#
# Some other methods
#
def name(self, a):
    """
    Return the name of the expression `a`.

    :param a: the AST to evaluate
    """
    converted = self.convert(a)
    return self._name(converted)
def _name(self, o):  # pylint:disable=no-self-use,unused-argument
    """
    Backend-native version of ``name()``: return the name of a backend object.

    :param o: the (backend-native) object
    """
    raise BackendError("backend doesn't support name()")
def identical(self, a, b):
    """
    Report whether `a` is identical to `b`. This isn't always clear: True
    means definitely identical; False means that, conservatively, it might
    not be.

    :param a: an AST
    :param b: another AST
    """
    converted_a = self.convert(a)
    converted_b = self.convert(b)
    return self._identical(converted_a, converted_b)
def _identical(self, a, b):  # pylint:disable=no-self-use,unused-argument
    """
    Native version of ``identical()``: whether backend object `a` is
    definitely identical to `b`.

    :param a: the (backend-native) object
    :param b: the (backend-native) object
    """
    raise BackendError("backend doesn't support identical()")
def cardinality(self, a):
    """
    Return a strict *over*-approximation of the number of values that the
    expression `a` can take on.

    :param a: The AST to evaluate
    :return: An integer
    """
    converted = self.convert(a)
    return self._cardinality(converted)
def _cardinality(self, b):  # pylint:disable=no-self-use,unused-argument
    """
    Native version of ``cardinality()``: a strict *over*-approximation of the
    number of values that a backend object can take on.

    :param b: The backend object to evaluate
    :return: An integer
    """
    raise BackendError("backend doesn't support cardinality()")
def singlevalued(self, a):
    """Whether the expression `a` can take on exactly one value."""
    card = self.cardinality(a)
    return card == 1
def multivalued(self, a):
    """Whether the expression `a` can take on more than one value."""
    card = self.cardinality(a)
    return card > 1
def apply_annotation(self, o, a):  # pylint:disable=no-self-use,unused-argument
    """
    Apply the annotation to the backend object and return a (possibly new)
    backend object. The base implementation ignores annotations entirely.

    :param o: A backend object.
    :param a: An Annotation object.
    :return: A backend object.
    """
    return o
def default_op(self, expr):
    # pylint: disable=unused-argument
    """Fallback handler for operations this backend does not implement."""
    raise BackendError(f"Backend {self} does not support operation {expr.op}")
from ..errors import BackendError, ClaripyRecursionError, BackendUnsupportedError
from .backend_z3 import BackendZ3
from .backend_z3_parallel import BackendZ3Parallel
from .backend_concrete import BackendConcrete
from .backend_vsa import BackendVSA
from ..ast.base import Base
# If you need support for multiple solvers, please import claripy.backends.backend_smtlib_solvers by yourself
# from .backend_smtlib_solvers import *

View File

@@ -0,0 +1,245 @@
# pylint:disable=duplicate-value,missing-class-docstring,wrong-import-position
import logging
import numbers
import operator
from functools import reduce
from . import BackendError, Backend
l = logging.getLogger("claripy.backends.backend_concrete")
class BackendConcrete(Backend):
    """
    A backend that evaluates everything eagerly over concrete values
    (bv.BVV, fp.FPV, strings.StringV, and plain Python bools/numbers).
    No solver is involved; symbolic input is rejected with BackendError.
    """

    __slots__ = ()

    def __init__(self):
        Backend.__init__(self)
        # Register the concrete implementations of the standard operations,
        # pulled directly from the bv/strings/fp op modules. "If" is handled
        # separately below since it needs a concrete bool condition.
        self._make_raw_ops(set(backend_operations) - {"If"}, op_module=bv)
        self._make_raw_ops(backend_strings_operations, op_module=strings)
        self._make_raw_ops(backend_fp_operations, op_module=fp)
        self._op_raw["If"] = self._If
        self._op_raw["BVV"] = self.BVV
        self._op_raw["StringV"] = self.StringV
        self._op_raw["FPV"] = self.FPV
        # reduceable: n-ary operations folded over all arguments
        self._op_raw["__add__"] = self._op_add
        self._op_raw["__sub__"] = self._op_sub
        self._op_raw["__mul__"] = self._op_mul
        self._op_raw["__or__"] = self._op_or
        self._op_raw["__xor__"] = self._op_xor
        self._op_raw["__and__"] = self._op_and
        # unary
        self._op_raw["__invert__"] = self._op_not
        self._op_raw["__neg__"] = self._op_neg
        self._op_raw["fpSqrt"] = self._op_fpSqrt
        # boolean ops
        self._op_raw["And"] = self._op_and
        self._op_raw["Or"] = self._op_or
        self._op_raw["Xor"] = self._op_xor
        self._op_raw["Not"] = self._op_boolnot
        self._cache_objects = False

    @staticmethod
    def BVV(value, size):
        """Build a concrete bitvector; a None value cannot be represented."""
        if value is None:
            raise BackendError("can't handle empty BVVs")
        return bv.BVV(value, size)

    @staticmethod
    def StringV(value, size):  # pylint: disable=unused-argument
        """Build a concrete string; empty strings are rejected here."""
        if not value:
            raise BackendError("can't handle empty Strings")
        return strings.StringV(value)

    @staticmethod
    def FPV(op, sort):
        """Build a concrete floating-point value of the given sort."""
        return fp.FPV(op, sort)

    # n-ary reductions folded left-to-right over the concrete operands

    @staticmethod
    def _op_add(*args):
        return reduce(operator.__add__, args)

    @staticmethod
    def _op_sub(*args):
        return reduce(operator.__sub__, args)

    @staticmethod
    def _op_mul(*args):
        return reduce(operator.__mul__, args)

    @staticmethod
    def _op_or(*args):
        return reduce(operator.__or__, args)

    @staticmethod
    def _op_xor(*args):
        return reduce(operator.__xor__, args)

    @staticmethod
    def _op_and(*args):
        return reduce(operator.__and__, args)

    # unary operations

    @staticmethod
    def _op_not(arg):
        return ~arg

    @staticmethod
    def _op_neg(arg):
        return -arg

    @staticmethod
    def _op_boolnot(arg):
        return not arg

    @staticmethod
    def _op_fpSqrt(rm, a):  # pylint:disable=unused-argument
        # the rounding mode is ignored for concrete square roots
        return a.fpSqrt()

    def convert(self, expr):
        """
        Override Backend.convert() to add fast paths for BVVs and BoolVs.
        """
        if type(expr) is BV:
            if expr.op == "BVV":
                # cache converted BVVs keyed by the AST's cache key
                cached_obj = self._object_cache.get(expr._cache_key, None)
                if cached_obj is None:
                    cached_obj = self.BVV(*expr.args)
                    self._object_cache[expr._cache_key] = cached_obj
                return cached_obj
        if type(expr) is Bool and expr.op == "BoolV":
            return expr.args[0]
        return super().convert(expr)

    def _If(self, b, t, f):  # pylint:disable=no-self-use,unused-argument
        # concrete ternary: the condition must already be a Python bool
        if not isinstance(b, bool):
            raise BackendError("BackendConcrete can't handle non-bool condition in If.")
        return t if b else f

    def _name(self, e):  # pylint:disable=unused-argument,no-self-use
        # concrete values carry no symbolic name
        return None

    def _identical(self, a, b):
        # two BVVs of different widths are never identical, even if their
        # integer values compare equal
        if type(a) is bv.BVV and type(b) is bv.BVV and a.size() != b.size():
            return False
        else:
            return a == b

    def _convert(self, a):
        """Accept already-concrete values; reject anything symbolic."""
        if type(a) in {int, str, bytes}:
            return a
        if isinstance(a, (numbers.Number, bv.BVV, fp.FPV, fp.RM, fp.FSort, strings.StringV)):
            return a
        raise BackendError("can't handle AST of type %s" % type(a))

    def _simplify(self, e):
        # concrete values are already as simple as they get
        return e

    def _abstract(self, e):  # pylint:disable=no-self-use
        """Wrap a concrete backend object back into an AST."""
        if isinstance(e, bv.BVV):
            return BVV(e.value, e.size())
        elif isinstance(e, bool):
            return BoolV(e)
        elif isinstance(e, fp.FPV):
            return FPV(e.value, e.sort)
        elif isinstance(e, strings.StringV):
            return StringV(e.value)
        else:
            raise BackendError(f"Couldn't abstract object of type {type(e)}")

    def _cardinality(self, b):
        # if we got here, it's a cardinality of 1
        return 1

    #
    # Evaluation functions
    #

    @staticmethod
    def _to_primitive(expr):
        """Unwrap a concrete backend object into a plain Python value."""
        if isinstance(expr, bv.BVV):
            return expr.value
        elif isinstance(expr, fp.FPV):
            return expr.value
        elif isinstance(expr, strings.StringV):
            return expr.value
        elif isinstance(expr, bool):
            return expr
        elif isinstance(expr, numbers.Number):
            return expr
        else:
            raise BackendError("idk how to turn this into a primitive")

    def _eval(self, expr, n, extra_constraints=(), solver=None, model_callback=None):
        # a concretely-false extra constraint means there is no solution
        if not all(extra_constraints):
            raise UnsatError("concrete False constraint in extra_constraints")
        return (self._to_primitive(expr),)

    def _batch_eval(self, exprs, n, extra_constraints=(), solver=None, model_callback=None):
        if not all(extra_constraints):
            raise UnsatError("concrete False constraint in extra_constraints")
        return [tuple(self._to_primitive(ex) for ex in exprs)]

    def _max(self, expr, extra_constraints=(), signed=False, solver=None, model_callback=None):
        # a concrete value is its own maximum
        if not all(extra_constraints):
            raise UnsatError("concrete False constraint in extra_constraints")
        return self._to_primitive(expr)

    def _min(self, expr, extra_constraints=(), signed=False, solver=None, model_callback=None):
        # a concrete value is its own minimum
        if not all(extra_constraints):
            raise UnsatError("concrete False constraint in extra_constraints")
        return self._to_primitive(expr)

    def _solution(self, expr, v, extra_constraints=(), solver=None, model_callback=None):
        if not all(extra_constraints):
            raise UnsatError("concrete False constraint in extra_constraints")
        return self.convert(expr) == v

    # Override Backend.is_true() for a better performance
    def is_true(self, e, extra_constraints=(), solver=None, model_callback=None):
        # fast paths for plain Python truthy/falsy scalars and BoolV ASTs
        if e in {True, 1, 1.0}:
            return True
        if e in {False, 0, 0.0}:
            return False
        if type(e) is Base and e.op == "BoolV" and len(e.args) == 1 and e.args[0] is True:
            return True
        return super().is_true(e, extra_constraints=extra_constraints, solver=solver, model_callback=model_callback)

    # Override Backend.is_false() for a better performance
    def is_false(self, e, extra_constraints=(), solver=None, model_callback=None):
        if e in {False, 0, 0.0}:
            return True
        if e in {True, 1, 1.0}:
            return False
        if type(e) is Base and e.op == "BoolV" and len(e.args) == 1 and e.args[0] is False:
            return True
        return super().is_false(e, extra_constraints=extra_constraints, solver=solver, model_callback=model_callback)

    # pylint:disable=singleton-comparison
    def _is_true(self, e, extra_constraints=(), solver=None, model_callback=None):
        return e == True

    def _is_false(self, e, extra_constraints=(), solver=None, model_callback=None):
        return e == False

    def _has_true(self, e, extra_constraints=(), solver=None, model_callback=None):
        return e == True

    def _has_false(self, e, extra_constraints=(), solver=None, model_callback=None):
        return e == False
from ..operations import backend_operations, backend_fp_operations, backend_strings_operations
from .. import bv, fp, strings
from ..ast import Base
from ..ast.bv import BV, BVV
from ..ast.strings import StringV
from ..ast.fp import FPV
from ..ast.bool import Bool, BoolV
from ..errors import UnsatError

View File

@@ -0,0 +1,322 @@
# pylint:disable=no-self-use
import logging
from pysmt.shortcuts import (
Symbol,
String,
StrConcat,
NotEquals,
StrSubstr,
Int,
StrLength,
StrReplace,
Bool,
Or,
LT,
LE,
GT,
GE,
StrContains,
StrPrefixOf,
StrSuffixOf,
StrIndexOf,
StrToInt,
Ite,
EqualsOrIff,
Minus,
Plus,
IntToStr,
Not,
And,
)
from pysmt.typing import STRING, INT, BOOL
l = logging.getLogger("claripy.backends.backend_smt")
from . import BackendError, Backend
def _expr_to_smtlib(e, daggify=True):
"""
Dump the symbol in its smt-format depending on its type
:param e: symbol to dump
:param daggify: The daggify parameter can be used to switch from a linear-size representation that uses let
operators to represent the formula as a dag or a simpler (but possibly exponential) representation
that expands the formula as a tree
:return string: smt-lib representation of the symbol
"""
if e.is_symbol():
return f"(declare-fun {e.symbol_name()} {e.symbol_type().as_smtlib()})"
else:
return "(assert %s)" % e.to_smtlib(daggify=daggify)
def _exprs_to_smtlib(*exprs, **kwargs):
    """
    Dump all given declarations and constraints in SMT-LIB format, one per
    line, with a trailing newline.

    :param exprs: variable declarations and constraints to dump
    :return string: smt-lib representation of the list of expressions
    """
    lines = [_expr_to_smtlib(e, **kwargs) for e in exprs]
    return "\n".join(lines) + "\n"
def _normalize_arguments(expr_left, expr_right):
    """
    Integers are emulated with bitvectors; when one side is a concrete
    bitvector and the other a string operation, convert the concrete
    bitvector into a pysmt Int (and put the string operation first) so
    the comparison type-checks.
    """
    if expr_left.is_str_op() and expr_right.is_bv_constant():
        return expr_left, Int(expr_right.bv_signed_value())
    if expr_left.is_bv_constant() and expr_right.is_str_op():
        return expr_right, Int(expr_left.bv_signed_value())
    return expr_left, expr_right
class BackendSMTLibBase(Backend):
    """
    Base backend that converts claripy ASTs into pysmt expressions, which are
    then dumped as SMT-LIB scripts for an external solver. Bitvectors are
    emulated with unbounded pysmt INTs (widths are not modeled).
    """

    def __init__(self, *args, **kwargs):
        # daggify: dump formulas with let-bindings (dag, linear size) instead
        # of a fully expanded (possibly exponential) tree.
        self.daggify = kwargs.pop("daggify", True)
        self.reuse_z3_solver = False
        Backend.__init__(self, *args, **kwargs)
        # ------------------- LEAF OPERATIONS -------------------
        self._op_expr["StringV"] = self.StringV
        self._op_expr["StringS"] = self.StringS
        self._op_expr["BoolV"] = self.BoolV
        self._op_expr["BoolS"] = self.BoolS
        self._op_expr["BVV"] = self.BVV
        self._op_expr["BVS"] = self.BVS
        # ------------------- BITVECTOR OPERATIONS -------------------
        # self._op_raw['Extract'] = self._op_raw_extract
        # self._op_raw['Concat'] = self._op_raw_concat
        self._op_raw["__add__"] = self._op_raw_add
        self._op_raw["__sub__"] = self._op_raw_sub
        # ------------------- GENERAL PURPOSE OPERATIONS -------------------
        self._op_raw["__eq__"] = self._op_raw_eq
        self._op_raw["__ne__"] = self._op_raw_ne
        self._op_raw["__lt__"] = self._op_raw_lt
        self._op_raw["__le__"] = self._op_raw_le
        self._op_raw["__gt__"] = self._op_raw_gt
        self._op_raw["__ge__"] = self._op_raw_ge
        self._op_raw["Or"] = self._op_raw_or
        self._op_raw["If"] = self._op_raw_if
        self._op_raw["Not"] = self._op_raw_not
        self._op_raw["And"] = self._op_raw_and
        # ------------------- STRINGS OPERATIONS -------------------
        self._op_raw["StrConcat"] = self._op_raw_str_concat
        self._op_raw["StrSubstr"] = self._op_raw_str_substr
        self._op_raw["StrLen"] = self._op_raw_str_strlen
        self._op_raw["StrReplace"] = self._op_raw_str_replace
        self._op_raw["StrContains"] = self._op_raw_str_contains
        self._op_raw["StrPrefixOf"] = self._op_raw_str_prefixof
        self._op_raw["StrSuffixOf"] = self._op_raw_str_suffixof
        self._op_raw["StrIndexOf"] = self._op_raw_str_indexof
        self._op_raw["StrToInt"] = self._op_raw_str_strtoint
        self._op_raw["StrIsDigit"] = self._op_raw_isdigit
        self._op_raw["IntToStr"] = self._op_raw_inttostr

    @property
    def is_smt_backend(self):
        # marker used elsewhere to distinguish SMT-LIB-emitting backends
        return True

    def _smtlib_exprs(self, exprs):
        # render expressions with this backend's configured daggify setting
        return _exprs_to_smtlib(*exprs, daggify=self.daggify)

    def _get_satisfiability_smt_script(self, constraints=(), variables=()):
        """
        Return an SMT script that declares all the symbols, asserts all the
        constraints, and checks their satisfiability (check-sat).

        :param constraints: constraints to assert in the script
        :param variables: symbol declarations to emit before the constraints
        :return string: smt-lib representation of the script that checks the satisfiability
        """
        smt_script = "(set-logic ALL)\n"
        smt_script += self._smtlib_exprs(variables)
        smt_script += self._smtlib_exprs(constraints)
        smt_script += "(check-sat)\n"
        return smt_script

    def _get_full_model_smt_script(self, constraints=(), variables=()):
        """
        Like ``_get_satisfiability_smt_script()``, but also enables model
        production and requests the full model (get-model) after check-sat.

        :param constraints: constraints to assert in the script
        :param variables: symbol declarations to emit before the constraints
        :return string: smt-lib representation of the script
        """
        smt_script = "(set-logic ALL)\n"
        smt_script += "(set-option :produce-models true)\n"
        smt_script += self._smtlib_exprs(variables)
        smt_script += self._smtlib_exprs(constraints)
        smt_script += "(check-sat)\n"
        smt_script += "(get-model)\n"
        return smt_script

    def _get_all_vars_and_constraints(self, solver=None, e_c=(), e_v=()):
        # Collect the solver's accumulated constraints plus the extras, and
        # every free variable they mention, sorted by name for deterministic
        # script output.
        all_csts = tuple(e_c) + (tuple(solver.constraints) if solver is not None else ())
        free_variables = set(e_v).union(*[c.get_free_variables() for c in all_csts])
        sorted_vars = sorted(free_variables, key=lambda s: s.symbol_name())
        return sorted_vars, all_csts

    def _check_satisfiability(self, extra_constraints=(), solver=None, model_callback=None):
        # this base class only renders SMT-LIB; solving lives in subclasses
        raise BackendError("Use a specialized backend for solving SMTLIB formatted constraints!")

    def _satisfiable(self, extra_constraints=(), solver=None, model_callback=None):
        raise BackendError("Use a specialized backend for solving SMTLIB formatted constraints!")

    # ------------------- LEAF OPERATIONS -------------------

    def StringV(self, ast):
        """Concrete string AST -> pysmt String constant."""
        content, _ = ast.args
        return String(content)

    def StringS(self, ast):
        """Symbolic string AST -> pysmt STRING symbol."""
        # TODO: check correct format
        # if format not correct throw exception BackError()
        name, _ = ast.args
        return Symbol(name, STRING)

    def BoolV(self, ast):
        return Bool(ast.args[0])

    def BoolS(self, ast):
        return Symbol(ast.args[0], BOOL)

    def BVV(self, ast):
        """Concrete bitvector AST -> pysmt Int (two's-complement signed value)."""
        val, _ = ast.args
        if val & (1 << (ast.length - 1)):
            # negative
            val = -((1 << ast.length) - val)
        return Int(val)

    def BVS(self, ast):
        # bitvectors are emulated as unbounded integers
        return Symbol(ast.args[0], INT)  # BVType(ast.length))

    # ------------------- BITVECTOR OPERATIONS -------------------

    def _op_raw_add(self, *args):
        return Plus(*args)

    def _op_raw_sub(self, *args):
        return Minus(*args)

    # ------------------- GENERAL PURPOSE OPERATIONS -------------------
    # Integers are emulated via bitvectors; each comparison first normalizes
    # any concrete bitvector operand into a pysmt Int (see
    # _normalize_arguments), since e.g. (= (str.len s) <bitvector>) is not a
    # valid SMT-LIB term.
    # TODO: implement logic for integer

    def _op_raw_eq(self, *args):
        return EqualsOrIff(*_normalize_arguments(*args))

    def _op_raw_ne(self, *args):
        return NotEquals(*_normalize_arguments(*args))

    def _op_raw_lt(self, *args):
        return LT(*_normalize_arguments(*args))

    def _op_raw_le(self, *args):
        return LE(*_normalize_arguments(*args))

    def _op_raw_gt(self, *args):
        return GT(*_normalize_arguments(*args))

    def _op_raw_ge(self, *args):
        return GE(*_normalize_arguments(*args))

    def _op_raw_or(self, *args):
        return Or(*args)

    def _op_raw_if(self, *args):
        return Ite(*args)

    def _op_raw_not(self, *args):
        return Not(*args)

    def _op_raw_and(self, *args):
        return And(*args)

    # ------------------- STRINGS OPERATIONS -------------------

    def _op_raw_str_concat(self, *args):
        return StrConcat(*args)

    def _op_raw_str_substr(self, *args):
        start_idx, count, symb = args
        start_idx_operand = start_idx
        count_operand = count
        # start_idx_operand = BVToNatural(start_idx) if start_idx.get_type().is_bv_type() else start_idx
        # count_operand = BVToNatural(count) if count.get_type().is_bv_type() else count
        return StrSubstr(symb, start_idx_operand, count_operand)

    def _op_raw_str_strlen(self, *args):
        return StrLength(args[0])

    def _op_raw_str_replace(self, *args):
        initial_str, pattern_to_replace, replacement_pattern = args
        return StrReplace(initial_str, pattern_to_replace, replacement_pattern)

    def _op_raw_str_contains(self, *args):
        input_string, substring = args
        return StrContains(input_string, substring)

    def _op_raw_str_prefixof(self, *args):
        prefix, input_string = args
        return StrPrefixOf(prefix, input_string)

    def _op_raw_str_suffixof(self, *args):
        suffix, input_string = args
        return StrSuffixOf(suffix, input_string)

    def _op_raw_str_indexof(self, *args):
        input_string, substring, start, bitlength = args  # pylint:disable=unused-variable
        return StrIndexOf(input_string, substring, start)

    def _op_raw_str_strtoint(self, *args):
        input_string, _ = args
        return StrToInt(input_string)

    def _op_raw_isdigit(self, input_string):
        # str.to.int returns -1 for non-numeric strings
        return NotEquals(StrToInt(input_string), Int(-1))

    def _op_raw_inttostr(self, input_bv):
        return IntToStr(input_bv)

View File

@@ -0,0 +1,239 @@
from io import StringIO
import hashlib
import os
from claripy.ast.bv import BV
from .. import BackendError
from ..backend_smtlib import BackendSMTLibBase
from ...smtlib_utils import SMTParser, make_pysmt_const_from_type
from pysmt.smtlib.parser import Tokenizer
from pysmt.shortcuts import NotEquals
class AbstractSMTLibSolverProxy:
    """
    Minimal line-oriented transport to an external SMT solver process.

    Subclasses supply the raw `write`/`read` (and `create_process`) primitives;
    this base builds the higher-level helpers (readline, read_sat, read_model)
    on top of them.
    """

    def write(self, smt):
        raise NotImplementedError

    def read(self, n):
        raise NotImplementedError

    def setup(self):
        # hook for subclasses; nothing to do by default
        pass

    def reset(self):
        # ask the solver to drop all asserted state
        self.write("(reset)\n")

    def readuntil(self, s):
        """Read byte-by-byte until the marker `s` has been seen; return everything read."""
        marker = s.encode()
        buf = b""
        while marker not in buf:
            buf += self.read(1)
        return buf

    def readline(self):
        return self.readuntil("\n")

    def writeline(self, l):
        return self.write(l + "\n")

    def read_sat(self):
        # (check-sat) answers with a single line: sat / unsat / unknown
        return self.readline().strip().decode("utf-8")

    def read_model(self):
        # a printed model ends with a closing paren on its own line
        return self.readuntil("\n)\n").strip().decode("utf-8")

    def create_process(self):
        raise NotImplementedError
class PopenSolverProxy(AbstractSMTLibSolverProxy):
    """
    Solver proxy backed by a subprocess.Popen-like object. The process is
    (re)created lazily whenever it is needed after construction or a
    terminate().
    """

    def __init__(self, p):
        super().__init__()
        self.p = p
        # constraints accumulated for later script generation
        self.constraints = []

    def _ensure_process(self):
        # lazily (re)spawn the solver process
        if self.p is None:
            self.p = self.create_process()
        return self.p

    def read(self, n):
        return self._ensure_process().stdout.read(n)

    def write(self, smt):
        proc = self._ensure_process()
        proc.stdin.write(smt.encode())
        proc.stdin.flush()

    def add_constraints(self, csts, track=False):
        # `track` is accepted for interface compatibility but unused here
        self.constraints.extend(csts)

    def terminate(self):
        self.p.terminate()
        self.p = None
class SMTLibSolverBackend(BackendSMTLibBase):
    """
    SMT-LIB backend that actually solves, by driving an external solver
    process through a solver proxy.
    """

    def __init__(self, *args, **kwargs):
        """
        :param smt_script_log_dir: optional directory where every generated
            SMT-LIB script is written for debugging (popped before forwarding
            the remaining kwargs to the base class).
        """
        self.smt_script_log_dir = kwargs.pop("smt_script_log_dir", None)
        # these backends always need a live solver process
        kwargs["solver_required"] = True
        super().__init__(*args, **kwargs)
def solver(self, timeout=None, max_memory=None):  # pylint:disable=no-self-use,unused-argument
    """
    Return an instance of whatever object handles solving for this backend
    (for example, z3.Solver() in Z3). Concrete subclasses must override this.
    """
    raise NotImplementedError
def _get_primitive_for_expr(self, model, e):
    """
    Substitute the model into `e`, simplify, and return the resulting
    constant value. Raises BackendError if substitution does not yield a
    constant.
    """
    substituted = e.substitute(model).simplify()
    if substituted.is_constant():
        return substituted.constant_value()
    raise BackendError(
        "CVC4 backend currently only supports requests for symbols directly! This is a weird one that doesn't "
        "turn constant after substitution??"
    )
def _simplify(self, e):
    # pysmt expressions are returned unchanged; no simplification is done here
    return e
def _is_false(self, e, extra_constraints=(), solver=None, model_callback=None):
    """Concretely false iff `e` is a constant whose value is False."""
    if not e.is_constant():
        return False
    return e.constant_value() is False
def _is_true(self, e, extra_constraints=(), solver=None, model_callback=None):
    """Concretely true iff `e` is a constant whose value is True."""
    if not e.is_constant():
        return False
    return e.constant_value() is True
def _batch_eval(self, exprs, n, extra_constraints=(), solver=None, model_callback=None):
    """Evaluate each expression independently via ``_eval()``."""
    results = []
    for e in exprs:
        results.append(
            self._eval(e, n, extra_constraints=extra_constraints, solver=solver, model_callback=model_callback)
        )
    return results
def _add(self, s, c, track=False):
    # forward constraints to the solver proxy; `track` is passed through
    # for interface compatibility
    s.add_constraints(c, track=track)
def _check_satness(self, solver=None, extra_constraints=(), model_callback=None, extra_variables=()):
    """
    Run a (check-sat) query against the external solver process.

    :param solver: the solver proxy to write the SMT script to
    :param extra_constraints: extra constraints (backend objects) for this check only
    :param model_callback: unused here; kept for interface compatibility
    :param extra_variables: extra free variables to declare in the script
    :return: "SAT", "UNSAT", or "UNKNOWN"
    :raises ValueError: if the solver's answer is not one of the three above
    """
    # `variables` rather than `vars`, to avoid shadowing the builtin
    variables, csts = self._get_all_vars_and_constraints(solver=solver, e_c=extra_constraints, e_v=extra_variables)
    smt_script = self._get_satisfiability_smt_script(constraints=csts, variables=variables)
    if self.smt_script_log_dir is not None:
        # log the exact script sent to the solver, keyed by content hash
        fname = f"check-sat_{hashlib.md5(smt_script.encode()).hexdigest()}.smt2"
        with open(os.path.join(self.smt_script_log_dir, fname), "wb") as f:
            f.write(smt_script.encode())
    solver.reset()
    solver.write(smt_script)
    sat = solver.read_sat().upper()
    if sat not in {"SAT", "UNSAT", "UNKNOWN"}:
        raise ValueError(f"Solver error, don't understand (check-sat) response: {repr(sat)}")
    # `sat` was already upper-cased above; the original's second .upper() was redundant
    return sat
def _check_satisfiability(self, extra_constraints=(), solver=None, model_callback=None, extra_variables=()):
    """Return 'SAT', 'UNSAT', or 'UNKNOWN' for the given constraints."""
    return self._check_satness(solver, extra_constraints, model_callback, extra_variables)
def _satisfiable(self, solver=None, extra_constraints=(), model_callback=None, extra_variables=()):
    """True iff the constraints are satisfiable; terminates the solver process."""
    result = self._check_satness(solver, extra_constraints, model_callback, extra_variables)
    # solver is done, terminate process
    solver.terminate()
    return result == "SAT"
def _get_model(self, solver=None, extra_constraints=(), extra_variables=()):
    """
    Run a full (get-model) query against *solver*.

    :return: ("sat", {symbol: value}, assignment_list) when satisfiable,
             otherwise (sat_string, error_line, None).
    """
    vars, csts = self._get_all_vars_and_constraints(solver=solver, e_c=extra_constraints, e_v=extra_variables)
    smt_script = self._get_full_model_smt_script(constraints=csts, variables=vars)
    if self.smt_script_log_dir is not None:
        # Optionally persist every emitted script, keyed by content hash.
        fname = f"get-model_{hashlib.md5(smt_script.encode()).hexdigest()}.smt2"
        with open(os.path.join(self.smt_script_log_dir, fname), "wb") as f:
            f.write(smt_script.encode())
    solver.reset()
    solver.write(smt_script)
    sat = solver.read_sat()
    if sat == "sat":
        # Parse the solver's printed model into (symbol, value) assignments.
        model_string = solver.read_model()
        tokens = Tokenizer(StringIO(model_string), interactive=True)
        ass_list = SMTParser(tokens).consume_assignment_list()
        return sat, {s: val for s, val in ass_list}, ass_list
    else:
        # Not sat: the next output line carries the solver's diagnostic.
        error = solver.readline()
        return sat, error, None
def _eval(self, expr, n, extra_constraints=(), solver=None, model_callback=None):
    """
    Enumerate up to *n* distinct solutions for *expr* by repeatedly asking
    for a model and then blocking the value just returned.
    """
    e_c = list(extra_constraints)
    if expr.is_constant():
        # Constants have exactly one solution; no solver round-trip needed.
        return [expr.constant_value()]
    expr_vars = expr.get_free_variables()
    results = []
    while len(results) < n:
        sat, model, ass_list = self._get_model(solver=solver, extra_constraints=e_c, extra_variables=expr_vars)
        if sat != "sat":
            break
        val = self._get_primitive_for_expr(model, expr)
        if val in results:
            # The blocking constraint appended below should make repeats impossible.
            # import ipdb; ipdb.set_trace()
            raise ValueError("Solver error, solver returned the same value twice incorrectly!")
        results.append(val)
        # Block this value so the next model must assign `expr` differently.
        # e_c.append(And(*[NotEquals(s, val) for s, val in ass_list]))
        e_c.append(NotEquals(make_pysmt_const_from_type(val, expr.get_type()), expr))
    return tuple(results)
def eval(self, expr, n, extra_constraints=(), solver=None, model_callback=None):
    """
    This function returns up to `n` possible solutions for expression `expr`.

    :param expr: expression (an AST) to evaluate
    :param n: number of results to return
    :param solver: a solver object, native to the backend, to assist in
                   the evaluation (for example, a z3.Solver)
    :param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: A sequence of up to n results (backend objects)
    """
    if self._solver_required and solver is None:
        raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)
    results = self._eval(
        self.convert(expr),
        n,
        extra_constraints=self.convert_list(extra_constraints),
        solver=solver,
        model_callback=model_callback,
    )
    results = list(results)
    if type(expr) is not BV:
        # NOTE(review): solver.terminate() below is only reached on the BV
        # path; non-BV expressions return here with the solver process still
        # alive — confirm whether that is intentional.
        return results
    size = expr.length
    for i in range(len(results)):
        results[i] &= (1 << size) - 1  # convert it back to unsigned
    # solver is done, terminate process
    solver.terminate()
    return results
from . import cvc4_popen
from . import z3_popen
from . import abc_popen
from . import z3str_popen

View File

@@ -0,0 +1,55 @@
import subprocess
import logging
import re
from . import SMTLibSolverBackend, PopenSolverProxy
from ...errors import MissingSolverError
log = logging.getLogger(__name__)
def get_version():
    """Probe for the `abc` binary. Returns (installed, version, error_message)."""
    try:
        subprocess.check_output(["abc", "--help"]).decode("utf-8")
    except subprocess.CalledProcessError as ex:
        return False, None, f"Not found, error: {ex}"
    except OSError as ex:
        return False, None, f"Not found, error: {ex}"
    # abc prints no parseable version string, so only installedness is reported.
    return True, None, None
IS_INSTALLED, VERSION, ERROR = get_version()
class ABCProxy(PopenSolverProxy):
    """Lazily-spawned proxy around an external `abc` subprocess."""

    def __init__(self):
        # Lazy: no subprocess is spawned until create_process() is called.
        self.installed = False
        super().__init__(None)

    def create_process(self):
        """Spawn the `abc` process; raise MissingSolverError when it is absent."""
        if not IS_INSTALLED:
            raise MissingSolverError("ABC not found! Please install ABC before using this backend")
        process = subprocess.Popen(
            ["abc"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        self.installed = True
        return process
class SolverBackendABC(SMTLibSolverBackend):
    """SMT-lib backend that delegates solving to the external `abc` solver."""

    def solver(self, timeout=None, max_memory=None):
        """Return a fresh solver proxy (the analogue of z3.Solver() for Z3)."""
        return ABCProxy()
from ... import backend_manager as backend_manager
backend_manager.backends._register_backend(SolverBackendABC(), "smtlib_abc", False, False)

View File

@@ -0,0 +1,62 @@
import subprocess
import logging
import re
from . import SMTLibSolverBackend, PopenSolverProxy
from ...errors import MissingSolverError
log = logging.getLogger(__name__)
def get_version():
    """Probe for `cvc4` and parse its version. Returns (installed, version, error)."""
    try:
        output = subprocess.check_output(["cvc4", "--version"]).decode("utf-8")
    except subprocess.CalledProcessError as ex:
        return False, None, f"Not found, error: {ex}"
    except OSError as ex:
        return False, None, f"Not found, error: {ex}"
    match = re.match("This is CVC4 version (.*)\n", output)
    if not match:
        return False, None, f"Found malformed version string: {output}"
    return True, match.group(1), None
IS_INSTALLED, VERSION, ERROR = get_version()
class CVC4Proxy(PopenSolverProxy):
    """Lazily-spawned proxy around a `cvc4` subprocess in SMT-lib mode."""

    def __init__(self, timeout=None, max_memory=None):
        # Lazy instantiation: the subprocess is only spawned by create_process().
        self.timeout = timeout
        self.max_memory = max_memory  # TODO: not used
        self.installed = False
        super().__init__(None)

    def create_process(self):
        """Spawn cvc4; raise MissingSolverError when the binary is unavailable."""
        if not IS_INSTALLED:
            raise MissingSolverError("CVC4 not found! Please install CVC4 before using this backend")
        cmd = ["cvc4", "--lang=smt", "-q", "--strings-exp"]
        if self.timeout is not None:
            cmd.append(f"--tlimit-per={self.timeout}")
        process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.installed = True
        return process
class SolverBackendCVC4(SMTLibSolverBackend):
    """SMT-lib backend that delegates solving to an external CVC4 process."""

    def solver(self, timeout=None, max_memory=None):
        """Return a fresh solver proxy (the analogue of z3.Solver() for Z3)."""
        return CVC4Proxy(timeout, max_memory)
from ... import backend_manager as backend_manager
backend_manager.backends._register_backend(SolverBackendCVC4(), "smtlib_cvc4", False, False)

View File

@@ -0,0 +1,63 @@
import subprocess
import logging
import re
from . import SMTLibSolverBackend, PopenSolverProxy
from ...errors import MissingSolverError
log = logging.getLogger(__name__)
def get_version():
    """Probe for `z3` and parse its version. Returns (installed, version, error)."""
    try:
        output = subprocess.check_output(["z3", "-version"]).decode("utf-8")
    except subprocess.CalledProcessError as ex:
        return False, None, f"Not found, error: {ex}"
    except OSError as ex:
        return False, None, f"Not found, error: {ex}"
    match = re.match("Z3 version (.*)\n", output)
    if not match:
        return False, None, f"Found malformed version string: {output}"
    return True, match.group(1), None
IS_INSTALLED, VERSION, ERROR = get_version()
class Z3Proxy(PopenSolverProxy):
    """Lazily-spawned proxy around a `z3 -smt2 -in` subprocess."""

    def __init__(self, timeout=None, max_memory=None):
        # Lazy instantiation: the subprocess is only spawned by create_process().
        self.timeout = timeout
        self.max_memory = max_memory
        self.installed = False
        super().__init__(None)

    def create_process(self):
        """Spawn z3 reading SMT-lib from stdin; raise MissingSolverError if absent."""
        if not IS_INSTALLED:
            raise MissingSolverError("Z3 not found! Please install Z3 before using this backend")
        cmd = ["z3", "-smt2", "-in"]
        if self.timeout is not None:
            cmd.append(f"-t:{self.timeout//1000}")  # our timeout is in milliseconds
        if self.max_memory is not None:
            cmd.append(f"-memory:{self.max_memory}")
        process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.installed = True
        return process
class SolverBackendZ3(SMTLibSolverBackend):
    """SMT-lib backend that delegates solving to an external Z3 process."""

    def solver(self, timeout=None, max_memory=None):
        """Return a fresh solver proxy (the analogue of z3.Solver() for Z3)."""
        return Z3Proxy(timeout=timeout, max_memory=max_memory)
from ... import backend_manager as backend_manager
backend_manager.backends._register_backend(SolverBackendZ3(), "smtlib_z3", False, False)

View File

@@ -0,0 +1,63 @@
import subprocess
import logging
import re
from . import SMTLibSolverBackend, PopenSolverProxy
from ...errors import MissingSolverError
log = logging.getLogger(__name__)
def get_version():
    """Probe for `z3` (used for z3str3). Returns (installed, version, error)."""
    try:
        output = subprocess.check_output(["z3", "-version"]).decode("utf-8")
    except subprocess.CalledProcessError as ex:
        return False, None, f"Not found, error: {ex}"
    except OSError as ex:
        return False, None, f"Not found, error: {ex}"
    match = re.match("Z3 version (.*)\n", output)
    if not match:
        return False, None, f"Found malformed version string: {output}"
    return True, match.group(1), None
IS_INSTALLED, VERSION, ERROR = get_version()
class Z3StrProxy(PopenSolverProxy):
    """
    Lazily-spawned proxy around a `z3` subprocess configured to use the
    z3str3 string solver.
    """

    def __init__(self, timeout=None, max_memory=None):
        # Lazy instantiation: the subprocess is only spawned by create_process().
        self.timeout = timeout
        self.max_memory = max_memory
        self.installed = False
        p = None
        super().__init__(p)

    def create_process(self):
        """Spawn z3 with smt.string_solver=z3str3; raise MissingSolverError if absent."""
        if not IS_INSTALLED:
            raise MissingSolverError("Z3str not found! Please install Z3str before using this backend")
        cmd = ["z3", "-smt2", "smt.string_solver=z3str3", "-in"]
        if self.timeout is not None:
            cmd.append(f"-t:{self.timeout//1000}")  # our timeout is in milliseconds
        if self.max_memory is not None:
            cmd.append(f"-memory:{self.max_memory}")
        p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # BUG FIX: this previously set `self.installed = False` after a
        # successful spawn, leaving the proxy permanently flagged as missing.
        # Every sibling proxy (Z3Proxy, CVC4Proxy, ABCProxy) sets True here.
        self.installed = True
        return p
class SolverBackendZ3Str(SMTLibSolverBackend):
    """SMT-lib backend that delegates solving to Z3's z3str3 string solver."""

    def solver(self, timeout=None, max_memory=None):
        """Return a fresh solver proxy (the analogue of z3.Solver() for Z3)."""
        return Z3StrProxy(timeout=timeout, max_memory=max_memory)
from ... import backend_manager as backend_manager
backend_manager.backends._register_backend(SolverBackendZ3Str(), "smtlib_z3str", False, False)

View File

@@ -0,0 +1,476 @@
import logging
import numbers
import functools
import operator
from functools import reduce
l = logging.getLogger("claripy.backends.backend_vsa")
from . import Backend, BackendError
from ..vsa import RegionAnnotation
def arg_filter(f):
    """Decorator: reject raw Python numbers as the first argument to *f*."""

    @functools.wraps(f)
    def wrapper(*args):  # pylint:disable=redefined-builtin
        first = args[0]
        if isinstance(first, numbers.Number):  # pylint:disable=unidiomatic-typecheck
            raise BackendError("Unsupported argument type %s" % type(first))
        return f(*args)

    return wrapper
def normalize_arg_order(f):
    """
    Decorator for two-argument VSA operations: guarantee that a VSA object
    (StridedInterval / DiscreteStridedIntervalSet / ValueSet) is the first
    argument, swapping the operands when only the second one qualifies.
    """

    @functools.wraps(f)
    def normalizer(*args):
        if len(args) != 2:
            raise BackendError("Unsupported arguments number %d" % len(args))
        vsa_types = {StridedInterval, DiscreteStridedIntervalSet, ValueSet}
        if type(args[0]) not in vsa_types:  # pylint:disable=unidiomatic-typecheck
            if type(args[1]) not in vsa_types:  # pylint:disable=unidiomatic-typecheck
                raise BackendError("Unsupported arguments")
            # Only the second operand is a VSA object: put it first.
            args = [args[1], args[0]]
        return f(*args)

    return normalizer
def convert_args(f):
    """
    Decorator: convert every argument of the AST to a backend object (via
    self.convert) before handing the rebuilt AST to *f*.
    """

    @functools.wraps(f)
    def converter(self, ast):
        converted = [self.convert(arg) for arg in ast.args]
        return f(self, ast.swap_args(converted))

    return converter
class BackendVSA(Backend):
    """
    Value-set analysis backend: evaluates claripy ASTs over abstract domains
    (StridedInterval / DiscreteStridedIntervalSet / ValueSet) rather than
    concrete values or SMT formulas.
    """

    def __init__(self):
        Backend.__init__(self)
        # self._make_raw_ops(set(expression_operations) - set(expression_set_operations), op_module=BackendVSA)
        self._make_expr_ops(set(expression_set_operations), op_class=self)
        self._make_raw_ops(set(backend_operations_vsa_compliant), op_module=BackendVSA)
        self._op_raw["Reverse"] = BackendVSA.Reverse
        self._op_raw["If"] = self.If
        self._op_expr["BVV"] = self.BVV
        self._op_expr["BoolV"] = self.BoolV
        self._op_expr["BVS"] = self.BVS
        # reduceable
        self._op_raw["__add__"] = self._op_add
        self._op_raw["__sub__"] = self._op_sub
        self._op_raw["__mul__"] = self._op_mul
        self._op_raw["__or__"] = self._op_or
        self._op_raw["__xor__"] = self._op_xor
        self._op_raw["__and__"] = self._op_and
        self._op_raw["__mod__"] = self._op_mod

    # n-ary arithmetic/bitwise ops are implemented as left folds over the
    # corresponding binary operator.
    @staticmethod
    def _op_add(*args):
        return reduce(operator.__add__, args)

    @staticmethod
    def _op_sub(*args):
        return reduce(operator.__sub__, args)

    @staticmethod
    def _op_mul(*args):
        return reduce(operator.__mul__, args)

    @staticmethod
    def _op_or(*args):
        return reduce(operator.__or__, args)

    @staticmethod
    def _op_xor(*args):
        return reduce(operator.__xor__, args)

    @staticmethod
    def _op_and(*args):
        return reduce(operator.__and__, args)

    @staticmethod
    def _op_mod(*args):
        return reduce(operator.__mod__, args)

    def convert(self, expr):
        # Excavate ITEs first so If nodes reach the top of the AST.
        return Backend.convert(self, expr.ite_excavated if isinstance(expr, Base) else expr)

    def _convert(self, a):
        if isinstance(a, numbers.Number):
            return a
        elif isinstance(a, bool):
            # NOTE(review): bool is a numbers.Number subclass, so this branch
            # appears unreachable (bools are returned by the branch above) —
            # confirm whether bools should be mapped to BoolResult here.
            return TrueResult() if a else FalseResult()
        if isinstance(a, (StridedInterval, DiscreteStridedIntervalSet, ValueSet)):
            return a
        if isinstance(a, BoolResult):
            return a
        # Not supported
        raise BackendError()

    def _eval(self, expr, n, extra_constraints=(), solver=None, model_callback=None):
        """Enumerate up to *n* concrete values from the abstract object."""
        if isinstance(expr, StridedInterval):
            return expr.eval(n)
        elif isinstance(expr, ValueSet):
            return expr.eval(n)
        elif isinstance(expr, BoolResult):
            return expr.value
        else:
            raise BackendError("Unsupported type %s" % type(expr))

    def _min(self, expr, extra_constraints=(), signed=False, solver=None, model_callback=None):
        # TODO: signed min
        if isinstance(expr, StridedInterval):
            if expr.is_top:
                # TODO: Return
                return 0
            return expr.min
        elif isinstance(expr, ValueSet):
            return expr.min
        else:
            raise BackendError("Unsupported expr type %s" % type(expr))

    def _max(self, expr, extra_constraints=(), signed=False, solver=None, model_callback=None):
        # TODO: signed max
        if isinstance(expr, StridedInterval):
            if expr.is_top:
                # TODO:
                return StridedInterval.max_int(expr.bits)
            return expr.max
        elif isinstance(expr, ValueSet):
            return expr.max
        else:
            raise BackendError("Unsupported expr type %s" % type(expr))

    def _solution(self, obj, v, extra_constraints=(), solver=None, model_callback=None):
        """True iff concrete candidate *v* is contained in abstract object *obj*."""
        if isinstance(obj, BoolResult):
            return len(set(v.value) & set(obj.value)) > 0
        if isinstance(obj, StridedInterval):
            return not obj.intersection(v).is_empty
        if isinstance(obj, ValueSet):
            # A ValueSet holds one strided interval per region.
            for _, si in obj.items():
                if not si.intersection(v).is_empty:
                    return True
            return False
        raise NotImplementedError(type(obj).__name__)

    def _has_true(self, o, extra_constraints=(), solver=None, model_callback=None):
        return BoolResult.has_true(o)

    def _has_false(self, o, extra_constraints=(), solver=None, model_callback=None):
        return BoolResult.has_false(o)

    def _is_true(self, o, extra_constraints=(), solver=None, model_callback=None):
        return BoolResult.is_true(o)

    def _is_false(self, o, extra_constraints=(), solver=None, model_callback=None):
        return BoolResult.is_false(o)

    #
    # Backend Operations
    #

    def simplify(self, e):
        # VSA objects are not simplifiable through this interface.
        raise BackendError("nope")

    def _identical(self, a, b):
        if type(a) != type(b):
            return False
        return a.identical(b)

    def _unique(self, obj):  # pylint:disable=unused-argument,no-self-use
        """True iff the abstract object denotes exactly one concrete value."""
        if isinstance(obj, StridedInterval):
            return obj.unique
        elif isinstance(obj, ValueSet):
            return obj.unique
        else:
            raise BackendError("Not supported type of operand %s" % type(obj))

    def _cardinality(self, a):  # pylint:disable=unused-argument,no-self-use
        return a.cardinality

    def name(self, a):
        if isinstance(a, StridedInterval):
            return a.name
        else:
            return None

    def apply_annotation(self, bo, annotation):
        """
        Apply an annotation on the backend object.

        :param BackendObject bo: The backend object.
        :param Annotation annotation: The annotation to be applied
        :return: A new BackendObject
        :rtype: BackendObject
        """
        # Currently we only support RegionAnnotation
        if not isinstance(annotation, RegionAnnotation):
            return bo
        if not isinstance(bo, ValueSet):
            # Convert it to a ValueSet first
            # Note that the original value is not kept at all. If you want to convert a StridedInterval to a ValueSet,
            # you gotta do the conversion by calling AST.annotate() from outside.
            bo = ValueSet.empty(bo.bits)
        return bo.apply_annotation(annotation)

    def BVV(self, ast):  # pylint:disable=unused-argument,no-self-use
        # A concrete bitvector becomes a degenerate (single-value) interval.
        if ast.args[0] is None:
            return StridedInterval.empty(ast.args[1])
        else:
            return CreateStridedInterval(bits=ast.args[1], stride=0, lower_bound=ast.args[0], upper_bound=ast.args[0])

    @staticmethod
    def BoolV(ast):  # pylint:disable=unused-argument
        return TrueResult() if ast.args[0] else FalseResult()

    @staticmethod
    def And(a, *args):
        return reduce(operator.__and__, args, a)

    @staticmethod
    def Not(a):
        return ~a

    # Comparisons delegate to the left operand; normalize_arg_order guarantees
    # that operand is a VSA object.
    @staticmethod
    @normalize_arg_order
    def ULT(a, b):
        return a.ULT(b)

    @staticmethod
    @normalize_arg_order
    def ULE(a, b):
        return a.ULE(b)

    @staticmethod
    @normalize_arg_order
    def UGT(a, b):
        return a.UGT(b)

    @staticmethod
    @normalize_arg_order
    def UGE(a, b):
        return a.UGE(b)

    @staticmethod
    @normalize_arg_order
    def SLT(a, b):
        return a.SLT(b)

    @staticmethod
    @normalize_arg_order
    def SLE(a, b):
        return a.SLE(b)

    @staticmethod
    @normalize_arg_order
    def SGT(a, b):
        return a.SGT(b)

    @staticmethod
    @normalize_arg_order
    def SGE(a, b):
        return a.SGE(b)

    @staticmethod
    def BVS(ast):  # pylint:disable=unused-argument
        # Symbolic bitvectors map to a named strided interval built from the
        # BVS node's (name, min, max, stride, ...) arguments.
        size = ast.size()
        name, mn, mx, stride, uninitialized, discrete_set, max_card = ast.args
        return CreateStridedInterval(
            name=name,
            bits=size,
            lower_bound=mn,
            upper_bound=mx,
            stride=stride,
            uninitialized=uninitialized,
            discrete_set=discrete_set,
            discrete_set_max_cardinality=max_card,
        )

    def If(self, cond, t, f):
        # When the condition is definitely false/true pick one branch;
        # otherwise over-approximate with the union of both branches.
        if not self.has_true(cond):
            return f
        elif not self.has_false(cond):
            return t
        else:
            return t.union(f)

    # TODO: Implement other operations!
    @staticmethod
    def Or(*args):
        first = args[0]
        others = args[1:]
        for o in others:
            first = first.union(o)
        return first

    @staticmethod
    def __rshift__(expr, shift_amount):  # pylint:disable=unexpected-special-method-signature
        return expr.__rshift__(shift_amount)

    @staticmethod
    def LShR(expr, shift_amount):
        return expr.LShR(shift_amount)

    @staticmethod
    def Concat(*args):
        ret = None
        for expr in args:
            if type(expr) not in {
                StridedInterval,
                DiscreteStridedIntervalSet,
                ValueSet,
            }:  # pylint:disable=unidiomatic-typecheck
                raise BackendError("Unsupported expr type %s" % type(expr))
            ret = ret.concat(expr) if ret is not None else expr
        return ret

    @staticmethod
    def Extract(*args):
        # args are (high_bit, low_bit, expr), matching claripy's Extract.
        low_bit = args[1]
        high_bit = args[0]
        expr = args[2]
        if type(expr) not in {
            StridedInterval,
            DiscreteStridedIntervalSet,
            ValueSet,
        }:  # pylint:disable=unidiomatic-typecheck
            raise BackendError("Unsupported expr type %s" % type(expr))
        ret = expr.extract(high_bit, low_bit)
        return ret

    @staticmethod
    def SignExt(*args):
        new_bits = args[0]
        expr = args[1]
        if type(expr) not in {StridedInterval, DiscreteStridedIntervalSet}:  # pylint:disable=unidiomatic-typecheck
            raise BackendError("Unsupported expr type %s" % type(expr))
        # sign_extend is given the final width (added bits + current width) —
        # presumably it takes a total bit count; TODO confirm.
        return expr.sign_extend(new_bits + expr.bits)

    @staticmethod
    def ZeroExt(*args):
        new_bits = args[0]
        expr = args[1]
        if type(expr) not in {StridedInterval, DiscreteStridedIntervalSet}:  # pylint:disable=unidiomatic-typecheck
            raise BackendError("Unsupported expr type %s" % type(expr))
        # zero_extend is given the final width, mirroring SignExt above.
        return expr.zero_extend(new_bits + expr.bits)

    @staticmethod
    def Reverse(arg):
        if type(arg) not in {
            StridedInterval,
            DiscreteStridedIntervalSet,
            ValueSet,
        }:  # pylint:disable=unidiomatic-typecheck
            raise BackendError("Unsupported expr type %s" % type(arg))
        return arg.reverse()

    @convert_args
    def union(self, ast):  # pylint:disable=unused-argument,no-self-use
        if len(ast.args) != 2:
            raise BackendError("Incorrect number of arguments (%d) passed to BackendVSA.union()." % len(ast.args))
        # Try both orders: an operand may answer NotImplemented.
        ret = ast.args[0].union(ast.args[1])
        if ret is NotImplemented:
            ret = ast.args[1].union(ast.args[0])
        return ret

    @convert_args
    def intersection(self, ast):  # pylint:disable=unused-argument,no-self-use
        if len(ast.args) != 2:
            raise BackendError(
                "Incorrect number of arguments (%d) passed to BackendVSA.intersection()." % len(ast.args)
            )
        ret = None
        for arg in ast.args:
            if ret is None:
                ret = arg
            else:
                ret = ret.intersection(arg)
        return ret

    @convert_args
    def widen(self, ast):  # pylint:disable=unused-argument,no-self-use
        if len(ast.args) != 2:
            raise BackendError("Incorrect number of arguments (%d) passed to BackendVSA.widen()." % len(ast.args))
        # Try both orders: an operand may answer NotImplemented.
        ret = ast.args[0].widen(ast.args[1])
        if ret is NotImplemented:
            ret = ast.args[1].widen(ast.args[0])
        return ret

    @staticmethod
    def CreateTopStridedInterval(bits, name=None, uninitialized=False):  # pylint:disable=unused-argument,no-self-use
        """Create a TOP interval (any value of the given width)."""
        return StridedInterval.top(bits, name, uninitialized=uninitialized)

    def constraint_to_si(self, expr):
        """Derive (sat, replacements) bounds information from a constraint."""
        return Balancer(self, expr).compat_ret
from ..ast.base import Base
from ..operations import backend_operations_vsa_compliant, expression_set_operations
from ..vsa import (
StridedInterval,
CreateStridedInterval,
DiscreteStridedIntervalSet,
ValueSet,
AbstractLocation,
BoolResult,
TrueResult,
FalseResult,
)
from ..balancer import Balancer
BackendVSA.CreateStridedInterval = staticmethod(CreateStridedInterval)

View File

@@ -0,0 +1,140 @@
import os
import pickle
import threading
num_children = 0
import logging
l = logging.getLogger("claripy.backends.backend_z3_parallel")
from .backend_z3 import BackendZ3
class BackendZ3Parallel(BackendZ3):
    """
    BackendZ3 variant that runs heavy queries (eval/min/max/...) in a forked
    child process, pickling results back over a pipe, while serializing
    conversion/solver operations behind an RLock.
    """

    def __init__(self):
        BackendZ3.__init__(self)
        self._child = False  # True inside a forked worker process
        self._lock = threading.RLock()
        self._cache_objects = False

    def _background(self, f_name, *args, **kwargs):
        """Run BackendZ3.<f_name> in a forked child and return its unpickled result."""
        global num_children
        f = getattr(BackendZ3, f_name)
        if self._child:
            # Already inside a worker: run inline, no further forking.
            return f(self, *args, **kwargs)
        p_r, p_w = os.pipe()
        p = os.fork()
        if p == 0:
            # Child: compute, write the pickled result, then die hard (SIGKILL)
            # so no parent state (atexit handlers, buffers) runs twice.
            self._child = True
            self._lock = threading.RLock()
            try:
                r = f(self, *args, **kwargs)
            except UnsatError as e:
                r = e
            pickled = pickle.dumps(r, -1)
            written = 0
            while written < len(pickled):
                written += os.write(p_w, pickled[written:])
            os.close(p_w)
            os.close(p_r)
            os.kill(os.getpid(), 9)
        else:
            # Parent: read until EOF, unpickle, and reap the child.
            os.close(p_w)
            num_children += 1
            l.debug("in _background with function %s and child %d (among %d)", f, p, num_children)
            try:
                strs = [os.read(p_r, 1024 * 1024)]
                # BUG FIX: os.read() returns bytes, so the EOF sentinel must be
                # b"" — comparing against "" never matched and looped forever.
                while strs[-1] != b"":
                    strs.append(os.read(p_r, 1024 * 1024))
            except (EOFError, OSError) as e:
                # BUG FIX: os.read() raises OSError (not EOFError) on failure.
                raise ClaripyError("read error while receiving data from child") from e
            os.close(p_r)
            # BUG FIX: join the byte chunks with b""; "".join() on bytes raises TypeError.
            r = pickle.loads(b"".join(strs))
            os.waitpid(p, 0)
            num_children -= 1
            l.debug("... child %d is done. %d left", p, num_children)
            if isinstance(r, Exception):
                raise r
            else:
                return r

    def _synchronize(self, f, *args, **kwargs):
        """Run BackendZ3.<f> under the instance lock (inline when in a child)."""
        if self._child:
            return getattr(BackendZ3, f)(self, *args, **kwargs)
        self._lock.acquire()
        try:
            return getattr(BackendZ3, f)(self, *args, **kwargs)
        finally:
            self._lock.release()

    @staticmethod
    def _translate(*args):
        # Ask each argument to migrate itself (z3 context translation), if supported.
        for a in args:
            if hasattr(a, "translate"):
                a.translate()

    # backgrounded (forked) operations
    def _results(self, *args, **kwargs):
        return self._background("_results", *args, **kwargs)

    def _eval(self, *args, **kwargs):
        return self._background("_eval", *args, **kwargs)

    def _batch_eval(self, *args, **kwargs):
        return self._background("_batch_eval", *args, **kwargs)

    def _min(self, *args, **kwargs):
        return self._background("_min", *args, **kwargs)

    def _max(self, *args, **kwargs):
        return self._background("_max", *args, **kwargs)

    # synchronized (locked) operations
    def _convert(self, *args, **kwargs):
        return self._synchronize("_convert", *args, **kwargs)

    def abstract(self, *args, **kwargs):
        return self._synchronize("abstract", *args, **kwargs)

    def solver(self, *args, **kwargs):
        return self._synchronize("solver", *args, **kwargs)

    def _add(self, *args, **kwargs):
        return self._synchronize("_add", *args, **kwargs)

    def _check(self, *args, **kwargs):
        return self._synchronize("_check", *args, **kwargs)

    def _simplify(self, *args, **kwargs):
        return self._synchronize("_simplify", *args, **kwargs)

    def call(self, *args, **kwargs):
        return self._synchronize("call", *args, **kwargs)

    def resolve(self, *args, **kwargs):
        return self._synchronize("resolve", *args, **kwargs)

    def simplify(self, *args, **kwargs):
        return self._synchronize("simplify", *args, **kwargs)
from ..errors import ClaripyError, UnsatError

View File

@@ -0,0 +1,724 @@
from typing import Set
import logging
import operator
l = logging.getLogger("claripy.balancer")
class Balancer:
"""
The Balancer is an equation redistributor. The idea is to take an AST and rebalance it to, for example, isolate
unknown terms on one side of an inequality.
"""
def __init__(self, helper, c, validation_frontend=None):
    """
    :param helper: backend used for min/max/identity queries
    :param c: the constraint (AST) to rebalance
    :param validation_frontend: optional frontend used to sanity-check bounds
    """
    self._helper = helper
    self._validation_frontend = validation_frontend
    self._truisms = []  # worklist of constraints still to process
    self._processed_truisms = set()  # already-handled truisms (avoids reprocessing)
    self._identified_assumptions = set()  # implicit assumptions already queued
    self._lower_bounds = {}  # AST cache_key -> best lower bound
    self._upper_bounds = {}  # AST cache_key -> best upper bound
    self._queue_truism(c.ite_excavated)
    self.sat = True
    try:
        self._doit()
    except ClaripyBalancerUnsatError:
        # The constraint is definitely unsatisfiable.
        self.bounds = {}
        self.sat = False
    except BackendError:
        l.debug("Backend error in balancer.", exc_info=True)
@property
def compat_ret(self):
    """Legacy (sat, replacements) pair used by constraint_to_si callers."""
    return self.sat, self.replacements
def _replacements_iter(self):
    """Yield (original_ast, bounded_ast) pairs from the collected bounds."""
    all_keys = set(self._lower_bounds.keys()) | set(self._upper_bounds.keys())
    for k in all_keys:
        max_int = (1 << len(k.ast)) - 1  # unsigned max for this bit width
        min_int = 0
        mn = self._lower_bounds.get(k, min_int)
        mx = self._upper_bounds.get(k, max_int)
        bound_si = BVS("bound", len(k.ast), min=mn, max=mx)
        l.debug("Yielding bound %s for %s.", bound_si, k.ast)
        if k.ast.op == "Reverse":
            # The bound was computed on the reversed value; un-reverse the result.
            yield (k.ast.args[0], k.ast.intersection(bound_si).reversed)
        else:
            yield (k.ast, k.ast.intersection(bound_si))
def _add_lower_bound(self, o, b):
    """Record lower bound *b* for AST *o*, keeping the tightest (largest) one."""
    l.debug("Adding lower bound %s for %s.", b, o)
    if o.cache_key in self._lower_bounds:
        old_b = self._lower_bounds[o.cache_key]
        l.debug("... old bound: %s", old_b)
        b = max(b, old_b)  # tighter lower bound wins
        l.debug("... new bound: %s", b)
    if self._validation_frontend is not None:
        # Cross-check: the true minimum must not fall below the recorded bound.
        emin = self._validation_frontend.min(o)
        bmin = self._helper.min(b)
        assert emin >= bmin
    self._lower_bounds[o.cache_key] = b
def _add_upper_bound(self, o, b):
    """Record upper bound *b* for AST *o*, keeping the tightest (smallest) one."""
    l.debug("Adding upper bound %s for %s.", b, o)
    if o.cache_key in self._upper_bounds:
        old_b = self._upper_bounds[o.cache_key]
        l.debug("... old bound: %s", old_b)
        b = min(b, old_b)  # tighter upper bound wins
        l.debug("... new bound: %s", b)
    if self._validation_frontend is not None:
        # Cross-check: the true maximum must not exceed the recorded bound.
        emax = self._validation_frontend.max(o)
        bmax = self._helper.max(b)
        assert emax <= bmax
    self._upper_bounds[o.cache_key] = b
@property
def replacements(self):
    """Materialized list of (ast, bounded-ast) replacement pairs."""
    pairs = self._replacements_iter()
    return list(pairs)
#
# AST helper functions
#
def _same_bound_bv(self, a):
    """Return a fresh BVS covering the same [min, max] range and stride as *a*."""
    si = backends.vsa.convert(a)
    mx = self._max(a)
    mn = self._min(a)
    return BVS("bounds", len(a), min=mn, max=mx, stride=si._stride)
@staticmethod
def _cardinality(a):
    """Cardinality of an AST; concrete (non-AST) values count as 0."""
    if isinstance(a, Base):
        return a.cardinality
    return 0
@staticmethod
def _min(a, signed=False):
    """Concrete minimum of *a* under the VSA backend (unsigned by default)."""
    converted = backends.vsa.convert(a)
    if isinstance(converted, vsa.ValueSet):
        if len(converted.regions) == 1:
            # Single region: fall through using its strided interval.
            converted = list(converted.regions.values())[0]
        else:
            # unfortunately, this is a real abstract pointer
            # the minimum value will be 0 or MIN_INT
            if signed:
                return -(1 << (len(converted) - 1))
            else:
                return 0
    if not signed:
        bounds = converted._unsigned_bounds()
    else:
        bounds = converted._signed_bounds()
    return min(mn for mn, mx in bounds)
@staticmethod
def _max(a, signed=False):
    """Concrete maximum of *a* under the VSA backend (unsigned by default)."""
    converted = backends.vsa.convert(a)
    if isinstance(converted, vsa.ValueSet):
        if len(converted.regions) == 1:
            # Single region: fall through using its strided interval.
            converted = list(converted.regions.values())[0]
        else:
            # unfortunately, this is a real abstract pointer;
            # the maximum value will be MAX_INT (signed or unsigned).
            if signed:
                return (1 << (len(converted) - 1)) - 1
            else:
                return (1 << len(converted)) - 1
    if not signed:
        bounds = converted._unsigned_bounds()
    else:
        bounds = converted._signed_bounds()
    return max(mx for mn, mx in bounds)
def _range(self, a, signed=False):
    """Return the (min, max) pair for *a*."""
    lo = self._min(a, signed=signed)
    hi = self._max(a, signed=signed)
    return lo, hi
@staticmethod
def _invert_comparison(a):
    """Logical negation of a comparison AST."""
    return _all_operations.Not(a)
#
# Truism alignment
#
def _align_truism(self, truism):
    """
    Align the truism and its left-hand side so the multivalued term leads.
    Verifies via the VSA backend that alignment preserved the meaning; on
    mismatch the original truism is returned unchanged.
    """
    outer_aligned = self._align_ast(truism)
    inner_aligned = outer_aligned.make_like(
        outer_aligned.op, (self._align_ast(outer_aligned.args[0]),) + outer_aligned.args[1:]
    )
    if not backends.vsa.identical(inner_aligned, truism):
        l.critical(
            "ERROR: the balancer is messing up an AST. This must be looked into. Please submit the binary and script to the angr project, if possible. Outer op is %s and inner op is %s.",
            truism.op,
            truism.args[0].op,
        )
        return truism
    return inner_aligned
def _align_ast(self, a):
    """
    Aligns the AST so that the argument with the highest cardinality is on the left.

    :return: a new AST.
    """
    try:
        if isinstance(a, BV):
            return self._align_bv(a)
        elif isinstance(a, Bool) and len(a.args) == 2 and a.args[1].cardinality > a.args[0].cardinality:
            # Binary boolean with the multivalued side on the right: flip it.
            return self._reverse_comparison(a)
        else:
            return a
    except ClaripyBalancerError:
        # Alignment is best-effort; fall back to the unmodified AST.
        return a
@staticmethod
def _reverse_comparison(a):
    """
    Swap the operands of a comparison, replacing its op with the mirrored one
    from `opposites` (e.g. ULT -> UGT). Raises ClaripyBalancerError when the
    op cannot be reversed.
    """
    try:
        new_op = opposites[a.op]
    except KeyError:
        raise ClaripyBalancerError("unable to reverse comparison %s (missing from 'opposites')" % a.op)
    try:
        # Dunder ops live on the operator module; named ops on claripy itself.
        if new_op.startswith("__"):
            op = getattr(operator, new_op)
        else:
            op = getattr(_all_operations, new_op)
    except AttributeError:
        raise ClaripyBalancerError("unable to reverse comparison %s (AttributeError)" % a.op)
    try:
        return op(*a.args[::-1])
    except ClaripyOperationError:
        # TODO: copy trace
        raise ClaripyBalancerError("unable to reverse comparison %s (ClaripyOperationError)" % a.op)
def _align_bv(self, a):
    """Reorder a BV expression so the highest-cardinality operand leads."""
    if a.op in commutative_operations:
        # Commutative ops can simply be sorted by descending cardinality.
        return a.make_like(a.op, tuple(sorted(a.args, key=lambda v: -self._cardinality(v))))
    else:
        # Non-commutative ops need a dedicated _align_<op> rewriter, if any.
        try:
            op = getattr(self, "_align_" + a.op)
        except AttributeError:
            return a
        return op(a)
def _align___sub__(self, a):
    """
    Rewrite a - b - c as a + (-b) + (-c), sorted by descending cardinality,
    so the multivalued operand leads; no-op when it already does.
    """
    cardinalities = [self._cardinality(v) for v in a.args]
    if max(cardinalities) == cardinalities[0]:
        return a
    adjusted = tuple(operator.__neg__(v) for v in a.args[1:]) + a.args[:1]
    return a.make_like("__add__", tuple(sorted(adjusted, key=lambda v: -self._cardinality(v))))
#
# Find bounds
#
def _doit(self):
    """
    This function processes the list of truisms and finds bounds for ASTs.
    """
    while len(self._truisms):
        truism = self._truisms.pop()
        if truism in self._processed_truisms:
            continue
        unpacked_truisms = self._unpack_truisms(truism)
        if is_false(truism):
            # A definitely-false truism makes the whole constraint unsat.
            raise ClaripyBalancerUnsatError()
        self._processed_truisms.add(truism)
        if len(unpacked_truisms):
            # Composite constraint: process its components instead.
            self._queue_truisms(unpacked_truisms, check_true=True)
            continue
        if not self._handleable_truism(truism):
            continue
        truism = self._adjust_truism(truism)
        # Queue implicit assumptions (e.g. x <= 10 implies x >= 0) exactly once.
        assumptions = self._get_assumptions(truism)
        if truism not in self._identified_assumptions and len(assumptions):
            l.debug("Queued assumptions %s for truism %s.", assumptions, truism)
            self._truisms.extend(assumptions)
            self._identified_assumptions.update(assumptions)
        l.debug("Processing truism %s", truism)
        balanced_truism = self._balance(truism)
        l.debug("... handling")
        self._handle(balanced_truism)
def _queue_truism(self, t, check_true=False):
    """Queue a truism; when *check_true* is set, skip ones already known true."""
    if check_true and is_true(t):
        return
    self._truisms.append(t)
def _queue_truisms(self, ts, check_true=False):
    """Queue several truisms, optionally filtering out already-true ones."""
    for t in ts:
        self._queue_truism(t, check_true=check_true)
@staticmethod
def _handleable_truism(t):
    """
    Checks whether we can handle this truism. The truism should already be aligned.

    :return: True when the truism can be balanced, False otherwise.
    """
    if len(t.args) < 2:
        l.debug("can't do anything with an unop bool")
        # BUG FIX: this branch previously fell through, returning an implicit
        # None; make the (falsy) rejection explicit.
        return False
    elif t.args[0].cardinality > 1 and t.args[1].cardinality > 1:
        l.debug("can't do anything because we have multiple multivalued guys")
        return False
    elif t.op == "If":
        l.debug("can't handle If")
        return False
    else:
        return True
@staticmethod
def _adjust_truism(t):
    """
    Normalize a comparison so the multivalued operand sits on the left: if the
    left side is concrete and the right side is multivalued, flip the operands.
    """
    lhs, rhs = t.args[0], t.args[1]
    if lhs.cardinality == 1 and rhs.cardinality > 1:
        return Balancer._reverse_comparison(t)
    return t
#
# Assumptions management
#
@staticmethod
def _get_assumptions(t):
    """
    Given a constraint, _get_assumptions() returns a set of constraints that are implicitly
    assumed to be true. For example, `x <= 10` would return `x >= 0`.
    """
    if t.op in ("__le__", "__lt__", "ULE", "ULT"):
        # Unsigned upper bound implies a non-negative lower bound.
        return [t.args[0] >= 0]
    elif t.op in ("__ge__", "__gt__", "UGE", "UGT"):
        # Unsigned lower bound implies the width's unsigned maximum above.
        return [t.args[0] <= 2 ** len(t.args[0]) - 1]
    elif t.op in ("SLE", "SLT"):
        # Signed upper bound implies the signed minimum below.
        return [_all_operations.SGE(t.args[0], -(1 << (len(t.args[0]) - 1)))]
    elif t.op in ("SGE", "SGT"):
        # Signed lower bound implies the signed maximum above.
        return [_all_operations.SLE(t.args[0], (1 << (len(t.args[0]) - 1)) - 1)]
    else:
        return []
#
# Truism extractor
#
def _unpack_truisms(self, c) -> Set:
    """
    Given a constraint, return the set of constraints that must be True for
    this constraint to be True. Ops without a dedicated unpacker yield nothing.
    """
    unpacker = getattr(self, "_unpack_truisms_" + c.op, None)
    if unpacker is None:
        return set()
    return unpacker(c)
def _unpack_truisms_And(self, c):
    """Every conjunct of an And must hold, so union the truisms of all of them."""
    per_arg = [self._unpack_truisms(arg) for arg in c.args]
    return set.union(*per_arg)
def _unpack_truisms_Not(self, c):
    """Push a Not through And/Or via De Morgan and recurse; otherwise give up."""
    inner = c.args[0]
    if inner.op == "And":
        negated = [_all_operations.Not(sub) for sub in inner.args]
        return self._unpack_truisms(_all_operations.Or(*negated))
    if inner.op == "Or":
        negated = [_all_operations.Not(sub) for sub in inner.args]
        return self._unpack_truisms(_all_operations.And(*negated))
    return set()
def _unpack_truisms_Or(self, c):
    """
    An Or is unsat if every disjunct is provably false; if exactly one disjunct
    is not provably false, that disjunct must hold and is unpacked further.
    """
    falseness = [is_false(arg) for arg in c.args]
    if all(falseness):
        raise ClaripyBalancerUnsatError()
    if falseness.count(False) == 1:
        survivor = c.args[falseness.index(False)]
        return self._unpack_truisms(survivor)
    return set()
#
# Dealing with constraints
#
# Maps each comparison op to a (is_lt, is_eq, is_unsigned) triple describing
# its direction, whether it admits equality, and its signedness.
comparison_info = {
    "SLT": (True, False, False),
    "SLE": (True, True, False),
    "SGT": (False, False, False),
    "SGE": (False, True, False),
    "ULT": (True, False, True),
    "ULE": (True, True, True),
    "UGT": (False, False, True),
    "UGE": (False, True, True),
}
# The Python rich-comparison operators are treated as unsigned comparisons.
comparison_info["__lt__"] = comparison_info["ULT"]
comparison_info["__le__"] = comparison_info["ULE"]
comparison_info["__gt__"] = comparison_info["UGT"]
comparison_info["__ge__"] = comparison_info["UGE"]
#
# Simplification routines
#
def _balance(self, truism):
    """
    Repeatedly rewrite `truism` so that the multivalued expression is isolated
    on the left-hand side, dispatching to a `_balance_<op>` handler per lhs op.
    Returns the (possibly unchanged) truism; recursion stops at a fixpoint.
    """
    l.debug("Balancing %s", truism)
    # can't balance single-arg bools (Not) for now
    if len(truism.args) == 1:
        return truism
    # only AST operands can be balanced
    if not isinstance(truism.args[0], Base):
        return truism
    try:
        inner_aligned = self._align_truism(truism)
        # balancing needs a single-valued rhs to move terms toward
        if inner_aligned.args[1].cardinality > 1:
            l.debug("can't do anything because we have multiple multivalued guys")
            return truism
        try:
            # dispatch on the lhs op, e.g. _balance___add__, _balance_Extract
            balancer = getattr(self, "_balance_%s" % inner_aligned.args[0].op)
        except AttributeError:
            l.debug("Balance handler %s is not found in balancer. Consider implementing.", truism.args[0].op)
            return truism
        balanced = balancer(inner_aligned)
        if balanced is inner_aligned:
            # handler made no progress: fixpoint reached
            # print("... balanced:", balanced)
            return balanced
        else:
            # keep balancing the rewritten truism (note: this recursion is
            # inside the try, so its failures are also caught below)
            return self._balance(balanced)
    except ClaripyBalancerError:
        l.warning("Balance handler for operation %s raised exception.", truism.args[0].op)
        return truism
@staticmethod
def _balance_Reverse(truism):
    """
    Strip a Reverse on the lhs by reversing the rhs instead. This is only
    sound for (in)equality, where byte order does not affect the result.
    """
    if truism.op not in ("__eq__", "__ne__"):
        return truism
    unreversed_lhs = truism.args[0].args[0]
    reversed_rhs = truism.args[1].reversed
    return truism.make_like(truism.op, (unreversed_lhs, reversed_rhs))
@staticmethod
def _balance___add__(truism):
    """
    Move every addend except the first from the lhs sum onto the rhs, turning
    `a + b + ... <op> r` into `a <op> r - b - ...`.
    """
    if len(truism.args) != 2:
        return truism
    lhs_sum = truism.args[0]
    kept_term = lhs_sum.args[0]
    moved_terms = lhs_sum.args[1:]
    adjusted_rhs = lhs_sum.make_like("__sub__", (truism.args[1],) + moved_terms)
    return truism.make_like(truism.op, (kept_term, adjusted_rhs))
@staticmethod
def _balance___sub__(truism):
    """
    Move every subtrahend from the lhs onto the rhs, turning
    `a - b - ... <op> r` into `a <op> r + b + ...`.
    """
    if len(truism.args) != 2:
        return truism
    lhs_diff = truism.args[0]
    kept_term = lhs_diff.args[0]
    moved_terms = lhs_diff.args[1:]
    adjusted_rhs = lhs_diff.make_like("__add__", (truism.args[1],) + moved_terms)
    return truism.make_like(truism.op, (kept_term, adjusted_rhs))
@staticmethod
def _balance_ZeroExt(truism):
    """
    Drop a ZeroExt on the lhs when the rhs's corresponding high bits are
    provably zero, comparing the inner value against the truncated rhs.
    """
    ext_bits, inner = truism.args[0].args
    rhs = truism.args[1]
    rhs_len = len(rhs)
    rhs_top = rhs[rhs_len - 1 : rhs_len - ext_bits]
    if is_true(rhs_top == 0):
        # the extension bits carry no information on either side
        rhs_rest = rhs[rhs_len - ext_bits - 1 : 0]
        return truism.make_like(truism.op, (inner, rhs_rest))
    return truism
@staticmethod
def _balance_SignExt(truism):
    """
    Drop a SignExt on the lhs when the VSA backend proves the lhs's extension
    bits and the rhs's top bits are identical, truncating both sides.
    """
    ext_bits = truism.args[0].args[0]
    rhs = truism.args[1]
    rhs_len = len(rhs)
    lhs_top = truism.args[0][rhs_len - 1 : rhs_len - ext_bits]
    rhs_top = rhs[rhs_len - 1 : rhs_len - ext_bits]
    # TODO: what if this is a set value, but *not* the same as other_side
    if backends.vsa.identical(lhs_top, rhs_top):
        inner = truism.args[0].args[1]
        rhs_rest = rhs[rhs_len - ext_bits - 1 : 0]
        return truism.make_like(truism.op, (inner, rhs_rest))
    return truism
@staticmethod
def _balance_Extract(truism):
    """
    Balance an Extract on the lhs. When the bits of `inner` outside the
    extracted [high:low] window are provably zero, the Extract can be removed
    by zero-padding the rhs back to `inner`'s width. As a final fallback, an
    Extract down to bit 0 compared (unsigned) against a concrete value is
    removed by zero-extending the rhs.
    """
    high, low, inner = truism.args[0].args
    inner_size = len(inner)
    # bits above the extracted window, if any
    if high < inner_size - 1:
        left_msb = inner[inner_size - 1 : high + 1]
        left_msb_zero = is_true(left_msb == 0)
    else:
        left_msb = None
        left_msb_zero = None
    # bits below the extracted window, if any.
    # BUGFIX: this slice was inner[high - 1 : 0], which overlapped the
    # extracted bits and produced a wrong-width rhs Concat below; the
    # discarded low part is bits [low - 1 : 0].
    if low > 0:
        left_lsb = inner[low - 1 : 0]
        left_lsb_zero = is_true(left_lsb == 0)
    else:
        left_lsb = None
        left_lsb_zero = None
    if left_msb_zero and left_lsb_zero:
        new_left = inner
        new_right = _all_operations.Concat(BVV(0, len(left_msb)), truism.args[1], BVV(0, len(left_lsb)))
        return truism.make_like(truism.op, (new_left, new_right))
    elif left_msb_zero:
        new_left = inner
        new_right = _all_operations.Concat(BVV(0, len(left_msb)), truism.args[1])
        return truism.make_like(truism.op, (new_left, new_right))
    elif left_lsb_zero:
        new_left = inner
        new_right = _all_operations.Concat(truism.args[1], BVV(0, len(left_lsb)))
        return truism.make_like(truism.op, (new_left, new_right))
    if low == 0 and truism.args[1].op == "BVV" and truism.op not in {"SGE", "SLE", "SGT", "SLT"}:
        # single-valued rhs value with an unsigned operator:
        # eliminate Extract on lhs and zero-extend the value on rhs
        new_left = inner
        new_right = _all_operations.ZeroExt(inner.size() - truism.args[1].size(), truism.args[1])
        return truism.make_like(truism.op, (new_left, new_right))
    return truism
@staticmethod
def _balance___and__(truism):
    """
    Remove an `& mask` on the lhs when the mask is a contiguous run of low
    ones (2**k - 1) that provably cannot change the masked value.
    """
    if len(truism.args[0].args) != 2:
        return truism
    operand, mask = truism.args[0].args
    if mask.op != "BVV":
        return truism
    # verify the concrete mask is of the form 0b0...01...1 and count its ones
    remaining = mask.args[0]
    low_ones = 0
    while remaining != 0:
        if remaining & 1 == 0:
            # a zero inside the run: not a low-ones mask, give up
            return truism
        low_ones += 1
        remaining >>= 1
    if low_ones == 0:
        # mask is 0, so the whole lhs collapses to a zero bitvector
        # (this should probably never happen)
        zero_left = truism.args[0].make_like("BVV", (0, truism.args[0].size()))
        return truism.make_like(truism.op, (zero_left, truism.args[1]))
    if operand.op == "ZeroExt" and operand.args[0] + low_ones == operand.size():
        # ZeroExt(56, a) & 0xff == a if a.size() == 8 -> the mask is a no-op
        return truism.make_like(truism.op, (operand, truism.args[1]))
    return truism
@staticmethod
def _balance_Concat(truism):
    """
    Peel a leading all-zero chunk off a lhs Concat when the rhs's matching
    top bits are also provably zero, shrinking both sides.
    """
    lhs = truism.args[0]
    rhs = truism.args[1]
    total = len(lhs)
    leading = lhs.args[0]
    rhs_top = rhs[total - 1 : total - len(leading)]
    if is_true(leading == 0) and is_true(rhs_top == 0):
        # both sides agree the top chunk is zero: cut it off
        trimmed_lhs = _all_operations.Concat(*lhs.args[1:])
        trimmed_rhs = rhs[total - len(leading) - 1 : 0]
        return truism.make_like(truism.op, (trimmed_lhs, trimmed_rhs))
    else:
        # TODO: handle non-zero single-valued cases
        return truism
def _balance___lshift__(self, truism):
    """
    Undo a left shift on the lhs: when the shift amount is a single concrete
    value and the rhs's low `shift_amount` bits are provably zero, rewrite
    `(expr << k) <op> rhs` as `expr <op> (rhs >> k)`.
    """
    lhs = truism.args[0]
    rhs = truism.args[1]
    shift_amount_expr = lhs.args[1]
    expr = lhs.args[0]
    # the shift amount must evaluate to exactly one value
    shift_amount_values = self._helper.eval(shift_amount_expr, 2)
    if len(shift_amount_values) != 1:
        return truism
    shift_amount = shift_amount_values[0]
    # the bits shifted out of rhs by the inverse shift must be zero
    rhs_lower = _all_operations.Extract(shift_amount - 1, 0, rhs)
    rhs_lower_values = self._helper.eval(rhs_lower, 2)
    if len(rhs_lower_values) == 1 and rhs_lower_values[0] == 0:
        # we can remove the __lshift__
        # NOTE(review): `>>` is the arithmetic shift on BVs; a logical shift
        # (LShR) looks intended here -- confirm before relying on sign bits
        return truism.make_like(truism.op, (expr, rhs >> shift_amount))
    return truism
def _balance_If(self, truism):
    """
    Balance an If on the lhs by testing which branch can satisfy the truism
    under VSA. If only one branch can, queue the (possibly inverted) condition
    as a new truism and replace the If with that branch; if neither can, the
    truism is unsatisfiable.
    """
    condition, true_expr, false_expr = truism.args[0].args
    try:
        # build "<branch> <op> rhs" for each branch of the If
        if truism.op.startswith("__"):
            true_condition = getattr(operator, truism.op)(true_expr, truism.args[1])
            false_condition = getattr(operator, truism.op)(false_expr, truism.args[1])
        else:
            true_condition = getattr(_all_operations, truism.op)(true_expr, truism.args[1])
            false_condition = getattr(_all_operations, truism.op)(false_expr, truism.args[1])
    except ClaripyOperationError:
        # the condition was probably a Not (TODO)
        return truism
    can_true = backends.vsa.has_true(true_condition)
    can_false = backends.vsa.has_true(false_condition)
    must_true = backends.vsa.is_true(true_condition)
    must_false = backends.vsa.is_true(false_condition)
    if can_true and can_false:
        # always satisfiable: no information to extract
        return truism
    elif not (can_true or can_false):
        # neither are satisfiable. This truism is fucked
        raise ClaripyBalancerUnsatError()
    elif must_true or (can_true and not can_false):
        # it will always be true, so the condition must hold
        self._queue_truism(condition)
        return truism.make_like(truism.op, (true_expr, truism.args[1]))
    elif must_false or (can_false and not can_true):
        # it will always be false, so the inverted condition must hold
        self._queue_truism(self._invert_comparison(condition))
        return truism.make_like(truism.op, (false_expr, truism.args[1]))
#
# Constraint handlers
#
def _handle(self, truism):
    """
    Dispatch a balanced truism to its op-specific `_handle_<op>` constraint
    handler; raise if the truism is provably false.
    """
    l.debug("Handling %s", truism)
    if is_false(truism):
        raise ClaripyBalancerUnsatError()
    if self._cardinality(truism.args[0]) == 1:
        # the lhs is already single-valued: nothing left to constrain
        return
    handler = getattr(self, "_handle_%s" % truism.op, None)
    if handler is None:
        l.debug("No handler for operation %s", truism.op)
        return
    handler(truism)
def _handle_comparison(self, truism):
    """
    Handles all comparisons: derives an upper or lower bound for the lhs from
    the rhs's VSA range and records it via _add_upper_bound/_add_lower_bound.
    """
    # print("COMP:", truism)
    is_lt, is_equal, is_unsigned = self.comparison_info[truism.op]
    size = len(truism.args[0])
    # representable extremes for this width and signedness
    int_max = 2**size - 1 if is_unsigned else 2 ** (size - 1) - 1
    int_min = -(2 ** (size - 1))
    left_min = self._min(truism.args[0], signed=not is_unsigned)
    left_max = self._max(truism.args[0], signed=not is_unsigned)
    right_min = self._min(truism.args[1], signed=not is_unsigned)
    right_max = self._max(truism.args[1], signed=not is_unsigned)
    # strict comparisons tighten the bound by one
    bound_max = right_max if is_equal else (right_max - 1 if is_lt else right_max + 1)
    bound_min = right_min if is_equal else (right_min - 1 if is_lt else right_min + 1)
    if is_lt and bound_max < int_min:
        # if the bound max is negative and we're unsigned less than, we're fucked
        raise ClaripyBalancerUnsatError()
    elif not is_lt and bound_min > int_max:
        # if the bound min is too big, we're fucked
        raise ClaripyBalancerUnsatError()
    current_min = int_min
    current_max = int_max
    if is_lt:
        current_max = min(int_max, left_max, bound_max)
        self._add_upper_bound(truism.args[0], current_max)
    else:
        current_min = max(int_min, left_min, bound_min)
        self._add_lower_bound(truism.args[0], current_min)
def _handle___eq__(self, truism):
    """
    Propagate the bounds implied by an equality: against a concrete rhs, the
    lhs is bounded by the rhs's range; against a multivalued rhs, both sides
    are bounded by the range of their intersection.
    """
    lhs, rhs = truism.args
    if rhs.cardinality == 1:
        mn, mx = self._range(rhs)
        self._add_upper_bound(lhs, mx)
        self._add_lower_bound(lhs, mn)
    else:
        common = self._same_bound_bv(lhs.intersection(rhs))
        mn, mx = self._range(common)
        self._add_upper_bound(lhs, mx)
        self._add_upper_bound(rhs, mx)
        self._add_lower_bound(lhs, mn)
        self._add_lower_bound(rhs, mn)
def _handle___ne__(self, truism):
    """
    A disequality against a concrete extreme value tightens the other side by
    one: `x != 0` raises the lower bound, `x != max` lowers the upper bound.
    """
    lhs, rhs = truism.args
    if rhs.cardinality != 1:
        return
    val = self._helper.eval(rhs, 1)[0]
    max_int = vsa.StridedInterval.max_int(len(rhs))
    if val == 0:
        self._add_lower_bound(lhs, val + 1)
    elif val in (max_int, -1):
        self._add_upper_bound(lhs, max_int - 1)
def _handle_If(self, truism):
    """
    An If that must be true forces a branch: if the false branch is provably
    false the condition must hold; if the true branch is provably false the
    inverted condition must hold.
    """
    condition, true_branch, false_branch = truism.args
    if is_false(false_branch):
        self._queue_truism(condition)
    elif is_false(true_branch):
        self._queue_truism(self._invert_comparison(condition))
# All (un)signed comparison operators share the generic bounds-propagation
# handler; comparison_info supplies their per-op semantics.
_handle___lt__ = _handle_comparison
_handle___le__ = _handle_comparison
_handle___gt__ = _handle_comparison
_handle___ge__ = _handle_comparison
_handle_ULT = _handle_comparison
_handle_ULE = _handle_comparison
_handle_UGT = _handle_comparison
_handle_UGE = _handle_comparison
_handle_SLT = _handle_comparison
_handle_SLE = _handle_comparison
_handle_SGT = _handle_comparison
_handle_SGE = _handle_comparison
def is_true(a):
    """Return True if the VSA backend can prove `a` is true."""
    return backends.vsa.is_true(a)


def is_false(a):
    """Return True if the VSA backend can prove `a` is false."""
    return backends.vsa.is_false(a)
from .errors import ClaripyBalancerError, ClaripyBalancerUnsatError, ClaripyOperationError, BackendError
from .ast.base import Base
from .ast.bool import Bool
from .ast.bv import BVV, BVS, BV
from . import _all_operations
from .backend_manager import backends
from . import vsa
from .operations import opposites, commutative_operations

View File

@@ -0,0 +1,471 @@
import functools
import numbers
from .errors import ClaripyOperationError, ClaripyTypeError, ClaripyZeroDivisionError
from .backend_object import BackendObject
from . import debug as _d
def compare_bits(f):
    """
    Decorator for binary BVV operations: rejects zero-width operands and
    operands of mismatched widths before calling the wrapped operation.
    """

    @functools.wraps(f)
    def guard(self, o):
        if 0 in (self.bits, o.bits):
            raise ClaripyTypeError("The operation is not allowed on zero-length bitvectors.")
        if self.bits != o.bits:
            raise ClaripyTypeError("bitvectors are differently-sized (%d and %d)" % (self.bits, o.bits))
        return f(self, o)

    return guard
def compare_bits_0_length(f):
    """
    Decorator for binary BVV operations that tolerate zero-width operands
    (equality/inequality): only requires the operand widths to match.
    """

    @functools.wraps(f)
    def guard(self, o):
        if self.bits != o.bits:
            raise ClaripyTypeError("bitvectors are differently-sized (%d and %d)" % (self.bits, o.bits))
        return f(self, o)

    return guard
def normalize_types(f):
    """
    Decorator that promotes plain-number operands (on either side) to BVVs of
    the other operand's width, returning NotImplemented if normalization fails
    so Python can try the reflected operation.
    """

    @functools.wraps(f)
    def helper(self, o):
        if _d._DEBUG:
            # z3 objects must never leak into the concrete backend
            if hasattr(o, "__module__") and o.__module__ == "z3":
                raise ValueError("this should no longer happen")
        if isinstance(o, numbers.Number):
            o = BVV(o, self.bits)
        if isinstance(self, numbers.Number):
            self = BVV(self, self.bits)
        if not isinstance(self, BVV) or not isinstance(o, BVV):
            return NotImplemented
        return f(self, o)

    return helper
class BVV(BackendObject):
    """
    A concrete bitvector value: an unsigned integer stored modulo 2**bits.

    Arithmetic wraps modulo 2**bits. The Python comparison operators
    (`<`, `<=`, `>`, `>=`) compare the unsigned values; the signed variants
    are the module-level SLT/SLE/SGT/SGE functions.
    """

    # bits: width in bits; _value: unsigned representation; mod: cached 2**bits
    __slots__ = ["bits", "_value", "mod"]

    def __init__(self, value, bits):
        """
        :param value: the integer value (masked into range by the `value` setter)
        :param bits:  the non-negative width of the bitvector, in bits
        """
        if _d._DEBUG:
            if bits < 0 or not isinstance(bits, numbers.Number) or not isinstance(value, numbers.Number):
                raise ClaripyOperationError("BVV needs a non-negative length and an int value")
            if bits == 0 and value not in (0, "", None):
                raise ClaripyOperationError("Zero-length BVVs cannot have a meaningful value.")
        self.bits = bits
        self._value = 0
        self.mod = 1 << bits
        # assignment goes through the masking `value` setter below
        self.value = value

    def __hash__(self):
        # NOTE(review): hashes str(value) rather than value itself -- looks
        # historical; confirm before changing, since it affects dict/set use
        return hash((str(self.value), self.bits))

    def __getstate__(self):
        return (self.bits, self.value)

    def __setstate__(self, s):
        # restore mod before value so the masking setter works
        self.bits = s[0]
        self.mod = 1 << self.bits
        self.value = s[1]

    @property
    def value(self):
        """The unsigned integer value of this bitvector."""
        return self._value

    @value.setter
    def value(self, v):
        # mask into [0, 2**bits)
        self._value = v & (self.mod - 1)

    @property
    def signed(self):
        """The two's-complement (signed) interpretation of this bitvector."""
        return self._value if self._value < self.mod // 2 else self._value % (self.mod // 2) - (self.mod // 2)

    @signed.setter
    def signed(self, v):
        # NOTE(review): `v % -self.mod` yields a result in (-mod, 0], leaving
        # _value negative and breaking the unsigned invariant kept by the
        # `value` setter -- confirm whether this is intended
        self._value = v % -self.mod

    #
    # Arithmetic stuff (all results wrap modulo 2**bits via the constructor)
    #

    @normalize_types
    @compare_bits
    def __add__(self, o):
        return BVV(self.value + o.value, self.bits)

    @normalize_types
    @compare_bits
    def __sub__(self, o):
        return BVV(self.value - o.value, self.bits)

    @normalize_types
    @compare_bits
    def __mul__(self, o):
        return BVV(self.value * o.value, self.bits)

    @normalize_types
    @compare_bits
    def __mod__(self, o):
        # unsigned remainder; signed remainder is the module-level SMod
        if o.value == 0:
            raise ClaripyZeroDivisionError()
        return BVV(self.value % o.value, self.bits)

    @normalize_types
    @compare_bits
    def __floordiv__(self, o):
        # unsigned division; signed division is the module-level SDiv
        if o.value == 0:
            raise ClaripyZeroDivisionError()
        return BVV(self.value // o.value, self.bits)

    def __truediv__(self, other):
        return self // other  # decline to implicitly have anything to do with floats

    #
    # Reverse arithmetic stuff
    #

    @normalize_types
    @compare_bits
    def __radd__(self, o):
        return BVV(self.value + o.value, self.bits)

    @normalize_types
    @compare_bits
    def __rsub__(self, o):
        return BVV(o.value - self.value, self.bits)

    @normalize_types
    @compare_bits
    def __rmul__(self, o):
        return BVV(self.value * o.value, self.bits)

    @normalize_types
    @compare_bits
    def __rmod__(self, o):
        if self.value == 0:
            raise ClaripyZeroDivisionError()
        return BVV(o.value % self.value, self.bits)

    @normalize_types
    @compare_bits
    def __rfloordiv__(self, o):
        if self.value == 0:
            raise ClaripyZeroDivisionError()
        return BVV(o.value // self.value, self.bits)

    def __rtruediv__(self, o):
        return self.__rfloordiv__(o)

    #
    # Bit operations
    #

    @normalize_types
    @compare_bits
    def __and__(self, o):
        return BVV(self.value & o.value, self.bits)

    @normalize_types
    @compare_bits
    def __or__(self, o):
        return BVV(self.value | o.value, self.bits)

    @normalize_types
    @compare_bits
    def __xor__(self, o):
        return BVV(self.value ^ o.value, self.bits)

    @normalize_types
    @compare_bits
    def __lshift__(self, o):
        return BVV(self.value << o.value, self.bits)

    @normalize_types
    @compare_bits
    def __rshift__(self, o):
        # arithmetic shift uses the signed version; logical shift is LShR
        return BVV(self.signed >> o.value, self.bits)

    def __invert__(self):
        # bitwise NOT via XOR with the all-ones mask
        return BVV(self.value ^ self.mod - 1, self.bits)

    def __neg__(self):
        return BVV((-self.value) % self.mod, self.bits)

    #
    # Reverse bit operations
    #

    @normalize_types
    @compare_bits
    def __rand__(self, o):
        return BVV(self.value & o.value, self.bits)

    @normalize_types
    @compare_bits
    def __ror__(self, o):
        return BVV(self.value | o.value, self.bits)

    @normalize_types
    @compare_bits
    def __rxor__(self, o):
        return BVV(self.value ^ o.value, self.bits)

    @normalize_types
    @compare_bits
    def __rlshift__(self, o):
        # NOTE(review): uses self.signed as the shift amount -- confirm the
        # reflected-operand convention here
        return BVV(o.value << self.signed, self.bits)

    @normalize_types
    @compare_bits
    def __rrshift__(self, o):
        # NOTE(review): signed >> signed -- confirm the reflected-operand
        # convention here
        return BVV(o.signed >> self.signed, self.bits)

    #
    # Boolean stuff
    #

    # eq/ne use compare_bits_0_length: zero-width BVVs may be compared

    @normalize_types
    @compare_bits_0_length
    def __eq__(self, o):
        return self.value == o.value

    @normalize_types
    @compare_bits_0_length
    def __ne__(self, o):
        return self.value != o.value

    @normalize_types
    @compare_bits
    def __lt__(self, o):
        # unsigned comparison; signed is the module-level SLT
        return self.value < o.value

    @normalize_types
    @compare_bits
    def __gt__(self, o):
        return self.value > o.value

    @normalize_types
    @compare_bits
    def __le__(self, o):
        return self.value <= o.value

    @normalize_types
    @compare_bits
    def __ge__(self, o):
        return self.value >= o.value

    #
    # Conversions
    #

    def size(self):
        """Return the width of this bitvector in bits."""
        return self.bits

    def __repr__(self):
        return "BVV(0x%x, %d)" % (self.value, self.bits)
#
# External stuff
#
def BitVecVal(value, bits):
    """Z3-style alias for constructing a concrete bitvector value."""
    return BVV(value, bits)
def ZeroExt(num, o):
    """Extend `o` by `num` high zero bits; the unsigned value is unchanged."""
    return BVV(o.value, o.bits + num)


def SignExt(num, o):
    """Extend `o` by `num` high bits replicating the sign bit; the signed value is unchanged."""
    return BVV(o.signed, o.bits + num)
def Extract(f, t, o):
    """
    Extract bits `f` down to `t` (inclusive, 0-indexed from the LSB) of `o`.

    :param f: the high bit index
    :param t: the low bit index
    :param o: the BVV to extract from
    :returns: a BVV of width f - t + 1
    """
    # mask with exactly f - t + 1 ones. The previous mask used f + 2 - t (one
    # bit too wide) and silently relied on the BVV constructor re-masking.
    return BVV((o.value >> t) & ((1 << (f + 1 - t)) - 1), f + 1 - t)
def Concat(*args):
    """
    Concatenate concrete bitvectors; the first argument becomes the
    most-significant bits of the result.
    """
    width = 0
    acc = 0
    for piece in args:
        acc = (acc << piece.bits) | piece.value
        width += piece.bits
    return BVV(acc, width)
def RotateRight(self, bits):
    """Rotate `self` right by `bits` positions (modulo the width)."""
    bits_smaller = bits % self.size()
    # logical right shift of the kept bits, OR the wrapped-around low bits
    return LShR(self, bits_smaller) | (self << (self.size() - bits_smaller))


def RotateLeft(self, bits):
    """Rotate `self` left by `bits` positions (modulo the width)."""
    bits_smaller = bits % self.size()
    return (self << bits_smaller) | (LShR(self, (self.size() - bits_smaller)))
def Reverse(a):
    """
    Reverse the byte order of `a` (endianness swap). Only byte-sized
    bitvectors are supported; a single byte is returned unchanged.

    :raises ClaripyOperationError: if the width is not a multiple of 8
    """
    size = a.size()
    if size == 8:
        return a
    elif size % 8 != 0:
        raise ClaripyOperationError("can't reverse non-byte sized bitvectors")
    else:
        value = a.value
        out = 0
        # fast paths for the common machine-word widths
        if size == 64:
            out = _reverse_64(value)
        elif size == 32:
            out = _reverse_32(value)
        elif size == 16:
            out = _reverse_16(value)
        else:
            # generic path: move byte i to the mirrored byte position
            for i in range(0, size, 8):
                out |= ((value & (0xFF << i)) >> i) << (size - 8 - i)
        return BVV(out, size)

    # the RIGHT way to do it:
    # return BVV(int(("%x" % a.value).rjust(size/4, '0').decode('hex')[::-1].encode('hex'), 16), size)
def _reverse_16(v):
return ((v & 0xFF) << 8) | ((v & 0xFF00) >> 8)
def _reverse_32(v):
return ((v & 0xFF) << 24) | ((v & 0xFF00) << 8) | ((v & 0xFF0000) >> 8) | ((v & 0xFF000000) >> 24)
def _reverse_64(v):
return (
((v & 0xFF) << 56)
| ((v & 0xFF00) << 40)
| ((v & 0xFF0000) << 24)
| ((v & 0xFF000000) << 8)
| ((v & 0xFF00000000) >> 8)
| ((v & 0xFF0000000000) >> 24)
| ((v & 0xFF000000000000) >> 40)
| ((v & 0xFF00000000000000) >> 56)
)
# Unsigned comparisons: compare the raw (unsigned) values.


@normalize_types
@compare_bits
def ULT(self, o):
    """Unsigned less-than."""
    return self.value < o.value


@normalize_types
@compare_bits
def UGT(self, o):
    """Unsigned greater-than."""
    return self.value > o.value


@normalize_types
@compare_bits
def ULE(self, o):
    """Unsigned less-than-or-equal."""
    return self.value <= o.value


@normalize_types
@compare_bits
def UGE(self, o):
    """Unsigned greater-than-or-equal."""
    return self.value >= o.value


# Signed comparisons: compare the two's-complement interpretations.


@normalize_types
@compare_bits
def SLT(self, o):
    """Signed less-than."""
    return self.signed < o.signed


@normalize_types
@compare_bits
def SGT(self, o):
    """Signed greater-than."""
    return self.signed > o.signed


@normalize_types
@compare_bits
def SLE(self, o):
    """Signed less-than-or-equal."""
    return self.signed <= o.signed


@normalize_types
@compare_bits
def SGE(self, o):
    """Signed greater-than-or-equal."""
    return self.signed >= o.signed
@normalize_types
@compare_bits
def SMod(self, o):
    """
    Signed remainder with C semantics: the result takes the sign of the
    dividend (unlike Python's `%`, which takes the sign of the divisor).

    :raises ClaripyZeroDivisionError: if `o` is zero
    """
    # compute the remainder like the % operator in C
    a = self.signed
    b = o.signed
    if b == 0:
        raise ClaripyZeroDivisionError()
    # truncating (round-toward-zero) division, then recover the remainder
    division_result = a // b if a * b > 0 else (a + (-a % b)) // b
    val = a - division_result * b
    return BVV(val, self.bits)
@normalize_types
@compare_bits
def SDiv(self, o):
    """
    Signed division that rounds toward zero (C semantics), unlike Python's
    floor division.

    :raises ClaripyZeroDivisionError: if `o` is zero
    """
    # compute the round towards 0 division
    a = self.signed
    b = o.signed
    if b == 0:
        raise ClaripyZeroDivisionError()
    val = a // b if a * b > 0 else (a + (-a % b)) // b
    return BVV(val, self.bits)
#
# Pure boolean stuff
#
def BoolV(b):
    """Identity: concrete backend booleans are plain Python bools."""
    return b


def And(*args):
    """Concrete conjunction of all arguments (True when called with none)."""
    return all(args)


def Or(*args):
    """Concrete disjunction of all arguments (False when called with none)."""
    return any(args)


def Not(b):
    """Concrete boolean negation."""
    return not b
@normalize_types
def normalizer(*args):
    """
    Promote If's two branch operands to matching BVVs.
    (normalize_types wraps a (self, o) pair, so this handles exactly two args.)
    """
    return args


def If(c, t, f):
    """Concrete ternary: return `t` when condition `c` is truthy, else `f`."""
    t, f = normalizer(t, f)  # pylint:disable=unbalanced-tuple-unpacking
    if c:
        return t
    else:
        return f
@normalize_types
@compare_bits
def LShR(a, b):
    """Logical (zero-filling) right shift, as opposed to BVV.__rshift__'s arithmetic shift."""
    return BVV(a.value >> b.value, a.bits)

View File

@@ -0,0 +1,12 @@
# to enable debug mode, please set _DEBUG to True via the set_debug method.
_DEBUG = True


def set_debug(enabled):
    """
    Enable or disable the debug mode. In debug mode, a bunch of extra checks in claripy will be executed. You'll want to
    disable debug mode if you are running performance critical code.

    :param enabled: True to enable the extra checks, False to disable them.
    """
    global _DEBUG
    _DEBUG = enabled

View File

@@ -0,0 +1,83 @@
# Root of the claripy exception hierarchy.
class ClaripyError(Exception):
    pass


# Raised when a set of constraints is unsatisfiable.
class UnsatError(ClaripyError):
    pass


class ClaripyFrontendError(ClaripyError):
    pass


class ClaripySerializationError(ClaripyError):
    pass


# Root for backend-specific failures.
class BackendError(ClaripyError):
    pass


class BackendUnsupportedError(BackendError):
    pass


class ClaripyZ3Error(ClaripyError):
    pass


class ClaripyBackendVSAError(BackendError):
    pass


class MissingSolverError(ClaripyError):
    pass


class ClaripySolverInterruptError(ClaripyError):
    pass


#
# AST errors
#


class ClaripyASTError(ClaripyError):
    pass


class ClaripyBalancerError(ClaripyASTError):
    pass


# Raised by the balancer when a truism is provably unsatisfiable.
class ClaripyBalancerUnsatError(ClaripyBalancerError):
    pass


class ClaripyTypeError(ClaripyASTError):
    pass


class ClaripyValueError(ClaripyASTError):
    pass


class ClaripySizeError(ClaripyASTError):
    pass


class ClaripyOperationError(ClaripyASTError):
    pass


class ClaripyReplacementError(ClaripyASTError):
    pass


class ClaripyRecursionError(ClaripyOperationError):
    pass


# Dual-inherits ZeroDivisionError so generic numeric handlers also catch it.
class ClaripyZeroDivisionError(ClaripyOperationError, ZeroDivisionError):
    pass

View File

@@ -0,0 +1,482 @@
import decimal
import functools
import math
import struct
from decimal import Decimal
from enum import Enum
from .errors import ClaripyOperationError
from .backend_object import BackendObject
def compare_sorts(f):
    """
    Decorator for binary FPV operations: rejects operands whose floating-point
    sorts differ before calling the wrapped operation.
    """

    @functools.wraps(f)
    def guard(self, o):
        if self.sort != o.sort:
            raise TypeError(f"FPVs are differently-sorted ({self.sort} and {o.sort})")
        return f(self, o)

    return guard
def normalize_types(f):
    """
    Decorator that promotes a bare float second operand to an FPV of the first
    operand's sort, and insists both operands end up as FPVs.
    """

    @functools.wraps(f)
    def helper(self, o):
        if isinstance(o, float):
            o = FPV(o, self.sort)
        if not isinstance(self, FPV) or not isinstance(o, FPV):
            raise TypeError("must have two FPVs")
        return f(self, o)

    return helper
class RM(Enum):
    """IEEE-754 rounding modes; values are the claripy op-name strings."""

    # see https://en.wikipedia.org/wiki/IEEE_754#Rounding_rules
    RM_NearestTiesEven = "RM_RNE"
    RM_NearestTiesAwayFromZero = "RM_RNA"
    RM_TowardsZero = "RM_RTZ"
    RM_TowardsPositiveInf = "RM_RTP"
    RM_TowardsNegativeInf = "RM_RTN"

    @staticmethod
    def default():
        """Return the default rounding mode (round to nearest, ties to even)."""
        return RM.RM_NearestTiesEven

    def pydecimal_equivalent_rounding_mode(self):
        """Map this mode to the corresponding `decimal` module rounding constant."""
        return {
            RM.RM_TowardsPositiveInf: decimal.ROUND_CEILING,
            RM.RM_TowardsNegativeInf: decimal.ROUND_FLOOR,
            RM.RM_TowardsZero: decimal.ROUND_DOWN,
            RM.RM_NearestTiesEven: decimal.ROUND_HALF_EVEN,
            RM.RM_NearestTiesAwayFromZero: decimal.ROUND_UP,
        }[self]
# Module-level aliases so rounding modes can be referenced without the RM namespace.
RM_NearestTiesEven = RM.RM_NearestTiesEven
RM_NearestTiesAwayFromZero = RM.RM_NearestTiesAwayFromZero
RM_TowardsZero = RM.RM_TowardsZero
RM_TowardsPositiveInf = RM.RM_TowardsPositiveInf
RM_TowardsNegativeInf = RM.RM_TowardsNegativeInf
class FSort:
    """
    A floating-point sort: an exponent width and a significand (mantissa)
    width, plus a display name. Equality ignores the name, so the hash must
    too (see __hash__).
    """

    def __init__(self, name, exp, mantissa):
        """
        :param name:     display name of the sort (not part of equality)
        :param exp:      exponent width in bits
        :param mantissa: significand width in bits (including the hidden bit)
        """
        self.name = name
        self.exp = exp
        self.mantissa = mantissa

    def __eq__(self, other):
        # name is deliberately excluded: sorts are equal iff their layouts match
        return self.exp == other.exp and self.mantissa == other.mantissa

    def __repr__(self):
        return self.name

    def __hash__(self):
        # BUGFIX: must not include self.name -- __eq__ ignores it, and equal
        # objects are required to have equal hashes
        return hash((self.exp, self.mantissa))

    @property
    def length(self):
        """Total width of the sort in bits."""
        return self.exp + self.mantissa

    @staticmethod
    def from_size(n):
        """Return the standard sort of total width `n` (32 or 64 bits)."""
        if n == 32:
            return FSORT_FLOAT
        elif n == 64:
            return FSORT_DOUBLE
        else:
            raise ClaripyOperationError(f"{n} is not a valid FSort size")

    @staticmethod
    def from_params(exp, mantissa):
        """Return the standard sort matching the given exponent/mantissa widths."""
        if exp == 8 and mantissa == 24:
            return FSORT_FLOAT
        elif exp == 11 and mantissa == 53:
            return FSORT_DOUBLE
        else:
            raise ClaripyOperationError("unrecognized FSort params")
# The two IEEE-754 sorts claripy supports: single precision (8-bit exponent,
# 24-bit significand) and double precision (11-bit exponent, 53-bit significand).
FSORT_FLOAT = FSort("FLOAT", 8, 24)
FSORT_DOUBLE = FSort("DOUBLE", 11, 53)
class FPV(BackendObject):
    """
    A concrete floating-point value: a Python float tagged with an FSort
    (FSORT_FLOAT or FSORT_DOUBLE). Arithmetic and comparisons delegate to the
    underlying float after normalizing/sort-checking the operands.
    """

    __slots__ = ["sort", "value"]

    def __init__(self, value, sort):
        """
        :param value: a Python float
        :param sort:  FSORT_FLOAT or FSORT_DOUBLE
        """
        if not isinstance(value, float) or sort not in {FSORT_FLOAT, FSORT_DOUBLE}:
            raise ClaripyOperationError("FPV needs a sort (FSORT_FLOAT or FSORT_DOUBLE) and a float value")
        self.value = value
        self.sort = sort

    def __hash__(self):
        return hash((self.value, self.sort))

    def __getstate__(self):
        return self.value, self.sort

    def __setstate__(self, st):
        self.value, self.sort = st

    def __abs__(self):
        return FPV(abs(self.value), self.sort)

    def __neg__(self):
        return FPV(-self.value, self.sort)

    def fpSqrt(self):
        """Return the square root of this value (same sort)."""
        return FPV(math.sqrt(self.value), self.sort)

    @normalize_types
    @compare_sorts
    def __add__(self, o):
        return FPV(self.value + o.value, self.sort)

    @normalize_types
    @compare_sorts
    def __sub__(self, o):
        return FPV(self.value - o.value, self.sort)

    @normalize_types
    @compare_sorts
    def __mul__(self, o):
        return FPV(self.value * o.value, self.sort)

    @normalize_types
    @compare_sorts
    def __mod__(self, o):
        return FPV(self.value % o.value, self.sort)

    @normalize_types
    @compare_sorts
    def __truediv__(self, o):
        try:
            return FPV(self.value / o.value, self.sort)
        except ZeroDivisionError:
            # IEEE-style: x / 0 is a signed infinity; the sign of the product's
            # string representation determines the sign of the result
            if str(self.value * o.value)[0] == "-":
                return FPV(float("-inf"), self.sort)
            else:
                return FPV(float("inf"), self.sort)

    def __floordiv__(self, other):  # decline to involve integers in this floating point process
        return self.__truediv__(other)

    #
    # Reverse arithmetic stuff
    #

    @normalize_types
    @compare_sorts
    def __radd__(self, o):
        return FPV(o.value + self.value, self.sort)

    @normalize_types
    @compare_sorts
    def __rsub__(self, o):
        return FPV(o.value - self.value, self.sort)

    @normalize_types
    @compare_sorts
    def __rmul__(self, o):
        return FPV(o.value * self.value, self.sort)

    @normalize_types
    @compare_sorts
    def __rmod__(self, o):
        return FPV(o.value % self.value, self.sort)

    @normalize_types
    @compare_sorts
    def __rtruediv__(self, o):
        try:
            return FPV(o.value / self.value, self.sort)
        except ZeroDivisionError:
            if str(o.value * self.value)[0] == "-":
                return FPV(float("-inf"), self.sort)
            else:
                return FPV(float("inf"), self.sort)

    def __rfloordiv__(self, other):  # decline to involve integers in this floating point process
        return self.__rtruediv__(other)

    #
    # Boolean stuff (compares the underlying float values)
    #

    @normalize_types
    @compare_sorts
    def __eq__(self, o):
        return self.value == o.value

    @normalize_types
    @compare_sorts
    def __ne__(self, o):
        return self.value != o.value

    @normalize_types
    @compare_sorts
    def __lt__(self, o):
        return self.value < o.value

    @normalize_types
    @compare_sorts
    def __gt__(self, o):
        return self.value > o.value

    @normalize_types
    @compare_sorts
    def __le__(self, o):
        return self.value <= o.value

    @normalize_types
    @compare_sorts
    def __ge__(self, o):
        return self.value >= o.value

    def __repr__(self):
        return f"FPV({self.value:f}, {self.sort})"
def fpToFP(a1, a2, a3=None):
    """
    Returns a FP AST and has three signatures:

    fpToFP(ubvv, sort)
        Returns a FP AST whose value is the same as the unsigned BVV `a1`
        and whose sort is `a2`. (This reinterprets the BVV's bit pattern as
        an IEEE754 float via struct pack/unpack.)

    fpToFP(rm, fpv, sort)
        Returns a FP AST whose value is the same as the floating point `a2`
        and whose sort is `a3`.

    fpToFP(rm, sbvv, sort)
        Returns a FP AST whose value is the same as the signed BVV `a2` and
        whose sort is `a3`.
    """
    if isinstance(a1, BVV) and isinstance(a2, FSort):
        # bit-pattern reinterpretation: BVV bits -> IEEE754 float
        sort = a2
        if sort == FSORT_FLOAT:
            pack, unpack = "I", "f"
        elif sort == FSORT_DOUBLE:
            pack, unpack = "Q", "d"
        else:
            raise ClaripyOperationError("unrecognized float sort")
        try:
            packed = struct.pack("<" + pack, a1.value)
            (unpacked,) = struct.unpack("<" + unpack, packed)
        except OverflowError as e:
            # struct.pack sometimes overflows
            raise ClaripyOperationError("OverflowError: " + str(e)) from e
        return FPV(unpacked, sort)
    elif isinstance(a1, RM) and isinstance(a2, FPV) and isinstance(a3, FSort):
        # sort conversion of an existing FPV (rounding mode is ignored)
        return FPV(a2.value, a3)
    elif isinstance(a1, RM) and isinstance(a2, BVV) and isinstance(a3, FSort):
        # numeric conversion of a signed BVV (rounding mode is ignored)
        return FPV(float(a2.signed), a3)
    else:
        raise ClaripyOperationError("unknown types passed to fpToFP")
def fpToFPUnsigned(_rm, thing, sort):
    """
    Returns a FP AST whose value is the same as the unsigned BVV `thing` and
    whose sort is `sort`. The rounding mode is ignored for this concrete
    conversion.
    """
    # thing is a BVV
    return FPV(float(thing.value), sort)
def fpToIEEEBV(fpv):
    """
    Interprets the bit-pattern of the IEEE754 floating point number `fpv` as a
    bitvector.

    :return: A BV AST whose bit-pattern is the same as `fpv`
    """
    if fpv.sort == FSORT_FLOAT:
        pack, unpack = "f", "I"
    elif fpv.sort == FSORT_DOUBLE:
        pack, unpack = "d", "Q"
    else:
        raise ClaripyOperationError("unrecognized float sort")
    try:
        # round-trip through little-endian bytes to reinterpret float -> int
        packed = struct.pack("<" + pack, fpv.value)
        (unpacked,) = struct.unpack("<" + unpack, packed)
    except OverflowError as e:
        # struct.pack sometimes overflows
        raise ClaripyOperationError("OverflowError: " + str(e)) from e
    return BVV(unpacked, fpv.sort.length)
def fpFP(sgn, exp, mantissa):
    """
    Concatenates the bitvectors `sgn`, `exp` and `mantissa` and returns the
    corresponding IEEE754 floating point number.

    :return: A FP AST whose bit-pattern is the same as the concatenated
             bitvector
    """
    concatted = Concat(sgn, exp, mantissa)
    # the combined width determines the sort (32 -> float, 64 -> double)
    sort = FSort.from_size(concatted.size())
    if sort == FSORT_FLOAT:
        pack, unpack = "I", "f"
    elif sort == FSORT_DOUBLE:
        pack, unpack = "Q", "d"
    else:
        raise ClaripyOperationError("unrecognized float sort")
    try:
        # reinterpret the integer bit pattern as an IEEE754 float
        packed = struct.pack("<" + pack, concatted.value)
        (unpacked,) = struct.unpack("<" + unpack, packed)
    except OverflowError as e:
        # struct.pack sometimes overflows
        raise ClaripyOperationError("OverflowError: " + str(e)) from e
    return FPV(unpacked, sort)
def fpToSBV(rm, fp, size):
    """
    Convert floating point `fp` to a signed bitvector of width `size`, rounding
    per `rm`; out-of-range and NaN inputs yield a zero BVV.
    """
    try:
        rounding_mode = rm.pydecimal_equivalent_rounding_mode()
        val = int(Decimal(fp.value).to_integral_value(rounding_mode))
        return BVV(val, size)
    except (ValueError, OverflowError):
        # NaN/inf (ValueError) or unrepresentable values collapse to zero
        return BVV(0, size)
    except Exception as ex:
        # NOTE(review): prints to stdout instead of using a logger
        print(f"Unhandled error during floating point rounding! {ex}")
        raise
def fpToUBV(rm, fp, size):
    """
    Convert floating point `fp` to an (intended) unsigned bitvector of width
    `size`, rounding per `rm`; out-of-range and NaN inputs yield a zero BVV.
    """
    # todo: actually make unsigned
    try:
        rounding_mode = rm.pydecimal_equivalent_rounding_mode()
        val = int(Decimal(fp.value).to_integral_value(rounding_mode))
        # NOTE(review): this is an `assert`, so the range check disappears
        # under `python -O`
        assert (
            val & ((1 << size) - 1) == val
        ), f"Rounding produced values outside the BV range! rounding {fp.value} with rounding mode {rm} produced {val}"
        if val < 0:
            # fold negatives into two's-complement representation
            val = (1 << size) + val
        return BVV(val, size)
    except (ValueError, OverflowError):
        return BVV(0, size)
def fpEQ(a, b):
    """True iff floating point `a` compares equal to `b`."""
    return a == b


def fpNE(a, b):
    """True iff floating point `a` does not compare equal to `b`."""
    return a != b


def fpGT(a, b):
    """True iff floating point `a` is strictly greater than `b`."""
    return a > b


def fpGEQ(a, b):
    """True iff floating point `a` is greater than or equal to `b`."""
    return a >= b


def fpLT(a, b):
    """True iff floating point `a` is strictly less than `b`."""
    return a < b


def fpLEQ(a, b):
    """True iff floating point `a` is less than or equal to `b`."""
    return a <= b
def fpAbs(x):
    """
    Return the absolute value of the floating point `x`, e.g.
    fpAbs(FPV(-3.2, FSORT_DOUBLE)) is FPV(3.2, FSORT_DOUBLE).
    """
    return abs(x)


def fpNeg(x):
    """
    Return the additive inverse of the floating point `x`, e.g.
    fpNeg(FPV(3.2, FSORT_DOUBLE)) is FPV(-3.2, FSORT_DOUBLE).
    """
    return -x
def fpSub(_rm, a, b):
    """Return `a - b`; the rounding mode is ignored for concrete values."""
    return a - b


def fpAdd(_rm, a, b):
    """Return `a + b`; the rounding mode is ignored for concrete values."""
    return a + b


def fpMul(_rm, a, b):
    """Return `a * b`; the rounding mode is ignored for concrete values."""
    return a * b


def fpDiv(_rm, a, b):
    """Return `a / b`; the rounding mode is ignored for concrete values."""
    return a / b
def fpIsNaN(x):
    """Return True when the argument is a floating point NaN, else False."""
    return math.isnan(x)
def fpIsInf(x):
    """Return True when the argument is a floating point infinity, else False."""
    return math.isinf(x)
from .bv import BVV, Concat

View File

@@ -0,0 +1,276 @@
#!/usr/bin/env python
import logging
import numbers
l = logging.getLogger("claripy.frontends.frontend")
class Frontend:
    """
    Abstract base class for claripy solver frontends.

    A frontend tracks a set of constraints and answers queries about them
    (eval, min, max, satisfiable, solution, ...). This base class provides
    only the copy/branch plumbing, pickling hooks, and a couple of
    utilities; all query methods must be overridden by subclasses.
    """
    def __init__(self):
        pass
    def __getstate__(self):
        return True  # need to return something so that pickle calls setstate
    def __setstate__(self, s):  # pylint:disable=unused-argument
        return
    def branch(self):
        """Return a copy of this frontend carrying its current state."""
        c = self.blank_copy()
        self._copy(c)
        return c
    def blank_copy(self):
        """Return a fresh, empty frontend of the same concrete class."""
        # __new__ is used deliberately so subclass __init__ side effects are skipped;
        # subclasses initialize the blank instance via _blank_copy instead
        c = self.__class__.__new__(self.__class__)
        self._blank_copy(c)
        return c
    def _blank_copy(self, c):  # pylint:disable=no-self-use,unused-argument
        # subclasses (and mixins) initialize their blank state on `c` here
        return
    def _copy(self, c):  # pylint:disable=no-self-use,unused-argument
        # subclasses (and mixins) copy their state onto `c` here
        return
    #
    # Stuff that should be implemented by subclasses
    #
    def eval_to_ast(self, e, n, extra_constraints=(), exact=None):
        """
        Evaluates expression `e`, returning a list of `n` concrete ASTs.
        :param e: the expression
        :param n: the number of ASTs to return
        :param extra_constraints: extra constraints to consider when performing the evaluation
        :param exact: whether or not to perform an exact evaluation. Ignored by
                      non-approximating backends.
        :return: list of concrete ASTs
        """
        # `ast` is imported at module bottom to avoid a circular import
        return [ast.bv.BVV(v, e.size()) for v in self.eval(e, n, extra_constraints=extra_constraints, exact=exact)]
    def finalize(self):
        raise NotImplementedError("finalize() is not implemented")
    def merge(self, others, merge_conditions, common_ancestor=None):
        raise NotImplementedError("merge() is not implemented")
    def combine(self, others):
        raise NotImplementedError("combine() is not implemented")
    def split(self):
        raise NotImplementedError("split() is not implemented")
    def add(self, constraints):
        """
        Adds constraint(s) to constraints list.
        :param constraints: constraint(s) to add
        :return:
        """
        raise NotImplementedError()
    def simplify(self):
        """
        Simplifies the stored constraints conjunction.
        """
        raise NotImplementedError()
    def check_satisfiability(self, extra_constraints=(), exact=None):
        """
        Checks the satisfiability of stored constraints conjunction.
        :param extra_constraints: extra constraints to consider when checking satisfiability
        :param exact: whether or not to perform exact checking. Ignored by
                      non-approximating backends.
        :return: 'SAT' if the conjunction is satisfiable otherwise 'UNSAT'
        """
        raise NotImplementedError
    def satisfiable(self, extra_constraints=(), exact=None):
        """
        Checks if stored constraints conjunction is satisfiable.
        :param extra_constraints: extra constraints to consider when checking satisfiability
        :param exact: whether or not to perform exact checking. Ignored by
                      non-approximating backends.
        :return: True if the conjunction is satisfiable otherwise False
        """
        raise NotImplementedError()
    def eval(self, e, n, extra_constraints=(), exact=None):
        """
        Evaluates expression `e`, returning a tuple of `n` solutions.
        :param e: the expression
        :param n: the number of solutions to return
        :param extra_constraints: extra constraints to consider when performing the evaluation
        :param exact: whether or not to perform an exact evaluation. Ignored by
                      non-approximating backends.
        :return: tuple of python primitives representing results
        """
        raise NotImplementedError()
    def batch_eval(self, exprs, n, extra_constraints=(), exact=None):
        """
        Evaluates `exprs`, returning a list of tuples (one tuple of `n` solutions for expression).
        :param exprs: expressions
        :param n: the number of solutions to return
        :param extra_constraints: extra constraints to consider when performing the evaluation
        :param exact: whether or not to perform an exact evaluation. Ignored by
                      non-approximating backends.
        :return: list of tuples of python primitives representing results
        """
        raise NotImplementedError()
    def max(self, e, extra_constraints=(), signed=False, exact=None):
        """
        Evaluates `e`, returning its max possible value.
        :param e: the expression
        :param extra_constraints: extra constraints to consider when performing the evaluation
        :param signed: whether the value should be treated as a signed integer
        :param exact: whether or not to perform an exact evaluation. Ignored by
                      non-approximating backends.
        :return: max possible value
        """
        raise NotImplementedError()
    def min(self, e, extra_constraints=(), signed=False, exact=None):
        """
        Evaluates `e`, returning its min possible value.
        :param e: the expression
        :param extra_constraints: extra constraints to consider when performing the evaluation
        :param signed: whether the value should be treated as a signed integer
        :param exact: whether or not to perform an exact evaluation. Ignored by
                      non-approximating backends.
        :return: min possible value
        """
        raise NotImplementedError()
    def solution(self, e, v, extra_constraints=(), exact=None):
        """
        Checks if `v` is a possible solution to `e`.
        :param e: the expression
        :param v: the value
        :param extra_constraints: extra constraints to consider when performing the evaluation
        :param exact: whether or not to perform an exact evaluation. Ignored by
                      non-approximating backends.
        :return: True if it is a possible solution otherwise False
        """
        raise NotImplementedError()
    def is_true(self, e, extra_constraints=(), exact=None):
        """
        Checks if `e` can only (and TRIVIALLY) evaluate to True. If this function returns True,
        then the expression cannot ever be False, regardless of constraints or anything else.
        If the expression returns False, then the expression might STILL not ever be False; it's just
        that we can't trivially prove it. In other words, a return value of False gives you no
        information whatsoever.
        :param e: the expression
        :param extra_constraints: extra constraints to consider when performing the evaluation
        :param exact: whether or not to perform an exact evaluation. Ignored by
                      non-approximating backends.
        :return: True if it can only evaluate to True otherwise False
        """
        raise NotImplementedError()
    def is_false(self, e, extra_constraints=(), exact=None):
        """
        Checks if `e` can only (and TRIVIALLY) evaluate to False. If this function returns True,
        then the expression cannot ever be True, regardless of constraints or anything else.
        If the expression returns False, then the expression might STILL not ever be True; it's just
        that we can't trivially prove it. In other words, a return value of False gives you no
        information whatsoever.
        :param e: the expression
        :param extra_constraints: extra constraints to consider when performing the evaluation
        :param exact: whether or not to perform an exact evaluation. Ignored by
                      non-approximating backends.
        :return: True if it can only evaluate to False otherwise False
        """
        raise NotImplementedError()
    def downsize(self):  # pylint:disable=no-self-use
        # hook for releasing cached solver state; a no-op in the base class
        pass
    #
    # Some utility functions
    #
    def _concrete_value(self, e):  # pylint:disable=no-self-use
        """Return `e` itself if it is already a Python number, else None."""
        if isinstance(e, numbers.Number):
            return e
        else:
            return None
    _concrete_constraint = _concrete_value
    def _constraint_filter(self, c):  # pylint:disable=no-self-use
        # identity here; mixins override this to drop/transform constraints
        return c
    @staticmethod
    def _split_constraints(constraints, concrete=True):
        """
        Returns independent constraints, split from this Frontend's `constraints`.

        Constraints are first split on top-level Ands, then grouped into
        connected components by shared variables. The result is a list of
        (variable_set, constraint_list) pairs; constraints with no variables
        are returned under the {"CONCRETE"} key when `concrete` is True.
        """
        splitted = []
        for i in constraints:
            splitted.extend(i.split(["And"]))
        l.debug("... splitted of size %d", len(splitted))
        concrete_constraints = []
        variable_connections = {}
        constraint_connections = {}
        for n, s in enumerate(splitted):
            l.debug("... processing constraint with %d variables", len(s.variables))
            connected_variables = set(s.variables)
            connected_constraints = {n}
            if len(connected_variables) == 0:
                # no variables at all: a concrete constraint, tracked separately
                concrete_constraints.append(s)
            # union this constraint's component with any existing components
            # that share a variable with it
            for v in s.variables:
                if v in variable_connections:
                    connected_variables |= variable_connections[v]
                if v in constraint_connections:
                    connected_constraints |= constraint_connections[v]
            for v in connected_variables:
                variable_connections[v] = connected_variables
                constraint_connections[v] = connected_constraints
        unique_constraint_sets = set()
        for v in variable_connections:
            unique_constraint_sets.add((frozenset(variable_connections[v]), frozenset(constraint_connections[v])))
        results = []
        for v, c_indexes in unique_constraint_sets:
            results.append((set(v), [splitted[c] for c in c_indexes]))
        if concrete and len(concrete_constraints) > 0:
            results.append(({"CONCRETE"}, concrete_constraints))
        return results
from . import ast

View File

@@ -0,0 +1,16 @@
# mixins for frontends
from .constraint_fixer_mixin import ConstraintFixerMixin
from .constraint_expansion_mixin import ConstraintExpansionMixin
from .model_cache_mixin import ModelCacheMixin
from .constraint_filter_mixin import ConstraintFilterMixin
from .eager_resolution_mixin import EagerResolutionMixin
from .constraint_deduplicator_mixin import ConstraintDeduplicatorMixin
from .concrete_handler_mixin import ConcreteHandlerMixin
from .debug_mixin import DebugMixin
from .solve_block_mixin import SolveBlockMixin
from .simplify_helper_mixin import SimplifyHelperMixin
from .simplify_skipper_mixin import SimplifySkipperMixin
from .composited_cache_mixin import CompositedCacheMixin
from .sat_cache_mixin import SatCacheMixin
from .eval_string_to_ast_mixin import EvalStringsToASTsMixin
from .smtlib_script_dumper_mixin import SMTLibScriptDumperMixin

View File

@@ -0,0 +1,55 @@
# hits = 0
# misses = 0
# ejects = 0
class CompositedCacheMixin:
    """
    Caches the sub-solvers produced by _solver_for_names, keyed by the
    frozenset of variable names they cover. Cache entries are evicted
    whenever a stored child solver touches any of their variables.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._merged_solvers = {}

    def _blank_copy(self, c):
        super()._blank_copy(c)
        c._merged_solvers = {}

    def _copy(self, c):
        super()._copy(c)
        c._merged_solvers = dict(self._merged_solvers)

    def __setstate__(self, base_state):
        super().__setstate__(base_state)
        # the cache is transient and is not pickled; start empty
        self._merged_solvers = {}

    #
    # Cache stuff
    #

    def _remove_cached(self, names):
        # evict every cached solver whose variable set intersects `names`
        stale_keys = [key for key in self._merged_solvers if key & names]
        for key in stale_keys:
            del self._merged_solvers[key]

    def _solver_for_names(self, names):
        key = frozenset(names)
        try:
            return self._merged_solvers[key]
        except KeyError:
            solver = super()._solver_for_names(names)
            self._merged_solvers[key] = solver
            return solver

    def downsize(self):
        super().downsize()
        self._merged_solvers = {}

    def _store_child(self, s, **kwargs):
        self._remove_cached(s.variables)
        return super()._store_child(s, **kwargs)

View File

@@ -0,0 +1,54 @@
class ConcreteHandlerMixin:
    """
    Short-circuits solver queries whose arguments are already concrete,
    answering them directly instead of delegating to the solver below.
    """

    def eval(self, e, n, **kwargs):  # pylint:disable=unused-argument
        known = self._concrete_value(e)
        if known is None:
            return super().eval(e, n, **kwargs)
        return (known,)

    def batch_eval(self, exprs, n, **kwargs):  # pylint:disable=unused-argument
        knowns = [self._concrete_value(x) for x in exprs]
        unknowns = [x for x, known in zip(exprs, knowns) if known is None]
        if not unknowns:
            return [knowns]
        sub_results = map(list, super().batch_eval(unknowns, n, **kwargs))
        # splice the solver's answers back into the symbolic slots, in order
        return [tuple(known if known is not None else row.pop(0) for known in knowns) for row in sub_results]

    def max(self, e, **kwargs):
        known = self._concrete_value(e)
        if known is None:
            return super().max(e, **kwargs)
        return known

    def min(self, e, **kwargs):
        known = self._concrete_value(e)
        if known is None:
            return super().min(e, **kwargs)
        return known

    def solution(self, e, v, **kwargs):
        known_e = self._concrete_value(e)
        known_v = self._concrete_value(v)
        if known_e is not None and known_v is not None:
            return known_e == known_v
        return super().solution(e, v, **kwargs)

    def is_true(self, e, **kwargs):
        known = self._concrete_value(e)
        if known is None:
            return super().is_true(e, **kwargs)
        return known

    def is_false(self, e, **kwargs):
        known = self._concrete_value(e)
        if known is None:
            return super().is_false(e, **kwargs)
        return not known

View File

@@ -0,0 +1,36 @@
class ConstraintDeduplicatorMixin:
    """
    Skips constraints that were already added, tracked by their hash.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._constraint_hashes = set()

    def _blank_copy(self, c):
        super()._blank_copy(c)
        c._constraint_hashes = set()

    def _copy(self, c):
        super()._copy(c)
        c._constraint_hashes = set(self._constraint_hashes)

    def __getstate__(self):
        return self._constraint_hashes, super().__getstate__()

    def __setstate__(self, s):
        self._constraint_hashes, base_state = s
        super().__setstate__(base_state)

    def simplify(self, **kwargs):
        simplified = super().simplify(**kwargs)
        # we only add to the constraint hashes because we want to
        # prevent previous (now simplified) constraints from
        # being re-added
        self._constraint_hashes.update(hash(c) for c in simplified)
        return simplified

    def add(self, constraints, **kwargs):
        fresh = tuple(c for c in constraints if hash(c) not in self._constraint_hashes)
        if not fresh:
            return fresh
        added = super().add(fresh, **kwargs)
        self._constraint_hashes.update(hash(c) for c in added)
        return added

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env python
import logging
l = logging.getLogger("claripy.frontends.cache_mixin")
class ConstraintExpansionMixin:
    """
    Feeds facts learned from query results (solution enumerations, bounds)
    back into the constraint set, so later solves can be cheaper.
    """

    def eval(self, e, n, extra_constraints=(), exact=None, **kwargs):
        results = super().eval(e, n, extra_constraints=extra_constraints, exact=exact, **kwargs)
        # fewer than n results means every solution of e was enumerated;
        # record that as a constraint to help the solver out later
        # TODO: does this really help?
        if not extra_constraints and len(results) < n:
            self.add([Or(*[e == v for v in results])], invalidate_cache=False)
        return results

    def max(self, e, extra_constraints=(), exact=None, signed=False, **kwargs):
        bound = super().max(e, extra_constraints=extra_constraints, exact=exact, signed=signed, **kwargs)
        if not extra_constraints:
            self.add([SLE(e, bound) if signed else ULE(e, bound)], invalidate_cache=False)
        return bound

    def min(self, e, extra_constraints=(), exact=None, signed=False, **kwargs):
        bound = super().min(e, extra_constraints=extra_constraints, exact=exact, signed=signed, **kwargs)
        if not extra_constraints:
            self.add([SGE(e, bound) if signed else UGE(e, bound)], invalidate_cache=False)
        return bound

    def solution(self, e, v, extra_constraints=(), exact=None, **kwargs):
        possible = super().solution(e, v, extra_constraints=extra_constraints, exact=exact, **kwargs)
        if possible is False and not extra_constraints:
            # v was definitively excluded; remember that
            self.add([e != v], invalidate_cache=False)
        return possible
from ..ast.bool import Or
from ..ast.bv import SGE, SLE, UGE, ULE

View File

@@ -0,0 +1,68 @@
class ConstraintFilterMixin:
    """
    Filters trivially-true constraints out of queries and converts
    trivially-false constraint sets into UnsatError / False results.
    """

    def _constraint_filter(self, constraints, **kwargs):
        # deliberately an exact type check, matching the original contract
        if type(constraints) not in (tuple, list):
            raise ClaripyValueError("The extra_constraints argument should be a list of constraints.")
        if len(constraints) == 0:
            return constraints
        filtered = super()._constraint_filter(constraints, **kwargs)
        ccs = [self._concrete_constraint(c) for c in filtered]
        if False in ccs:
            raise UnsatError("Constraints contain False.")
        # drop constraints that concretize to True; keep the rest as-is
        return tuple(o for o, n in zip(constraints, ccs) if n is not True)

    def add(self, constraints, **kwargs):
        try:
            ec = self._constraint_filter(constraints)
        except UnsatError:
            # filter out concrete False
            ec = [c for c in constraints if c not in {False, false}] + [false]
        if len(constraints) == 0:
            return []
        return super().add(ec, **kwargs) if len(ec) > 0 else []

    def satisfiable(self, extra_constraints=(), **kwargs):
        try:
            ec = self._constraint_filter(extra_constraints)
        except UnsatError:
            return False
        return super().satisfiable(extra_constraints=ec, **kwargs)

    def eval(self, e, n, extra_constraints=(), **kwargs):
        ec = self._constraint_filter(extra_constraints)
        return super().eval(e, n, extra_constraints=ec, **kwargs)

    def batch_eval(self, exprs, n, extra_constraints=(), **kwargs):
        ec = self._constraint_filter(extra_constraints)
        return super().batch_eval(exprs, n, extra_constraints=ec, **kwargs)

    def max(self, e, extra_constraints=(), **kwargs):
        ec = self._constraint_filter(extra_constraints)
        return super().max(e, extra_constraints=ec, **kwargs)

    def min(self, e, extra_constraints=(), **kwargs):
        ec = self._constraint_filter(extra_constraints)
        return super().min(e, extra_constraints=ec, **kwargs)

    def solution(self, e, v, extra_constraints=(), **kwargs):
        ec = self._constraint_filter(extra_constraints)
        return super().solution(e, v, extra_constraints=ec, **kwargs)

    def is_true(self, e, extra_constraints=(), **kwargs):
        ec = self._constraint_filter(extra_constraints)
        return super().is_true(e, extra_constraints=ec, **kwargs)

    def is_false(self, e, extra_constraints=(), **kwargs):
        ec = self._constraint_filter(extra_constraints)
        return super().is_false(e, extra_constraints=ec, **kwargs)
from ..errors import UnsatError, ClaripyValueError
from .. import false

View File

@@ -0,0 +1,12 @@
class ConstraintFixerMixin:
    """
    Normalizes add() inputs: wraps a lone constraint in a list and lifts
    raw Python bools into BoolV ASTs before delegating downward.
    """

    def add(self, constraints, **kwargs):
        if not isinstance(constraints, (list, tuple, set)):
            constraints = [constraints]
        if len(constraints) == 0:
            return []
        fixed = [BoolV(c) if isinstance(c, bool) else c for c in constraints]
        return super().add(fixed, **kwargs)
from .. import BoolV

View File

@@ -0,0 +1,64 @@
call_depth = 0
def _log(s):
print("| " * call_depth + s)
def _debugged(f):
def debugged(*args, **kwargs):
global call_depth
_log(f.__name__ + " " + str(args) + " " + str(kwargs))
call_depth += 1
try:
r = f(*args, **kwargs)
except Exception as e: # pylint:disable=broad-except
r = "EXCEPTION: %s" % str(e)
raise
finally:
call_depth -= 1
if r is not None:
_log(r"\... " + str(r))
if hasattr(r, "__iter__") and hasattr(r, "next"):
return _debug_iterator(r)
else:
return r
return debugged
def _debug_iterator(i):
for v in i:
_log("NEXT: " + str(v))
yield v
_log("FINISHED: " + str(i))
class DebugMixin:
    """
    Wraps every public attribute access so that callables log their calls
    and iterators log each yielded value. Dunder attributes pass through.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __getattribute__(self, a):
        attr = super().__getattribute__(a)
        if a.startswith("__"):
            # leave dunders untouched to avoid breaking protocol lookups
            return attr
        if hasattr(attr, "__call__"):
            return _debugged(attr)
        if hasattr(attr, "__next__"):
            return _debug_iterator(attr)
        return attr
def debug_decorator(o):
    """
    Decorate a class with DebugMixin, or a callable with call logging.
    Anything else yields None.
    """
    if isinstance(o, type):
        class Debugged(DebugMixin, o):
            pass
        Debugged.__name__ = "Debugged" + o.__name__
        return Debugged
    if hasattr(o, "__call__"):
        return _debugged(o)

View File

@@ -0,0 +1,19 @@
class EagerResolutionMixin:
    """
    Extends concrete-value detection by asking the eager backends to
    concretize expressions the base check could not resolve.
    """

    def _concrete_value(self, e):
        known = super()._concrete_value(e)
        if known is not None:
            return known
        for backend in backends._eager_backends:
            try:
                return backend.eval(e, 1)[0]
            except BackendError:
                continue
        return None

    _concrete_constraint = _concrete_value
from .. import backends
from ..errors import BackendError

View File

@@ -0,0 +1,14 @@
class EvalStringsToASTsMixin:
    """
    Converts raw string solutions back into StringV ASTs when the evaluated
    expression is a String; all other expressions are delegated downward.
    """

    def eval_to_ast(self, e, n, extra_constraints=(), exact=None):
        if type(e) is String:
            return [
                StringV(
                    v,
                )
                for v in self.eval(e, n, extra_constraints=extra_constraints, exact=exact)
            ]
        # BUGFIX: previously forwarded exact=None, silently discarding the
        # caller's `exact` argument for non-String expressions
        return super().eval_to_ast(e, n, extra_constraints=extra_constraints, exact=exact)
from .. import String, StringV

View File

@@ -0,0 +1,383 @@
from typing import Tuple
import weakref
import itertools
from .. import errors
class ModelCache:
    """
    Wraps one satisfying assignment (a dict mapping variable names to
    concrete values) and evaluates ASTs against it by substituting leaf
    symbols with their modeled (or default) values.
    """
    # NOTE(review): _defaults appears unused within this class; 0/0.0/True
    # match the fallbacks used by _leaf_op below — confirm before removing.
    _defaults = {0, 0.0, True}
    def __init__(self, model):
        # variable name -> concrete value
        self.model = model
        # per-AST substitution caches; weak keys so dead ASTs can be collected
        self.replacements = weakref.WeakKeyDictionary()
        self.constraint_only_replacements = weakref.WeakKeyDictionary()
    def __hash__(self):
        # lazily computed and memoized; the model must not be mutated after
        # the first hash
        if not hasattr(self, "_hash"):
            self._hash = hash(frozenset(self.model.items()))  # pylint:disable=attribute-defined-outside-init
        return self._hash
    def __eq__(self, other):
        return self.model == other.model
    def __getstate__(self):
        # the replacement caches are transient; pickle only the model
        return (self.model,)
    def __setstate__(self, s):
        self.model = s[0]
        self.replacements = weakref.WeakKeyDictionary()
        self.constraint_only_replacements = weakref.WeakKeyDictionary()
    #
    # Splitting support
    #
    def filter(self, variables):
        """Return a new ModelCache restricted to the given variable names."""
        return ModelCache({k: self.model[k] for k in self.model if k in variables})
    @staticmethod
    def combine(*models):
        """Merge several ModelCaches into one (later models win on clashes)."""
        return ModelCache(dict(itertools.chain.from_iterable(m.model.items() for m in models)))
    #
    # Model-driven evaluation
    #
    def _leaf_op(self, a):
        # replace a leaf symbol with its model value, defaulting to 0 / True /
        # 0.0 / "" when the model does not constrain that variable
        return (
            all_operations.BVV(self.model.get(a.args[0], 0), a.length)
            if a.op == "BVS"
            else all_operations.BoolV(self.model.get(a.args[0], True))
            if a.op == "BoolS"
            else all_operations.FPV(self.model.get(a.args[0], 0.0), a.args[1])
            if a.op == "FPS"
            else all_operations.StringV(self.model.get(a.args[0], ""))
            if a.op == "StringS"
            else a
        )
    def _leaf_op_existonly(self, a):
        # like _leaf_op, but raises KeyError for variables absent from the model
        return (
            all_operations.BVV(self.model[a.args[0]], a.length)
            if a.op == "BVS"
            else all_operations.BoolV(self.model[a.args[0]])
            if a.op == "BoolS"
            else all_operations.FPV(self.model[a.args[0]], a.args[1])
            if a.op == "FPS"
            else all_operations.StringV(self.model[a.args[0]])
            if a.op == "StringS"
            else a
        )
    def eval_ast(self, ast, allow_unconstrained: bool = True):
        """
        Eval the ast, replacing symbols by their last value in the model.
        :param ast: The AST to evaluate.
        :param allow_unconstrained: When set to True, we will treat non-existent variables as unconstrained variables
                                    and will use arbitrary concrete values for them during evaluation. Otherwise, raise
                                    KeyErrors for non-existent variables.
        """
        if allow_unconstrained:
            new_ast = ast.replace_dict(self.replacements, leaf_operation=self._leaf_op)
        else:
            new_ast = ast.replace_dict(self.constraint_only_replacements, leaf_operation=self._leaf_op_existonly)
        return backends.concrete.eval(new_ast, 1)[0]
    def eval_constraints(self, constraints):
        """Returns whether the constraints is satisfied trivially by using the
        last model."""
        # eval_ast is concretizing symbols and evaluating them, this can raise
        # exceptions.
        try:
            return all(self.eval_ast(c) for c in constraints)
        except errors.ClaripyZeroDivisionError:
            return False
    def eval_list(self, asts, allow_unconstrained: bool = True) -> Tuple:
        """
        Evaluate a list of ASTs.
        :param asts: A list of ASTs to evaluate.
        :param allow_unconstrained: When set to True, we will treat non-existent variables as unconstrained variables
                                    and will use arbitrary concrete values for them during evaluation. Otherwise, raise
                                    KeyErrors for non-existent variables.
        :return: A tuple of evaluated results, one element per AST.
        """
        return tuple(self.eval_ast(c, allow_unconstrained=allow_unconstrained) for c in asts)
class ModelCacheMixin:
    """
    Caches satisfying models produced by the underlying solver and answers
    queries (satisfiable/eval/min/max/solution) from the cache when possible.
    The *_exhausted sets record ASTs whose solution space has been fully
    enumerated under the current constraints.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._models = set()
        self._exhausted = False
        self._eval_exhausted = weakref.WeakSet()
        self._max_exhausted = weakref.WeakSet()
        self._min_exhausted = weakref.WeakSet()
        self._max_signed_exhausted = weakref.WeakSet()
        self._min_signed_exhausted = weakref.WeakSet()
    def _blank_copy(self, c):
        super()._blank_copy(c)
        c._models = set()
        c._exhausted = False
        c._eval_exhausted = weakref.WeakSet()
        c._max_exhausted = weakref.WeakSet()
        c._min_exhausted = weakref.WeakSet()
        c._max_signed_exhausted = weakref.WeakSet()
        c._min_signed_exhausted = weakref.WeakSet()
    def _copy(self, c):
        super()._copy(c)
        c._models = set(self._models)
        c._exhausted = self._exhausted
        c._eval_exhausted = weakref.WeakSet(self._eval_exhausted)
        c._max_exhausted = weakref.WeakSet(self._max_exhausted)
        c._min_exhausted = weakref.WeakSet(self._min_exhausted)
        c._max_signed_exhausted = weakref.WeakSet(self._max_signed_exhausted)
        c._min_signed_exhausted = weakref.WeakSet(self._min_signed_exhausted)
    def __setstate__(self, base_state):
        super().__setstate__(base_state)
        # caches are transient and not pickled; start empty after unpickling
        self._models = set()
        self._exhausted = False
        self._eval_exhausted = weakref.WeakSet()
        self._max_exhausted = weakref.WeakSet()
        self._min_exhausted = weakref.WeakSet()
        self._max_signed_exhausted = weakref.WeakSet()
        self._min_signed_exhausted = weakref.WeakSet()
    #
    # Model cleaning
    #
    def simplify(self, *args, **kwargs):
        results = super().simplify(*args, **kwargs)
        # simplification to `false` means unsat: no cached model can be valid
        if len(results) > 0 and any(c is false for c in results):
            self._models.clear()
        return results
    def _trivial_model_optimization(self):
        # recognize the single-constraint pattern `<BVS> == <concrete>` and
        # synthesize its (unique) model without invoking the solver
        c = self.constraints[0]
        if not (
            c.depth == 2
            and c.op == "__eq__"
            and len(c.variables) == 1
            and c.args[0].symbolic
            and not c.args[1].symbolic
            and c.args[0].op == "BVS"
        ):
            return
        self._models.add(ModelCache({next(iter(c.args[0].variables)): backends.concrete.eval(c.args[1], 1)[0]}))
        # the variable has exactly one value, so every query kind is exhausted
        self._eval_exhausted.add(c.args[0].cache_key)
        self._max_exhausted.add(c.args[0].cache_key)
        self._min_exhausted.add(c.args[0].cache_key)
        self._max_signed_exhausted.add(c.args[0].cache_key)
        self._min_signed_exhausted.add(c.args[0].cache_key)
    def add(self, constraints, invalidate_cache=True, **kwargs):
        if len(constraints) == 0:
            return constraints
        old_vars = frozenset(self.variables)
        added = super().add(constraints, **kwargs)
        if len(added) == 0:
            return added
        if len(self.constraints) == 1 and len(self._models) == 0:
            self._trivial_model_optimization()
        new_vars = any(a.variables - old_vars for a in added)
        if new_vars or invalidate_cache:
            # shortcut for unsat
            if any(c is false for c in constraints):
                self._models.clear()
            # keep only cached models that still satisfy the new constraints
            still_valid = set(self._get_models(extra_constraints=added))
            if len(still_valid) != len(self._models):
                # some models were dropped, so exhaustion claims no longer hold
                self._exhausted = False
                self._eval_exhausted.clear()
                self._max_exhausted.clear()
                self._min_exhausted.clear()
                self._max_signed_exhausted.clear()
                self._min_signed_exhausted.clear()
                self._models = still_valid
        return added
    def split(self):
        results = super().split()
        for r in results:
            # project each cached model down to the split solver's variables
            r._models = {m.filter(r.variables) for m in self._models}
        return results
    def combine(self, others):
        combined = super().combine(others)
        if any(len(o._models) == 0 for o in others) or len(self._models) == 0:
            # this would need a solve anyways, so screw it
            return combined
        vars_count = len(self.variables) + sum(len(s.variables) for s in others)
        all_vars = self.variables.union(*[s.variables for s in others])
        if vars_count != len(all_vars):
            # this is the case where there are variables missing from the models.
            # We'll need more intelligence here to handle it
            return combined
        model_lists = [self._models]
        model_lists.extend(o._models for o in others)
        combined._models.update(
            ModelCache.combine(*product)
            for product in itertools.islice(itertools.product(*model_lists), len(self._models))
        )
        return combined
    def update(self, other):
        """
        Updates this cache mixin with results discovered by the other split off one.
        """
        acceptable_models = [m for m in other._models if set(m.model.keys()) == self.variables]
        self._models.update(acceptable_models)
        self._eval_exhausted.update(other._eval_exhausted)
        self._max_exhausted.update(other._max_exhausted)
        self._min_exhausted.update(other._min_exhausted)
        self._max_signed_exhausted.update(other._max_signed_exhausted)
        self._min_signed_exhausted.update(other._min_signed_exhausted)
    #
    # Cache retrieval
    #
    def _model_hook(self, m):
        # Z3 might give us solutions for variables that we did not ask for. so we create a new dict with solutions for
        # only the variables that are under the solver's control
        m_ = {k: v for k, v in m.items() if k in self.variables}
        if m_:
            model = ModelCache(m_)
            self._models.add(model)
    def _get_models(self, extra_constraints=()):
        # yield cached models that also satisfy the extra constraints
        for m in self._models:
            if m.eval_constraints(extra_constraints):
                yield m
    def _get_batch_solutions(self, asts, n=None, extra_constraints=(), allow_unconstrained=True):
        results = set()
        for m in self._get_models(extra_constraints):
            try:
                results.add(m.eval_list(asts, allow_unconstrained=allow_unconstrained))
            except (ZeroDivisionError, KeyError):
                # model can't evaluate these ASTs; skip it
                continue
            if len(results) == n:
                break
        return results
    def _get_solutions(self, e, n=None, extra_constraints=(), allow_unconstrained=True):
        return tuple(
            v[0]
            for v in self._get_batch_solutions(
                [e],
                n=n,
                extra_constraints=extra_constraints,
                allow_unconstrained=allow_unconstrained,
            )
        )
    #
    # Cached functions
    #
    def satisfiable(self, extra_constraints=(), **kwargs):
        # any surviving cached model is a witness of satisfiability
        for _ in self._get_models(extra_constraints=extra_constraints):
            return True
        return super().satisfiable(extra_constraints=extra_constraints, **kwargs)
    def batch_eval(self, asts, n, extra_constraints=(), **kwargs):
        # NOTE(review): returns a set of tuples, not a list — confirm callers
        # do not rely on ordering
        results = self._get_batch_solutions(asts, n=n, extra_constraints=extra_constraints)
        if len(results) == n or (len(asts) == 1 and asts[0].cache_key in self._eval_exhausted):
            return results
        remaining = n - len(results)
        # TODO: faster to concat?
        if len(results) != 0:
            # exclude already-found solution tuples from the solver query
            constraints = (
                all_operations.And(*[all_operations.Or(*[a != v for a, v in zip(asts, r)]) for r in results]),
            ) + tuple(extra_constraints)
        else:
            constraints = extra_constraints
        try:
            results.update(super().batch_eval(asts, remaining, extra_constraints=constraints, **kwargs))
        except UnsatError:
            if len(results) == 0:
                raise
        if len(extra_constraints) == 0 and len(results) < n:
            for e in asts:
                # only mark an AST as eval-exhausted if e.variables is a subset of variables that the current solver
                # knows about (from its constraints)
                if self.variables.issuperset(e.variables):
                    self._eval_exhausted.add(e.cache_key)
        return results
    def eval(self, e, n, **kwargs):
        return tuple(r[0] for r in ModelCacheMixin.batch_eval(self, [e], n=n, **kwargs))
    def min(self, e, extra_constraints=(), signed=False, **kwargs):
        cached = []
        # NOTE(review): _min_signed_exhausted is populated but never consulted;
        # for signed=True this check presumably should use it instead of
        # _min_exhausted — confirm against upstream
        if e.cache_key in self._eval_exhausted or e.cache_key in self._min_exhausted:
            # we set allow_unconstrained to False because we expect all returned values for e are returned by Z3,
            # instead of some arbitrarily assigned concrete values.
            cached = self._get_solutions(e, extra_constraints=extra_constraints, allow_unconstrained=False)
        if len(cached) > 0:
            signed_key = lambda v: v if v < 2 ** (len(e) - 1) else v - 2 ** len(e)
            return min(cached, key=signed_key if signed else lambda v: v)
        else:
            m = super().min(e, extra_constraints=extra_constraints, signed=signed, **kwargs)
            if len(extra_constraints) == 0:
                (self._min_signed_exhausted if signed else self._min_exhausted).add(e.cache_key)
            return m
    def max(self, e, extra_constraints=(), signed=False, **kwargs):
        cached = []
        # NOTE(review): same signed/unsigned exhausted-set asymmetry as min()
        if e.cache_key in self._eval_exhausted or e.cache_key in self._max_exhausted:
            cached = self._get_solutions(e, extra_constraints=extra_constraints, allow_unconstrained=False)
        if len(cached) > 0:
            signed_key = lambda v: v if v < 2 ** (len(e) - 1) else v - 2 ** len(e)
            return max(cached, key=signed_key if signed else lambda v: v)
        else:
            m = super().max(e, extra_constraints=extra_constraints, signed=signed, **kwargs)
            if len(extra_constraints) == 0:
                (self._max_signed_exhausted if signed else self._max_exhausted).add(e.cache_key)
            return m
    def solution(self, e, v, extra_constraints=(), **kwargs):
        if isinstance(v, Base):
            cached = self._get_batch_solutions([e, v], extra_constraints=extra_constraints)
            if any(ec == vc for ec, vc in cached):
                return True
        else:
            cached = self._get_solutions(e, extra_constraints=extra_constraints)
            if v in cached:
                return True
        # a cache miss proves nothing; fall through to the real solver
        return super().solution(e, v, extra_constraints=extra_constraints, **kwargs)
from .. import backends, false
from ..errors import UnsatError
from ..ast import all_operations, Base

View File

@@ -0,0 +1,111 @@
class SatCacheMixin:
    """
    Caches the satisfiability status of the constraint set:
    None = unknown, True = known satisfiable, False = known unsatisfiable.
    Query methods short-circuit on a cached unsat and update the cache
    based on their outcome.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._cached_satness = None

    def _blank_copy(self, c):
        super()._blank_copy(c)
        c._cached_satness = None

    def _copy(self, c):
        super()._copy(c)
        c._cached_satness = self._cached_satness

    def __getstate__(self):
        return self._cached_satness, super().__getstate__()

    def __setstate__(self, s):
        self._cached_satness, base_state = s
        super().__setstate__(base_state)

    #
    # SAT caching
    #

    def add(self, constraints, **kwargs):
        added = super().add(constraints, **kwargs)
        if len(added) > 0 and any(c is false for c in added):
            self._cached_satness = False
        elif self._cached_satness is True:
            # new constraints can turn a sat set unsat; forget the cached True
            self._cached_satness = None
        return added

    def simplify(self):
        new_constraints = super().simplify()
        if len(new_constraints) > 0 and any(c is false for c in new_constraints):
            self._cached_satness = False
        return new_constraints

    def satisfiable(self, extra_constraints=(), **kwargs):
        if self._cached_satness is False:
            return False
        if self._cached_satness is True and len(extra_constraints) == 0:
            return True
        r = super().satisfiable(extra_constraints=extra_constraints, **kwargs)
        if len(extra_constraints) == 0:
            self._cached_satness = r
        return r

    def _cached_query(self, method, *args, extra_constraints=(), **kwargs):
        """
        Shared guard/update logic for the query methods below: raise
        immediately on a cached unsat, record sat on success, and record
        unsat on UnsatError (only when no extra constraints were involved).
        `method` is the bound super() method to delegate to.
        """
        if self._cached_satness is False:
            raise UnsatError("cached unsat")
        try:
            r = method(*args, extra_constraints=extra_constraints, **kwargs)
            # a successful query implies the base constraints are satisfiable
            self._cached_satness = True
            return r
        except UnsatError:
            if len(extra_constraints) == 0:
                self._cached_satness = False
            raise

    def eval(self, e, n, extra_constraints=(), **kwargs):
        return self._cached_query(super().eval, e, n, extra_constraints=extra_constraints, **kwargs)

    def batch_eval(self, e, n, extra_constraints=(), **kwargs):
        return self._cached_query(super().batch_eval, e, n, extra_constraints=extra_constraints, **kwargs)

    def max(self, e, extra_constraints=(), **kwargs):
        return self._cached_query(super().max, e, extra_constraints=extra_constraints, **kwargs)

    def min(self, e, extra_constraints=(), **kwargs):
        return self._cached_query(super().min, e, extra_constraints=extra_constraints, **kwargs)

    def solution(self, e, v, extra_constraints=(), **kwargs):
        return self._cached_query(super().solution, e, v, extra_constraints=extra_constraints, **kwargs)
from .. import false
from ..errors import UnsatError

View File

@@ -0,0 +1,18 @@
class SimplifyHelperMixin:
    """Mixin that runs simplify() ahead of the heavier solver queries.

    ``min``/``max`` always simplify first; ``eval``/``batch_eval`` do so only
    when more than a single solution is requested.
    """

    def eval(self, e, n, *args, **kwargs):
        """Evaluate `e` up to `n` times, simplifying first when n > 1."""
        if n > 1:
            self.simplify()
        return super().eval(e, n, *args, **kwargs)

    def batch_eval(self, e, n, *args, **kwargs):
        """Batch-evaluate expressions, simplifying first when n > 1."""
        if n > 1:
            self.simplify()
        return super().batch_eval(e, n, *args, **kwargs)

    def min(self, *args, **kwargs):
        """Minimize, always simplifying first."""
        self.simplify()
        return super().min(*args, **kwargs)

    def max(self, *args, **kwargs):
        """Maximize, always simplifying first."""
        self.simplify()
        return super().max(*args, **kwargs)

View File

@@ -0,0 +1,36 @@
class SimplifySkipperMixin:
    """Mixin that skips redundant simplification passes.

    ``_simplified`` starts out True, is cleared whenever add() actually
    introduces constraints, and is set again by the next simplify() call;
    the flag travels with copies and pickled state.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._simplified = True

    def _blank_copy(self, c):
        super()._blank_copy(c)
        c._simplified = True

    def _copy(self, c):
        super()._copy(c)
        c._simplified = self._simplified

    def __getstate__(self):
        return self._simplified, super().__getstate__()

    def __setstate__(self, s):
        simplified, base_state = s
        self._simplified = simplified
        super().__setstate__(base_state)

    #
    # Simplification skipping
    #

    def add(self, *args, **kwargs):
        """Add constraints; any actual addition re-arms simplification."""
        new_constraints = super().add(*args, **kwargs)
        if len(new_constraints) > 0:
            self._simplified = False
        return new_constraints

    def simplify(self, *args, **kwargs):
        """Simplify only if something was added since the last pass."""
        if not self._simplified:
            # Set the flag first, then delegate (same ordering as before).
            self._simplified = True
            return super().simplify(*args, **kwargs)
        # Nothing changed since the last pass: hand back the constraints as-is.
        return self.constraints

View File

@@ -0,0 +1,32 @@
import logging
import sys
import threading
from ..frontends.constrained_frontend import ConstrainedFrontend
l = logging.getLogger(__name__)
class SMTLibScriptDumperMixin:
    """Mixin exposing the current constraint set as an SMT-LIB script."""

    def get_smtlib_script_satisfiability(self, extra_constraints=(), extra_variables=()):
        """
        Return an smt-lib script that check the satisfiability of the current constraints

        :param extra_constraints: additional constraints to include in the script
        :param extra_variables:   additional variables to declare in the script
        :return string: smt-lib script
        :raises ClaripyFrontendError: if the backend fails to convert or render
        """
        all_constraints = extra_constraints + tuple(self.constraints)
        try:
            converted_constraints = self._solver_backend.convert_list(all_constraints)
            converted_variables = self._solver_backend.convert_list(extra_variables)
            variables, constraints = self._solver_backend._get_all_vars_and_constraints(
                e_c=converted_constraints, e_v=converted_variables
            )
            return self._solver_backend._get_satisfiability_smt_script(constraints, variables)
        except BackendError as e:
            raise ClaripyFrontendError("Backend error during smtlib script generation") from e

    # def merge(self, others, merge_conditions, common_ancestor=None):
    #     return self._solver_backend.__class__.__name__ == 'BackendZ3', ConstrainedFrontend.merge(
    #         self, others, merge_conditions, common_ancestor=common_ancestor
    #     )[1]
from ..errors import BackendError, ClaripyFrontendError

View File

@@ -0,0 +1,43 @@
class SolveBlockMixin:
    """Mixin gating every solver query behind a ``can_solve`` flag.

    While ``can_solve`` is False, any query (satisfiable/solution/eval/
    batch_eval/min/max) trips an assertion.  The flag defaults to True and is
    preserved across copies and pickling.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.can_solve = True

    def _blank_copy(self, c):
        super()._blank_copy(c)
        c.can_solve = True

    def _copy(self, c):
        super()._copy(c)
        c.can_solve = self.can_solve

    def __getstate__(self):
        return self.can_solve, super().__getstate__()

    def __setstate__(self, s):
        can_solve, base_state = s
        self.can_solve = can_solve
        super().__setstate__(base_state)

    # Each query below is a thin guard: assert the flag, then defer to the
    # next frontend in the MRO.  These stay plain asserts (not raises) so the
    # check disappears under `python -O`, exactly as before.

    def satisfiable(self, *args, **kwargs):
        assert self.can_solve
        return super().satisfiable(*args, **kwargs)

    def solution(self, *args, **kwargs):
        assert self.can_solve
        return super().solution(*args, **kwargs)

    def eval(self, *args, **kwargs):
        assert self.can_solve
        return super().eval(*args, **kwargs)

    def batch_eval(self, *args, **kwargs):
        assert self.can_solve
        return super().batch_eval(*args, **kwargs)

    def min(self, *args, **kwargs):
        assert self.can_solve
        return super().min(*args, **kwargs)

    def max(self, *args, **kwargs):
        assert self.can_solve
        return super().max(*args, **kwargs)

View File

@@ -0,0 +1,5 @@
from .light_frontend import LightFrontend
from .full_frontend import FullFrontend
from .hybrid_frontend import HybridFrontend
from .composite_frontend import CompositeFrontend
from .replacement_frontend import ReplacementFrontend

Some files were not shown because too many files have changed in this diff Show More