Commit 4bcef6c6 by Craig Heffner

Modified code to use PEP8 formatting via autopep8

parent 1eab95fb
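
The hunks below are mechanical reformatting only. For reference, a cleanup of this kind can be reproduced with autopep8's Python API; this is a minimal sketch, not part of the commit, and 'example.py' is a placeholder file name:

    import autopep8

    # Illustrative only: reformat one source file the way this commit does.
    with open('example.py') as f:
        original = f.read()

    # fix_code() returns PEP8-compliant source (whitespace, line wrapping, etc.)
    fixed = autopep8.fix_code(original)

    with open('example.py', 'w') as f:
        f.write(fixed)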
@@ -4,9 +4,13 @@ from binwalk.core.module import Modules
from binwalk.core.exceptions import ModuleException


# Convenience functions
def scan(*args, **kwargs):
    with Modules(*args, **kwargs) as m:
        objs = m.execute()
    return objs


def execute(*args, **kwargs):
    return scan(*args, **kwargs)
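
For context, scan() and execute() above are the public API entry points; a typical call looks like the following sketch. The file name and keyword options are illustrative, and the keyword arguments are simply forwarded to Modules():

    import binwalk

    # Hypothetical usage of the convenience wrapper above; 'firmware.bin' is a
    # placeholder path.
    for module in binwalk.scan('firmware.bin', signature=True, quiet=True):
        for result in module.results:
            print("0x%.8X    %s" % (result.offset, result.description))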
@@ -6,10 +6,13 @@ import ctypes.util
import binwalk.core.common
from binwalk.core.compat import *


class Function(object):
    '''
    Container class for defining library functions.
    '''

    def __init__(self, **kwargs):
        self.name = None
        self.type = int
@@ -17,28 +20,30 @@ class Function(object):
        for (k, v) in iterator(kwargs):
            setattr(self, k, v)


class FunctionHandler(object):
    '''
    Class for abstracting function calls via ctypes and handling Python 2/3 compatibility issues.
    '''
    PY2CTYPES = {
        bytes: ctypes.c_char_p,
        str: ctypes.c_char_p,
        int: ctypes.c_int,
        float: ctypes.c_float,
        bool: ctypes.c_int,
        None: ctypes.c_int,
    }

    RETVAL_CONVERTERS = {
        None: int,
        int: int,
        float: float,
        bool: bool,
        str: bytes2str,
        bytes: str2bytes,
    }

    def __init__(self, library, function):
        '''
        Class constructor.
@@ -58,7 +63,7 @@ class FunctionHandler(object):
        else:
            self.function.restype = self.retype
            self.retval_converter = None
            # raise Exception("Unknown return type: '%s'" % self.retype)

    def run(self, *args):
        '''
@@ -70,7 +75,7 @@ class FunctionHandler(object):
        '''
        args = list(args)

        # Python3 expects a bytes object for char *'s, not a str.
        # This allows us to pass either, regardless of the Python version.
        for i in range(0, len(args)):
            if isinstance(args[i], str):
@@ -81,8 +86,10 @@ class FunctionHandler(object):
            retval = self.retval_converter(retval)

        return retval


class Library(object):
    '''
    Class for loading the specified library via ctypes.
    '''
@@ -101,7 +108,7 @@ class Library(object):
        if not self.library:
            raise Exception("Failed to load library '%s'" % library)

        for function in functions:
            f = FunctionHandler(self.library, function)
            setattr(self, function.name, f.run)
@@ -110,25 +117,26 @@ class Library(object):
        Locates the specified library.

        @libraries - Library name (e.g., 'magic' for libmagic), or a list of names.

        Returns a string to be passed to ctypes.cdll.LoadLibrary.
        '''
        lib_path = None
        prefix = binwalk.core.common.get_libs_path()

        if isinstance(libraries, str):
            libraries = [libraries]

        for library in libraries:
            system_paths = {
                'linux': [os.path.join(prefix, 'lib%s.so' % library), '/usr/local/lib/lib%s.so' % library],
                'cygwin': [os.path.join(prefix, 'lib%s.so' % library), '/usr/local/lib/lib%s.so' % library],
                'win32': [os.path.join(prefix, 'lib%s.dll' % library), '%s.dll' % library],
                'darwin': [os.path.join(prefix, 'lib%s.dylib' % library),
                           '/opt/local/lib/lib%s.dylib' % library,
                           '/usr/local/lib/lib%s.dylib' % library,
                           ] + glob.glob(
                    '/usr/local/Cellar/*%s*/*/lib/lib%s.dylib' % (library, library)),
            }

            for i in range(2, 4):
@@ -136,27 +144,30 @@ class Library(object):
            # Search the common install directories first; these are usually not in the library search path
            # Search these *first*, since a) they are the most likely locations and b) there may be a
            # discrepency between where ctypes.util.find_library and
            # ctypes.cdll.LoadLibrary search for libs.
            for path in system_paths[sys.platform]:
                binwalk.core.common.debug("Searching for '%s'" % path)
                if os.path.exists(path):
                    lib_path = path
                    break

            # If we failed to find the library, check the standard library
            # search paths
            if not lib_path:
                lib_path = ctypes.util.find_library(library)

            # Use the first library that we can find
            if lib_path:
                binwalk.core.common.debug(
                    "Found library '%s' at: %s" % (library, lib_path))
                break
            else:
                binwalk.core.common.debug(
                    "Could not find library '%s'" % library)

        # If we still couldn't find the library, error out
        if not lib_path:
            raise Exception("Failed to locate libraries '%s'" % str(libraries))

        return lib_path
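
To show how Function, FunctionHandler and Library above fit together, here is an illustrative sketch. The import path, the choice of libc/strlen, and the declared return type are assumptions, not something this commit touches:

    from binwalk.core.C import Function, Library

    # Illustrative sketch: expose libc's strlen through the ctypes wrappers above.
    funcs = [Function(name="strlen", type=int)]
    libc = Library("c", funcs)

    # Each Function becomes a bound callable; FunctionHandler converts the str
    # argument to bytes for char * parameters and converts the return value.
    print(libc.strlen("firmware"))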
@@ -21,10 +21,12 @@ if not __debug__:
else:
    DEBUG = False


def MSWindows():
    # Returns True if running in a Microsoft Windows OS
    return (platform.system() == 'Windows')


def debug(msg):
    '''
    Displays debug messages to stderr only if the Python interpreter was invoked with the -O flag.
@@ -33,27 +35,32 @@ def debug(msg):
        sys.stderr.write("DEBUG: " + msg + "\n")
        sys.stderr.flush()


def warning(msg):
    '''
    Prints warning messages to stderr
    '''
    sys.stderr.write("\nWARNING: " + msg + "\n")


def error(msg):
    '''
    Prints error messages to stderr
    '''
    sys.stderr.write("\nERROR: " + msg + "\n")


def get_module_path():
    root = __file__
    if os.path.islink(root):
        root = os.path.realpath(root)
    return os.path.dirname(os.path.dirname(os.path.abspath(root)))


def get_libs_path():
    return os.path.join(get_module_path(), "libs")


def file_md5(file_name):
    '''
    Generate an MD5 hash of the specified file.
@@ -65,11 +72,12 @@ def file_md5(file_name):
    md5 = hashlib.md5()

    with open(file_name, 'rb') as f:
        for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
            md5.update(chunk)

    return md5.hexdigest()


def file_size(filename):
    '''
    Obtains the size of a given file.
@@ -85,10 +93,12 @@ def file_size(filename):
    except KeyboardInterrupt as e:
        raise e
    except Exception as e:
        raise Exception(
            "file_size failed to obtain the size of '%s': %s" % (filename, str(e)))
    finally:
        os.close(fd)


def strip_quoted_strings(string):
    '''
    Strips out data in between double quotes.
@@ -101,9 +111,11 @@ def strip_quoted_strings(string):
    # Note that this removes everything in between the first and last double quote.
    # This is intentional, as printed (and quoted) strings from a target file may contain
    # double quotes, and this function should ignore those. However, it also means that any
    # data between two quoted strings (ex: '"quote 1" you won't see me "quote
    # 2"') will also be stripped.
    return re.sub(r'\"(.*)\"', "", string)


def get_quoted_strings(string):
    '''
    Returns a string comprised of all data in between double quotes.
@@ -118,13 +130,15 @@ def get_quoted_strings(string):
        # Note that this gets everything in between the first and last double quote.
        # This is intentional, as printed (and quoted) strings from a target file may contain
        # double quotes, and this function should ignore those. However, it also means that any
        # data between two quoted strings (ex: '"quote 1" non-quoted data
        # "quote 2"') will also be included.
        return re.findall(r'\"(.*)\"', string)[0]
    except KeyboardInterrupt as e:
        raise e
    except Exception:
        return ''


def unique_file_name(base_name, extension=''):
    '''
    Creates a unique file name based on the specified base name.
@@ -147,6 +161,7 @@ def unique_file_name(base_name, extension=''):
    return fname


def strings(filename, minimum=4):
    '''
    A strings generator, similar to the Unix strings utility.
@@ -174,13 +189,16 @@ def strings(filename, minimum=4):
            else:
                result = ""


class GenericContainer(object):

    def __init__(self, **kwargs):
        for (k, v) in iterator(kwargs):
            setattr(self, k, v)


class MathExpression(object):
    '''
    Class for safely evaluating mathematical expressions from a string.
    Stolen from: http://stackoverflow.com/questions/2371436/evaluating-a-mathematical-expression-in-a-string
@@ -213,22 +231,25 @@ class MathExpression(object):
        return self._eval(ast.parse(expr).body[0].value)

    def _eval(self, node):
        if isinstance(node, ast.Num):  # <number>
            return node.n
        elif isinstance(node, ast.operator):  # <operator>
            return self.OPERATORS[type(node.op)]
        elif isinstance(node, ast.UnaryOp):
            return self.OPERATORS[type(node.op)](0, self._eval(node.operand))
        elif isinstance(node, ast.BinOp):  # <left> <operator> <right>
            return self.OPERATORS[type(node.op)](self._eval(node.left), self._eval(node.right))
        else:
            raise TypeError(node)
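
The _eval() walker above is the usual AST trick for evaluating arithmetic without calling eval() on untrusted input; a self-contained sketch of the same technique, separate from the binwalk class:

    import ast
    import operator as op

    # Map AST operator node types to functions, then recurse over the parse tree.
    OPERATORS = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
                 ast.Div: op.truediv, ast.USub: op.neg}

    def safe_eval(expression):
        def _eval(node):
            if isinstance(node, ast.Num):        # <number>
                return node.n
            elif isinstance(node, ast.BinOp):    # <left> <operator> <right>
                return OPERATORS[type(node.op)](_eval(node.left), _eval(node.right))
            elif isinstance(node, ast.UnaryOp):  # e.g. -<number>
                return OPERATORS[type(node.op)](_eval(node.operand))
            else:
                raise TypeError(node)
        return _eval(ast.parse(expression, mode='eval').body)

    print(safe_eval("(4 * 1024) - 0x20"))   # 4064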
class StringFile(object):
    '''
    A class to allow access to strings as if they were read from a file.
    Used internally as a conditional superclass to InternalBlockFile.
    '''

    def __init__(self, fname, mode='r'):
        self.string = fname
        self.name = "String"
@@ -238,7 +259,7 @@ class StringFile(object):
        if n == -1:
            data = self.string[self.total_read:]
        else:
            data = self.string[self.total_read:self.total_read + n]
        return data

    def tell(self):
@@ -253,10 +274,12 @@ class StringFile(object):
    def close(self):
        pass


def BlockFile(fname, mode='r', subclass=io.FileIO, **kwargs):

    # Defining a class inside a function allows it to be dynamically subclassed
    class InternalBlockFile(subclass):
        '''
        Abstraction class for accessing binary files.
@@ -289,7 +312,8 @@ def BlockFile(fname, mode='r', subclass=io.FileIO, **kwargs):
        DEFAULT_BLOCK_PEEK_SIZE = 8 * 1024

        # Max number of bytes to process at one time. This needs to be large enough to
        # limit disk I/O, but small enough to limit the size of processed data
        # blocks.
        DEFAULT_BLOCK_READ_SIZE = 1 * 1024 * 1024

        def __init__(self, fname, mode='r', length=0, offset=0, block=DEFAULT_BLOCK_READ_SIZE, peek=DEFAULT_BLOCK_PEEK_SIZE, swap=0):
@@ -310,7 +334,8 @@ def BlockFile(fname, mode='r', subclass=io.FileIO, **kwargs):
            self.block_read_size = self.DEFAULT_BLOCK_READ_SIZE
            self.block_peek_size = self.DEFAULT_BLOCK_PEEK_SIZE

            # This is so that custom parent classes can access/modify arguments
            # as necessary
            self.args = GenericContainer(fname=fname,
                                         mode=mode,
                                         length=length,
@@ -390,7 +415,7 @@ def BlockFile(fname, mode='r', subclass=io.FileIO, **kwargs):
            if self.swap_size > 0:
                while i < len(block):
                    data += block[i:i + self.swap_size][::-1]
                    i += self.swap_size
            else:
                data = block
@@ -398,7 +423,8 @@ def BlockFile(fname, mode='r', subclass=io.FileIO, **kwargs):
            return data

        def reset(self):
            self.set_block_size(
                block=self.base_block_size, peek=self.base_peek_size)
            self.seek(self.offset)

        def set_block_size(self, block=None, peek=None):
@@ -444,7 +470,7 @@ def BlockFile(fname, mode='r', subclass=io.FileIO, **kwargs):
                n = self.length - self.total_read

            while n < 0 or l < n:
                tmp = super(self.__class__, self).read(n - l)
                if tmp:
                    data += tmp
                    l += len(tmp)
@@ -487,4 +513,3 @@ def BlockFile(fname, mode='r', subclass=io.FileIO, **kwargs):
            return (data, dlen)

    return InternalBlockFile(fname, mode=mode, **kwargs)
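
The BlockFile factory above is the I/O primitive the scan modules build on; a sketch of the typical read loop follows. The file name is a placeholder, and the loop termination test is an assumption based on the (data, dlen) return shown above:

    from binwalk.core.common import BlockFile

    # Illustrative read loop; 'firmware.bin' is a placeholder path.
    fp = BlockFile('firmware.bin', offset=0, length=0)
    while True:
        (data, dlen) = fp.read_block()
        if not data or dlen == 0:
            break
        # ... process dlen bytes of data here ...
    fp.close()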
@@ -9,6 +9,7 @@ PY_MAJOR_VERSION = sys.version_info[0]
if PY_MAJOR_VERSION > 2:
    string.letters = string.ascii_letters


def iterator(dictionary):
    '''
    For cross compatibility between Python 2 and Python 3 dictionaries.
@@ -18,6 +19,7 @@ def iterator(dictionary):
    else:
        return dictionary.iteritems()


def has_key(dictionary, key):
    '''
    For cross compatibility between Python 2 and Python 3 dictionaries.
@@ -27,6 +29,7 @@ def has_key(dictionary, key):
    else:
        return dictionary.has_key(key)


def get_keys(dictionary):
    '''
    For cross compatibility between Python 2 and Python 3 dictionaries.
@@ -36,6 +39,7 @@ def get_keys(dictionary):
    else:
        return dictionary.keys()


def str2bytes(string):
    '''
    For cross compatibility between Python 2 and Python 3 strings.
@@ -45,6 +49,7 @@ def str2bytes(string):
    else:
        return string


def bytes2str(bs):
    '''
    For cross compatibility between Python 2 and Python 3 strings.
@@ -54,6 +59,7 @@ def bytes2str(bs):
    else:
        return bs


def string_decode(string):
    '''
    For cross compatibility between Python 2 and Python 3 strings.
@@ -63,6 +69,7 @@ def string_decode(string):
    else:
        return string.decode('string_escape')


def user_input(prompt=''):
    '''
    For getting raw user input in Python 2 and 3.
@@ -71,4 +78,3 @@ def user_input(prompt=''):
        return input(prompt)
    else:
        return raw_input(prompt)
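
A quick illustration of how these Python 2/3 shims behave; the explicit import list is assumed from the wildcard imports used elsewhere in this commit:

    from binwalk.core.compat import iterator, has_key, str2bytes, bytes2str

    settings = {'offset': 0, 'length': 1024}

    # iterator() yields items() on Python 3 and iteritems() on Python 2.
    for (key, value) in iterator(settings):
        print(key, value)

    print(has_key(settings, 'offset'))   # True on both interpreter versions
    print(str2bytes('magic'))            # b'magic' on Python 3, 'magic' on Python 2
    print(bytes2str(b'magic'))           # 'magic' on both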
@@ -7,7 +7,9 @@ import datetime
import binwalk.core.common
from binwalk.core.compat import *


class Display(object):
    '''
    Class to handle display of output and writing to log files.
    This class is instantiated for all modules implicitly and should not need to be invoked directly by most modules.
@@ -100,26 +102,32 @@ class Display(object):
            self.log("", [file_name, md5sum, timestamp])

        self._fprint("%s", "\n", csv=False)
        self._fprint("Scan Time: %s\n", [
                     timestamp], csv=False, filter=False)
        self._fprint("Target File: %s\n", [
                     file_name], csv=False, filter=False)
        self._fprint(
            "MD5 Checksum: %s\n", [md5sum], csv=False, filter=False)
        if self.custom_verbose_format and self.custom_verbose_args:
            self._fprint(
                self.custom_verbose_format, self.custom_verbose_args, csv=False, filter=False)

        self._fprint("%s", "\n", csv=False, filter=False)
        self._fprint(self.header_format, args, filter=False)
        self._fprint(
            "%s", ["-" * self.HEADER_WIDTH + "\n"], csv=False, filter=False)

    def result(self, *args):
        # Convert to list for item assignment
        args = list(args)

        # Replace multiple spaces with single spaces. This is to prevent accidentally putting
        # four spaces in the description string, which would break
        # auto-formatting.
        for i in range(len(args)):
            if isinstance(args[i], str):
                while "    " in args[i]:
                    args[i] = args[i].replace("  ", " ")

        self._fprint(self.result_format, tuple(args))
@@ -177,13 +185,15 @@ class Display(object):
        offset = 0
        self.string_parts = []

        # Split the line into an array of columns, e.g., ['0', '0x00000000',
        # 'Some description here']
        line_columns = line.split(None, self.num_columns - 1)

        if line_columns:
            # Find where the start of the last column (description) starts in the line of text.
            # All line wraps need to be aligned to this offset.
            offset = line.rfind(line_columns[-1])
            # The delimiter will be a newline followed by spaces padding out
            # the line wrap to the alignment offset.
            delim += ' ' * offset

        if line_columns and self.fit_to_screen and len(line) > self.SCREEN_WIDTH:
@@ -194,19 +204,25 @@ class Display(object):
            # Loop to split up line into multiple max_line_wrap_length pieces
            while len(line[offset:]) > max_line_wrap_length:
                # Find the nearest space to wrap the line at (so we don't split
                # a word across two lines)
                split_offset = line[
                    offset:offset + max_line_wrap_length].rfind(' ')
                # If there were no good places to split the line, just truncate
                # it at max_line_wrap_length
                if split_offset < 1:
                    split_offset = max_line_wrap_length

                self._append_to_data_parts(line, offset, offset + split_offset)
                offset += split_offset

            # Add any remaining data (guarunteed to be max_line_wrap_length
            # long or shorter) to self.string_parts
            self._append_to_data_parts(
                line, offset, offset + len(line[offset:]))

            # Append self.string_parts to formatted_line; each part seperated
            # by delim
            formatted_line += delim.join(self.string_parts)
        else:
            formatted_line = line
@@ -228,10 +244,10 @@ class Display(object):
                import termios

                # Get the terminal window width
                hw = struct.unpack(
                    'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234'))
                self.SCREEN_WIDTH = self.HEADER_WIDTH = hw[1]
            except KeyboardInterrupt as e:
                raise e
            except Exception:
                pass
class ParserException(Exception):
    '''
    Exception thrown specifically for signature file parsing errors.
    '''
    pass


class ModuleException(Exception):
    '''
    Module exception class.
    Nothing special here except the name.
    '''
    pass


class IgnoreFileException(Exception):
    '''
    Special exception class used by the load_file plugin method
    to indicate that the file that we are attempting to load
...
@@ -4,11 +4,14 @@ import io
import os
import logging


class ShutUpHashlib(logging.Filter):
    '''
    This is used to suppress hashlib exception messages
    if using the Python interpreter bundled with IDA.
    '''

    def filter(self, record):
        return not record.getMessage().startswith("code for hash")
@@ -16,14 +19,16 @@ try:
    import idc
    import idaapi
    LOADED_IN_IDA = True
    logger = logging.getLogger()
    logger.addFilter(ShutUpHashlib())
except ImportError:
    LOADED_IN_IDA = False


def start_address():
    return idaapi.get_first_seg().startEA


def end_address():
    last_ea = idc.BADADDR
    seg = idaapi.get_first_seg()
@@ -34,7 +39,9 @@ def end_address():
    return last_ea


class IDBFileIO(io.FileIO):
    '''
    A custom class to override binwalk.core.common.Blockfile in order to
    read data directly out of the IDB, rather than reading from the original
@@ -58,7 +65,7 @@ class IDBFileIO(io.FileIO):
            if self.args.size == 0:
                self.args.size = end_address()

            if self.args.offset == 0:
                self.args.offset = start_address()
            elif self.args.offset < 0:
@@ -89,7 +96,7 @@ class IDBFileIO(io.FileIO):
                if filler_count:
                    data += "\x00" * filler_count
                    filler_count = 0

                if (self.idb_pos + n) > segment.endEA:
                    read_count = segment.endEA - self.idb_pos
                else:
@@ -98,7 +105,8 @@ class IDBFileIO(io.FileIO):
                try:
                    data += idc.GetManyBytes(self.idb_pos, read_count)
                except TypeError as e:
                    # This happens when trying to read from uninitialized
                    # segments (e.g., .bss)
                    data += "\x00" * read_count

                n -= read_count
@@ -116,7 +124,7 @@ class IDBFileIO(io.FileIO):
        else:
            # Don't actually write anything to the IDB, as, IMHO,
            # a binwalk plugin should never do this. But return the
            # number of bytes we were requested to write so that
            # any callers are happy.
            return len(data)
@@ -136,4 +144,3 @@ class IDBFileIO(io.FileIO):
            return super(IDBFileIO, self).tell()
        else:
            return self.idb_pos
@@ -9,12 +9,15 @@ import binwalk.core.settings
from binwalk.core.compat import *
from binwalk.core.exceptions import IgnoreFileException


class Plugin(object):
    '''
    Class from which all plugin classes are based.
    '''
    # A list of case-sensitive module names for which this plugin should be loaded.
    # If no module names are specified, the plugin will be loaded for all
    # modules.
    MODULES = []

    def __init__(self, module):
@@ -64,7 +67,9 @@ class Plugin(object):
        '''
        pass


class Plugins(object):
    '''
    Class to load and call plugin callback functions, handled automatically by Binwalk.scan / Binwalk.single_scan.
    An instance of this class is available during a scan via the Binwalk.plugins object.
@@ -114,7 +119,8 @@ class Plugins(object):
            except IgnoreFileException as e:
                raise e
            except Exception as e:
                binwalk.core.common.warning(
                    "%s.%s failed: %s" % (callback.__module__, callback.__name__, e))

    def _find_plugin_class(self, plugin):
        for (name, klass) in inspect.getmembers(plugin, inspect.isclass):
@@ -145,17 +151,17 @@ class Plugins(object):
        '''
        plugins = {
            'user': {
                'modules': [],
                'descriptions': {},
                'enabled': {},
                'path': None,
            },
            'system': {
                'modules': [],
                'descriptions': {},
                'enabled': {},
                'path': None,
            }
        }
@@ -171,7 +177,8 @@ class Plugins(object):
                    module = file_name[:-len(self.MODULE_EXTENSION)]

                    try:
                        plugin = imp.load_source(
                            module, os.path.join(plugins[key]['path'], file_name))
                        plugin_class = self._find_plugin_class(plugin)

                        plugins[key]['enabled'][module] = True
@@ -179,15 +186,18 @@ class Plugins(object):
                    except KeyboardInterrupt as e:
                        raise e
                    except Exception as e:
                        binwalk.core.common.warning(
                            "Error loading plugin '%s': %s" % (file_name, str(e)))
                        plugins[key]['enabled'][module] = False

                    try:
                        plugins[key]['descriptions'][
                            module] = plugin_class.__doc__.strip().split('\n')[0]
                    except KeyboardInterrupt as e:
                        raise e
                    except Exception as e:
                        plugins[key]['descriptions'][
                            module] = 'No description'

        return plugins

    def load_plugins(self):
@@ -198,7 +208,8 @@ class Plugins(object):
    def _load_plugin_modules(self, plugins):
        for module in plugins['modules']:
            try:
                file_path = os.path.join(
                    plugins['path'], module + self.MODULE_EXTENSION)
            except KeyboardInterrupt as e:
                raise e
            except Exception:
@@ -220,7 +231,8 @@ class Plugins(object):
                pass

            try:
                self.load_file.append(
                    getattr(class_instance, self.LOADFILE))
            except KeyboardInterrupt as e:
                raise e
            except Exception as e:
@@ -234,7 +246,8 @@ class Plugins(object):
                pass

            try:
                self.post_scan.append(
                    getattr(class_instance, self.POSTSCAN))
            except KeyboardInterrupt as e:
                raise e
            except Exception as e:
@@ -250,7 +263,8 @@ class Plugins(object):
            except KeyboardInterrupt as e:
                raise e
            except Exception as e:
                binwalk.core.common.warning(
                    "Failed to load plugin module '%s': %s" % (module, str(e)))

    def pre_scan_callbacks(self, obj):
        return self._call_plugins(self.pre_scan)
@@ -266,4 +280,3 @@ class Plugins(object):
    def scan_callbacks(self, obj):
        return self._call_plugins(self.scan, obj)
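
For orientation, plugins loaded by the Plugins class above are Plugin subclasses that define some of the callbacks collected here. The skeleton below is hypothetical: the import path, the MODULES filter, and the callback names (pre_scan/scan/post_scan) are inferred from the loader code, not taken verbatim from this commit:

    import binwalk.core.plugin

    # Hypothetical plugin skeleton; the empty bodies are illustrative only.
    class ExamplePlugin(binwalk.core.plugin.Plugin):
        MODULES = ['Signature']   # only attach to the Signature module

        def pre_scan(self):
            pass                  # called once before the module runs

        def scan(self, result):
            pass                  # called for each result the module produces

        def post_scan(self):
            pass                  # called once after the module finishes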
# Code for loading and accessing binwalk settings (extraction rules,
# signature files, etc).
import os
import binwalk.core.common as common
from binwalk.core.compat import *


class Settings:
    '''
    Binwalk settings class, used for accessing user and system file paths and general configuration settings.
@@ -41,18 +44,26 @@ class Settings:
        self.system_dir = common.get_module_path()

        # Build the paths to all user-specific files
        self.user = common.GenericContainer(
            binarch=self._user_path(
                self.BINWALK_MAGIC_DIR, self.BINARCH_MAGIC_FILE),
            magic=self._magic_signature_files(
                user_only=True),
            extract=self._user_path(
                self.BINWALK_CONFIG_DIR, self.EXTRACT_FILE),
            modules=self._user_path(
                self.BINWALK_MODULES_DIR),
            plugins=self._user_path(self.BINWALK_PLUGINS_DIR))

        # Build the paths to all system-wide files
        self.system = common.GenericContainer(
            binarch=self._system_path(
                self.BINWALK_MAGIC_DIR, self.BINARCH_MAGIC_FILE),
            magic=self._magic_signature_files(
                system_only=True),
            extract=self._system_path(
                self.BINWALK_CONFIG_DIR, self.EXTRACT_FILE),
            plugins=self._system_path(self.BINWALK_PLUGINS_DIR))

    def _magic_signature_files(self, system_only=False, user_only=False):
        '''
@@ -64,15 +75,18 @@ class Settings:
        Returns a list of user/system magic signature files.
        '''
        files = []
        user_binarch = self._user_path(
            self.BINWALK_MAGIC_DIR, self.BINARCH_MAGIC_FILE)
        system_binarch = self._system_path(
            self.BINWALK_MAGIC_DIR, self.BINARCH_MAGIC_FILE)

        def list_files(dir_path):
            # Ignore hidden dotfiles.
            return [os.path.join(dir_path, x) for x in os.listdir(dir_path) if not x.startswith('.')]

        if not system_only:
            user_dir = os.path.join(
                self.user_dir, self.BINWALK_USER_DIR, self.BINWALK_MAGIC_DIR)
            files += list_files(user_dir)
        if not user_only:
            system_dir = os.path.join(self.system_dir, self.BINWALK_MAGIC_DIR)
@@ -175,7 +189,7 @@ class Settings:
        '''
        try:
            return self._file_path(os.path.join(self.user_dir, self.BINWALK_USER_DIR, subdir), basename)
        except KeyboardInterrupt as e:
            raise e
        except Exception:
            return None
@@ -191,8 +205,7 @@ class Settings:
        '''
        try:
            return self._file_path(os.path.join(self.system_dir, subdir), basename)
        except KeyboardInterrupt as e:
            raise e
        except Exception:
            return None
@@ -13,6 +13,7 @@ try:
except ImportError:
    import socketserver as SocketServer


class StatusRequestHandler(SocketServer.BaseRequestHandler):

    def handle(self):
@@ -27,47 +28,57 @@ class StatusRequestHandler(SocketServer.BaseRequestHandler):
            time.sleep(0.1)

            try:
                self.request.send(
                    binwalk.core.compat.str2bytes('\b' * last_status_message_len))
                self.request.send(
                    binwalk.core.compat.str2bytes(' ' * last_status_message_len))
                self.request.send(
                    binwalk.core.compat.str2bytes('\b' * last_status_message_len))

                if self.server.binwalk.status.shutdown:
                    self.server.binwalk.status.finished = True
                    break

                if self.server.binwalk.status.total != 0:
                    percentage = (
                        (float(self.server.binwalk.status.completed) / float(self.server.binwalk.status.total)) * 100)
                    status_message = message_format % (
                        self.server.binwalk.status.fp.path,
                        percentage,
                        self.server.binwalk.status.completed,
                        self.server.binwalk.status.total)
                elif not message_sent:
                    status_message = "No status information available at this time!"
                else:
                    continue

                last_status_message_len = len(status_message)
                self.request.send(
                    binwalk.core.compat.str2bytes(status_message))
                message_sent = True
            except IOError as e:
                if e.errno == errno.EPIPE:
                    break
            except Exception as e:
                binwalk.core.common.debug(
                    'StatusRequestHandler exception: ' + str(e) + '\n')
            except KeyboardInterrupt as e:
                raise e

        self.server.binwalk.status.running = False
        return


class ThreadedStatusServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    daemon_threads = True
    allow_reuse_address = True


class StatusServer(object):

    def __init__(self, port, binwalk):
        self.server = ThreadedStatusServer(
            ('127.0.0.1', port), StatusRequestHandler)
        self.server.binwalk = binwalk

        t = threading.Thread(target=self.server.serve_forever)
...
@@ -17,6 +17,6 @@ from binwalk.modules.extractor import Extractor
from binwalk.modules.entropy import Entropy

# These are depreciated.
# from binwalk.modules.binvis import Plotter
# from binwalk.modules.hashmatch import HashMatch
# from binwalk.modules.heuristics import HeuristicCompressionAnalyzer
@@ -5,7 +5,9 @@ from binwalk.core.compat import *
from binwalk.core.common import BlockFile
from binwalk.core.module import Module, Option, Kwarg


class Plotter(Module):
    '''
    Base class for visualizing binaries in Qt.
    Other plotter classes are derived from this.
@@ -17,33 +19,34 @@ class Plotter(Module):
    TITLE = "Binary Visualization"

    CLI = [
        Option(short='3',
               long='3D',
               kwargs={'axis': 3, 'enabled': True},
               description='Generate a 3D binary visualization'),
        Option(short='2',
               long='2D',
               kwargs={'axis': 2, 'enabled': True},
               description='Project data points onto 3D cube walls only'),
        Option(short='V',
               long='points',
               type=int,
               kwargs={'max_points': 0},
               description='Set the maximum number of plotted data points'),
        # Option(short='V',
        #        long='grids',
        #        kwargs={'show_grids' : True},
        #        description='Display the x-y-z grids in the resulting plot'),
    ]

    KWARGS = [
        Kwarg(name='axis', default=3),
        Kwarg(name='max_points', default=0),
        Kwarg(name='show_grids', default=False),
        Kwarg(name='enabled', default=False),
    ]

    # There isn't really any useful data to print to console. Disable header
    # and result output.
    HEADER = None
    RESULT = None
@@ -64,7 +67,8 @@ class Plotter(Module):
            self.MAX_PLOT_POINTS = self.MAX_3D_PLOT_POINTS
            self._generate_data_point = self._generate_3d_data_point
        else:
            raise Exception(
                "Invalid Plotter axis specified: %d. Must be one of: [2,3]" % self.axis)

        if not self.max_points:
            self.max_points = self.MAX_PLOT_POINTS
@@ -106,7 +110,8 @@ class Plotter(Module):
        # Go through every data point and how many times that point occurs
        for (point, count) in iterator(data_points):

            # For each data point, compare it to each remaining weight
            # value
            for w in get_keys(weightings):

                # If the number of times this data point occurred is >= the weight value,
@@ -119,18 +124,21 @@ class Plotter(Module):
                else:
                    break

            # Throw out weight values that exceed the maximum number of
            # data points
            if weightings[w] > self.max_points:
                del weightings[w]

            # If there's only one weight value left, no sense in continuing
            # the loop...
            if len(weightings) == 1:
                break

        # The least weighted value is our minimum weight
        min_weight = min(weightings)

        # Get rid of all data points that occur less frequently than our
        # minimum weight
        for point in get_keys(data_points):
            if data_points[point] < min_weight:
                del data_points[point]
@@ -138,7 +146,8 @@ class Plotter(Module):
        for point in sorted(data_points, key=data_points.get, reverse=True):
            plot_points[point] = data_points[point]
            # Register this as a result in case future modules need access to the raw point information,
            # but mark plot as False to prevent the entropy module from
            # attempting to overlay this data on its graph.
            self.result(point=point, plot=False)
            total += 1
            if total >= self.max_points:
@@ -154,7 +163,7 @@ class Plotter(Module):
        Returns a data point tuple.
        '''
        return (0, 0, 0)

    def _generate_data_points(self, fp):
        '''
@@ -178,8 +187,8 @@ class Plotter(Module):
                break

            i = 0
            while (i + (self.axis - 1)) < dlen:
                point = self._generate_data_point(data[i:i + self.axis])
                if has_key(data_points, point):
                    data_points[point] += 1
                else:
@@ -208,7 +217,8 @@ class Plotter(Module):
            frequency_percentage = (weight / nitems)

            # Give points that occur more frequently a brighter color and larger point size.
            # Frequency is determined as a percentage of total unique data
            # points.
            if frequency_percentage > .010:
                size[i] = .20
                r = 1.0
@@ -227,7 +237,8 @@ class Plotter(Module):
            i += 1

        scatter_plot = gl.GLScatterPlotItem(
            pos=pos, size=size, color=color, pxMode=False)
        scatter_plot.translate(-127.5, -127.5, -127.5)

        return scatter_plot
@@ -258,12 +269,14 @@ class Plotter(Module):
        for fd in iter(self.next_file, None):
            data_points = self._generate_data_points(fd)

            self._print("Generating plot points from %d data points" %
                        len(data_points))
            self.plot_points = self._generate_plot_points(data_points)
            del data_points

            self._print("Generating graph from %d plot points" %
                        len(self.plot_points))
            self.window.addItem(self._generate_plot(self.plot_points))
@@ -307,4 +320,3 @@ class Plotter(Module):
    def run(self):
        self.plot()
        return True
# Module to process general user input options (scan length, starting
# offset, etc).
import io
import os
@@ -12,6 +13,7 @@ import binwalk.core.settings
from binwalk.core.compat import *
from binwalk.core.module import Module, Option, Kwarg, show_help


class General(Module):

    TITLE = "General"
@@ -23,77 +25,77 @@ class General(Module):
        Option(long='length',
               short='l',
               type=int,
               kwargs={'length': 0},
               description='Number of bytes to scan'),
        Option(long='offset',
               short='o',
               type=int,
               kwargs={'offset': 0},
               description='Start scan at this file offset'),
        Option(long='base',
               short='O',
               type=int,
               kwargs={'base': 0},
               description='Add a base address to all printed offsets'),
        Option(long='block',
               short='K',
               type=int,
               kwargs={'block': 0},
               description='Set file block size'),
        Option(long='swap',
               short='g',
               type=int,
               kwargs={'swap_size': 0},
               description='Reverse every n bytes before scanning'),
        Option(long='log',
               short='f',
               type=argparse.FileType,
               kwargs={'log_file': None},
               description='Log results to file'),
        Option(long='csv',
               short='c',
               kwargs={'csv': True},
               description='Log results to file in CSV format'),
        Option(long='term',
               short='t',
               kwargs={'format_to_terminal': True},
               description='Format output to fit the terminal window'),
        Option(long='quiet',
               short='q',
               kwargs={'quiet': True},
               description='Suppress output to stdout'),
        Option(long='verbose',
               short='v',
               kwargs={'verbose': True},
               description='Enable verbose output'),
        Option(short='h',
               long='help',
               kwargs={'show_help': True},
               description='Show help output'),
        Option(short='a',
               long='finclude',
               type=str,
               kwargs={'file_name_include_regex': ""},
               description='Only scan files whose names match this regex'),
        Option(short='p',
               long='fexclude',
               type=str,
               kwargs={'file_name_exclude_regex': ""},
               description='Do not scan files whose names match this regex'),
        Option(short='s',
               long='status',
               type=int,
               kwargs={'status_server_port': 0},
               description='Enable the status server on the specified port'),
        Option(long=None,
               short=None,
               type=binwalk.core.common.BlockFile,
               kwargs={'files': []}),

        # Hidden, API-only arguments
        Option(long="string",
               hidden=True,
               kwargs={'subclass': binwalk.core.common.StringFile}),
    ]

    KWARGS = [
...@@ -132,9 +134,11 @@ class General(Module): ...@@ -132,9 +134,11 @@ class General(Module):
# Build file name filter regex rules # Build file name filter regex rules
if self.file_name_include_regex: if self.file_name_include_regex:
self.file_name_include_regex = re.compile(self.file_name_include_regex) self.file_name_include_regex = re.compile(
self.file_name_include_regex)
if self.file_name_exclude_regex: if self.file_name_exclude_regex:
self.file_name_exclude_regex = re.compile(self.file_name_exclude_regex) self.file_name_exclude_regex = re.compile(
self.file_name_exclude_regex)
self.settings = binwalk.core.settings.Settings() self.settings = binwalk.core.settings.Settings()
self.display = binwalk.core.display.Display(log=self.log_file, self.display = binwalk.core.display.Display(log=self.log_file,
...@@ -160,7 +164,8 @@ class General(Module): ...@@ -160,7 +164,8 @@ class General(Module):
Must be called after self._test_target_files so that self.target_files is properly set. Must be called after self._test_target_files so that self.target_files is properly set.
''' '''
# If more than one target file was specified, enable verbose mode; else, there is # If more than one target file was specified, enable verbose mode; else, there is
# nothing in some outputs to indicate which scan corresponds to which file. # nothing in some outputs to indicate which scan corresponds to which
# file.
if len(self.target_files) > 1 and not self.verbose: if len(self.target_files) > 1 and not self.verbose:
self.verbose = True self.verbose = True
...@@ -217,4 +222,3 @@ class General(Module): ...@@ -217,4 +222,3 @@ class General(Module):
raise e raise e
except Exception as e: except Exception as e:
self.error(description="Cannot open file : %s" % str(e)) self.error(description="Cannot open file : %s" % str(e))
# Routines to perform Chi Squared tests. # Routines to perform Chi Squared tests.
# Used for fingerprinting unknown areas of high entropy (e.g., is this block of high entropy data compressed or encrypted?). # Used for fingerprinting unknown areas of high entropy (e.g., is this block of high entropy data compressed or encrypted?).
# Inspired by people who actually know what they're doing: http://www.fourmilab.ch/random/ # Inspired by people who actually know what they're doing:
# http://www.fourmilab.ch/random/
import math import math
from binwalk.core.compat import * from binwalk.core.compat import *
from binwalk.core.module import Module, Kwarg, Option, Dependency from binwalk.core.module import Module, Kwarg, Option, Dependency
class ChiSquare(object): class ChiSquare(object):
''' '''
Performs a Chi Squared test against the provided data. Performs a Chi Squared test against the provided data.
''' '''
...@@ -20,12 +23,13 @@ class ChiSquare(object): ...@@ -20,12 +23,13 @@ class ChiSquare(object):
Returns None. Returns None.
''' '''
self.bytes = {} self.bytes = {}
self.freedom = self.IDEAL - 1 self.freedom = self.IDEAL - 1
# Initialize the self.bytes dictionary with keys for all possible byte values (0 - 255) # Initialize the self.bytes dictionary with keys for all possible byte
# values (0 - 255)
for i in range(0, int(self.IDEAL)): for i in range(0, int(self.IDEAL)):
self.bytes[chr(i)] = 0 self.bytes[chr(i)] = 0
self.reset() self.reset()
def reset(self): def reset(self):
...@@ -33,7 +37,7 @@ class ChiSquare(object): ...@@ -33,7 +37,7 @@ class ChiSquare(object):
self.byte_count = 0 self.byte_count = 0
for key in self.bytes.keys(): for key in self.bytes.keys():
self.bytes[key] = 0 self.bytes[key] = 0
def update(self, data): def update(self, data):
''' '''
...@@ -59,20 +63,23 @@ class ChiSquare(object): ...@@ -59,20 +63,23 @@ class ChiSquare(object):
if expected: if expected:
for byte in self.bytes.values(): for byte in self.bytes.values():
self.xc2 += ((byte - expected) ** 2 ) / expected self.xc2 += ((byte - expected) ** 2) / expected
return self.xc2 return self.xc2
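The accumulated statistic is easier to see as one standalone function. A minimal sketch assuming Python 3 bytes input (the class above builds the same value incrementally via update()):

import collections

def chi_square(data):
    # Compare the observed byte histogram against the uniform distribution
    # expected of truly random data (expected count = len(data) / 256).
    if not data:
        return 0.0
    counts = collections.Counter(data)
    expected = len(data) / 256.0
    return sum(((counts[b] - expected) ** 2) / expected for b in range(256))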
class EntropyBlock(object): class EntropyBlock(object):
def __init__(self, **kwargs): def __init__(self, **kwargs):
self.start = None self.start = None
self.end = None self.end = None
self.length = None self.length = None
for (k,v) in iterator(kwargs): for (k, v) in iterator(kwargs):
setattr(self, k, v) setattr(self, k, v)
class HeuristicCompressionAnalyzer(Module): class HeuristicCompressionAnalyzer(Module):
''' '''
Performs analysis and attempts to interpret the results. Performs analysis and attempts to interpret the results.
''' '''
...@@ -87,26 +94,27 @@ class HeuristicCompressionAnalyzer(Module): ...@@ -87,26 +94,27 @@ class HeuristicCompressionAnalyzer(Module):
TITLE = "Heuristic Compression" TITLE = "Heuristic Compression"
DEPENDS = [ DEPENDS = [
Dependency(name='Entropy', Dependency(name='Entropy',
attribute='entropy', attribute='entropy',
kwargs={'enabled' : True, 'do_plot' : False, 'display_results' : False, 'block_size' : ENTROPY_BLOCK_SIZE}), kwargs={
'enabled': True, 'do_plot': False, 'display_results': False, 'block_size': ENTROPY_BLOCK_SIZE}),
] ]
CLI = [ CLI = [
Option(short='H', Option(short='H',
long='heuristic', long='heuristic',
kwargs={'enabled' : True}, kwargs={'enabled': True},
description='Heuristically classify high entropy data'), description='Heuristically classify high entropy data'),
Option(short='a', Option(short='a',
long='trigger', long='trigger',
kwargs={'trigger_level' : 0}, kwargs={'trigger_level': 0},
type=float, type=float,
description='Set the entropy trigger level (0.0 - 1.0, default: %.2f)' % ENTROPY_TRIGGER), description='Set the entropy trigger level (0.0 - 1.0, default: %.2f)' % ENTROPY_TRIGGER),
] ]
KWARGS = [ KWARGS = [
Kwarg(name='enabled', default=False), Kwarg(name='enabled', default=False),
Kwarg(name='trigger_level', default=ENTROPY_TRIGGER), Kwarg(name='trigger_level', default=ENTROPY_TRIGGER),
] ]
def init(self): def init(self):
...@@ -130,17 +138,19 @@ class HeuristicCompressionAnalyzer(Module): ...@@ -130,17 +138,19 @@ class HeuristicCompressionAnalyzer(Module):
self.blocks[result.file.name] = [] self.blocks[result.file.name] = []
if result.entropy >= self.trigger_level and (not self.blocks[result.file.name] or self.blocks[result.file.name][-1].end is not None): if result.entropy >= self.trigger_level and (not self.blocks[result.file.name] or self.blocks[result.file.name][-1].end is not None):
self.blocks[result.file.name].append(EntropyBlock(start=result.offset + self.BLOCK_OFFSET)) self.blocks[result.file.name].append(
EntropyBlock(start=result.offset + self.BLOCK_OFFSET))
elif result.entropy < self.trigger_level and self.blocks[result.file.name] and self.blocks[result.file.name][-1].end is None: elif result.entropy < self.trigger_level and self.blocks[result.file.name] and self.blocks[result.file.name][-1].end is None:
self.blocks[result.file.name][-1].end = result.offset - self.BLOCK_OFFSET self.blocks[result.file.name][
-1].end = result.offset - self.BLOCK_OFFSET
def run(self): def run(self):
for fp in iter(self.next_file, None): for fp in iter(self.next_file, None):
if has_key(self.blocks, fp.name): if has_key(self.blocks, fp.name):
self.header() self.header()
for block in self.blocks[fp.name]: for block in self.blocks[fp.name]:
if block.end is None: if block.end is None:
...@@ -173,7 +183,7 @@ class HeuristicCompressionAnalyzer(Module): ...@@ -173,7 +183,7 @@ class HeuristicCompressionAnalyzer(Module):
while j < dlen: while j < dlen:
chi.reset() chi.reset()
data = d[j:j+self.block_size] data = d[j:j + self.block_size]
if len(data) < self.block_size: if len(data) < self.block_size:
break break
...@@ -181,7 +191,7 @@ class HeuristicCompressionAnalyzer(Module): ...@@ -181,7 +191,7 @@ class HeuristicCompressionAnalyzer(Module):
if chi.chisq() >= self.CHI_CUTOFF: if chi.chisq() >= self.CHI_CUTOFF:
num_error += 1 num_error += 1
j += self.block_size j += self.block_size
if (j + i) > block.length: if (j + i) > block.length:
...@@ -194,5 +204,6 @@ class HeuristicCompressionAnalyzer(Module): ...@@ -194,5 +204,6 @@ class HeuristicCompressionAnalyzer(Module):
else: else:
verdict = 'High entropy data, best guess: encrypted' verdict = 'High entropy data, best guess: encrypted'
desc = '%s, size: %d, %d low entropy blocks' % (verdict, block.length, num_error) desc = '%s, size: %d, %d low entropy blocks' % (
verdict, block.length, num_error)
self.result(offset=block.start, description=desc, file=fp) self.result(offset=block.start, description=desc, file=fp)
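The classification loop amounts to counting how many fixed-size blocks deviate from a uniform byte distribution. A hedged, self-contained sketch of that idea; the block size, chi-square cutoff, and decision threshold below are illustrative assumptions, not binwalk's actual values, which sit in elided hunks:

import collections

def classify_high_entropy(data, block_size=32, chi_cutoff=512, max_deviating_ratio=0.05):
    # Encrypted data tends to look uniformly random in every block, while
    # compressed data usually produces at least some blocks whose chi-square
    # value deviates strongly from uniformity ("low entropy blocks").
    deviating = blocks = 0
    for i in range(0, len(data) - block_size + 1, block_size):
        block = data[i:i + block_size]
        counts = collections.Counter(block)
        expected = block_size / 256.0
        chisq = sum(((counts[b] - expected) ** 2) / expected for b in range(256))
        if chisq >= chi_cutoff:
            deviating += 1
        blocks += 1
    if blocks and (deviating / float(blocks)) <= max_deviating_ratio:
        return 'encrypted'
    return 'compressed'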
...@@ -5,13 +5,13 @@ import binwalk.core.common as common ...@@ -5,13 +5,13 @@ import binwalk.core.common as common
from binwalk.core.compat import * from binwalk.core.compat import *
from binwalk.core.module import Module, Option, Kwarg from binwalk.core.module import Module, Option, Kwarg
class HexDiff(Module):
class HexDiff(Module):
COLORS = { COLORS = {
'red' : '31', 'red': '31',
'green' : '32', 'green': '32',
'blue' : '34', 'blue': '34',
} }
SEPERATORS = ['\\', '/'] SEPERATORS = ['\\', '/']
...@@ -23,34 +23,34 @@ class HexDiff(Module): ...@@ -23,34 +23,34 @@ class HexDiff(Module):
TITLE = "Binary Diffing" TITLE = "Binary Diffing"
CLI = [ CLI = [
Option(short='W', Option(short='W',
long='hexdump', long='hexdump',
kwargs={'enabled' : True}, kwargs={'enabled': True},
description='Perform a hexdump / diff of a file or files'), description='Perform a hexdump / diff of a file or files'),
Option(short='G', Option(short='G',
long='green', long='green',
kwargs={'show_green' : True}, kwargs={'show_green': True},
description='Only show lines containing bytes that are the same among all files'), description='Only show lines containing bytes that are the same among all files'),
Option(short='i', Option(short='i',
long='red', long='red',
kwargs={'show_red' : True}, kwargs={'show_red': True},
description='Only show lines containing bytes that are different among all files'), description='Only show lines containing bytes that are different among all files'),
Option(short='U', Option(short='U',
long='blue', long='blue',
kwargs={'show_blue' : True}, kwargs={'show_blue': True},
description='Only show lines containing bytes that are different among some files'), description='Only show lines containing bytes that are different among some files'),
Option(short='w', Option(short='w',
long='terse', long='terse',
kwargs={'terse' : True}, kwargs={'terse': True},
description='Diff all files, but only display a hex dump of the first file'), description='Diff all files, but only display a hex dump of the first file'),
] ]
KWARGS = [ KWARGS = [
Kwarg(name='show_red', default=False), Kwarg(name='show_red', default=False),
Kwarg(name='show_blue', default=False), Kwarg(name='show_blue', default=False),
Kwarg(name='show_green', default=False), Kwarg(name='show_green', default=False),
Kwarg(name='terse', default=False), Kwarg(name='terse', default=False),
Kwarg(name='enabled', default=False), Kwarg(name='enabled', default=False),
] ]
RESULT_FORMAT = "%s\n" RESULT_FORMAT = "%s\n"
...@@ -98,7 +98,7 @@ class HexDiff(Module): ...@@ -98,7 +98,7 @@ class HexDiff(Module):
except IndexError as e: except IndexError as e:
diff_count += 1 diff_count += 1
if diff_count == len(target_data)-1: if diff_count == len(target_data) - 1:
color = "red" color = "red"
elif diff_count > 0: elif diff_count > 0:
color = "blue" color = "blue"
...@@ -149,7 +149,8 @@ class HexDiff(Module): ...@@ -149,7 +149,8 @@ class HexDiff(Module):
hexbyte = "XX" hexbyte = "XX"
asciibyte = "." asciibyte = "."
else: else:
(hexbyte, asciibyte) = self.hexascii(block_data, block_data[fp][i], i) (hexbyte, asciibyte) = self.hexascii(
block_data, block_data[fp][i], i)
hexline += "%s " % hexbyte hexline += "%s " % hexbyte
asciiline += "%s" % asciibyte asciiline += "%s" % asciibyte
...@@ -178,11 +179,13 @@ class HexDiff(Module): ...@@ -178,11 +179,13 @@ class HexDiff(Module):
self.status.completed += self.block self.status.completed += self.block
def init(self): def init(self):
# To mimic expected behavior, if all options are False, we show everything # To mimic expected behavior, if all options are False, we show
# everything
if not any([self.show_red, self.show_green, self.show_blue]): if not any([self.show_red, self.show_green, self.show_blue]):
self.show_red = self.show_green = self.show_blue = True self.show_red = self.show_green = self.show_blue = True
# Always disable terminal formatting, as it won't work properly with colorized output # Always disable terminal formatting, as it won't work properly with
# colorized output
self.config.display.fit_to_screen = False self.config.display.fit_to_screen = False
# Set the block size (aka, hexdump line size) # Set the block size (aka, hexdump line size)
...@@ -205,7 +208,8 @@ class HexDiff(Module): ...@@ -205,7 +208,8 @@ class HexDiff(Module):
file_count = 1 file_count = 1
else: else:
file_count = len(self.hex_target_files) file_count = len(self.hex_target_files)
self.HEADER_FORMAT = "OFFSET " + (("%%-%ds " % header_width) * file_count) + "\n" self.HEADER_FORMAT = "OFFSET " + \
(("%%-%ds " % header_width) * file_count) + "\n"
# Build the header argument list # Build the header argument list
self.HEADER = [fp.name for fp in self.hex_target_files] self.HEADER = [fp.name for fp in self.hex_target_files]
...@@ -225,4 +229,3 @@ class HexDiff(Module): ...@@ -225,4 +229,3 @@ class HexDiff(Module):
self.header() self.header()
self.diff_files(self.hex_target_files) self.diff_files(self.hex_target_files)
self.footer() self.footer()
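The per-line coloring used by the diff output can be summarized with a small helper. This is only a sketch of the classification; the real code also handles files of unequal length via the IndexError path shown above:

def classify_byte_column(column):
    # column: the byte found at one offset in each target file.
    #   red   - the first file's byte differs from every other file
    #   blue  - it differs from some, but not all, of the other files
    #   green - every file agrees
    diff_count = sum(1 for b in column[1:] if b != column[0])
    if diff_count == len(column) - 1:
        return 'red'
    if diff_count > 0:
        return 'blue'
    return 'green'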
# Basic signature scan module. This is the default (and primary) feature of binwalk. # Basic signature scan module. This is the default (and primary) feature
# of binwalk.
import binwalk.core.magic import binwalk.core.magic
from binwalk.core.module import Module, Option, Kwarg from binwalk.core.module import Module, Option, Kwarg
class Signature(Module): class Signature(Module):
TITLE = "Signature Scan" TITLE = "Signature Scan"
ORDER = 10 ORDER = 10
CLI = [ CLI = [
Option(short='B', Option(short='B',
long='signature', long='signature',
kwargs={'enabled' : True, 'explicit_signature_scan' : True}, kwargs={'enabled': True, 'explicit_signature_scan': True},
description='Scan target file(s) for common file signatures'), description='Scan target file(s) for common file signatures'),
Option(short='R', Option(short='R',
long='raw', long='raw',
kwargs={'enabled' : True, 'raw_bytes' : []}, kwargs={'enabled': True, 'raw_bytes': []},
type=list, type=list,
dtype=str.__name__, dtype=str.__name__,
description='Scan target file(s) for the specified sequence of bytes'), description='Scan target file(s) for the specified sequence of bytes'),
Option(short='A', Option(short='A',
long='opcodes', long='opcodes',
kwargs={'enabled' : True, 'search_for_opcodes' : True}, kwargs={'enabled': True, 'search_for_opcodes': True},
description='Scan target file(s) for common executable opcode signatures'), description='Scan target file(s) for common executable opcode signatures'),
Option(short='m', Option(short='m',
long='magic', long='magic',
kwargs={'enabled' : True, 'magic_files' : []}, kwargs={'enabled': True, 'magic_files': []},
type=list, type=list,
dtype='file', dtype='file',
description='Specify a custom magic file to use'), description='Specify a custom magic file to use'),
Option(short='b', Option(short='b',
long='dumb', long='dumb',
kwargs={'dumb_scan' : True}, kwargs={'dumb_scan': True},
description='Disable smart signature keywords'), description='Disable smart signature keywords'),
Option(short='I', Option(short='I',
long='invalid', long='invalid',
kwargs={'show_invalid' : True}, kwargs={'show_invalid': True},
description='Show results marked as invalid'), description='Show results marked as invalid'),
Option(short='x', Option(short='x',
long='exclude', long='exclude',
kwargs={'exclude_filters' : []}, kwargs={'exclude_filters': []},
type=list, type=list,
dtype=str.__name__, dtype=str.__name__,
description='Exclude results that match <str>'), description='Exclude results that match <str>'),
Option(short='y', Option(short='y',
long='include', long='include',
kwargs={'include_filters' : []}, kwargs={'include_filters': []},
type=list, type=list,
dtype=str.__name__, dtype=str.__name__,
description='Only show results that match <str>'), description='Only show results that match <str>'),
] ]
KWARGS = [ KWARGS = [
Kwarg(name='enabled', default=False), Kwarg(name='enabled', default=False),
Kwarg(name='show_invalid', default=False), Kwarg(name='show_invalid', default=False),
Kwarg(name='include_filters', default=[]), Kwarg(name='include_filters', default=[]),
Kwarg(name='exclude_filters', default=[]), Kwarg(name='exclude_filters', default=[]),
Kwarg(name='raw_bytes', default=[]), Kwarg(name='raw_bytes', default=[]),
Kwarg(name='search_for_opcodes', default=False), Kwarg(name='search_for_opcodes', default=False),
Kwarg(name='explicit_signature_scan', default=False), Kwarg(name='explicit_signature_scan', default=False),
Kwarg(name='dumb_scan', default=False), Kwarg(name='dumb_scan', default=False),
Kwarg(name='magic_files', default=[]), Kwarg(name='magic_files', default=[]),
] ]
VERBOSE_FORMAT = "%s %d" VERBOSE_FORMAT = "%s %d"
...@@ -67,16 +69,19 @@ class Signature(Module): ...@@ -67,16 +69,19 @@ class Signature(Module):
def init(self): def init(self):
self.one_of_many = None self.one_of_many = None
# Append the user's magic file first so that those signatures take precedence # Append the user's magic file first so that those signatures take
# precedence
if self.search_for_opcodes: if self.search_for_opcodes:
self.magic_files = [ self.magic_files = [
self.config.settings.user.binarch, self.config.settings.user.binarch,
self.config.settings.system.binarch, self.config.settings.system.binarch,
] ]
# Use the system default magic file if no other was specified, or if -B was explicitly specified # Use the system default magic file if no other was specified, or if -B
# was explicitly specified
if (not self.magic_files and not self.raw_bytes) or self.explicit_signature_scan: if (not self.magic_files and not self.raw_bytes) or self.explicit_signature_scan:
self.magic_files += self.config.settings.user.magic + self.config.settings.system.magic self.magic_files += self.config.settings.user.magic + \
self.config.settings.system.magic
# Initialize libmagic # Initialize libmagic
self.magic = binwalk.core.magic.Magic(include=self.include_filters, self.magic = binwalk.core.magic.Magic(include=self.include_filters,
...@@ -87,13 +92,16 @@ class Signature(Module): ...@@ -87,13 +92,16 @@ class Signature(Module):
if self.raw_bytes: if self.raw_bytes:
raw_signatures = [] raw_signatures = []
for raw_bytes in self.raw_bytes: for raw_bytes in self.raw_bytes:
raw_signatures.append("0 string %s %s" % (raw_bytes, raw_bytes)) raw_signatures.append(
binwalk.core.common.debug("Parsing raw signatures: %s" % str(raw_signatures)) "0 string %s %s" % (raw_bytes, raw_bytes))
binwalk.core.common.debug(
"Parsing raw signatures: %s" % str(raw_signatures))
self.magic.parse(raw_signatures) self.magic.parse(raw_signatures)
# Parse the magic file(s) # Parse the magic file(s)
if self.magic_files: if self.magic_files:
binwalk.core.common.debug("Loading magic files: %s" % str(self.magic_files)) binwalk.core.common.debug(
"Loading magic files: %s" % str(self.magic_files))
for f in self.magic_files: for f in self.magic_files:
self.magic.load(f) self.magic.load(f)
...@@ -116,7 +124,8 @@ class Signature(Module): ...@@ -116,7 +124,8 @@ class Signature(Module):
r.valid = False r.valid = False
if r.valid: if r.valid:
# Don't keep displaying signatures that repeat a bunch of times (e.g., JFFS2 nodes) # Don't keep displaying signatures that repeat a bunch of times
# (e.g., JFFS2 nodes)
if r.id == self.one_of_many: if r.id == self.one_of_many:
r.display = False r.display = False
elif r.many: elif r.many:
...@@ -156,14 +165,17 @@ class Signature(Module): ...@@ -156,14 +165,17 @@ class Signature(Module):
r.file = fp r.file = fp
            # Register the result for further processing/display # Register the result for further processing/display
# self.result automatically calls self.validate for result validation # self.result automatically calls self.validate for result
# validation
self.result(r=r) self.result(r=r)
# Is this a valid result and did it specify a jump-to-offset keyword, and are we doing a "smart" scan? # Is this a valid result and did it specify a jump-to-offset
# keyword, and are we doing a "smart" scan?
if r.valid and r.jump > 0 and not self.dumb_scan: if r.valid and r.jump > 0 and not self.dumb_scan:
absolute_jump_offset = r.offset + r.jump absolute_jump_offset = r.offset + r.jump
current_block_offset = relative_offset + r.jump current_block_offset = relative_offset + r.jump
#print ("Jumping to: 0x%X (0x%X)..." % (absolute_jump_offset, current_block_offset)) # print ("Jumping to: 0x%X (0x%X)..." %
# (absolute_jump_offset, current_block_offset))
# If the jump-to-offset is beyond the confines of the current block, seek the file to # If the jump-to-offset is beyond the confines of the current block, seek the file to
# that offset and quit processing this block of data. # that offset and quit processing this block of data.
...@@ -176,4 +188,3 @@ class Signature(Module): ...@@ -176,4 +188,3 @@ class Signature(Module):
self.header() self.header()
self.scan_file(fp) self.scan_file(fp)
self.footer() self.footer()
...@@ -2,38 +2,42 @@ import os ...@@ -2,38 +2,42 @@ import os
import binwalk.core.common import binwalk.core.common
import binwalk.core.plugin import binwalk.core.plugin
class ArcadyanDeobfuscator(binwalk.core.plugin.Plugin): class ArcadyanDeobfuscator(binwalk.core.plugin.Plugin):
''' '''
Deobfuscator for known Arcadyan firmware obfuscation(s). Deobfuscator for known Arcadyan firmware obfuscation(s).
''' '''
MODULES = ['Signature'] MODULES = ['Signature']
OBFUSCATION_MAGIC_SIZE = 4 OBFUSCATION_MAGIC_SIZE = 4
MAX_IMAGE_SIZE = 0x1B0000 MAX_IMAGE_SIZE = 0x1B0000
BLOCK_SIZE = 32 BLOCK_SIZE = 32
BLOCK1_OFFSET = 4 BLOCK1_OFFSET = 4
BLOCK2_OFFSET = 0x68 BLOCK2_OFFSET = 0x68
MIN_FILE_SIZE = (OBFUSCATION_MAGIC_SIZE + BLOCK2_OFFSET + BLOCK_SIZE) MIN_FILE_SIZE = (
OBFUSCATION_MAGIC_SIZE + BLOCK2_OFFSET + BLOCK_SIZE)
BLOCK1_START = BLOCK1_OFFSET BLOCK1_START = BLOCK1_OFFSET
BLOCK1_END = BLOCK1_START + BLOCK_SIZE BLOCK1_END = BLOCK1_START + BLOCK_SIZE
BLOCK2_START = BLOCK2_OFFSET BLOCK2_START = BLOCK2_OFFSET
BLOCK2_END = BLOCK2_OFFSET + BLOCK_SIZE BLOCK2_END = BLOCK2_OFFSET + BLOCK_SIZE
P1_START = 0 P1_START = 0
P1_END = BLOCK1_OFFSET P1_END = BLOCK1_OFFSET
P2_START = BLOCK1_END P2_START = BLOCK1_END
P2_END = BLOCK2_START P2_END = BLOCK2_START
P3_START = BLOCK2_END P3_START = BLOCK2_END
def init(self): def init(self):
if self.module.extractor.enabled: if self.module.extractor.enabled:
self.module.extractor.add_rule(regex="^obfuscated arcadyan firmware", self.module.extractor.add_rule(
extension="obfuscated", regex="^obfuscated arcadyan firmware",
cmd=self.extractor) extension="obfuscated",
cmd=self.extractor)
def extractor(self, fname): def extractor(self, fname):
deobfuscated = None deobfuscated = None
...@@ -55,22 +59,25 @@ class ArcadyanDeobfuscator(binwalk.core.plugin.Plugin): ...@@ -55,22 +59,25 @@ class ArcadyanDeobfuscator(binwalk.core.plugin.Plugin):
# Nibble-swap each byte in block 1 # Nibble-swap each byte in block 1
nswap = '' nswap = ''
for i in range(self.BLOCK1_START, self.BLOCK1_END): for i in range(self.BLOCK1_START, self.BLOCK1_END):
nswap += chr(((ord(deobfuscated[i]) & 0x0F) << 4) + ((ord(deobfuscated[i]) & 0xF0) >> 4)); nswap += chr(((ord(deobfuscated[i]) & 0x0F) << 4) + (
deobfuscated = deobfuscated[self.P1_START:self.P1_END] + nswap + deobfuscated[self.BLOCK1_END:] (ord(deobfuscated[i]) & 0xF0) >> 4))
deobfuscated = deobfuscated[
self.P1_START:self.P1_END] + nswap + deobfuscated[self.BLOCK1_END:]
# Byte-swap each byte pair in block 1 # Byte-swap each byte pair in block 1
bswap = '' bswap = ''
i = self.BLOCK1_START i = self.BLOCK1_START
while i < self.BLOCK1_END: while i < self.BLOCK1_END:
bswap += deobfuscated[i+1] + deobfuscated[i] bswap += deobfuscated[i + 1] + deobfuscated[i]
i += 2 i += 2
deobfuscated = deobfuscated[self.P1_START:self.P1_END] + bswap + deobfuscated[self.BLOCK1_END:] deobfuscated = deobfuscated[
self.P1_START:self.P1_END] + bswap + deobfuscated[self.BLOCK1_END:]
if deobfuscated: if deobfuscated:
out = binwalk.core.common.BlockFile((os.path.splitext(fname)[0] + '.deobfuscated'), "wb") out = binwalk.core.common.BlockFile(
(os.path.splitext(fname)[0] + '.deobfuscated'), "wb")
out.write(deobfuscated) out.write(deobfuscated)
out.close() out.close()
return True return True
else: else:
return False return False
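The two block-1 transformations are easier to read as Python 3 bytes operations. A sketch for illustration only; the plugin itself works on str data for Python 2 compatibility:

def nibble_swap(block):
    # Swap the high and low nibbles of every byte, e.g. 0xA5 -> 0x5A.
    return bytes(((b & 0x0F) << 4) | ((b & 0xF0) >> 4) for b in block)

def byte_swap_pairs(block):
    # Swap each adjacent pair of bytes, e.g. b"ABCD" -> b"BADC".
    swapped = bytearray()
    for i in range(0, len(block) - 1, 2):
        swapped += bytes((block[i + 1], block[i]))
    return bytes(swapped)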
#import binwalk.core.C # import binwalk.core.C
import binwalk.core.plugin import binwalk.core.plugin
#from binwalk.core.common import * # from binwalk.core.common import *
class CompressdPlugin(binwalk.core.plugin.Plugin): class CompressdPlugin(binwalk.core.plugin.Plugin):
# ''' # '''
...@@ -9,25 +10,25 @@ class CompressdPlugin(binwalk.core.plugin.Plugin): ...@@ -9,25 +10,25 @@ class CompressdPlugin(binwalk.core.plugin.Plugin):
MODULES = ['Signature'] MODULES = ['Signature']
#READ_SIZE = 64 # READ_SIZE = 64
#COMPRESS42 = "compress42" # COMPRESS42 = "compress42"
#COMPRESS42_FUNCTIONS = [ # COMPRESS42_FUNCTIONS = [
# binwalk.core.C.Function(name="is_compressed", type=bool), # binwalk.core.C.Function(name="is_compressed", type=bool),
#] #]
#comp = None # comp = None
#def init(self): # def init(self):
#self.comp = binwalk.core.C.Library(self.COMPRESS42, self.COMPRESS42_FUNCTIONS) # self.comp = binwalk.core.C.Library(self.COMPRESS42, self.COMPRESS42_FUNCTIONS)
# This plugin is currently disabled due to the need to move away from supporting C # This plugin is currently disabled due to the need to move away from supporting C
# libraries and into a pure Python project, for cross-platform support and ease of # libraries and into a pure Python project, for cross-platform support and ease of
# installation / package maintenance. A Python implementation will likely need to # installation / package maintenance. A Python implementation will likely need to
# be custom developed in the future, but for now, since this compression format is # be custom developed in the future, but for now, since this compression format is
# not very common, especially in firmware, simply disable it. # not very common, especially in firmware, simply disable it.
#self.comp = None # self.comp = None
#def scan(self, result): # def scan(self, result):
# if self.comp and result.file and result.description.lower().startswith("compress'd data"): # if self.comp and result.file and result.description.lower().startswith("compress'd data"):
# fd = self.module.config.open_file(result.file.name, offset=result.offset, length=self.READ_SIZE) # fd = self.module.config.open_file(result.file.name, offset=result.offset, length=self.READ_SIZE)
# compressed_data = fd.read(self.READ_SIZE) # compressed_data = fd.read(self.READ_SIZE)
...@@ -35,5 +36,3 @@ class CompressdPlugin(binwalk.core.plugin.Plugin): ...@@ -35,5 +36,3 @@ class CompressdPlugin(binwalk.core.plugin.Plugin):
# if not self.comp.is_compressed(compressed_data, len(compressed_data)): # if not self.comp.is_compressed(compressed_data, len(compressed_data)):
# result.valid = False # result.valid = False
...@@ -2,7 +2,9 @@ import os ...@@ -2,7 +2,9 @@ import os
import subprocess import subprocess
import binwalk.core.plugin import binwalk.core.plugin
class CPIOPlugin(binwalk.core.plugin.Plugin): class CPIOPlugin(binwalk.core.plugin.Plugin):
''' '''
Ensures that ASCII CPIO archive entries only get extracted once. Ensures that ASCII CPIO archive entries only get extracted once.
Also provides an internal CPIO extraction wrapper around the Unix Also provides an internal CPIO extraction wrapper around the Unix
...@@ -40,10 +42,11 @@ class CPIOPlugin(binwalk.core.plugin.Plugin): ...@@ -40,10 +42,11 @@ class CPIOPlugin(binwalk.core.plugin.Plugin):
return return
try: try:
result = subprocess.call(['cpio', '-d', '-i', '--no-absolute-filenames'], result = subprocess.call(
stdin=fpin, ['cpio', '-d', '-i', '--no-absolute-filenames'],
stderr=fperr, stdin=fpin,
stdout=fperr) stderr=fperr,
stdout=fperr)
except OSError: except OSError:
result = -1 result = -1
...@@ -70,7 +73,8 @@ class CPIOPlugin(binwalk.core.plugin.Plugin): ...@@ -70,7 +73,8 @@ class CPIOPlugin(binwalk.core.plugin.Plugin):
def _get_file_name_length(self, description): def _get_file_name_length(self, description):
length = 0 length = 0
if 'file name length: "' in description: if 'file name length: "' in description:
length_string = description.split('file name length: "')[1].split('"')[0] length_string = description.split(
'file name length: "')[1].split('"')[0]
length = int(length_string, 0) length = int(length_string, 0)
return length return length
...@@ -78,12 +82,14 @@ class CPIOPlugin(binwalk.core.plugin.Plugin): ...@@ -78,12 +82,14 @@ class CPIOPlugin(binwalk.core.plugin.Plugin):
if result.valid: if result.valid:
# ASCII CPIO archives consist of multiple entries, ending with an entry named 'TRAILER!!!'. # ASCII CPIO archives consist of multiple entries, ending with an entry named 'TRAILER!!!'.
# Displaying each entry is useful, as it shows what files are contained in the archive, # Displaying each entry is useful, as it shows what files are contained in the archive,
# but we only want to extract the archive when the first entry is found. # but we only want to extract the archive when the first entry is
# found.
if result.description.startswith('ASCII cpio archive'): if result.description.startswith('ASCII cpio archive'):
# Validate the reported name length # Validate the reported name length
file_name = self._get_file_name(result.description) file_name = self._get_file_name(result.description)
file_name_length = self._get_file_name_length(result.description) file_name_length = self._get_file_name_length(
result.description)
if len(file_name) != file_name_length: if len(file_name) != file_name_length:
result.valid = False result.valid = False
return return
...@@ -91,7 +97,8 @@ class CPIOPlugin(binwalk.core.plugin.Plugin): ...@@ -91,7 +97,8 @@ class CPIOPlugin(binwalk.core.plugin.Plugin):
self.consecutive_hits += 1 self.consecutive_hits += 1
if not self.found_archive or self.found_archive_in_file != result.file.name: if not self.found_archive or self.found_archive_in_file != result.file.name:
# This is the first entry. Set found_archive and allow the scan to continue normally. # This is the first entry. Set found_archive and allow the
# scan to continue normally.
self.found_archive_in_file = result.file.name self.found_archive_in_file = result.file.name
self.found_archive = True self.found_archive = True
result.extract = True result.extract = True
...@@ -113,5 +120,6 @@ class CPIOPlugin(binwalk.core.plugin.Plugin): ...@@ -113,5 +120,6 @@ class CPIOPlugin(binwalk.core.plugin.Plugin):
self.consecutive_hits = 0 self.consecutive_hits = 0
elif self.consecutive_hits >= 4: elif self.consecutive_hits >= 4:
# Ignore other stuff until the end of CPIO is found # Ignore other stuff until the end of CPIO is found
# TODO: It would be better to jump to the end of this CPIO entry rather than make this assumption... # TODO: It would be better to jump to the end of this CPIO
# entry rather than make this assumption...
result.valid = False result.valid = False
...@@ -7,21 +7,23 @@ try: ...@@ -7,21 +7,23 @@ try:
except ImportError as e: except ImportError as e:
pass pass
class RomFSCommon(object): class RomFSCommon(object):
def _read_next_word(self): def _read_next_word(self):
value = struct.unpack("%sL" % self.endianess, self.data[self.index:self.index+4])[0] value = struct.unpack(
"%sL" % self.endianess, self.data[self.index:self.index + 4])[0]
self.index += 4 self.index += 4
return value return value
def _read_next_uid(self): def _read_next_uid(self):
uid = int(self.data[self.index:self.index+4]) uid = int(self.data[self.index:self.index + 4])
self.index += 4 self.index += 4
return uid return uid
def _read_next_block(self, size): def _read_next_block(self, size):
size = int(size) size = int(size)
data = self.data[self.index:self.index+size] data = self.data[self.index:self.index + size]
self.index += size self.index += size
return data return data
...@@ -41,10 +43,11 @@ class RomFSCommon(object): ...@@ -41,10 +43,11 @@ class RomFSCommon(object):
self.index += 1 self.index += 1
return data return data
class RomFSEntry(RomFSCommon): class RomFSEntry(RomFSCommon):
DIR_STRUCT_MASK = 0x00000001 DIR_STRUCT_MASK = 0x00000001
DATA_MASK = 0x00000008 DATA_MASK = 0x00000008
COMPRESSED_MASK = 0x005B0000 COMPRESSED_MASK = 0x005B0000
def __init__(self, data, endianess="<"): def __init__(self, data, endianess="<"):
...@@ -61,6 +64,7 @@ class RomFSEntry(RomFSCommon): ...@@ -61,6 +64,7 @@ class RomFSEntry(RomFSCommon):
self.unknown5 = self._read_next_word() self.unknown5 = self._read_next_word()
self.uid = self._read_next_uid() self.uid = self._read_next_uid()
class RomFSDirStruct(RomFSCommon): class RomFSDirStruct(RomFSCommon):
SIZE = 0x20 SIZE = 0x20
...@@ -94,17 +98,20 @@ class RomFSDirStruct(RomFSCommon): ...@@ -94,17 +98,20 @@ class RomFSDirStruct(RomFSCommon):
if count == 0: if count == 0:
mod = self.SIZE - total_size mod = self.SIZE - total_size
else: else:
mod = self.SIZE - int(total_size - (count*self.SIZE)) mod = self.SIZE - int(total_size - (count * self.SIZE))
if mod > 0: if mod > 0:
remainder = self._read_next_block(mod) remainder = self._read_next_block(mod)
yield (uid, entry) yield (uid, entry)
class FileContainer(object): class FileContainer(object):
def __init__(self): def __init__(self):
pass pass
class RomFS(object): class RomFS(object):
SUPERBLOCK_SIZE = 0x20 SUPERBLOCK_SIZE = 0x20
...@@ -145,7 +152,8 @@ class RomFS(object): ...@@ -145,7 +152,8 @@ class RomFS(object):
while True: while True:
try: try:
entry = RomFSEntry(self.data[offset:offset+self.FILE_ENTRY_SIZE], endianess=self.endianess) entry = RomFSEntry(
self.data[offset:offset + self.FILE_ENTRY_SIZE], endianess=self.endianess)
except ValueError as e: except ValueError as e:
break break
...@@ -160,7 +168,8 @@ class RomFS(object): ...@@ -160,7 +168,8 @@ class RomFS(object):
if entry.type & entry.DIR_STRUCT_MASK: if entry.type & entry.DIR_STRUCT_MASK:
entries[entry.uid].type = "directory" entries[entry.uid].type = "directory"
ds = RomFSDirStruct(self.data[entry.offset:entry.offset+entry.size], endianess=self.endianess) ds = RomFSDirStruct(
self.data[entry.offset:entry.offset + entry.size], endianess=self.endianess)
for (uid, name) in ds.ls: for (uid, name) in ds.ls:
if not uid in entries: if not uid in entries:
entries[uid] = FileContainer() entries[uid] = FileContainer()
...@@ -184,7 +193,9 @@ if __name__ == '__main__': ...@@ -184,7 +193,9 @@ if __name__ == '__main__':
print ("Usage: %s <input file> <output directory>" % sys.argv[0]) print ("Usage: %s <input file> <output directory>" % sys.argv[0])
sys.exit(1) sys.exit(1)
class DlinkROMFSExtractPlugin(binwalk.core.plugin.Plugin): class DlinkROMFSExtractPlugin(binwalk.core.plugin.Plugin):
''' '''
    D-Link ROMFS extractor plugin. D-Link ROMFS extractor plugin.
''' '''
...@@ -193,7 +204,8 @@ class DlinkROMFSExtractPlugin(binwalk.core.plugin.Plugin): ...@@ -193,7 +204,8 @@ class DlinkROMFSExtractPlugin(binwalk.core.plugin.Plugin):
def init(self): def init(self):
# If the extractor is enabled for the module we're currently loaded # If the extractor is enabled for the module we're currently loaded
# into, then register self.extractor as a D-Link ROMFS file system extraction rule. # into, then register self.extractor as a D-Link ROMFS file system
# extraction rule.
if self.module.extractor.enabled: if self.module.extractor.enabled:
self.module.extractor.add_rule(txtrule=None, self.module.extractor.add_rule(txtrule=None,
regex="^d-link romfs filesystem", regex="^d-link romfs filesystem",
......
...@@ -2,7 +2,9 @@ import os ...@@ -2,7 +2,9 @@ import os
import gzip import gzip
import binwalk.core.plugin import binwalk.core.plugin
class GzipExtractPlugin(binwalk.core.plugin.Plugin): class GzipExtractPlugin(binwalk.core.plugin.Plugin):
''' '''
Gzip extractor plugin. Gzip extractor plugin.
''' '''
......
...@@ -3,7 +3,9 @@ import binwalk.core.compat ...@@ -3,7 +3,9 @@ import binwalk.core.compat
import binwalk.core.plugin import binwalk.core.plugin
from binwalk.core.common import BlockFile from binwalk.core.common import BlockFile
class GzipValidPlugin(binwalk.core.plugin.Plugin): class GzipValidPlugin(binwalk.core.plugin.Plugin):
''' '''
Validates gzip compressed data. Almost identical to zlibvalid.py. Validates gzip compressed data. Almost identical to zlibvalid.py.
''' '''
...@@ -15,7 +17,8 @@ class GzipValidPlugin(binwalk.core.plugin.Plugin): ...@@ -15,7 +17,8 @@ class GzipValidPlugin(binwalk.core.plugin.Plugin):
# If this result is a gzip signature match, try to decompress the data # If this result is a gzip signature match, try to decompress the data
if result.file and result.description.lower().startswith('gzip'): if result.file and result.description.lower().startswith('gzip'):
# Seek to and read the suspected gzip data # Seek to and read the suspected gzip data
fd = self.module.config.open_file(result.file.name, offset=result.offset, length=self.MAX_DATA_SIZE) fd = self.module.config.open_file(
result.file.name, offset=result.offset, length=self.MAX_DATA_SIZE)
data = fd.read(self.MAX_DATA_SIZE) data = fd.read(self.MAX_DATA_SIZE)
fd.close() fd.close()
...@@ -40,8 +43,7 @@ class GzipValidPlugin(binwalk.core.plugin.Plugin): ...@@ -40,8 +43,7 @@ class GzipValidPlugin(binwalk.core.plugin.Plugin):
except zlib.error as e: except zlib.error as e:
error = str(e) error = str(e)
# Truncated input data results in error -5. # Truncated input data results in error -5.
# gzip uses different checksums than zlib, which results in error -3. # gzip uses different checksums than zlib, which results in
# error -3.
if not error.startswith("Error -5") and not error.startswith("Error -3"): if not error.startswith("Error -5") and not error.startswith("Error -3"):
result.valid = False result.valid = False
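One way to express the validation idea in isolation. This is an assumption about the elided decompression call, not a copy of it: the sketch skips a fixed 10-byte gzip header (ignoring optional header fields) and prepends a zlib header so the standard zlib module can inflate the deflate stream:

import zlib

def looks_like_gzip(candidate):
    # Truncated data raises "Error -5 ..."; the gzip trailer's CRC32 fails
    # zlib's adler32 check and raises "Error -3 ...".  Neither error
    # disproves that this is gzip data, so only other errors invalidate it.
    try:
        zlib.decompress(b"\x78\x9c" + candidate[10:])
    except zlib.error as e:
        msg = str(e)
        if not msg.startswith("Error -5") and not msg.startswith("Error -3"):
            return False
    return True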
...@@ -13,6 +13,7 @@ except ImportError as e: ...@@ -13,6 +13,7 @@ except ImportError as e:
class HilinkDecryptor(binwalk.core.plugin.Plugin): class HilinkDecryptor(binwalk.core.plugin.Plugin):
''' '''
Plugin to decrypt, validate, and extract Hilink encrypted firmware. Plugin to decrypt, validate, and extract Hilink encrypted firmware.
''' '''
...@@ -28,11 +29,12 @@ class HilinkDecryptor(binwalk.core.plugin.Plugin): ...@@ -28,11 +29,12 @@ class HilinkDecryptor(binwalk.core.plugin.Plugin):
self.enabled = True self.enabled = True
if self.enabled is True and self.module.extractor.enabled is True: if self.enabled is True and self.module.extractor.enabled is True:
# Add an extraction rule for encrypted Hilink firmware signature results # Add an extraction rule for encrypted Hilink firmware signature
self.module.extractor.add_rule(regex="^%s" % self.SIGNATURE_DESCRIPTION, # results
extension="enc", self.module.extractor.add_rule(
cmd=self._decrypt_and_extract) regex="^%s" % self.SIGNATURE_DESCRIPTION,
extension="enc",
cmd=self._decrypt_and_extract)
def _decrypt_and_extract(self, fname): def _decrypt_and_extract(self, fname):
''' '''
...@@ -68,25 +70,31 @@ class HilinkDecryptor(binwalk.core.plugin.Plugin): ...@@ -68,25 +70,31 @@ class HilinkDecryptor(binwalk.core.plugin.Plugin):
if self.enabled is True: if self.enabled is True:
if result.valid is True: if result.valid is True:
if result.description.lower().startswith(self.SIGNATURE_DESCRIPTION) is True: if result.description.lower().startswith(self.SIGNATURE_DESCRIPTION) is True:
# Read in the first 64 bytes of the suspected encrypted uImage header # Read in the first 64 bytes of the suspected encrypted
fd = self.module.config.open_file(result.file.name, offset=result.offset) # uImage header
encrypted_header_data = binwalk.core.compat.str2bytes(fd.read(64)) fd = self.module.config.open_file(
result.file.name, offset=result.offset)
encrypted_header_data = binwalk.core.compat.str2bytes(
fd.read(64))
fd.close() fd.close()
# Decrypt the header # Decrypt the header
decrypted_header_data = self._hilink_decrypt(encrypted_header_data) decrypted_header_data = self._hilink_decrypt(
encrypted_header_data)
# Pull out the image size and image name fields from the decrypted uImage header # Pull out the image size and image name fields from the decrypted uImage header
# and add them to the printed description. # and add them to the printed description.
result.size = struct.unpack(b">L", decrypted_header_data[12:16])[0] result.size = struct.unpack(
b">L", decrypted_header_data[12:16])[0]
result.description += ", size: %d" % (result.size) result.description += ", size: %d" % (result.size)
# NOTE: The description field should be 32 bytes? Hilink seems to use only 24 bytes for this field, # NOTE: The description field should be 32 bytes? Hilink seems to use only 24 bytes for this field,
# even though the header size is still 64 bytes? # even though the header size is still 64 bytes?
result.description += ', image name: "%s"' % binwalk.core.compat.bytes2str(decrypted_header_data[32:56]).strip("\x00") result.description += ', image name: "%s"' % binwalk.core.compat.bytes2str(
decrypted_header_data[32:56]).strip("\x00")
# Do some basic validation on the decrypted size and image name fields # Do some basic validation on the decrypted size and image
# name fields
if result.size > (result.file.size - result.offset): if result.size > (result.file.size - result.offset):
result.valid = False result.valid = False
if not all(c in string.printable for c in result.description): if not all(c in string.printable for c in result.description):
result.valid = False result.valid = False
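For reference, pulling those two fields out of a decrypted uImage header looks like this. A minimal sketch using only the offsets visible above; error handling omitted:

import struct

def parse_uimage_fields(header):
    # The image data size is a big-endian 32-bit word at offset 12; the
    # image name is read from bytes 32:56 (Hilink appears to use only 24
    # of the standard 32-byte name field).
    size = struct.unpack(">L", header[12:16])[0]
    name = header[32:56].rstrip(b"\x00").decode("ascii", errors="replace")
    return size, name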
...@@ -2,7 +2,9 @@ import struct ...@@ -2,7 +2,9 @@ import struct
import binascii import binascii
import binwalk.core.plugin import binwalk.core.plugin
class JFFS2ValidPlugin(binwalk.core.plugin.Plugin): class JFFS2ValidPlugin(binwalk.core.plugin.Plugin):
''' '''
Helps validate JFFS2 signature results. Helps validate JFFS2 signature results.
...@@ -23,7 +25,8 @@ class JFFS2ValidPlugin(binwalk.core.plugin.Plugin): ...@@ -23,7 +25,8 @@ class JFFS2ValidPlugin(binwalk.core.plugin.Plugin):
header_crc = struct.unpack("<I", node_header[8:12])[0] header_crc = struct.unpack("<I", node_header[8:12])[0]
# Calculate the actual CRC # Calculate the actual CRC
calculated_header_crc = (binascii.crc32(node_header[0:8], -1) ^ -1) & 0xffffffff calculated_header_crc = (
binascii.crc32(node_header[0:8], -1) ^ -1) & 0xffffffff
# Make sure they match # Make sure they match
return (header_crc == calculated_header_crc) return (header_crc == calculated_header_crc)
...@@ -32,16 +35,15 @@ class JFFS2ValidPlugin(binwalk.core.plugin.Plugin): ...@@ -32,16 +35,15 @@ class JFFS2ValidPlugin(binwalk.core.plugin.Plugin):
if result.file and result.description.lower().startswith('jffs2 filesystem'): if result.file and result.description.lower().startswith('jffs2 filesystem'):
# Seek to and read the suspected JFFS2 node header # Seek to and read the suspected JFFS2 node header
fd = self.module.config.open_file(result.file.name, offset=result.offset) fd = self.module.config.open_file(
result.file.name, offset=result.offset)
# JFFS2 headers are only 12 bytes in size, but reading larger amounts of # JFFS2 headers are only 12 bytes in size, but reading larger amounts of
# data from disk speeds up repeated disk access and decreases performance # data from disk speeds up repeated disk access and decreases performance
# hits (disk caching?). # hits (disk caching?).
# #
# TODO: Should this plugin validate the *entire* JFFS2 file system, rather # TODO: Should this plugin validate the *entire* JFFS2 file system, rather
# than letting the signature module find every single JFFS2 node? # than letting the signature module find every single JFFS2 node?
node_header = fd.read(1024) node_header = fd.read(1024)
fd.close() fd.close()
result.valid = self._check_crc(node_header[0:12]) result.valid = self._check_crc(node_header[0:12])
import os import os
import binwalk.core.plugin import binwalk.core.plugin
class LZMAExtractPlugin(binwalk.core.plugin.Plugin): class LZMAExtractPlugin(binwalk.core.plugin.Plugin):
''' '''
LZMA extractor plugin. LZMA extractor plugin.
''' '''
...@@ -11,7 +13,8 @@ class LZMAExtractPlugin(binwalk.core.plugin.Plugin): ...@@ -11,7 +13,8 @@ class LZMAExtractPlugin(binwalk.core.plugin.Plugin):
try: try:
# lzma package in Python 2.0 decompress() does not handle multiple # lzma package in Python 2.0 decompress() does not handle multiple
# compressed streams, only first stream is extracted. # compressed streams, only first stream is extracted.
# backports.lzma package could be used to keep consistent behaviour. # backports.lzma package could be used to keep consistent
# behaviour.
try: try:
import lzma import lzma
except ImportError: except ImportError:
......
...@@ -4,7 +4,9 @@ import binwalk.core.plugin ...@@ -4,7 +4,9 @@ import binwalk.core.plugin
from binwalk.core.compat import * from binwalk.core.compat import *
from binwalk.core.common import BlockFile from binwalk.core.common import BlockFile
class LZMAModPlugin(binwalk.core.plugin.Plugin): class LZMAModPlugin(binwalk.core.plugin.Plugin):
''' '''
Finds and extracts modified LZMA files commonly found in cable modems. Finds and extracts modified LZMA files commonly found in cable modems.
Based on Bernardo Rodrigues' work: http://w00tsec.blogspot.com/2013/11/unpacking-firmware-images-from-cable.html Based on Bernardo Rodrigues' work: http://w00tsec.blogspot.com/2013/11/unpacking-firmware-images-from-cable.html
...@@ -27,11 +29,14 @@ class LZMAModPlugin(binwalk.core.plugin.Plugin): ...@@ -27,11 +29,14 @@ class LZMAModPlugin(binwalk.core.plugin.Plugin):
# Try extracting the LZMA file without modification first # Try extracting the LZMA file without modification first
result = self.module.extractor.execute(self.original_cmd, fname) result = self.module.extractor.execute(self.original_cmd, fname)
        # If the external extractor was successful (True) or didn't exist (None), don't do anything. # If the external extractor was successful (True) or didn't exist
# (None), don't do anything.
if result not in [True, None]: if result not in [True, None]:
out_name = os.path.splitext(fname)[0] + '-patched' + os.path.splitext(fname)[1] out_name = os.path.splitext(fname)[
0] + '-patched' + os.path.splitext(fname)[1]
fp_out = BlockFile(out_name, 'w') fp_out = BlockFile(out_name, 'w')
# Use self.module.config.open_file here to ensure that other config settings (such as byte-swapping) are honored # Use self.module.config.open_file here to ensure that other config
# settings (such as byte-swapping) are honored
fp_in = self.module.config.open_file(fname, offset=0, length=0) fp_in = self.module.config.open_file(fname, offset=0, length=0)
fp_in.set_block_size(peek=0) fp_in.set_block_size(peek=0)
i = 0 i = 0
...@@ -51,16 +56,18 @@ class LZMAModPlugin(binwalk.core.plugin.Plugin): ...@@ -51,16 +56,18 @@ class LZMAModPlugin(binwalk.core.plugin.Plugin):
fp_in.close() fp_in.close()
fp_out.close() fp_out.close()
# Overwrite the original file so that it can be cleaned up if -r was specified # Overwrite the original file so that it can be cleaned up if -r
# was specified
shutil.move(out_name, fname) shutil.move(out_name, fname)
result = self.module.extractor.execute(self.original_cmd, fname) result = self.module.extractor.execute(self.original_cmd, fname)
return result return result
def scan(self, result): def scan(self, result):
# The modified cable modem LZMA headers all have valid dictionary sizes and a properties byte of 0x5D. # The modified cable modem LZMA headers all have valid dictionary sizes
# and a properties byte of 0x5D.
if result.description.lower().startswith(self.SIGNATURE) and "invalid uncompressed size" in result.description: if result.description.lower().startswith(self.SIGNATURE) and "invalid uncompressed size" in result.description:
if "properties: 0x5D" in result.description and "invalid dictionary size" not in result.description: if "properties: 0x5D" in result.description and "invalid dictionary size" not in result.description:
result.valid = True result.valid = True
result.description = result.description.split("invalid uncompressed size")[0] + "missing uncompressed size" result.description = result.description.split(
"invalid uncompressed size")[0] + "missing uncompressed size"
...@@ -2,7 +2,9 @@ import binwalk.core.plugin ...@@ -2,7 +2,9 @@ import binwalk.core.plugin
import binwalk.core.compat import binwalk.core.compat
from binwalk.core.common import BlockFile from binwalk.core.common import BlockFile
class LZMAPlugin(binwalk.core.plugin.Plugin): class LZMAPlugin(binwalk.core.plugin.Plugin):
''' '''
Validates lzma signature results. Validates lzma signature results.
''' '''
...@@ -29,7 +31,8 @@ class LZMAPlugin(binwalk.core.plugin.Plugin): ...@@ -29,7 +31,8 @@ class LZMAPlugin(binwalk.core.plugin.Plugin):
valid = True valid = True
if self.decompressor is not None: if self.decompressor is not None:
# The only acceptable exceptions are those indicating that the input data was truncated. # The only acceptable exceptions are those indicating that the
# input data was truncated.
try: try:
self.decompressor(binwalk.core.compat.str2bytes(data)) self.decompressor(binwalk.core.compat.str2bytes(data))
except IOError as e: except IOError as e:
...@@ -49,7 +52,8 @@ class LZMAPlugin(binwalk.core.plugin.Plugin): ...@@ -49,7 +52,8 @@ class LZMAPlugin(binwalk.core.plugin.Plugin):
if result.valid and result.file and result.description.lower().startswith('lzma compressed data'): if result.valid and result.file and result.description.lower().startswith('lzma compressed data'):
# Seek to and read the suspected lzma data # Seek to and read the suspected lzma data
fd = self.module.config.open_file(result.file.name, offset=result.offset, length=self.MAX_DATA_SIZE) fd = self.module.config.open_file(
result.file.name, offset=result.offset, length=self.MAX_DATA_SIZE)
data = fd.read(self.MAX_DATA_SIZE) data = fd.read(self.MAX_DATA_SIZE)
fd.close() fd.close()
...@@ -59,4 +63,3 @@ class LZMAPlugin(binwalk.core.plugin.Plugin): ...@@ -59,4 +63,3 @@ class LZMAPlugin(binwalk.core.plugin.Plugin):
data = data[:5] + self.FAKE_LZMA_SIZE + data[5:] data = data[:5] + self.FAKE_LZMA_SIZE + data[5:]
if not self.is_valid_lzma(data): if not self.is_valid_lzma(data):
result.valid = False result.valid = False
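The header patch relies on the standard .lzma layout: one properties byte, a 4-byte little-endian dictionary size, then an 8-byte uncompressed size. A sketch of the idea; the value of the elided FAKE_LZMA_SIZE is an assumption here (0xFFFFFFFFFFFFFFFF conventionally means "size unknown"):

def insert_fake_lzma_size(data):
    # Re-insert the 8-byte uncompressed-size field that the modified
    # cable-modem images strip out, directly after the 5 header bytes
    # (properties byte + dictionary size).
    return data[:5] + b"\xff" * 8 + data[5:]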
...@@ -2,6 +2,7 @@ import time ...@@ -2,6 +2,7 @@ import time
import math import math
import binwalk.core.plugin import binwalk.core.plugin
class TarPlugin(binwalk.core.plugin.Plugin): class TarPlugin(binwalk.core.plugin.Plugin):
MODULES = ['Signature'] MODULES = ['Signature']
...@@ -41,24 +42,27 @@ class TarPlugin(binwalk.core.plugin.Plugin): ...@@ -41,24 +42,27 @@ class TarPlugin(binwalk.core.plugin.Plugin):
if result.description.lower().startswith('posix tar archive'): if result.description.lower().startswith('posix tar archive'):
is_tar = True is_tar = True
file_offset = result.offset file_offset = result.offset
fd = self.module.config.open_file(result.file.name, offset=result.offset) fd = self.module.config.open_file(
result.file.name, offset=result.offset)
while is_tar: while is_tar:
# read in the tar header struct # read in the tar header struct
buf = fd.read(self.TAR_BLOCKSIZE) buf = fd.read(self.TAR_BLOCKSIZE)
# check to see if we are still in a tarball # check to see if we are still in a tarball
if buf[257:262] == 'ustar': if buf[257:262] == 'ustar':
                    # get size of tarred file and convert to blocks (plus 1 to include header) # get size of tarred file and convert to blocks (plus 1 to
# include header)
try: try:
size = self.nti(buf[124:136]) size = self.nti(buf[124:136])
blocks = math.ceil(size/float(self.TAR_BLOCKSIZE)) + 1 blocks = math.ceil(
size / float(self.TAR_BLOCKSIZE)) + 1
except ValueError as e: except ValueError as e:
is_tar = False is_tar = False
break break
# update file offset for next file in tarball # update file offset for next file in tarball
file_offset += int(self.TAR_BLOCKSIZE*blocks) file_offset += int(self.TAR_BLOCKSIZE * blocks)
if file_offset >= result.file.size: if file_offset >= result.file.size:
# we hit the end of the file # we hit the end of the file
...@@ -66,6 +70,6 @@ class TarPlugin(binwalk.core.plugin.Plugin): ...@@ -66,6 +70,6 @@ class TarPlugin(binwalk.core.plugin.Plugin):
else: else:
fd.seek(file_offset) fd.seek(file_offset)
else: else:
is_tar = False is_tar = False
result.jump = file_offset result.jump = file_offset
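The walk above can be sketched as a standalone generator. This is a simplified stand-in: the size field is parsed as plain octal, ignoring the base-256 encoding that the elided nti() helper may also handle:

import math

def tar_entry_offsets(fp, start=0, block_size=512):
    # Hop from one ustar header to the next using the octal size field at
    # offset 124; each entry occupies its header block plus enough blocks
    # to hold the (rounded-up) file data.
    offset = start
    while True:
        fp.seek(offset)
        header = fp.read(block_size)
        if len(header) < block_size or header[257:262] != b"ustar":
            break
        yield offset
        size = int(header[124:136].split(b"\x00")[0].strip() or b"0", 8)
        offset += int(block_size * (math.ceil(size / float(block_size)) + 1))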
...@@ -3,14 +3,16 @@ import binascii ...@@ -3,14 +3,16 @@ import binascii
import binwalk.core.plugin import binwalk.core.plugin
import binwalk.core.compat import binwalk.core.compat
class UBIValidPlugin(binwalk.core.plugin.Plugin): class UBIValidPlugin(binwalk.core.plugin.Plugin):
''' '''
Helps validate UBI erase count signature results. Helps validate UBI erase count signature results.
Checks header CRC and calculates jump value Checks header CRC and calculates jump value
''' '''
MODULES = ['Signature'] MODULES = ['Signature']
current_file=None current_file = None
last_ec_hdr_offset = None last_ec_hdr_offset = None
peb_size = None peb_size = None
...@@ -26,15 +28,15 @@ class UBIValidPlugin(binwalk.core.plugin.Plugin): ...@@ -26,15 +28,15 @@ class UBIValidPlugin(binwalk.core.plugin.Plugin):
def _process_result(self, result): def _process_result(self, result):
if self.current_file == result.file.name: if self.current_file == result.file.name:
result.display=False result.display = False
else: else:
# Reset everything in case new file is encountered # Reset everything in case new file is encountered
self.peb_size=None self.peb_size = None
self.last_ec_hdr_offset=None self.last_ec_hdr_offset = None
self.peb_size=None self.peb_size = None
# Display result and trigger extraction # Display result and trigger extraction
result.display=True result.display = True
self.current_file = result.file.name self.current_file = result.file.name
...@@ -54,7 +56,8 @@ class UBIValidPlugin(binwalk.core.plugin.Plugin): ...@@ -54,7 +56,8 @@ class UBIValidPlugin(binwalk.core.plugin.Plugin):
def scan(self, result): def scan(self, result):
if result.file and result.description.lower().startswith('ubi erase count header'): if result.file and result.description.lower().startswith('ubi erase count header'):
# Seek to and read the suspected UBI erase count header # Seek to and read the suspected UBI erase count header
fd = self.module.config.open_file(result.file.name, offset=result.offset) fd = self.module.config.open_file(
result.file.name, offset=result.offset)
ec_header = binwalk.core.compat.str2bytes(fd.read(1024)) ec_header = binwalk.core.compat.str2bytes(fd.read(1024))
fd.close() fd.close()
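The docstring's "jump value" relies on UBI writing one erase-count header at the start of every physical erase block (PEB): once two consecutive headers have been seen in the same file, their spacing is the PEB size, and every later hit can be skipped wholesale. A hedged restatement of that bookkeeping follows; the class and method names are made up here.

class EraseBlockTracker(object):
    # Illustrative state machine only; binwalk's plugin keeps equivalent
    # fields (peb_size, last_ec_hdr_offset) directly on the plugin object.
    def __init__(self):
        self.peb_size = None
        self.last_ec_hdr_offset = None

    def jump_for(self, offset):
        # Learn the PEB size from the spacing of the first two EC headers,
        # then tell the scanner how far it can safely skip ahead.
        if self.peb_size is None and self.last_ec_hdr_offset is not None:
            self.peb_size = offset - self.last_ec_hdr_offset
        self.last_ec_hdr_offset = offset
        return self.peb_size if self.peb_size else 0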
......
...@@ -4,5 +4,6 @@ ...@@ -4,5 +4,6 @@
import binwalk.core.plugin import binwalk.core.plugin
class Unjffs2DepreciatedPlugin(binwalk.core.plugin.Plugin): class Unjffs2DepreciatedPlugin(binwalk.core.plugin.Plugin):
pass pass
import binwalk.core.plugin import binwalk.core.plugin
class ZipHelperPlugin(binwalk.core.plugin.Plugin): class ZipHelperPlugin(binwalk.core.plugin.Plugin):
''' '''
A helper plugin for Zip files to ensure that the Zip archive A helper plugin for Zip files to ensure that the Zip archive
extraction rule is only executed once when the first Zip archive extraction rule is only executed once when the first Zip archive
......
...@@ -4,7 +4,9 @@ import binwalk.core.compat ...@@ -4,7 +4,9 @@ import binwalk.core.compat
import binwalk.core.common import binwalk.core.common
import binwalk.core.plugin import binwalk.core.plugin
class ZLIBExtractPlugin(binwalk.core.plugin.Plugin): class ZLIBExtractPlugin(binwalk.core.plugin.Plugin):
''' '''
Zlib extractor plugin. Zlib extractor plugin.
''' '''
...@@ -26,7 +28,8 @@ class ZLIBExtractPlugin(binwalk.core.plugin.Plugin): ...@@ -26,7 +28,8 @@ class ZLIBExtractPlugin(binwalk.core.plugin.Plugin):
fpin = binwalk.core.common.BlockFile(fname) fpin = binwalk.core.common.BlockFile(fname)
fpout = binwalk.core.common.BlockFile(outfile, 'w') fpout = binwalk.core.common.BlockFile(outfile, 'w')
plaintext = zlib.decompress(binwalk.core.compat.str2bytes(fpin.read())) plaintext = zlib.decompress(
binwalk.core.compat.str2bytes(fpin.read()))
fpout.write(plaintext) fpout.write(plaintext)
fpin.close() fpin.close()
...@@ -37,4 +40,3 @@ class ZLIBExtractPlugin(binwalk.core.plugin.Plugin): ...@@ -37,4 +40,3 @@ class ZLIBExtractPlugin(binwalk.core.plugin.Plugin):
return False return False
return True return True
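zlib.decompress above is handed the entire carved file, which in practice often carries trailing bytes from the rest of the firmware image after the deflate stream ends. The sketch below uses a decompressobj to tolerate that and to report how many compressed bytes were actually consumed; the helper is an assumption, not binwalk's extractor API.

import zlib

def carve_zlib(data):
    # Decompress a zlib stream that may be followed by unrelated bytes and
    # return the plaintext plus the length of the compressed stream itself.
    d = zlib.decompressobj()
    plaintext = d.decompress(data) + d.flush()
    consumed = len(data) - len(d.unused_data)
    return plaintext, consumed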
...@@ -3,7 +3,9 @@ import binwalk.core.compat ...@@ -3,7 +3,9 @@ import binwalk.core.compat
import binwalk.core.plugin import binwalk.core.plugin
from binwalk.core.common import BlockFile from binwalk.core.common import BlockFile
class ZlibValidPlugin(binwalk.core.plugin.Plugin): class ZlibValidPlugin(binwalk.core.plugin.Plugin):
''' '''
Validates zlib compressed data. Validates zlib compressed data.
''' '''
...@@ -40,4 +42,3 @@ class ZlibValidPlugin(binwalk.core.plugin.Plugin): ...@@ -40,4 +42,3 @@ class ZlibValidPlugin(binwalk.core.plugin.Plugin):
# Error -5, incomplete or truncated data input # Error -5, incomplete or truncated data input
if not str(e).startswith("Error -5"): if not str(e).startswith("Error -5"):
result.valid = False result.valid = False
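The "Error -5" test above leans on a useful zlib property: data that is a genuine zlib stream merely cut short fails with Z_BUF_ERROR (-5, "incomplete or truncated"), while a false-positive signature followed by junk typically fails with a different error code. A quick hedged demonstration:

import zlib

def plausible_zlib(data):
    # Truncated-but-consistent input still counts as a valid-looking
    # stream; any other zlib.error marks the hit as a false positive.
    try:
        zlib.decompress(data)
        return True
    except zlib.error as e:
        return str(e).startswith("Error -5")

blob = zlib.compress(b"binwalk" * 512)
print(plausible_zlib(blob[:len(blob) // 2]))        # True: real stream, cut short
print(plausible_zlib(b"\x78\x9c" + b"\x00" * 32))   # False: valid header, bogus body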
...@@ -2,6 +2,7 @@ import idc ...@@ -2,6 +2,7 @@ import idc
import idaapi import idaapi
import binwalk import binwalk
class binwalk_t(idaapi.plugin_t): class binwalk_t(idaapi.plugin_t):
flags = 0 flags = 0
comment = "Scan the current IDB for file signatures" comment = "Scan the current IDB for file signatures"
...@@ -10,8 +11,10 @@ class binwalk_t(idaapi.plugin_t): ...@@ -10,8 +11,10 @@ class binwalk_t(idaapi.plugin_t):
wanted_hotkey = "" wanted_hotkey = ""
def init(self): def init(self):
self.menu_context_1 = idaapi.add_menu_item("Search/", "binwalk opcodes", "", 0, self.opcode_scan, (None,)) self.menu_context_1 = idaapi.add_menu_item(
self.menu_context_2 = idaapi.add_menu_item("Search/", "binwalk signatures", "", 0, self.signature_scan, (None,)) "Search/", "binwalk opcodes", "", 0, self.opcode_scan, (None,))
self.menu_context_2 = idaapi.add_menu_item(
"Search/", "binwalk signatures", "", 0, self.signature_scan, (None,))
return idaapi.PLUGIN_KEEP return idaapi.PLUGIN_KEEP
def term(self): def term(self):
...@@ -28,6 +31,6 @@ class binwalk_t(idaapi.plugin_t): ...@@ -28,6 +31,6 @@ class binwalk_t(idaapi.plugin_t):
def opcode_scan(self, arg): def opcode_scan(self, arg):
binwalk.scan(idc.GetIdbPath(), opcode=True) binwalk.scan(idc.GetIdbPath(), opcode=True)
def PLUGIN_ENTRY(): def PLUGIN_ENTRY():
return binwalk_t() return binwalk_t()
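idaapi.add_menu_item, used in init() above, was removed from later IDA releases in favour of the action API. A hedged sketch of an equivalent registration under IDA 7.x follows; the action id and the signature_scan wiring are assumptions based on the plugin code above.

import idaapi

class BinwalkSignatureAction(idaapi.action_handler_t):
    # Hypothetical handler mirroring the plugin's signature scan entry.
    def activate(self, ctx):
        import idc
        import binwalk
        binwalk.scan(idc.GetIdbPath(), signature=True)
        return 1

    def update(self, ctx):
        return idaapi.AST_ENABLE_ALWAYS

desc = idaapi.action_desc_t(
    "binwalk:signatures",        # internal action id (made up here)
    "binwalk signatures",        # menu label, as in the original plugin
    BinwalkSignatureAction())
idaapi.register_action(desc)
idaapi.attach_action_to_menu("Search/", "binwalk:signatures", idaapi.SETMENU_APP)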
...@@ -9,7 +9,8 @@ for module in binwalk.scan(*sys.argv[1:], signature=True, quiet=True, extract=Tr ...@@ -9,7 +9,8 @@ for module in binwalk.scan(*sys.argv[1:], signature=True, quiet=True, extract=Tr
for result in module.results: for result in module.results:
if module.extractor.output.has_key(result.file.path): if module.extractor.output.has_key(result.file.path):
if module.extractor.output[result.file.path].extracted.has_key(result.offset): if module.extractor.output[result.file.path].extracted.has_key(result.offset):
print ("Extracted '%s' at offset 0x%X from '%s' to '%s'" % (result.description.split(',')[0], print (
result.offset, "Extracted '%s' at offset 0x%X from '%s' to '%s'" % (result.description.split(',')[0],
result.file.path, result.offset,
str(module.extractor.output[result.file.path].extracted[result.offset]))) result.file.path,
str(module.extractor.output[result.file.path].extracted[result.offset])))
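dict.has_key() in the loop above exists only on Python 2; under Python 3 the same walk over the extractor's output uses the in operator instead. A hedged restatement of the script:

import sys
import binwalk

for module in binwalk.scan(*sys.argv[1:], signature=True, quiet=True, extract=True):
    for result in module.results:
        output = module.extractor.output
        # Membership tests instead of the Python 2-only dict.has_key().
        if result.file.path in output and result.offset in output[result.file.path].extracted:
            print("Extracted '%s' at offset 0x%X from '%s' to '%s'" % (
                result.description.split(',')[0],
                result.offset,
                result.file.path,
                str(output[result.file.path].extracted[result.offset])))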
...@@ -4,10 +4,12 @@ import sys ...@@ -4,10 +4,12 @@ import sys
import binwalk import binwalk
try: try:
# Perform a signature scan against the files specified on the command line and suppress the usual binwalk output. # Perform a signature scan against the files specified on the command line
# and suppress the usual binwalk output.
for module in binwalk.scan(*sys.argv[1:], signature=True, quiet=True): for module in binwalk.scan(*sys.argv[1:], signature=True, quiet=True):
print ("%s Results:" % module.name) print ("%s Results:" % module.name)
for result in module.results: for result in module.results:
print ("\t%s 0x%.8X %s [%s]" % (result.file.name, result.offset, result.description, str(result.valid))) print ("\t%s 0x%.8X %s [%s]" % (
result.file.name, result.offset, result.description, str(result.valid)))
except binwalk.ModuleException as e: except binwalk.ModuleException as e:
pass pass