parent 1ec1847474
commit 85cf5f8be3
6 changed files with 1215 additions and 5 deletions
@@ -0,0 +1,595 @@
#!/usr/bin/env python
#
# ESP32 partition table generation tool
#
# Converts partition tables to/from CSV and binary formats.
#
# See https://docs.espressif.com/projects/esp-idf/en/latest/api-guides/partition-tables.html
# for explanation of partition table structure and uses.
#
# SPDX-FileCopyrightText: 2016-2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0

from __future__ import division, print_function, unicode_literals

import argparse
import binascii
import errno
import hashlib
import os
import re
import struct
import sys

MAX_PARTITION_LENGTH = 0xC00  # 3K for partition data (96 entries) leaves 1K in a 4K sector for signature
MD5_PARTITION_BEGIN = b'\xEB\xEB' + b'\xFF' * 14  # The first 2 bytes are like magic numbers for MD5 sum
PARTITION_TABLE_SIZE = 0x1000  # Size of partition table

MIN_PARTITION_SUBTYPE_APP_OTA = 0x10
NUM_PARTITION_SUBTYPE_APP_OTA = 16

__version__ = '1.2'

APP_TYPE = 0x00
DATA_TYPE = 0x01

TYPES = {
    'app': APP_TYPE,
    'data': DATA_TYPE,
}


def get_ptype_as_int(ptype):
    """ Convert a string which might be numeric or the name of a partition type to an integer """
    try:
        return TYPES[ptype]
    except KeyError:
        try:
            return int(ptype, 0)
        except TypeError:
            return ptype


# Keep this map in sync with esp_partition_subtype_t enum in esp_partition.h
SUBTYPES = {
    APP_TYPE: {
        'factory': 0x00,
        'test': 0x20,
    },
    DATA_TYPE: {
        'ota': 0x00,
        'phy': 0x01,
        'nvs': 0x02,
        'coredump': 0x03,
        'nvs_keys': 0x04,
        'efuse': 0x05,
        'undefined': 0x06,
        'esphttpd': 0x80,
        'fat': 0x81,
        'spiffs': 0x82,
    },
}


def get_subtype_as_int(ptype, subtype):
    """ Convert a string which might be numeric or the name of a partition subtype to an integer """
    try:
        return SUBTYPES[get_ptype_as_int(ptype)][subtype]
    except KeyError:
        try:
            return int(subtype, 0)
        except TypeError:
            return subtype


ALIGNMENT = {
    APP_TYPE: 0x10000,
    DATA_TYPE: 0x1000,
}


def get_alignment_for_type(ptype):
    return ALIGNMENT.get(ptype, ALIGNMENT[DATA_TYPE])


def get_partition_type(ptype):
    if ptype == 'app':
        return APP_TYPE
    if ptype == 'data':
        return DATA_TYPE
    raise InputError('Invalid partition type')


def add_extra_subtypes(csv):
    for line in csv:
        try:
            fields = [field.strip() for field in line.split(',')]
            for subtype, subtype_values in SUBTYPES.items():
                if (int(fields[2], 16) in subtype_values.values() and subtype == get_partition_type(fields[0])):
                    raise ValueError('Found duplicate value in partition subtype')
            SUBTYPES[TYPES[fields[0]]][fields[1]] = int(fields[2], 16)
        except InputError as err:
            raise InputError('Error parsing custom subtypes: %s' % err)

quiet = False
md5sum = True
secure = False
offset_part_table = 0


def status(msg):
    """ Print status message to stderr """
    if not quiet:
        critical(msg)


def critical(msg):
    """ Print critical message to stderr """
    sys.stderr.write(msg)
    sys.stderr.write('\n')


class PartitionTable(list):
    def __init__(self):
        super(PartitionTable, self).__init__(self)

    @classmethod
    def from_file(cls, f):
        data = f.read()
        data_is_binary = data[0:2] == PartitionDefinition.MAGIC_BYTES
        if data_is_binary:
            status('Parsing binary partition input...')
            return cls.from_binary(data), True

        data = data.decode()
        status('Parsing CSV input...')
        return cls.from_csv(data), False

    @classmethod
    def from_csv(cls, csv_contents):
        res = PartitionTable()
        lines = csv_contents.splitlines()

        def expand_vars(f):
            f = os.path.expandvars(f)
            m = re.match(r'(?<!\\)\$([A-Za-z_][A-Za-z0-9_]*)', f)
            if m:
                raise InputError("unknown variable '%s'" % m.group(1))
            return f

        for line_no in range(len(lines)):
            line = expand_vars(lines[line_no]).strip()
            if line.startswith('#') or len(line) == 0:
                continue
            try:
                res.append(PartitionDefinition.from_csv(line, line_no + 1))
            except InputError as err:
                raise InputError('Error at line %d: %s\nPlease check the extra_partition_subtypes.inc file in the build/config directory' % (line_no + 1, err))
            except Exception:
                critical('Unexpected error parsing CSV line %d: %s' % (line_no + 1, line))
                raise

        # fix up missing offsets & negative sizes
        last_end = offset_part_table + PARTITION_TABLE_SIZE  # first offset after partition table
        for e in res:
            if e.offset is not None and e.offset < last_end:
                if e == res[0]:
                    raise InputError('CSV Error at line %d: Partitions overlap. Partition sets offset 0x%x, '
                                     'but the partition table occupies the whole sector at 0x%x. '
                                     'Use a free offset 0x%x or higher.'
                                     % (e.line_no, e.offset, offset_part_table, last_end))
                else:
                    raise InputError('CSV Error at line %d: Partitions overlap. Partition sets offset 0x%x. Previous partition ends at 0x%x'
                                     % (e.line_no, e.offset, last_end))
            if e.offset is None:
                pad_to = get_alignment_for_type(e.type)
                if last_end % pad_to != 0:
                    last_end += pad_to - (last_end % pad_to)
                e.offset = last_end
            if e.size < 0:
                e.size = -e.size - e.offset
            last_end = e.offset + e.size

        return res
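
    # A worked sketch of the fix-up loop above, assuming the default table
    # offset of 0x8000 (so entries may start at 0x8000 + 0x1000 = 0x9000).
    # Given a CSV with all offsets left empty:
    #
    #   # Name,    Type, SubType, Offset, Size,   Flags
    #   nvs,       data, nvs,     ,       0x6000,
    #   phy_init,  data, phy,     ,       0x1000,
    #   factory,   app,  factory, ,       1M,
    #
    # the loop assigns nvs -> 0x9000 (data aligned to 0x1000), phy_init ->
    # 0xF000, and factory -> 0x10000 (app partitions align up to 0x10000).
    # A negative size such as -1M resolves to 0x100000 - offset, i.e. the
    # partition extends up to absolute address 1M.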

    def __getitem__(self, item):
        """ Allow partition table access via name as well as by
        numeric index. """
        if isinstance(item, str):
            for x in self:
                if x.name == item:
                    return x
            raise ValueError("No partition entry named '%s'" % item)
        else:
            return super(PartitionTable, self).__getitem__(item)

    def find_by_type(self, ptype, subtype):
        """ Yield all partitions matching the given type & subtype """
        # convert ptype & subtype names (if supplied this way) to integer values
        ptype = get_ptype_as_int(ptype)
        subtype = get_subtype_as_int(ptype, subtype)

        for p in self:
            if p.type == ptype and p.subtype == subtype:
                yield p
        return

    def find_by_name(self, name):
        for p in self:
            if p.name == name:
                return p
        return None

    def verify(self):
        # verify each partition individually
        for p in self:
            p.verify()

        # check for duplicate names
        names = [p.name for p in self]
        duplicates = set(n for n in names if names.count(n) > 1)

        # print duplicate partitions, sorted by name
        if len(duplicates) != 0:
            critical('A list of partitions that have the same name:')
            for p in sorted(self, key=lambda x: x.name):
                if len(duplicates.intersection([p.name])) != 0:
                    critical('%s' % (p.to_csv()))
            raise InputError('Partition names must be unique')

        # check for overlaps
        last = None
        for p in sorted(self, key=lambda x: x.offset):
            if p.offset < offset_part_table + PARTITION_TABLE_SIZE:
                raise InputError('Partition offset 0x%x is below 0x%x' % (p.offset, offset_part_table + PARTITION_TABLE_SIZE))
            if last is not None and p.offset < last.offset + last.size:
                raise InputError('Partition at 0x%x overlaps 0x%x-0x%x' % (p.offset, last.offset, last.offset + last.size - 1))
            last = p

        # check that otadata is unique
        otadata_duplicates = [p for p in self if p.type == TYPES['data'] and p.subtype == SUBTYPES[DATA_TYPE]['ota']]
        if len(otadata_duplicates) > 1:
            for p in otadata_duplicates:
                critical('%s' % (p.to_csv()))
            raise InputError('Found multiple otadata partitions. Only one partition can be defined with type="data"(1) and subtype="ota"(0).')

        if len(otadata_duplicates) == 1 and otadata_duplicates[0].size != 0x2000:
            p = otadata_duplicates[0]
            critical('%s' % (p.to_csv()))
            raise InputError('otadata partition must have size = 0x2000')

    def flash_size(self):
        """ Return the size that partitions will occupy in flash
        (ie the offset the last partition ends at)
        """
        try:
            last = sorted(self, reverse=True)[0]
        except IndexError:
            return 0  # empty table!
        return last.offset + last.size

    def verify_size_fits(self, flash_size_bytes: int) -> None:
        """ Check that partition table fits into the given flash size.
        Raises InputError otherwise.
        """
        table_size = self.flash_size()
        if flash_size_bytes < table_size:
            mb = 1024 * 1024
            raise InputError('Partition table occupies %.1fMB of flash (%d bytes) which does not fit in configured '
                             "flash size %dMB. Change the flash size in menuconfig under the 'Serial Flasher Config' menu." %
                             (table_size / mb, table_size, flash_size_bytes / mb))

    @classmethod
    def from_binary(cls, b):
        md5 = hashlib.md5()
        result = cls()
        for o in range(0, len(b), 32):
            data = b[o:o + 32]
            if len(data) != 32:
                raise InputError('Partition table length must be a multiple of 32 bytes')
            if data == b'\xFF' * 32:
                return result  # got end marker
            if md5sum and data[:2] == MD5_PARTITION_BEGIN[:2]:  # check only the magic number part
                if data[16:] == md5.digest():
                    continue  # the next iteration will check for the end marker
                else:
                    raise InputError("MD5 checksums don't match! (computed: 0x%s, parsed: 0x%s)" % (md5.hexdigest(), binascii.hexlify(data[16:])))
            else:
                md5.update(data)
            result.append(PartitionDefinition.from_binary(data))
        raise InputError('Partition table is missing an end-of-table marker')

    def to_binary(self):
        result = b''.join(e.to_binary() for e in self)
        if md5sum:
            result += MD5_PARTITION_BEGIN + hashlib.md5(result).digest()
        if len(result) >= MAX_PARTITION_LENGTH:
            raise InputError('Binary partition table length (%d) longer than max' % len(result))
        result += b'\xFF' * (MAX_PARTITION_LENGTH - len(result))  # pad the sector, for signing
        return result
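
    # Layout of the blob built above: one packed 32-byte entry per partition;
    # with md5sum enabled, these are followed by a 32-byte checksum entry
    # (magic bytes 0xEB 0xEB, 14 bytes of 0xFF, then the 16-byte MD5 digest of
    # everything before it); the remainder of the 0xC00-byte region is 0xFF
    # padding, which from_binary() also treats as the end-of-table marker.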

    def to_csv(self, simple_formatting=False):
        rows = ['# ESP-IDF Partition Table',
                '# Name, Type, SubType, Offset, Size, Flags']
        rows += [x.to_csv(simple_formatting) for x in self]
        return '\n'.join(rows) + '\n'


class PartitionDefinition(object):
    MAGIC_BYTES = b'\xAA\x50'

    # dictionary maps flag name (as used in CSV flags list, property name)
    # to bit set in flags words in binary format
    FLAGS = {
        'encrypted': 0
    }

    # add subtypes for the 16 OTA slot values ("ota_XX", etc.)
    for ota_slot in range(NUM_PARTITION_SUBTYPE_APP_OTA):
        SUBTYPES[TYPES['app']]['ota_%d' % ota_slot] = MIN_PARTITION_SUBTYPE_APP_OTA + ota_slot
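    # With the constants above, this registers ota_0 = 0x10 through
    # ota_15 = 0x1F as valid 'app' subtypes, mirroring the
    # esp_partition_subtype_t enum referenced earlier.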

    def __init__(self):
        self.name = ''
        self.type = None
        self.subtype = None
        self.offset = None
        self.size = None
        self.encrypted = False

    @classmethod
    def from_csv(cls, line, line_no):
        """ Parse a line from the CSV """
        line_w_defaults = line + ',,,,'  # lazy way to support default fields
        fields = [f.strip() for f in line_w_defaults.split(',')]

        res = PartitionDefinition()
        res.line_no = line_no
        res.name = fields[0]
        res.type = res.parse_type(fields[1])
        res.subtype = res.parse_subtype(fields[2])
        res.offset = res.parse_address(fields[3])
        res.size = res.parse_address(fields[4])
        if res.size is None:
            raise InputError("Size field can't be empty")

        flags = fields[5].split(':')
        for flag in flags:
            if flag in cls.FLAGS:
                setattr(res, flag, True)
            elif len(flag) > 0:
                raise InputError("CSV flag column contains unknown flag '%s'" % (flag))

        return res

    def __eq__(self, other):
        return self.name == other.name and self.type == other.type \
            and self.subtype == other.subtype and self.offset == other.offset \
            and self.size == other.size

    def __repr__(self):
        def maybe_hex(x):
            return '0x%x' % x if x is not None else 'None'
        return "PartitionDefinition('%s', 0x%x, 0x%x, %s, %s)" % (self.name, self.type, self.subtype or 0,
                                                                  maybe_hex(self.offset), maybe_hex(self.size))

    def __str__(self):
        return "Part '%s' %d/%d @ 0x%x size 0x%x" % (self.name, self.type, self.subtype, self.offset or -1, self.size or -1)

    def __cmp__(self, other):
        return self.offset - other.offset

    def __lt__(self, other):
        return self.offset < other.offset

    def __gt__(self, other):
        return self.offset > other.offset

    def __le__(self, other):
        return self.offset <= other.offset

    def __ge__(self, other):
        return self.offset >= other.offset

    def parse_type(self, strval):
        if strval == '':
            raise InputError("Field 'type' can't be left empty.")
        return parse_int(strval, TYPES)

    def parse_subtype(self, strval):
        if strval == '':
            if self.type == TYPES['app']:
                raise InputError('App partition cannot have an empty subtype')
            return SUBTYPES[DATA_TYPE]['undefined']
        return parse_int(strval, SUBTYPES.get(self.type, {}))

    def parse_address(self, strval):
        if strval == '':
            return None  # PartitionTable will fill in default
        return parse_int(strval)

    def verify(self):
        if self.type is None:
            raise ValidationError(self, 'Type field is not set')
        if self.subtype is None:
            raise ValidationError(self, 'Subtype field is not set')
        if self.offset is None:
            raise ValidationError(self, 'Offset field is not set')
        if self.size is None:
            raise ValidationError(self, 'Size field is not set')
        align = get_alignment_for_type(self.type)
        if self.offset % align:
            raise ValidationError(self, 'Offset 0x%x is not aligned to 0x%x' % (self.offset, align))
        if self.size % align and secure and self.type == APP_TYPE:
            raise ValidationError(self, 'Size 0x%x is not aligned to 0x%x' % (self.size, align))

        if self.name in TYPES and TYPES.get(self.name, '') != self.type:
            critical("WARNING: Partition has name '%s' which is a partition type, but does not match this partition's "
                     'type (0x%x). Mistake in partition table?' % (self.name, self.type))
        all_subtype_names = []
        for names in (t.keys() for t in SUBTYPES.values()):
            all_subtype_names += names
        if self.name in all_subtype_names and SUBTYPES.get(self.type, {}).get(self.name, '') != self.subtype:
            critical("WARNING: Partition has name '%s' which is a partition subtype, but this partition has "
                     'non-matching type 0x%x and subtype 0x%x. Mistake in partition table?' % (self.name, self.type, self.subtype))

    STRUCT_FORMAT = b'<2sBBLL16sL'
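
    # Field-by-field view of STRUCT_FORMAT (little-endian, 32 bytes total):
    #   2s   magic bytes 0xAA 0x50
    #   B    type (0x00 app / 0x01 data)
    #   B    subtype
    #   L    offset (4 bytes)
    #   L    size (4 bytes)
    #   16s  name (null-padded)
    #   L    flags (bit 0 = 'encrypted')
    # struct.calcsize(STRUCT_FORMAT) == 32, which from_binary() below relies on.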

    @classmethod
    def from_binary(cls, b):
        if len(b) != 32:
            raise InputError('Partition definition length must be exactly 32 bytes. Got %d bytes.' % len(b))
        res = cls()
        (magic, res.type, res.subtype, res.offset,
         res.size, res.name, flags) = struct.unpack(cls.STRUCT_FORMAT, b)
        if b'\x00' in res.name:  # strip null byte padding from name string
            res.name = res.name[:res.name.index(b'\x00')]
        res.name = res.name.decode()
        if magic != cls.MAGIC_BYTES:
            raise InputError('Invalid magic bytes (%r) for partition definition' % magic)
        for flag, bit in cls.FLAGS.items():
            if flags & (1 << bit):
                setattr(res, flag, True)
                flags &= ~(1 << bit)
        if flags != 0:
            critical('WARNING: Partition definition had unknown flag(s) 0x%08x. Newer binary format?' % flags)
        return res

    def get_flags_list(self):
        return [flag for flag in self.FLAGS.keys() if getattr(self, flag)]

    def to_binary(self):
        flags = sum((1 << self.FLAGS[flag]) for flag in self.get_flags_list())
        return struct.pack(self.STRUCT_FORMAT,
                           self.MAGIC_BYTES,
                           self.type, self.subtype,
                           self.offset, self.size,
                           self.name.encode(),
                           flags)

    def to_csv(self, simple_formatting=False):
        def addr_format(a, include_sizes):
            if not simple_formatting and include_sizes:
                for (val, suffix) in [(0x100000, 'M'), (0x400, 'K')]:
                    if a % val == 0:
                        return '%d%s' % (a // val, suffix)
            return '0x%x' % a

        def lookup_keyword(t, keywords):
            for k, v in keywords.items():
                if simple_formatting is False and t == v:
                    return k
            return '%d' % t

        def generate_text_flags():
            """ colon-delimited list of flags """
            return ':'.join(self.get_flags_list())

        return ','.join([self.name,
                         lookup_keyword(self.type, TYPES),
                         lookup_keyword(self.subtype, SUBTYPES.get(self.type, {})),
                         addr_format(self.offset, False),
                         addr_format(self.size, True),
                         generate_text_flags()])


def parse_int(v, keywords={}):
    """Generic parser for integer fields - int(x,0) with provision for
    k/m/K/M suffixes and 'keyword' value lookup.
    """
    try:
        for letter, multiplier in [('k', 1024), ('m', 1024 * 1024)]:
            if v.lower().endswith(letter):
                return parse_int(v[:-1], keywords) * multiplier
        return int(v, 0)
    except ValueError:
        if len(keywords) == 0:
            raise InputError('Invalid field value %s' % v)
        try:
            return keywords[v.lower()]
        except KeyError:
            raise InputError("Value '%s' is not valid. Known keywords: %s" % (v, ', '.join(keywords)))
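
# Behaviour sketch for parse_int, given the definitions above:
#   parse_int('0x9000')     -> 0x9000    (int(x, 0) accepts hex/decimal)
#   parse_int('4K')         -> 4096      (suffixes are case-insensitive)
#   parse_int('1M')         -> 0x100000
#   parse_int('app', TYPES) -> 0x00      (keyword lookup on ValueError)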


def main():
    global quiet
    global md5sum
    global offset_part_table
    global secure
    parser = argparse.ArgumentParser(description='ESP32 partition table utility')

    parser.add_argument('--flash-size', help='Optional flash size limit, checks partition table fits in flash',
                        nargs='?', choices=['1MB', '2MB', '4MB', '8MB', '16MB', '32MB', '64MB', '128MB'])
    parser.add_argument('--disable-md5sum', help='Disable md5 checksum for the partition table', default=False, action='store_true')
    parser.add_argument('--no-verify', help="Don't verify partition table fields", action='store_true')
    parser.add_argument('--verify', '-v', help='Verify partition table fields (deprecated, this behaviour is '
                        'enabled by default and this flag does nothing).', action='store_true')
    parser.add_argument('--quiet', '-q', help="Don't print non-critical status messages to stderr", action='store_true')
    parser.add_argument('--offset', '-o', help='Set offset of the partition table', default='0x8000')
    parser.add_argument('--secure', help='Require app partitions to be suitable for secure boot', action='store_true')
    parser.add_argument('--extra-partition-subtypes', help='Extra partition subtype entries', nargs='*')
    parser.add_argument('input', help='Path to CSV or binary file to parse.', type=argparse.FileType('rb'))
    parser.add_argument('output', help='Path to output converted binary or CSV file. Will use stdout if omitted.',
                        nargs='?', default='-')

    args = parser.parse_args()

    quiet = args.quiet
    md5sum = not args.disable_md5sum
    secure = args.secure
    offset_part_table = int(args.offset, 0)
    if args.extra_partition_subtypes:
        add_extra_subtypes(args.extra_partition_subtypes)

    table, input_is_binary = PartitionTable.from_file(args.input)

    if not args.no_verify:
        status('Verifying table...')
        table.verify()

    if args.flash_size:
        size_mb = int(args.flash_size.replace('MB', ''))
        table.verify_size_fits(size_mb * 1024 * 1024)

    # Make sure that the output directory is created
    output_dir = os.path.abspath(os.path.dirname(args.output))

    if not os.path.exists(output_dir):
        try:
            os.makedirs(output_dir)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise

    if input_is_binary:
        output = table.to_csv()
        with sys.stdout if args.output == '-' else open(args.output, 'w') as f:
            f.write(output)
    else:
        output = table.to_binary()
        try:
            stdout_binary = sys.stdout.buffer  # Python 3
        except AttributeError:
            stdout_binary = sys.stdout
        with stdout_binary if args.output == '-' else open(args.output, 'wb') as f:
            f.write(output)


class InputError(RuntimeError):
    def __init__(self, e):
        super(InputError, self).__init__(e)


class ValidationError(InputError):
    def __init__(self, partition, message):
        super(ValidationError, self).__init__(
            'Partition %s invalid: %s' % (partition.name, message))


if __name__ == '__main__':
    try:
        main()
    except InputError as e:
        print(e, file=sys.stderr)
        sys.exit(2)
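
# Typical invocations, assuming this script is saved as gen_esp32part.py:
#   python gen_esp32part.py partitions.csv partitions.bin    # CSV -> binary
#   python gen_esp32part.py partitions.bin                   # binary -> CSV on stdout
#   python gen_esp32part.py --flash-size 4MB partitions.csv partitions.bin
# The conversion direction is picked automatically by PartitionTable.from_file(),
# which looks for the 0xAA 0x50 entry magic at the start of the input.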
@@ -0,0 +1,593 @@
#!/usr/bin/env python
#
# spiffsgen is a tool used to generate a spiffs image from a directory
#
# SPDX-FileCopyrightText: 2019-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0

from __future__ import division, print_function

import argparse
import io
import math
import os
import struct

try:
    import typing

    TSP = typing.TypeVar('TSP', bound='SpiffsObjPageWithIdx')
    ObjIdsItem = typing.Tuple[int, typing.Type[TSP]]
except ImportError:
    pass


SPIFFS_PH_FLAG_USED_FINAL_INDEX = 0xF8
SPIFFS_PH_FLAG_USED_FINAL = 0xFC

SPIFFS_PH_FLAG_LEN = 1
SPIFFS_PH_IX_SIZE_LEN = 4
SPIFFS_PH_IX_OBJ_TYPE_LEN = 1
SPIFFS_TYPE_FILE = 1

# Based on typedefs under spiffs_config.h
SPIFFS_OBJ_ID_LEN = 2  # spiffs_obj_id
SPIFFS_SPAN_IX_LEN = 2  # spiffs_span_ix
SPIFFS_PAGE_IX_LEN = 2  # spiffs_page_ix
SPIFFS_BLOCK_IX_LEN = 2  # spiffs_block_ix


class SpiffsBuildConfig(object):
    def __init__(self,
                 page_size,  # type: int
                 page_ix_len,  # type: int
                 block_size,  # type: int
                 block_ix_len,  # type: int
                 meta_len,  # type: int
                 obj_name_len,  # type: int
                 obj_id_len,  # type: int
                 span_ix_len,  # type: int
                 packed,  # type: bool
                 aligned,  # type: bool
                 endianness,  # type: str
                 use_magic,  # type: bool
                 use_magic_len,  # type: bool
                 aligned_obj_ix_tables  # type: bool
                 ):
        if block_size % page_size != 0:
            raise RuntimeError('block size should be a multiple of page size')

        self.page_size = page_size
        self.block_size = block_size
        self.obj_id_len = obj_id_len
        self.span_ix_len = span_ix_len
        self.packed = packed
        self.aligned = aligned
        self.obj_name_len = obj_name_len
        self.meta_len = meta_len
        self.page_ix_len = page_ix_len
        self.block_ix_len = block_ix_len
        self.endianness = endianness
        self.use_magic = use_magic
        self.use_magic_len = use_magic_len
        self.aligned_obj_ix_tables = aligned_obj_ix_tables

        self.PAGES_PER_BLOCK = self.block_size // self.page_size
        self.OBJ_LU_PAGES_PER_BLOCK = int(math.ceil(self.block_size / self.page_size * self.obj_id_len / self.page_size))
        self.OBJ_USABLE_PAGES_PER_BLOCK = self.PAGES_PER_BLOCK - self.OBJ_LU_PAGES_PER_BLOCK

        self.OBJ_LU_PAGES_OBJ_IDS_LIM = self.page_size // self.obj_id_len

        self.OBJ_DATA_PAGE_HEADER_LEN = self.obj_id_len + self.span_ix_len + SPIFFS_PH_FLAG_LEN

        pad = 4 - (4 if self.OBJ_DATA_PAGE_HEADER_LEN % 4 == 0 else self.OBJ_DATA_PAGE_HEADER_LEN % 4)

        self.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED = self.OBJ_DATA_PAGE_HEADER_LEN + pad
        self.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED_PAD = pad
        self.OBJ_DATA_PAGE_CONTENT_LEN = self.page_size - self.OBJ_DATA_PAGE_HEADER_LEN

        self.OBJ_INDEX_PAGES_HEADER_LEN = (self.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED + SPIFFS_PH_IX_SIZE_LEN +
                                           SPIFFS_PH_IX_OBJ_TYPE_LEN + self.obj_name_len + self.meta_len)
        if aligned_obj_ix_tables:
            self.OBJ_INDEX_PAGES_HEADER_LEN_ALIGNED = (self.OBJ_INDEX_PAGES_HEADER_LEN + SPIFFS_PAGE_IX_LEN - 1) & ~(SPIFFS_PAGE_IX_LEN - 1)
            self.OBJ_INDEX_PAGES_HEADER_LEN_ALIGNED_PAD = self.OBJ_INDEX_PAGES_HEADER_LEN_ALIGNED - self.OBJ_INDEX_PAGES_HEADER_LEN
        else:
            self.OBJ_INDEX_PAGES_HEADER_LEN_ALIGNED = self.OBJ_INDEX_PAGES_HEADER_LEN
            self.OBJ_INDEX_PAGES_HEADER_LEN_ALIGNED_PAD = 0

        self.OBJ_INDEX_PAGES_OBJ_IDS_HEAD_LIM = (self.page_size - self.OBJ_INDEX_PAGES_HEADER_LEN_ALIGNED) // self.block_ix_len
        self.OBJ_INDEX_PAGES_OBJ_IDS_LIM = (self.page_size - self.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED) // self.block_ix_len
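
        # Worked numbers for this tool's defaults (page_size=256,
        # block_size=4096, obj_id_len=span_ix_len=block_ix_len=2,
        # obj_name_len=32, meta_len=4, unaligned object index tables):
        #   PAGES_PER_BLOCK                  = 4096 // 256 = 16
        #   OBJ_LU_PAGES_PER_BLOCK           = ceil(16 * 2 / 256) = 1
        #   OBJ_USABLE_PAGES_PER_BLOCK       = 16 - 1 = 15
        #   OBJ_DATA_PAGE_HEADER_LEN         = 2 + 2 + 1 = 5 (aligned: 8, pad 3)
        #   OBJ_DATA_PAGE_CONTENT_LEN        = 256 - 5 = 251
        #   OBJ_INDEX_PAGES_HEADER_LEN       = 8 + 4 + 1 + 32 + 4 = 49
        #   OBJ_INDEX_PAGES_OBJ_IDS_HEAD_LIM = (256 - 49) // 2 = 103
        #   OBJ_INDEX_PAGES_OBJ_IDS_LIM      = (256 - 8) // 2 = 124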


class SpiffsFullError(RuntimeError):
    pass


class SpiffsPage(object):
    _endianness_dict = {
        'little': '<',
        'big': '>'
    }

    _len_dict = {
        1: 'B',
        2: 'H',
        4: 'I',
        8: 'Q'
    }

    def __init__(self, bix, build_config):  # type: (int, SpiffsBuildConfig) -> None
        self.build_config = build_config
        self.bix = bix

    def to_binary(self):  # type: () -> bytes
        raise NotImplementedError()


class SpiffsObjPageWithIdx(SpiffsPage):
    def __init__(self, obj_id, build_config):  # type: (int, SpiffsBuildConfig) -> None
        super(SpiffsObjPageWithIdx, self).__init__(0, build_config)
        self.obj_id = obj_id

    def to_binary(self):  # type: () -> bytes
        raise NotImplementedError()


class SpiffsObjLuPage(SpiffsPage):
    def __init__(self, bix, build_config):  # type: (int, SpiffsBuildConfig) -> None
        SpiffsPage.__init__(self, bix, build_config)

        self.obj_ids_limit = self.build_config.OBJ_LU_PAGES_OBJ_IDS_LIM
        self.obj_ids = list()  # type: typing.List[ObjIdsItem]

    def _calc_magic(self, blocks_lim):  # type: (int) -> int
        # Calculate the magic value mirroring computation done by the macro SPIFFS_MAGIC defined in
        # spiffs_nucleus.h
        magic = 0x20140529 ^ self.build_config.page_size
        if self.build_config.use_magic_len:
            magic = magic ^ (blocks_lim - self.bix)
        # narrow the result to build_config.obj_id_len bytes
        mask = (2 << (8 * self.build_config.obj_id_len)) - 1
        return magic & mask
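
        # Worked example, assuming page_size=256, obj_id_len=2, use_magic_len,
        # an 8-block image (blocks_lim=8) and the first block (bix=0):
        #   magic = 0x20140529 ^ 0x100 = 0x20140429
        #   magic = 0x20140429 ^ (8 - 0) = 0x20140421
        #   masked with (2 << 16) - 1 = 0x1FFFF  -> 0x0421
        # which is then packed into the lookup page as a 16-bit object id.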

    def register_page(self, page):  # type: (TSP) -> None
        if not self.obj_ids_limit > 0:
            raise SpiffsFullError()

        obj_id = (page.obj_id, page.__class__)
        self.obj_ids.append(obj_id)
        self.obj_ids_limit -= 1

    def to_binary(self):  # type: () -> bytes
        img = b''

        for (obj_id, page_type) in self.obj_ids:
            if page_type == SpiffsObjIndexPage:
                obj_id ^= (1 << ((self.build_config.obj_id_len * 8) - 1))
            img += struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
                               SpiffsPage._len_dict[self.build_config.obj_id_len], obj_id)

        assert len(img) <= self.build_config.page_size

        img += b'\xFF' * (self.build_config.page_size - len(img))

        return img

    def magicfy(self, blocks_lim):  # type: (int) -> None
        # Only use the magic value if no valid obj id has been written to the spot, which is
        # the spot taken up by the last obj id on the last lookup page. The parent is
        # responsible for determining which is the last lookup page and calling this function.
        remaining = self.obj_ids_limit
        empty_obj_id_dict = {
            1: 0xFF,
            2: 0xFFFF,
            4: 0xFFFFFFFF,
            8: 0xFFFFFFFFFFFFFFFF
        }
        if remaining >= 2:
            for i in range(remaining):
                if i == remaining - 2:
                    self.obj_ids.append((self._calc_magic(blocks_lim), SpiffsObjDataPage))
                    break
                else:
                    self.obj_ids.append((empty_obj_id_dict[self.build_config.obj_id_len], SpiffsObjDataPage))
                self.obj_ids_limit -= 1


class SpiffsObjIndexPage(SpiffsObjPageWithIdx):
    def __init__(self, obj_id, span_ix, size, name, build_config
                 ):  # type: (int, int, int, str, SpiffsBuildConfig) -> None
        super(SpiffsObjIndexPage, self).__init__(obj_id, build_config)
        self.span_ix = span_ix
        self.name = name
        self.size = size

        if self.span_ix == 0:
            self.pages_lim = self.build_config.OBJ_INDEX_PAGES_OBJ_IDS_HEAD_LIM
        else:
            self.pages_lim = self.build_config.OBJ_INDEX_PAGES_OBJ_IDS_LIM

        self.pages = list()  # type: typing.List[int]

    def register_page(self, page):  # type: (SpiffsObjDataPage) -> None
        if not self.pages_lim > 0:
            raise SpiffsFullError

        self.pages.append(page.offset)
        self.pages_lim -= 1

    def to_binary(self):  # type: () -> bytes
        obj_id = self.obj_id ^ (1 << ((self.build_config.obj_id_len * 8) - 1))
        img = struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
                          SpiffsPage._len_dict[self.build_config.obj_id_len] +
                          SpiffsPage._len_dict[self.build_config.span_ix_len] +
                          SpiffsPage._len_dict[SPIFFS_PH_FLAG_LEN],
                          obj_id,
                          self.span_ix,
                          SPIFFS_PH_FLAG_USED_FINAL_INDEX)

        # Add padding before the object index page specific information
        img += b'\xFF' * self.build_config.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED_PAD

        # If this is the first object index page for the object, add filename, type
        # and size information
        if self.span_ix == 0:
            img += struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
                               SpiffsPage._len_dict[SPIFFS_PH_IX_SIZE_LEN] +
                               SpiffsPage._len_dict[SPIFFS_PH_FLAG_LEN],
                               self.size,
                               SPIFFS_TYPE_FILE)

            img += self.name.encode() + (b'\x00' * (
                (self.build_config.obj_name_len - len(self.name))
                + self.build_config.meta_len
                + self.build_config.OBJ_INDEX_PAGES_HEADER_LEN_ALIGNED_PAD))

        # Finally, add the page indexes of the data pages
        for page in self.pages:
            page = page >> int(math.log(self.build_config.page_size, 2))
            img += struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
                               SpiffsPage._len_dict[self.build_config.page_ix_len], page)

        assert len(img) <= self.build_config.page_size

        img += b'\xFF' * (self.build_config.page_size - len(img))

        return img


class SpiffsObjDataPage(SpiffsObjPageWithIdx):
    def __init__(self, offset, obj_id, span_ix, contents, build_config
                 ):  # type: (int, int, int, bytes, SpiffsBuildConfig) -> None
        super(SpiffsObjDataPage, self).__init__(obj_id, build_config)
        self.span_ix = span_ix
        self.contents = contents
        self.offset = offset

    def to_binary(self):  # type: () -> bytes
        img = struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
                          SpiffsPage._len_dict[self.build_config.obj_id_len] +
                          SpiffsPage._len_dict[self.build_config.span_ix_len] +
                          SpiffsPage._len_dict[SPIFFS_PH_FLAG_LEN],
                          self.obj_id,
                          self.span_ix,
                          SPIFFS_PH_FLAG_USED_FINAL)

        img += self.contents

        assert len(img) <= self.build_config.page_size

        img += b'\xFF' * (self.build_config.page_size - len(img))

        return img
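
    # Resulting data page layout under the default config (page_size=256,
    # obj_id_len=span_ix_len=2):
    #   [obj_id:2][span_ix:2][flags:1 = 0xFC][contents: up to 251 bytes][0xFF pad]
    # The 5-byte header is packed unaligned here; the alignment pad computed in
    # SpiffsBuildConfig is only inserted on object index pages.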


class SpiffsBlock(object):
    def _reset(self):  # type: () -> None
        self.cur_obj_index_span_ix = 0
        self.cur_obj_data_span_ix = 0
        self.cur_obj_id = 0
        self.cur_obj_idx_page = None  # type: typing.Optional[SpiffsObjIndexPage]

    def __init__(self, bix, build_config):  # type: (int, SpiffsBuildConfig) -> None
        self.build_config = build_config
        self.offset = bix * self.build_config.block_size
        self.remaining_pages = self.build_config.OBJ_USABLE_PAGES_PER_BLOCK
        self.pages = list()  # type: typing.List[SpiffsPage]
        self.bix = bix

        lu_pages = list()
        for i in range(self.build_config.OBJ_LU_PAGES_PER_BLOCK):
            page = SpiffsObjLuPage(self.bix, self.build_config)
            lu_pages.append(page)

        self.pages.extend(lu_pages)

        self.lu_page_iter = iter(lu_pages)
        self.lu_page = next(self.lu_page_iter)

        self._reset()

    def _register_page(self, page):  # type: (TSP) -> None
        if isinstance(page, SpiffsObjDataPage):
            assert self.cur_obj_idx_page is not None
            self.cur_obj_idx_page.register_page(page)  # can raise SpiffsFullError

        try:
            self.lu_page.register_page(page)
        except SpiffsFullError:
            self.lu_page = next(self.lu_page_iter, None)  # None once the lookup pages are exhausted
            try:
                self.lu_page.register_page(page)
            except AttributeError:  # no next lookup page
                # Since the amount of lookup pages is pre-computed at every block instance,
                # this should never occur
                raise RuntimeError('invalid attempt to add page to a block when there is no more space in lookup')

        self.pages.append(page)

    def begin_obj(self, obj_id, size, name, obj_index_span_ix=0, obj_data_span_ix=0
                  ):  # type: (int, int, str, int, int) -> None
        if not self.remaining_pages > 0:
            raise SpiffsFullError()
        self._reset()

        self.cur_obj_id = obj_id
        self.cur_obj_index_span_ix = obj_index_span_ix
        self.cur_obj_data_span_ix = obj_data_span_ix

        page = SpiffsObjIndexPage(obj_id, self.cur_obj_index_span_ix, size, name, self.build_config)
        self._register_page(page)

        self.cur_obj_idx_page = page

        self.remaining_pages -= 1
        self.cur_obj_index_span_ix += 1

    def update_obj(self, contents):  # type: (bytes) -> None
        if not self.remaining_pages > 0:
            raise SpiffsFullError()
        page = SpiffsObjDataPage(self.offset + (len(self.pages) * self.build_config.page_size),
                                 self.cur_obj_id, self.cur_obj_data_span_ix, contents, self.build_config)

        self._register_page(page)

        self.cur_obj_data_span_ix += 1
        self.remaining_pages -= 1

    def end_obj(self):  # type: () -> None
        self._reset()

    def is_full(self):  # type: () -> bool
        return self.remaining_pages <= 0

    def to_binary(self, blocks_lim):  # type: (int) -> bytes
        img = b''

        if self.build_config.use_magic:
            for (idx, page) in enumerate(self.pages):
                if idx == self.build_config.OBJ_LU_PAGES_PER_BLOCK - 1:
                    assert isinstance(page, SpiffsObjLuPage)
                    page.magicfy(blocks_lim)
                img += page.to_binary()
        else:
            for page in self.pages:
                img += page.to_binary()

        assert len(img) <= self.build_config.block_size

        img += b'\xFF' * (self.build_config.block_size - len(img))
        return img


class SpiffsFS(object):
    def __init__(self, img_size, build_config):  # type: (int, SpiffsBuildConfig) -> None
        if img_size % build_config.block_size != 0:
            raise RuntimeError('image size should be a multiple of block size')

        self.img_size = img_size
        self.build_config = build_config

        self.blocks = list()  # type: typing.List[SpiffsBlock]
        self.blocks_lim = self.img_size // self.build_config.block_size
        self.remaining_blocks = self.blocks_lim
        self.cur_obj_id = 1  # starting object id

    def _create_block(self):  # type: () -> SpiffsBlock
        if self.is_full():
            raise SpiffsFullError('the image size has been exceeded')

        block = SpiffsBlock(len(self.blocks), self.build_config)
        self.blocks.append(block)
        self.remaining_blocks -= 1
        return block

    def is_full(self):  # type: () -> bool
        return self.remaining_blocks <= 0

    def create_file(self, img_path, file_path):  # type: (str, str) -> None
        if len(img_path) > self.build_config.obj_name_len:
            raise RuntimeError("object name '%s' too long" % img_path)

        name = img_path

        with open(file_path, 'rb') as obj:
            contents = obj.read()

        stream = io.BytesIO(contents)

        try:
            block = self.blocks[-1]
            block.begin_obj(self.cur_obj_id, len(contents), name)
        except (IndexError, SpiffsFullError):
            block = self._create_block()
            block.begin_obj(self.cur_obj_id, len(contents), name)

        contents_chunk = stream.read(self.build_config.OBJ_DATA_PAGE_CONTENT_LEN)

        while contents_chunk:
            try:
                block = self.blocks[-1]
                try:
                    # This can fail because either (1) all the pages in the block have
                    # been used or (2) the object index has been exhausted.
                    block.update_obj(contents_chunk)
                except SpiffsFullError:
                    # If it's (1), use the outer exception handler
                    if block.is_full():
                        raise SpiffsFullError
                    # If it's (2), write another object index page
                    block.begin_obj(self.cur_obj_id, len(contents), name,
                                    obj_index_span_ix=block.cur_obj_index_span_ix,
                                    obj_data_span_ix=block.cur_obj_data_span_ix)
                    continue
            except (IndexError, SpiffsFullError):
                # All pages in the block have been exhausted. Create a new block, copying
                # the previous state of the block to a new one for the continuation of the
                # current object
                prev_block = block
                block = self._create_block()
                block.cur_obj_id = prev_block.cur_obj_id
                block.cur_obj_idx_page = prev_block.cur_obj_idx_page
                block.cur_obj_data_span_ix = prev_block.cur_obj_data_span_ix
                block.cur_obj_index_span_ix = prev_block.cur_obj_index_span_ix
                continue

            contents_chunk = stream.read(self.build_config.OBJ_DATA_PAGE_CONTENT_LEN)

        block.end_obj()

        self.cur_obj_id += 1

    def to_binary(self):  # type: () -> bytes
        img = b''
        all_blocks = []
        for block in self.blocks:
            all_blocks.append(block.to_binary(self.blocks_lim))
        bix = len(self.blocks)
        if self.build_config.use_magic:
            # Create empty blocks with magic numbers
            while self.remaining_blocks > 0:
                block = SpiffsBlock(bix, self.build_config)
                all_blocks.append(block.to_binary(self.blocks_lim))
                self.remaining_blocks -= 1
                bix += 1
        else:
            # Just fill the remaining space with 0xFF
            all_blocks.append(b'\xFF' * (self.img_size - len(all_blocks) * self.build_config.block_size))
        img += b''.join(all_blocks)
        return img


class CustomHelpFormatter(argparse.HelpFormatter):
    """
    Similar to argparse.ArgumentDefaultsHelpFormatter, except it
    doesn't add the default value if "(default:" is already present.
    This helps in the case of options with action="store_false", like
    --no-magic or --no-magic-len.
    """
    def _get_help_string(self, action):  # type: (argparse.Action) -> str
        if action.help is None:
            return ''
        if '%(default)' not in action.help and '(default:' not in action.help:
            if action.default is not argparse.SUPPRESS:
                defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
                if action.option_strings or action.nargs in defaulting_nargs:
                    return action.help + ' (default: %(default)s)'
        return action.help


def main():  # type: () -> None
    parser = argparse.ArgumentParser(description='SPIFFS Image Generator',
                                     formatter_class=CustomHelpFormatter)

    parser.add_argument('image_size',
                        help='Size of the created image')

    parser.add_argument('base_dir',
                        help='Path to directory from which the image will be created')

    parser.add_argument('output_file',
                        help='Created image output file path')

    parser.add_argument('--page-size',
                        help='Logical page size. Set to the same value as CONFIG_SPIFFS_PAGE_SIZE.',
                        type=int,
                        default=256)

    parser.add_argument('--block-size',
                        help="Logical block size. Set to the same value as the flash chip's sector size (g_rom_flashchip.sector_size).",
                        type=int,
                        default=4096)

    parser.add_argument('--obj-name-len',
                        help='File full path maximum length. Set to the same value as CONFIG_SPIFFS_OBJ_NAME_LEN.',
                        type=int,
                        default=32)

    parser.add_argument('--meta-len',
                        help='File metadata length. Set to the same value as CONFIG_SPIFFS_META_LENGTH.',
                        type=int,
                        default=4)

    parser.add_argument('--use-magic',
                        dest='use_magic',
                        help='Use magic number to create an identifiable SPIFFS image. Specify if CONFIG_SPIFFS_USE_MAGIC.',
                        action='store_true')

    parser.add_argument('--no-magic',
                        dest='use_magic',
                        help='Inverse of --use-magic (default: --use-magic is enabled)',
                        action='store_false')

    parser.add_argument('--use-magic-len',
                        dest='use_magic_len',
                        help='Use position in memory to create different magic numbers for each block. Specify if CONFIG_SPIFFS_USE_MAGIC_LENGTH.',
                        action='store_true')

    parser.add_argument('--no-magic-len',
                        dest='use_magic_len',
                        help='Inverse of --use-magic-len (default: --use-magic-len is enabled)',
                        action='store_false')

    parser.add_argument('--follow-symlinks',
                        help='Take into account symbolic links during partition image creation.',
                        action='store_true')

    parser.add_argument('--big-endian',
                        help='Specify if the target architecture is big-endian. If not specified, little-endian is assumed.',
                        action='store_true')

    parser.add_argument('--aligned-obj-ix-tables',
                        action='store_true',
                        help='Use aligned object index tables. Specify if SPIFFS_ALIGNED_OBJECT_INDEX_TABLES is set.')

    parser.set_defaults(use_magic=True, use_magic_len=True)

    args = parser.parse_args()

    if not os.path.exists(args.base_dir):
        raise RuntimeError('given base directory %s does not exist' % args.base_dir)

    with open(args.output_file, 'wb') as image_file:
        image_size = int(args.image_size, 0)
        spiffs_build_default = SpiffsBuildConfig(args.page_size, SPIFFS_PAGE_IX_LEN,
                                                 args.block_size, SPIFFS_BLOCK_IX_LEN, args.meta_len,
                                                 args.obj_name_len, SPIFFS_OBJ_ID_LEN, SPIFFS_SPAN_IX_LEN,
                                                 True, True, 'big' if args.big_endian else 'little',
                                                 args.use_magic, args.use_magic_len, args.aligned_obj_ix_tables)

        spiffs = SpiffsFS(image_size, spiffs_build_default)

        for root, dirs, files in os.walk(args.base_dir, followlinks=args.follow_symlinks):
            for f in files:
                full_path = os.path.join(root, f)
                spiffs.create_file('/' + os.path.relpath(full_path, args.base_dir).replace('\\', '/'), full_path)

        image = spiffs.to_binary()

        image_file.write(image)


if __name__ == '__main__':
    main()
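
# Example invocation, assuming this script is saved as spiffsgen.py and the
# target SPIFFS partition is 1MB with the default page/block geometry:
#   python spiffsgen.py 0x100000 ./spiffs_dir spiffs.bin
# The resulting image is then flashed at the partition's offset, e.g. with
# esptool's write_flash command.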