[DEVOPS-18]: Add map file parser, mariadb inserter (#2732)

Max Andreev 2023-06-05 14:25:43 +04:00 committed by GitHub
parent d9a9fa0c10
commit 72ad22bb91
3 changed files with 414 additions and 18 deletions

@@ -100,25 +100,31 @@ jobs:
           cp build/f7-firmware-*/firmware.elf map_analyser_files/firmware.elf
           cp ${{ github.event_path }} map_analyser_files/event.json
-      - name: 'Upload map analyser files to storage'
+      - name: 'Analyse map file'
         if: ${{ !github.event.pull_request.head.repo.fork }}
-        uses: prewk/s3-cp-action@v2
-        with:
-          aws_s3_endpoint: "${{ secrets.MAP_REPORT_AWS_ENDPOINT }}"
-          aws_access_key_id: "${{ secrets.MAP_REPORT_AWS_ACCESS_KEY }}"
-          aws_secret_access_key: "${{ secrets.MAP_REPORT_AWS_SECRET_KEY }}"
-          source: "./map_analyser_files/"
-          dest: "s3://${{ secrets.MAP_REPORT_AWS_BUCKET }}/${{steps.names.outputs.random_hash}}"
-          flags: "--recursive --acl public-read"
-      - name: 'Trigger map file reporter'
-        if: ${{ !github.event.pull_request.head.repo.fork }}
-        uses: peter-evans/repository-dispatch@v2
-        with:
-          repository: flipperdevices/flipper-map-reporter
-          token: ${{ secrets.REPOSITORY_DISPATCH_TOKEN }}
-          event-type: map-file-analyse
-          client-payload: '{"random_hash": "${{steps.names.outputs.random_hash}}", "event_type": "${{steps.names.outputs.event_type}}"}'
+        run: |
+          source scripts/toolchain/fbtenv.sh
+          get_size()
+          {
+            SECTION="$1";
+            arm-none-eabi-size \
+              -A map_analyser_files/firmware.elf \
+              | grep "^$SECTION" | awk '{print $2}'
+          }
+          export BSS_SIZE="$(get_size ".bss")"
+          export TEXT_SIZE="$(get_size ".text")"
+          export RODATA_SIZE="$(get_size ".rodata")"
+          export DATA_SIZE="$(get_size ".data")"
+          export FREE_FLASH_SIZE="$(get_size ".free_flash")"
+          python3 -m pip install mariadb==1.1.6 cxxfilt==0.3.0
+          python3 scripts/map_parser.py map_analyser_files/firmware.elf.map map_analyser_files/firmware.elf.map.all
+          python3 scripts/map_mariadb_insert.py \
+            ${{ secrets.AMAP_MARIADB_USER }} \
+            ${{ secrets.AMAP_MARIADB_PASSWORD }} \
+            ${{ secrets.AMAP_MARIADB_HOST }} \
+            ${{ secrets.AMAP_MARIADB_PORT }} \
+            ${{ secrets.AMAP_MARIADB_DATABASE }} \
+            map_analyser_files/firmware.elf.map.all
       - name: 'Upload artifacts to update server'
         if: ${{ !github.event.pull_request.head.repo.fork }}
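
A note on the new get_size helper: arm-none-eabi-size -A prints one row per ELF section, and the helper greps the row for the requested section and takes the second (size) column with awk. A hypothetical excerpt of that output, with made-up numbers:

map_analyser_files/firmware.elf  :
section              size        addr
.text              261584   134217728
.rodata             53248   134479312
.data                1024   536871936
.bss                 2560   536872960
.free_flash         12288   134742016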

scripts/map_mariadb_insert.py (new executable file, 139 lines)

@@ -0,0 +1,139 @@
#!/usr/bin/env python3
# Requirements:
# mariadb==1.1.6

from datetime import datetime
import argparse
import mariadb
import sys
import os


def parseArgs():
    parser = argparse.ArgumentParser()
    parser.add_argument("db_user", help="MariaDB user")
    parser.add_argument("db_pass", help="MariaDB password")
    parser.add_argument("db_host", help="MariaDB hostname")
    parser.add_argument("db_port", type=int, help="MariaDB port")
    parser.add_argument("db_name", help="MariaDB database")
    parser.add_argument("report_file", help="Report file (.map.all)")
    args = parser.parse_args()
    return args


def mariadbConnect(args):
    try:
        conn = mariadb.connect(
            user=args.db_user,
            password=args.db_pass,
            host=args.db_host,
            port=args.db_port,
            database=args.db_name,
        )
    except mariadb.Error as e:
        print(f"Error connecting to MariaDB: {e}")
        sys.exit(1)
    return conn


def parseEnv():
    # Build one `header` row; the *_SIZE variables are exported by the
    # 'Analyse map file' workflow step above, the others are expected to be
    # set elsewhere in the workflow.
    outArr = []
    outArr.append(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    outArr.append(os.getenv("COMMIT_HASH", default=None))
    outArr.append(os.getenv("COMMIT_MSG", default=None))
    outArr.append(os.getenv("BRANCH_NAME", default=None))
    outArr.append(os.getenv("BSS_SIZE", default=None))
    outArr.append(os.getenv("TEXT_SIZE", default=None))
    outArr.append(os.getenv("RODATA_SIZE", default=None))
    outArr.append(os.getenv("DATA_SIZE", default=None))
    outArr.append(os.getenv("FREE_FLASH_SIZE", default=None))
    outArr.append(os.getenv("PULL_ID", default=None))
    outArr.append(os.getenv("PULL_NAME", default=None))
    return outArr


def createTables(cur, conn):
    headerTable = "CREATE TABLE IF NOT EXISTS `header` ( \
        `id` int(10) unsigned NOT NULL AUTO_INCREMENT, \
        `datetime` datetime NOT NULL, \
        `commit` varchar(40) NOT NULL, \
        `commit_msg` text NOT NULL, \
        `branch_name` text NOT NULL, \
        `bss_size` int(10) unsigned NOT NULL, \
        `text_size` int(10) unsigned NOT NULL, \
        `rodata_size` int(10) unsigned NOT NULL, \
        `data_size` int(10) unsigned NOT NULL, \
        `free_flash_size` int(10) unsigned NOT NULL, \
        `pullrequest_id` int(10) unsigned DEFAULT NULL, \
        `pullrequest_name` text DEFAULT NULL, \
        PRIMARY KEY (`id`), \
        KEY `header_id_index` (`id`) )"
    dataTable = "CREATE TABLE IF NOT EXISTS `data` ( \
        `header_id` int(10) unsigned NOT NULL, \
        `id` int(10) unsigned NOT NULL AUTO_INCREMENT, \
        `section` text NOT NULL, \
        `address` text NOT NULL, \
        `size` int(10) unsigned NOT NULL, \
        `name` text NOT NULL, \
        `lib` text NOT NULL, \
        `obj_name` text NOT NULL, \
        PRIMARY KEY (`id`), \
        KEY `data_id_index` (`id`), \
        KEY `data_header_id_index` (`header_id`), \
        CONSTRAINT `data_header_id_foreign` FOREIGN KEY (`header_id`) REFERENCES `header` (`id`) )"
    cur.execute(headerTable)
    cur.execute(dataTable)
    conn.commit()


def insertHeader(data, cur, conn):
    query = "INSERT INTO `header` ( \
        datetime, commit, commit_msg, branch_name, bss_size, text_size, \
        rodata_size, data_size, free_flash_size, pullrequest_id, pullrequest_name) \
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
    cur.execute(query, data)
    conn.commit()
    return cur.lastrowid


def parseFile(fileObj, headerID):
    # Lines are tab-separated, as written by scripts/map_parser.py; the
    # subsection (index 1) and mangled-name (index 7) columns are skipped.
    arr = []
    fileLines = fileObj.readlines()
    for line in fileLines:
        lineArr = []
        tempLineArr = line.split("\t")
        lineArr.append(headerID)
        lineArr.append(tempLineArr[0])  # section
        lineArr.append(int(tempLineArr[2], 16))  # address hex
        lineArr.append(int(tempLineArr[3]))  # size
        lineArr.append(tempLineArr[4])  # name
        lineArr.append(tempLineArr[5])  # lib
        lineArr.append(tempLineArr[6])  # obj_name
        arr.append(tuple(lineArr))
    return arr
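
For reference, a hypothetical .map.all input line with made-up values; the eight tab-separated columns come from write_subsection in scripts/map_parser.py (section, subsection, address, size, name, lib, obj_name, mangled name):

# One input line, tabs rendered as \t:
#   .text\t.text\t80004c0\t32\tfuri_thread_yield\tbuild/furi.a\tthread.o\tfuri_thread_yield
# parseFile(reportFile, headerID=1) turns it into a row tuple for insertData:
#   (1, ".text", 134218944, 32, "furi_thread_yield", "build/furi.a", "thread.o")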
def insertData(data, cur, conn):
    query = "INSERT INTO `data` ( \
        header_id, section, address, size, \
        name, lib, obj_name) \
        VALUES (?, ?, ?, ?, ?, ?, ?)"
    cur.executemany(query, data)
    conn.commit()


def main():
    args = parseArgs()
    dbConn = mariadbConnect(args)
    reportFile = open(args.report_file)
    dbCurs = dbConn.cursor()
    createTables(dbCurs, dbConn)
    headerID = insertHeader(parseEnv(), dbCurs, dbConn)
    insertData(parseFile(reportFile, headerID), dbCurs, dbConn)
    reportFile.close()
    dbCurs.close()


if __name__ == "__main__":
    main()

scripts/map_parser.py (new executable file, 251 lines)

@@ -0,0 +1,251 @@
#!/usr/bin/env python3
# Requirements:
# cxxfilt==0.3.0

import sys
import re
import os
from typing import TextIO
from cxxfilt import demangle


class Objectfile:
    def __init__(self, section: str, offset: int, size: int, comment: str):
        self.section = section.strip()
        self.offset = offset
        self.size = size
        self.path = (None, None)
        self.basepath = None
        if comment:
            self.path = re.match(r"^(.+?)(?:\(([^\)]+)\))?$", comment).groups()
            self.basepath = os.path.basename(self.path[0])
        self.children = []

    def __repr__(self) -> str:
        return f"<Objectfile {self.section} {self.offset:x} {self.size:x} {self.path} {repr(self.children)}>"
def update_children_size(children: list[list], subsection_size: int) -> list:
    # set subsection size to an only child
    if len(children) == 1:
        children[0][1] = subsection_size
        return children
    rest_size = subsection_size
    for index in range(1, len(children)):
        if rest_size > 0:
            # current size = current address - previous child address
            child_size = children[index][0] - children[index - 1][0]
            rest_size -= child_size
            children[index - 1][1] = child_size
    # if there is rest size, set it to the last child element
    if rest_size > 0:
        children[-1][1] = rest_size
    return children
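
A minimal sketch of the size derivation with made-up offsets: each child's size is the gap to the next child's address, and whatever is left of the subsection size goes to the last child:

# children entries are [offset, size, name] with sizes still at 0
children = [[0x100, 0, "a"], [0x140, 0, "b"], [0x1a0, 0, "c"]]
update_children_size(children, subsection_size=0x100)
# sizes become 0x40 (0x140 - 0x100) for "a", 0x60 (0x1a0 - 0x140) for "b",
# and the remaining 0x60 goes to "c"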
def parse_sections(file_name: str) -> list:
    """
    Quick&Dirty parsing for GNU ld's linker map output, needs LANG=C, because
    some messages are localized.
    """
    sections = []
    with open(file_name, "r") as file:
        # skip until the memory map is found
        found = False
        while True:
            line = file.readline()
            if not line:
                break
            if line.strip() == "Memory Configuration":
                found = True
                break
        if not found:
            raise Exception(f"Memory configuration is not found in {file_name}")

        # long section names result in a linebreak afterwards
        sectionre = re.compile(
            "(?P<section>.+?|.{14,}\n)[ ]+0x(?P<offset>[0-9a-f]+)[ ]+0x(?P<size>[0-9a-f]+)(?:[ ]+(?P<comment>.+))?\n+",
            re.I,
        )
        subsectionre = re.compile(
            "[ ]{16}0x(?P<offset>[0-9a-f]+)[ ]+(?P<function>.+)\n+", re.I
        )
        s = file.read()
        pos = 0
        while True:
            m = sectionre.match(s, pos)
            if not m:
                # skip that line
                try:
                    nextpos = s.index("\n", pos) + 1
                    pos = nextpos
                    continue
                except ValueError:
                    break
            pos = m.end()
            section = m.group("section")
            v = m.group("offset")
            offset = int(v, 16) if v is not None else None
            v = m.group("size")
            size = int(v, 16) if v is not None else None
            comment = m.group("comment")
            if section != "*default*" and size > 0:
                of = Objectfile(section, offset, size, comment)
                if section.startswith(" "):
                    children = []
                    sections[-1].children.append(of)
                    while True:
                        m = subsectionre.match(s, pos)
                        if not m:
                            break
                        pos = m.end()
                        offset, function = m.groups()
                        offset = int(offset, 16)
                        if sections and sections[-1].children:
                            children.append([offset, 0, function])
                    if children:
                        children = update_children_size(
                            children=children, subsection_size=of.size
                        )
                    sections[-1].children[-1].children.extend(children)
                else:
                    sections.append(of)
    return sections
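
For reference, a hypothetical linker-map fragment of the shape these regexes expect (addresses, sizes, and paths are made up): a section line, a subsection whose long name forces a line break, and a 16-space-indented symbol line that subsectionre picks up:

.text           0x0000000008000540    0x3f9d0
 .text.furi_thread_yield
                0x0000000008000540       0x14 build/furi.a(thread.o)
                0x0000000008000540                furi_thread_yield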
def get_subsection_name(section_name: str, subsection: Objectfile) -> str:
    subsection_split_names = subsection.section.split(".")
    if subsection.section.startswith("."):
        subsection_split_names = subsection_split_names[1:]
    return (
        f".{subsection_split_names[1]}"
        if len(subsection_split_names) > 2
        else section_name
    )
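
A short sketch of the naming rule with hypothetical section names: the middle component is kept when the subsection name has more than two components, otherwise the parent section name is reused:

sub = Objectfile(".text.startup.main", 0, 0, None)
print(get_subsection_name(".text", sub))  # ".startup"
sub = Objectfile(".text.main", 0, 0, None)
print(get_subsection_name(".text", sub))  # ".text"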
def write_subsection(
    section_name: str,
    subsection_name: str,
    address: str,
    size: int,
    demangled_name: str,
    module_name: str,
    file_name: str,
    mangled_name: str,
    write_file_object: TextIO,
) -> None:
    write_file_object.write(
        f"{section_name}\t"
        f"{subsection_name}\t"
        f"{address}\t"
        f"{size}\t"
        f"{demangled_name}\t"
        f"{module_name}\t"
        f"{file_name}\t"
        f"{mangled_name}\n"
    )


def save_subsection(
    section_name: str, subsection: Objectfile, write_file_object: TextIO
) -> None:
    subsection_name = get_subsection_name(section_name, subsection)
    module_name = subsection.path[0]
    file_name = subsection.path[1]
    if not file_name:
        file_name, module_name = module_name, ""
    if not subsection.children:
        address = f"{subsection.offset:x}"
        size = subsection.size
        mangled_name = (
            ""
            if subsection.section == section_name
            else subsection.section.split(".")[-1]
        )
        demangled_name = demangle(mangled_name) if mangled_name else mangled_name
        write_subsection(
            section_name=section_name,
            subsection_name=subsection_name,
            address=address,
            size=size,
            demangled_name=demangled_name,
            module_name=module_name,
            file_name=file_name,
            mangled_name=mangled_name,
            write_file_object=write_file_object,
        )
        return
    for subsection_child in subsection.children:
        address = f"{subsection_child[0]:x}"
        size = subsection_child[1]
        mangled_name = subsection_child[2]
        demangled_name = demangle(mangled_name)
        write_subsection(
            section_name=section_name,
            subsection_name=subsection_name,
            address=address,
            size=size,
            demangled_name=demangled_name,
            module_name=module_name,
            file_name=file_name,
            mangled_name=mangled_name,
            write_file_object=write_file_object,
        )


def save_section(section: Objectfile, write_file_object: TextIO) -> None:
    section_name = section.section
    for subsection in section.children:
        save_subsection(
            section_name=section_name,
            subsection=subsection,
            write_file_object=write_file_object,
        )


def save_parsed_data(parsed_data: list[Objectfile], output_file_name: str) -> None:
    with open(output_file_name, "w") as write_file_object:
        for section in parsed_data:
            if section.children:
                save_section(section=section, write_file_object=write_file_object)


if __name__ == "__main__":
    if len(sys.argv) < 3:
        raise Exception(f"Usage: {sys.argv[0]} <input file> <output file>")
    input_file = sys.argv[1]
    output_file = sys.argv[2]
    parsed_sections = parse_sections(input_file)
    if parsed_sections is None:
        raise Exception(f"Memory configuration is not found in {input_file}")
    save_parsed_data(parsed_sections, output_file)