#!/usr/bin/env python
import bz2
import lzma
import struct
import sys
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from multiprocessing import cpu_count

import zstandard

import update_metadata_pb2 as um

flatten = lambda l: [item for sublist in l for item in sublist]


def u32(x):
    return struct.unpack(">I", x)[0]


def u64(x):
    return struct.unpack(">Q", x)[0]
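

# Payload layout, as parsed by validate_magic below: a 4-byte "CrAU" magic,
# a u64 file format version, a u64 manifest size and (for format >= 2) a u32
# metadata signature size, followed by the serialized DeltaArchiveManifest,
# the metadata signature, and finally the blobs of operation data.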
class Dumper:
    def __init__(
        self, payloadfile, out, diff=None, old=None, images="", workers=cpu_count(), buffsize=8192
    ):
        self.payloadpath = payloadfile
        payloadfile = self.open_payloadfile()
        self.payloadfile = payloadfile
        self.tls = threading.local()
        self.out = out
        self.diff = diff
        self.old = old
        self.images = images
        self.workers = workers
        self.buffsize = buffsize
        self.validate_magic()

    def open_payloadfile(self):
        return open(self.payloadpath, 'rb')
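
    # run() resolves each operation's absolute offset in the payload up front,
    # closes the shared handle, then extracts the selected partitions either
    # sequentially (slow=True) or on a pool of worker threads.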
    def run(self, slow=False) -> bool:
        if self.images == "":
            partitions = self.dam.partitions
        else:
            partitions = []
            for image in self.images:
                found = False
                for dam_part in self.dam.partitions:
                    if dam_part.partition_name == image:
                        partitions.append(dam_part)
                        found = True
                        break
                if not found:
                    print(f"Partition {image} not found in payload")

        if len(partitions) == 0:
            print("Not operating on any partitions")
            return False

        partitions_with_ops = []
        for partition in partitions:
            operations = []
            for operation in partition.operations:
                self.payloadfile.seek(self.data_offset + operation.data_offset)
                operations.append(
                    {
                        "data_offset": self.payloadfile.tell(),
                        "operation": operation,
                        "data_length": operation.data_length,
                    }
                )
            partitions_with_ops.append(
                {
                    "partition": partition,
                    "operations": operations,
                }
            )
        self.payloadfile.close()

        if slow:
            self.extract_slow(partitions_with_ops)
        else:
            self.multiprocess_partitions(partitions_with_ops)
        return True

    def extract_slow(self, partitions):
        for part in partitions:
            self.dump_part(part)
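
    # Despite the name, this runs on a thread pool rather than on processes;
    # the bz2/lzma/zstandard decompressors release the GIL while doing the
    # C-level work, so worker threads still overlap I/O and decompression.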
    def multiprocess_partitions(self, partitions):
        with ThreadPoolExecutor(max_workers=self.workers) as executor:
            futures = {executor.submit(self.dump_part, part): part for part in partitions}
            for future in as_completed(futures):
                partition_name = futures[future]['partition'].partition_name
                future.result()
                print(f"{partition_name} Done!")

    def validate_magic(self):
        magic = self.payloadfile.read(4)
        assert magic == b"CrAU"
        file_format_version = u64(self.payloadfile.read(8))
        assert file_format_version == 2
        manifest_size = u64(self.payloadfile.read(8))
        metadata_signature_size = 0
        if file_format_version > 1:
            metadata_signature_size = u32(self.payloadfile.read(4))
        manifest = self.payloadfile.read(manifest_size)
        self.metadata_signature = self.payloadfile.read(metadata_signature_size)
        self.data_offset = self.payloadfile.tell()
        self.dam = um.DeltaArchiveManifest()
        self.dam.ParseFromString(manifest)
        self.block_size = self.dam.block_size
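
    # data_for_op applies one install operation. Full-OTA payloads use the
    # REPLACE family (raw or bz2/xz/zstd-compressed blobs written to the
    # partition's dst_extents) and ZERO; differential payloads additionally
    # use SOURCE_COPY, which copies src_extents over from the old image.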
    def data_for_op(self, operation, out_file, old_file):
        payloadfile = self.tls.payloadfile
        payloadfile.seek(operation["data_offset"])
        buffsize = self.buffsize
        processed_len = 0
        data_length = operation["data_length"]
        op = operation["operation"]
        # assert hashlib.sha256(data).digest() == op.data_sha256_hash, 'operation data hash mismatch'
        op_type = op.type
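        # The blob may not actually be a zstd frame even when the operation
        # says REPLACE_ZSTD; sniff the 4-byte frame magic and fall back to a
        # plain REPLACE when it is absent, rewinding either way so the branch
        # below reads the blob from its start.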
        if op.type == op.REPLACE_ZSTD:
            if payloadfile.read(4) != b'(\xb5/\xfd':
                op_type = op.REPLACE
            payloadfile.seek(payloadfile.tell() - 4)

        if op_type == op.REPLACE_ZSTD:
            dec = zstandard.ZstdDecompressor().decompressobj()
            # Seek to the destination extent, as the other REPLACE branches do.
            out_file.seek(op.dst_extents[0].start_block * self.block_size)
            while processed_len < data_length:
                data = payloadfile.read(buffsize)
                processed_len += len(data)
                data = dec.decompress(data)
                out_file.write(data)
            out_file.write(dec.flush())
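        # The xz and bz2 paths stream through an incremental decompressor in
        # buffsize chunks; the inner loop drains output produced beyond
        # max_length before feeding the next chunk of input.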
        elif op_type == op.REPLACE_XZ:
            dec = lzma.LZMADecompressor()
            out_file.seek(op.dst_extents[0].start_block * self.block_size)
            while processed_len < data_length:
                data = payloadfile.read(buffsize)
                processed_len += len(data)
                while True:
                    data = dec.decompress(data, max_length=buffsize)
                    out_file.write(data)
                    if dec.needs_input or dec.eof:
                        break
                    data = b''
        elif op_type == op.REPLACE_BZ:
            dec = bz2.BZ2Decompressor()
            out_file.seek(op.dst_extents[0].start_block * self.block_size)
            while processed_len < data_length:
                data = payloadfile.read(buffsize)
                processed_len += len(data)
                while True:
                    data = dec.decompress(data, max_length=buffsize)
                    out_file.write(data)
                    if dec.needs_input or dec.eof:
                        break
                    data = b''
        elif op_type == op.REPLACE:
            out_file.seek(op.dst_extents[0].start_block * self.block_size)
            while processed_len < data_length:
                data = payloadfile.read(buffsize)
                processed_len += len(data)
                out_file.write(data)
        elif op_type == op.SOURCE_COPY:
            if not self.diff:
                print("SOURCE_COPY supported only for differential OTA")
                sys.exit(-2)
            out_file.seek(op.dst_extents[0].start_block * self.block_size)
            for ext in op.src_extents:
                old_file.seek(ext.start_block * self.block_size)
                data_length = ext.num_blocks * self.block_size
                while processed_len < data_length:
                    data = old_file.read(buffsize)
                    processed_len += len(data)
                    out_file.write(data)
                processed_len = 0
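        # ZERO extents are filled with null bytes: bytes(n) allocates an
        # n-byte zero-filled buffer.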
        elif op_type == op.ZERO:
            for ext in op.dst_extents:
                out_file.seek(ext.start_block * self.block_size)
                data_length = ext.num_blocks * self.block_size
                while processed_len < data_length:
                    data = bytes(min(data_length - processed_len, buffsize))
                    out_file.write(data)
                    processed_len += len(data)
                processed_len = 0
        else:
            print(f"Unsupported type = {op.type:d}")
            sys.exit(-1)

        del data

    def dump_part(self, part):
        name = part["partition"].partition_name
        out_file = open(f"{self.out}/{name}.img", "wb")
        if self.diff:
            old_file = open(f"{self.old}/{name}.img", "rb")
        else:
            old_file = None
        # Give each worker thread its own payload handle so concurrent
        # seek/read pairs do not interfere with one another.
        with self.open_payloadfile() as payloadfile:
            self.tls.payloadfile = payloadfile
            self.do_ops_for_part(part, out_file, old_file)
        out_file.close()
        if old_file:
            old_file.close()

    def do_ops_for_part(self, part, out_file, old_file):
        for op in part["operations"]:
            self.data_for_op(op, out_file, old_file)
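

# Minimal usage sketch (hypothetical file and directory names; the project's
# CLI wrapper normally builds the Dumper from command-line arguments):
#
#     dumper = Dumper("payload.bin", "out")                   # all partitions
#     dumper.run()
#
#     dumper = Dumper("payload.bin", "out", images=["boot"])  # one partition
#     dumper.run(slow=True)                                   # single-threaded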