 #include <atomic>
 #include <mutex>
 #include <chrono>
+#include <queue>
+#include <condition_variable>
 
 using namespace BinaryNinja;
 
@@ -139,6 +141,12 @@ extern "C"
             }).detach();
         });
 
+        struct PatchInfo {
+            Ref<Architecture> arch;
+            uint64_t address;
+            bool alwaysBranch;
+        };
+
         auto processFunctionBatch = [](Ref<BinaryView> viewRef,
             const std::vector<Ref<Function>>& funcBatch,
             int maxPassesPerFunction,
@@ -153,18 +161,28 @@ extern "C"
                     break;
 
                 auto mlil = func->GetMediumLevelIL();
-                if (!mlil || mlil->GetInstructionCount() == 0)
+                if (!mlil || mlil->GetInstructionCount() == 0) {
+                    processedFunctions.fetch_add(1);
                     continue;
+                }
 
                 auto arch = func->GetArchitecture();
-                if (!arch)
+                if (!arch) {
+                    processedFunctions.fetch_add(1);
                     continue;
+                }
+
+                size_t instrCount = mlil->GetInstructionCount();
+                // if (instrCount > 10000) {
+                //     std::string funcName = func->GetSymbol() ? func->GetSymbol()->GetShortName() : "sub_" + std::to_string(func->GetStart());
+                //     LogInfo("Processing large function %s with %zu instructions", funcName.c_str(), instrCount);
+                // }
 
                 int funcPatches = 0;
                 int pass = 1;
 
                 while (pass <= maxPassesPerFunction) {
-                    int passPatchCount = 0;
+                    std::vector<PatchInfo> pendingPatches;
 
                     for (size_t i = 0; i < mlil->GetInstructionCount(); ++i) {
                         auto instr = mlil->GetInstruction(i);
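
Note on this hunk: the two early-continue paths now bump processedFunctions before skipping a function. That matters because the progress loop later in the file appears to wait for processedFunctions to reach the function total, so a function skipped for missing MLIL or a null architecture must still be counted, or that loop would never see completion. A minimal sketch of the invariant, with hypothetical stand-ins (isUsable, doWork) for the plugin's checks and patching passes:

    #include <atomic>
    #include <cstddef>

    bool isUsable(std::size_t i) { return i % 2 == 0; }  // stand-in for the mlil/arch checks
    void doWork(std::size_t) {}                          // stand-in for the patching passes

    // Every exit path out of the loop counts the item exactly once, so a
    // monitor waiting for processed == total always terminates.
    void processAll(std::size_t total, std::atomic<std::size_t>& processed)
    {
        for (std::size_t i = 0; i < total; ++i) {
            if (!isUsable(i)) {
                processed.fetch_add(1);  // count the skip, as the diff now does
                continue;
            }
            doWork(i);
            processed.fetch_add(1);
        }
    }
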
@@ -175,34 +193,33 @@ extern "C"
                         if (val.state == BNRegisterValueType::ConstantValue) {
                             if (val.value == 0) {
                                 if (viewRef->IsNeverBranchPatchAvailable(arch, instr.address)) {
-                                    {
-                                        std::lock_guard<std::mutex> lock(updateMutex);
-                                        viewRef->ConvertToNop(arch, instr.address);
-                                    }
-                                    passPatchCount++;
+                                    pendingPatches.push_back({arch, instr.address, false});
                                 }
                             }
                             else {
                                 if (viewRef->IsAlwaysBranchPatchAvailable(arch, instr.address)) {
-                                    {
-                                        std::lock_guard<std::mutex> lock(updateMutex);
-                                        viewRef->AlwaysBranch(arch, instr.address);
-                                    }
-                                    passPatchCount++;
+                                    pendingPatches.push_back({arch, instr.address, true});
                                 }
                             }
                         }
                     }
 
-                    funcPatches += passPatchCount;
-
-                    if (passPatchCount == 0)
+                    if (pendingPatches.empty())
                         break;
-
+
                     {
                         std::lock_guard<std::mutex> lock(updateMutex);
+                        for (const auto& patch : pendingPatches) {
+                            if (patch.alwaysBranch) {
+                                viewRef->AlwaysBranch(patch.arch, patch.address);
+                            } else {
+                                viewRef->ConvertToNop(patch.arch, patch.address);
+                            }
+                        }
                         viewRef->UpdateAnalysis();
                     }
+
+                    funcPatches += pendingPatches.size();
                     pass++;
                 }
 
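
Note on this hunk: per-patch locking (acquire updateMutex, patch one site, release, repeat) is replaced with collect-then-apply. Patch sites found while scanning the MLIL go into pendingPatches, and the whole batch is applied in a single critical section with one UpdateAnalysis() per pass instead of one per patch. A standalone sketch of the pattern, with hypothetical stand-ins (Patch, apply, updateAnalysis) for the Binary Ninja calls:

    #include <cstddef>
    #include <cstdint>
    #include <mutex>
    #include <vector>

    struct Patch {  // mirrors the diff's PatchInfo
        uint64_t address;
        bool alwaysBranch;
    };

    template <typename ApplyFn, typename UpdateFn>
    std::size_t applyBatch(const std::vector<Patch>& pending, std::mutex& updateMutex,
                           ApplyFn apply, UpdateFn updateAnalysis)
    {
        if (pending.empty())
            return 0;                                   // caller breaks out of the pass loop
        std::lock_guard<std::mutex> lock(updateMutex);  // one acquisition per pass, not per patch
        for (const auto& p : pending)
            apply(p);                                   // AlwaysBranch / ConvertToNop in the real code
        updateAnalysis();                               // batched: analysis reruns once per pass
        return pending.size();
    }

With several worker threads patching one BinaryView, this trades slightly longer lock hold times for far fewer acquisitions, and batching the analysis update avoids recomputing after every individual patch.
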
@@ -255,24 +272,53 @@ extern "C"
         std::atomic<size_t> processedFunctions(0);
         std::mutex updateMutex;
 
-        std::vector<std::thread> threads;
-        size_t functionsPerThread = (totalFuncs + threadCount - 1) / threadCount;
+        std::queue<Ref<Function>> workQueue;
+        std::mutex queueMutex;
+        std::condition_variable cv;
+        std::atomic<bool> workDone(false);
 
-        for (int tid = 0; tid < threadCount; tid++) {
-            size_t startIdx = tid * functionsPerThread;
-            size_t endIdx = std::min(startIdx + functionsPerThread, totalFuncs);
-
-            if (startIdx >= totalFuncs)
-                break;
+        for (auto& func : functions) {
+            workQueue.push(func);
+        }
+
+        auto worker = [&]() {
+            while (true) {
+                std::vector<Ref<Function>> localBatch;
+
+                {
+                    std::unique_lock<std::mutex> lock(queueMutex);
+
+                    cv.wait(lock, [&] { return !workQueue.empty() || workDone.load() || shouldCancel.load(); });
+
+                    if ((workDone.load() && workQueue.empty()) || shouldCancel.load())
+                        break;
+
+                    size_t remaining = workQueue.size();
+                    size_t batchSize = 1;
+                    if (remaining > 100) {
+                        batchSize = 5;
+                    } else if (remaining > 50) {
+                        batchSize = 3;
+                    } else if (remaining > 20) {
+                        batchSize = 2;
+                    }
+
+                    for (size_t i = 0; i < batchSize && !workQueue.empty(); ++i) {
+                        localBatch.push_back(workQueue.front());
+                        workQueue.pop();
+                    }
+                }
 
-            std::vector<Ref<Function>> funcBatch;
-            for (size_t i = startIdx; i < endIdx; i++) {
-                funcBatch.push_back(functions[i]);
+                if (!localBatch.empty()) {
+                    processFunctionBatch(viewRef, localBatch, maxPassesPerFunction,
+                        globalPatchCount, shouldCancel, updateMutex, processedFunctions);
+                }
             }
-
-            threads.emplace_back(processFunctionBatch, viewRef, funcBatch, maxPassesPerFunction,
-                std::ref(globalPatchCount), std::ref(shouldCancel),
-                std::ref(updateMutex), std::ref(processedFunctions));
+        };
+
+        std::vector<std::thread> threads;
+        for (int i = 0; i < threadCount; ++i) {
+            threads.emplace_back(worker);
         }
 
         size_t lastProcessed = 0;
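
Note on this hunk: the old static partitioning handed each thread totalFuncs / threadCount functions up front, so a thread that drew cheap functions went idle while others churned. The new shared queue lets finished threads keep pulling work, and the batch size shrinks as the queue drains so the last few functions stay spread across all threads. A compilable standalone sketch of the same pattern, with Item standing in for Ref<Function> and the work reduced to a counter:

    #include <atomic>
    #include <condition_variable>
    #include <cstddef>
    #include <cstdio>
    #include <mutex>
    #include <queue>
    #include <thread>
    #include <vector>

    using Item = int;  // stands in for Ref<Function>

    int main()
    {
        std::queue<Item> workQueue;
        std::mutex queueMutex;
        std::condition_variable cv;
        std::atomic<bool> workDone(false);
        std::atomic<int> total(0);

        for (int i = 0; i < 200; ++i)
            workQueue.push(i);

        auto worker = [&]() {
            while (true) {
                std::vector<Item> localBatch;
                {
                    std::unique_lock<std::mutex> lock(queueMutex);
                    cv.wait(lock, [&] { return !workQueue.empty() || workDone.load(); });
                    if (workDone.load() && workQueue.empty())
                        break;
                    // Bigger bites while the queue is full, single items near
                    // the end so the tail spreads across all threads.
                    std::size_t batchSize = workQueue.size() > 100 ? 5
                                          : workQueue.size() > 20  ? 2 : 1;
                    for (std::size_t i = 0; i < batchSize && !workQueue.empty(); ++i) {
                        localBatch.push_back(workQueue.front());
                        workQueue.pop();
                    }
                }
                total.fetch_add(static_cast<int>(localBatch.size()));  // real work goes here
            }
        };

        std::vector<std::thread> threads;
        for (int i = 0; i < 4; ++i)
            threads.emplace_back(worker);

        {
            std::lock_guard<std::mutex> lock(queueMutex);
            workDone.store(true);  // under the lock: see the note after the last hunk
        }
        cv.notify_all();

        for (auto& t : threads)
            t.join();
        std::printf("processed %d items\n", total.load());
    }
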
@@ -299,6 +345,12 @@ extern "C"
             std::this_thread::sleep_for(std::chrono::milliseconds(100));
         }
 
+        {
+            std::lock_guard<std::mutex> lock(queueMutex);
+            workDone.store(true);
+        }
+        cv.notify_all();
+
         for (auto& t : threads) {
             if (t.joinable())
                 t.join();
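
A final note on the shutdown sequence above: workDone is atomic, yet the store still happens while holding queueMutex, and that is the load-bearing part. cv.wait re-checks its predicate under the lock, but if the flag were flipped without the mutex, the store plus notify_all could land in the window after a worker evaluates the predicate (finding it false) and before the wait call actually blocks; that worker would then sleep through the only notification. Schematically:

    // worker thread (holds queueMutex)        // main thread (takes no lock)
    // predicate() -> false
    //                                         workDone = true;
    //                                         cv.notify_all();  // nobody is blocked yet
    // wait() unlocks and blocks -> the notify was missed, and none will follow

Storing the flag under queueMutex makes it mutually exclusive with the predicate check, so the notification can no longer fall into that gap before the join loop runs.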