Commit 7d13a33 — "Added txids to `BlockEntry` and optimized block parsing thread usage."
Parent: cb1054f

File tree

2 files changed: +30 −20 lines

src/new_index/fetch.rs

21 additions & 13 deletions

@@ -16,7 +16,7 @@ use std::thread;
 
 use electrs_macros::trace;
 
-use crate::chain::{Block, BlockHash};
+use crate::chain::{Block, BlockHash, Txid};
 use crate::daemon::Daemon;
 use crate::errors::*;
 use crate::util::{spawn_thread, HeaderEntry, SyncChannel};
@@ -45,6 +45,7 @@ pub struct BlockEntry {
     pub block: Block,
     pub entry: HeaderEntry,
     pub size: u32,
+    pub txids: Vec<Txid>,
 }
 
 type SizedBlock = (Block, u32);
@@ -106,10 +107,14 @@ fn bitcoind_fetcher(
         let block_entries: Vec<BlockEntry> = blocks
             .into_iter()
             .zip(entries)
-            .map(|(block, entry)| BlockEntry {
-                entry: entry.clone(), // TODO: remove this clone()
-                size: block.total_size() as u32,
-                block,
+            .map(|(block, entry)| {
+                let txids = block.txdata.iter().map(|tx| tx.compute_txid()).collect();
+                BlockEntry {
+                    entry: entry.clone(), // TODO: remove this clone()
+                    size: block.total_size() as u32,
+                    txids,
+                    block,
+                }
             })
             .collect();
         assert_eq!(block_entries.len(), entries.len());
@@ -156,7 +161,10 @@ fn blkfiles_fetcher(
             let blockhash = block.block_hash();
             entry_map
                 .remove(&blockhash)
-                .map(|entry| BlockEntry { block, entry, size })
+                .map(|entry| {
+                    let txids = block.txdata.iter().map(|tx| tx.compute_txid()).collect();
+                    BlockEntry { block, entry, size, txids }
+                })
                 .or_else(|| {
                     trace!("skipping block {}", blockhash);
                     None
@@ -224,9 +232,14 @@ fn blkfiles_parser(blobs: Fetcher<Vec<u8>>, magic: u32) -> Fetcher<Vec<SizedBlock>> {
     Fetcher::from(
         chan.into_receiver(),
         spawn_thread("blkfiles_parser", move || {
+            let pool = rayon::ThreadPoolBuilder::new()
+                .num_threads(0) // CPU-bound
+                .thread_name(|i| format!("parse-blocks-{}", i))
+                .build()
+                .unwrap();
             blobs.map(|blob| {
                 trace!("parsing {} bytes", blob.len());
-                let blocks = parse_blocks(blob, magic).expect("failed to parse blk*.dat file");
+                let blocks = parse_blocks(&pool, blob, magic).expect("failed to parse blk*.dat file");
                 sender
                     .send(blocks)
                     .expect("failed to send blocks from blk*.dat file");
@@ -236,7 +249,7 @@ fn blkfiles_parser(blobs: Fetcher<Vec<u8>>, magic: u32) -> Fetcher<Vec<SizedBlock>> {
 }
 
 #[trace]
-fn parse_blocks(blob: Vec<u8>, magic: u32) -> Result<Vec<SizedBlock>> {
+fn parse_blocks(pool: &rayon::ThreadPool, blob: Vec<u8>, magic: u32) -> Result<Vec<SizedBlock>> {
     let mut cursor = Cursor::new(&blob);
     let mut slices = vec![];
     let max_pos = blob.len() as u64;
@@ -273,11 +286,6 @@ fn parse_blocks(blob: Vec<u8>, magic: u32) -> Result<Vec<SizedBlock>> {
         cursor.set_position(end as u64);
     }
 
-    let pool = rayon::ThreadPoolBuilder::new()
-        .num_threads(0) // CPU-bound
-        .thread_name(|i| format!("parse-blocks-{}", i))
-        .build()
-        .unwrap();
     Ok(pool.install(|| {
         slices
             .into_par_iter()

src/new_index/schema.rs

9 additions & 7 deletions

@@ -1174,13 +1174,12 @@ fn add_blocks(block_entries: &[BlockEntry], iconfig: &IndexerConfig) -> Vec<DBRow> {
         .map(|b| {
             let mut rows = vec![];
             let blockhash = full_hash(&b.entry.hash()[..]);
-            let txids: Vec<Txid> = b.block.txdata.iter().map(|tx| tx.compute_txid()).collect();
-            for (tx, txid) in b.block.txdata.iter().zip(txids.iter()) {
+            for (tx, txid) in b.block.txdata.iter().zip(b.txids.iter()) {
                 add_transaction(*txid, tx, &mut rows, iconfig);
             }
 
             if !iconfig.light_mode {
-                rows.push(BlockRow::new_txids(blockhash, &txids).into_row());
+                rows.push(BlockRow::new_txids(blockhash, &b.txids).into_row());
                 rows.push(BlockRow::new_meta(blockhash, &BlockMeta::from(b)).into_row());
             }
 
@@ -1271,9 +1270,10 @@ fn index_blocks(
         .par_iter() // serialization is CPU-intensive
         .map(|b| {
             let mut rows = vec![];
-            for tx in &b.block.txdata {
-                let height = b.entry.height() as u32;
-                index_transaction(tx, height, previous_txos_map, &mut rows, iconfig);
+            let height = b.entry.height() as u32;
+            for (tx, txid) in b.block.txdata.iter().zip(b.txids.iter()) {
+                let txid = full_hash(&txid[..]);
+                index_transaction(tx, txid, height, previous_txos_map, &mut rows, iconfig);
             }
             rows.push(BlockRow::new_done(full_hash(&b.entry.hash()[..])).into_row()); // mark block as "indexed"
             rows
@@ -1285,12 +1285,12 @@ fn index_blocks(
 
 // TODO: return an iterator?
 fn index_transaction(
     tx: &Transaction,
+    txid: FullHash,
     confirmed_height: u32,
     previous_txos_map: &HashMap<OutPoint, TxOut>,
     rows: &mut Vec<DBRow>,
     iconfig: &IndexerConfig,
 ) {
-    let txid = full_hash(&tx.compute_txid()[..]);
 
     // persist tx confirmation row:
     // C{txid} → "{block_height}"
@@ -1892,7 +1892,9 @@ pub mod bench {
         let height = 702861;
         let hash = block.block_hash();
         let header = block.header.clone();
+        let txids = block.txdata.iter().map(|tx| tx.compute_txid()).collect();
         let block_entry = BlockEntry {
+            txids,
             block,
             entry: HeaderEntry::new(height, hash, header),
             size: 0u32, // wrong but not needed for benching

Comments (0)