Skip to content
This repository was archived by the owner on Jun 21, 2023. It is now read-only.
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .dockerignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
data
node_modules
1 change: 1 addition & 0 deletions .nvmrc
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
15
33 changes: 33 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# Image for running the fork-off-substrate tooling: Ubuntu base with
# subwasm (runtime inspection), Node.js 16, and the project sources.
FROM ubuntu:20.04

# subwasm release to install; override with --build-arg SUBWASM_VERSION=...
ARG SUBWASM_VERSION=0.14.1

RUN apt-get update && apt-get upgrade -yq
RUN apt-get install -y wget curl bsdmainutils iputils-ping && \
    apt-get -y autoclean

# Install subwasm from its published .deb and verify it runs.
RUN wget https://github.com/chevdor/subwasm/releases/download/v${SUBWASM_VERSION}/subwasm_linux_amd64_v${SUBWASM_VERSION}.deb -O subwasm.deb && \
    dpkg -i subwasm.deb && \
    subwasm --version && \
    rm subwasm.deb

# Register the NodeSource repository for Node.js 16.x.
RUN curl -sL https://deb.nodesource.com/setup_16.x | bash
# FIX: use `apt-get install -y` (was `apt install nodejs`) so the build
# cannot stall on an interactive confirmation prompt.
RUN apt-get install -y nodejs && npm install -g npm
RUN node -v; npm -v;

# Unprivileged user and the data directory exposed as a volume below.
RUN useradd -m -u 1000 -U -s /bin/sh forker && \
    mkdir -p /data && \
    chown -R forker:forker /data

COPY . .
RUN npm install

# USER forker

# FIX: was `http://localhost::9933` (double colon) — an invalid URL that
# contradicts the documented default of http://localhost:9933.
ENV HTTP_RPC_ENDPOINT=http://localhost:9933
ENV FORK_CHUNKS_LEVEL=1
ENV QUICK_MODE=false

VOLUME [ "/data" ]

ENTRYPOINT [ "./scripts/docker-start.sh" ]
27 changes: 27 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -51,10 +51,37 @@ This script allows bootstrapping a new substrate chain with the current state of
./binary --chain fork.json --alice --sealing 1000
```

## Configuration

The script can be tweaked and configured using various environment variables -

| Environment Variable | Effects | Default value |
| --- | --- | --- |
| HTTP_RPC_ENDPOINT | HTTP RPC endpoint that should be used to query state | http://localhost:9933 |
| FORK_CHUNKS_LEVEL | Determines how many chunks to split the RPC download in. Effect is exponential, recommended value for most is 1. You can try 0 for small chains and 2 for large chains for potential speed improvements | 1 |
| ORIG_CHAIN | Chain to use as the original chain. | `$default_of_the_binary` |
| FORK_CHAIN | Chain to use as base for the forked chain. | `dev` |
| ALICE | If set, the script will replace the chain's sudo account with `//Alice` | `NULL` |
| QUICK_MODE | If set, it parallelizes the data download from the RPC endpoint | `NULL` |

## Read more

If you would like to understand how this script works, please read this [blog post](https://mudit.blog/fork-substrate-blockchain/)

## Using Docker

### Build the image

docker build -t fork-off-substrate .

### Run

BINARY=/full/path/to/your/linux/binary
HTTP_RPC_ENDPOINT=http://localhost:9933
docker run --rm -it \
-e HTTP_RPC_ENDPOINT=$HTTP_RPC_ENDPOINT \
-v "$BINARY":/data/binary \
fork-off-substrate

## Credits

Expand Down
48 changes: 37 additions & 11 deletions index.js
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,10 @@ const provider = new HttpProvider(process.env.HTTP_RPC_ENDPOINT || 'http://local
const chunksLevel = process.env.FORK_CHUNKS_LEVEL || 1;
const totalChunks = Math.pow(256, chunksLevel);

const alice = process.env.ALICE || ''
const originalChain = process.env.ORIG_CHAIN || '';
const forkChain = process.env.FORK_CHAIN || '';

let chunksFetched = 0;
let separator = false;
const progressBar = new cliProgress.SingleBar({}, cliProgress.Presets.shades_classic);
Expand All @@ -44,7 +48,16 @@ const progressBar = new cliProgress.SingleBar({}, cliProgress.Presets.shades_cla
let prefixes = ['0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9' /* System.Account */];
const skippedModulesPrefix = ['System', 'Session', 'Babe', 'Grandpa', 'GrandpaFinality', 'FinalityTracker', 'Authorship'];

async function main(argv) {
// Strip per-session parachain scheduler state from the forked spec so the
// fork does not inherit stale scheduling data from the original chain.
// NOTE(review): "Parachin" looks like a typo for "Parachain"; the name is
// kept unchanged because callers reference it by this name.
//
// @param {object} api        - connected polkadot-js ApiPromise
// @param {object} forkedSpec - parsed raw chain spec; mutated in place
async function fixParachinStates (api, forkedSpec) {
  // FIX: guard with optional chaining — on chains without the
  // `parasScheduler` pallet the original unconditional access threw a
  // TypeError, which (since the caller does not await this function)
  // surfaced as an unhandled promise rejection.
  const skippedKeys = [
    api.query.parasScheduler?.sessionStartBlock?.key()
  ];
  for (const k of skippedKeys) {
    if (k !== undefined) {
      delete forkedSpec.genesis.raw.top[k];
    }
  }
}

async function main() {
if (!fs.existsSync(binaryPath)) {
console.log(chalk.red('Binary missing. Please copy the binary of your substrate node to the data folder and rename the binary to "binary"'));
process.exit(1);
Expand Down Expand Up @@ -76,13 +89,11 @@ async function main(argv) {
} else {
// Download state of original chain
console.log(chalk.green('Fetching current state of the live chain. Please wait, it can take a while depending on the size of your chain.'));
if (argv.block) {
console.log(chalk.yellow(`Using specified block: ${argv.block}`));
}
let at = (await api.rpc.chain.getBlockHash()).toString();
progressBar.start(totalChunks, 0);
const stream = fs.createWriteStream(storagePath, { flags: 'a' });
stream.write("[");
await fetchChunks("0x", argv.block, chunksLevel, stream);
await fetchChunks("0x", chunksLevel, stream, at);
stream.write("]");
stream.end();
progressBar.stop();
Expand All @@ -100,8 +111,16 @@ async function main(argv) {
});

// Generate chain spec for original and forked chains
execSync(`${binaryPath} build-spec --chain ${argv.chain} --raw > ${originalSpecPath}`);
execSync(`${binaryPath} build-spec --dev --raw > ${forkedSpecPath}`);
if (originalChain == '') {
execSync(binaryPath + ` build-spec --raw > ` + originalSpecPath);
} else {
execSync(binaryPath + ` build-spec --chain ${originalChain} --raw > ` + originalSpecPath);
}
if (forkChain == '') {
execSync(binaryPath + ` build-spec --dev --raw > ` + forkedSpecPath);
} else {
execSync(binaryPath + ` build-spec --chain ${forkChain} --raw > ` + forkedSpecPath);
}

let storage = JSON.parse(fs.readFileSync(storagePath, 'utf8'));
let originalSpec = JSON.parse(fs.readFileSync(originalSpecPath, 'utf8'));
Expand All @@ -120,6 +139,8 @@ async function main(argv) {
// Delete System.LastRuntimeUpgrade to ensure that the on_runtime_upgrade event is triggered
delete forkedSpec.genesis.raw.top['0x26aa394eea5630e07c48ae0c9558cef7f9cce9c888469bb1a0dceaa129672ef8'];

fixParachinStates(api, forkedSpec);

// Set the code to the current runtime code
forkedSpec.genesis.raw.top['0x3a636f6465'] = '0x' + fs.readFileSync(hexPath, 'utf8').trim();

Expand All @@ -136,6 +157,11 @@ async function main(argv) {
// Set eligibility to 0x64
forkedSpec.genesis.raw.top['0x76310ee24dbd609d21d08ad7292757d0e48df801946c7a0cc54f1a4e51592741'] = '0x64';

if (alice !== '') {
// Set sudo key to //Alice
forkedSpec.genesis.raw.top['0x5c0d1176a568c1f92944340dbfed9e9c530ebca703c85910e7164cb7d1c9e47b'] = '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d';
}

fs.writeFileSync(forkedSpecPath, JSON.stringify(forkedSpec, null, 4));

console.log('Forked genesis generated successfully. Find it at ./data/fork.json');
Expand All @@ -157,9 +183,9 @@ const argv = yargs(hideBin(process.argv))

main(argv);

async function fetchChunks(prefix, block, levelsRemaining, stream) {
async function fetchChunks(prefix, levelsRemaining, stream, at) {
if (levelsRemaining <= 0) {
const pairs = await provider.send('state_getPairs', [prefix, block]);
const pairs = await provider.send('state_getPairs', [prefix, at]);
if (pairs.length > 0) {
separator ? stream.write(",") : separator = true;
stream.write(JSON.stringify(pairs).slice(1, -1));
Expand All @@ -172,12 +198,12 @@ async function fetchChunks(prefix, block, levelsRemaining, stream) {
if (process.env.QUICK_MODE && levelsRemaining == 1) {
let promises = [];
for (let i = 0; i < 256; i++) {
promises.push(fetchChunks(prefix + i.toString(16).padStart(2, "0"), block, levelsRemaining - 1, stream));
promises.push(fetchChunks(prefix + i.toString(16).padStart(2, "0"), levelsRemaining - 1, stream, at));
}
await Promise.all(promises);
} else {
for (let i = 0; i < 256; i++) {
await fetchChunks(prefix + i.toString(16).padStart(2, "0"), block, levelsRemaining - 1, stream);
await fetchChunks(prefix + i.toString(16).padStart(2, "0"), levelsRemaining - 1, stream, at);
}
}
}
Loading