| repo (string, 1-191 chars, nullable ⌀) | file (string, 23-351 chars) | code (string, 0-5.32M chars) | file_length (int64, 0-5.32M) | avg_line_length (float64, 0-2.9k) | max_line_length (int64, 0-288k) | extension_type (string, 1 distinct value) |
|---|---|---|---|---|---|---|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/config/net/RopstenNetConfig.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.config.net;
import org.ethereum.config.blockchain.*;
/**
* Created by Anton Nashatyrev on 25.02.2016.
*/
public class RopstenNetConfig extends BaseNetConfig {
public RopstenNetConfig() {
add(0, new HomesteadConfig());
add(10, new RopstenConfig(new HomesteadConfig()));
add(1_700_000, new RopstenConfig(new ByzantiumConfig(new DaoHFConfig())));
add(4_230_000, new RopstenConfig(new ConstantinopleConfig(new DaoHFConfig())));
add(4_939_394, new RopstenConfig(new PetersburgConfig(new DaoHFConfig())));
}
}
| 1,368
| 38.114286
| 87
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/config/net/TestNetConfig.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.config.net;
import org.ethereum.config.blockchain.FrontierConfig;
import org.ethereum.config.blockchain.HomesteadConfig;
/**
* Created by Anton Nashatyrev on 25.02.2016.
*/
public class TestNetConfig extends BaseNetConfig {
public TestNetConfig() {
add(0, new FrontierConfig());
add(1_150_000, new HomesteadConfig());
}
}
| 1,161
| 35.3125
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/config/net/BaseNetConfig.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.config.net;
import org.ethereum.config.BlockchainConfig;
import org.ethereum.config.BlockchainNetConfig;
import org.ethereum.config.Constants;
import java.util.*;
/**
* Created by Anton Nashatyrev on 25.02.2016.
*/
public class BaseNetConfig implements BlockchainNetConfig {
private long[] blockNumbers = new long[64];
private BlockchainConfig[] configs = new BlockchainConfig[64];
private int count;
    public void add(long startBlockNumber, BlockchainConfig config) {
        if (count >= blockNumbers.length) throw new RuntimeException("Too many configs (max " + blockNumbers.length + ")");
        if (count > 0 && blockNumbers[count - 1] >= startBlockNumber)
            throw new RuntimeException("Block numbers should increase");
        if (count == 0 && startBlockNumber > 0) throw new RuntimeException("First config should start from block 0");
blockNumbers[count] = startBlockNumber;
configs[count] = config;
count++;
}
@Override
public BlockchainConfig getConfigForBlock(long blockNumber) {
for (int i = 0; i < count; i++) {
if (blockNumber < blockNumbers[i]) return configs[i - 1];
}
return configs[count - 1];
}
@Override
public Constants getCommonConstants() {
// TODO make a guard wrapper which throws exception if the requested constant differs among configs
return configs[0].getConstants();
}
@Override
public String toString() {
StringBuilder res = new StringBuilder()
.append("BaseNetConfig{")
.append("blockNumbers= ");
for (int i = 0; i < count; ++i) {
res.append("#").append(blockNumbers[i]).append(" => ");
res.append(configs[i]);
if (i != count - 1) {
res.append(", ");
}
}
res.append(" (total: ").append(count).append(")}");
return res.toString();
}
}
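// Usage sketch: a minimal illustration of how getConfigForBlock() resolves the active
// fork config, assuming the ethereumJ classes above are on the classpath. The class
// name below is illustrative only. TestNetConfig (same package, shown elsewhere in
// this dump) registers Frontier at block 0 and Homestead at block 1_150_000, so the
// printed class names follow directly from add() and the lookup loop above.
class BaseNetConfigUsageSketch {
    public static void main(String[] args) {
        BaseNetConfig net = new TestNetConfig();
        // Any block below 1_150_000 is still governed by the Frontier rules.
        System.out.println(net.getConfigForBlock(999_999).getClass().getSimpleName());   // FrontierConfig
        // From block 1_150_000 onward Homestead applies.
        System.out.println(net.getConfigForBlock(1_150_000).getClass().getSimpleName()); // HomesteadConfig
        // toString() lists the registered fork schedule.
        System.out.println(net);
    }
}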
| 2,711
| 34.220779
| 117
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/config/net/MordenNetConfig.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.config.net;
import org.ethereum.config.blockchain.Eip150HFConfig;
import org.ethereum.config.blockchain.Eip160HFConfig;
import org.ethereum.config.blockchain.MordenConfig;
/**
* Created by Anton Nashatyrev on 25.02.2016.
*/
public class MordenNetConfig extends BaseNetConfig {
public MordenNetConfig() {
add(0, new MordenConfig.Frontier());
add(494_000, new MordenConfig.Homestead());
add(1_783_000, new Eip150HFConfig(new MordenConfig.Homestead()));
add(1_885_000, new Eip160HFConfig(new MordenConfig.Homestead()));
}
}
| 1,378
| 36.27027
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/config/net/ETCNetConfig.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.config.net;
import org.ethereum.config.blockchain.*;
/**
* Created by Anton Nashatyrev on 25.02.2016.
*/
public class ETCNetConfig extends BaseNetConfig {
public static final ETCNetConfig INSTANCE = new ETCNetConfig();
public ETCNetConfig() {
add(0, new FrontierConfig());
add(1_150_000, new HomesteadConfig());
add(1_920_000, new DaoNoHFConfig());
add(2_500_000, new Eip150HFConfig(new DaoNoHFConfig()));
add(3_000_000, new ETCFork3M(new DaoNoHFConfig()));
}
}
| 1,330
| 35.972222
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/config/net/JsonNetConfig.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.config.net;
import com.google.common.base.MoreObjects;
import org.apache.commons.lang3.tuple.Pair;
import org.ethereum.config.BlockchainConfig;
import org.ethereum.config.blockchain.*;
import org.ethereum.core.genesis.GenesisConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
/**
* Convert JSON config from genesis to Java blockchain net config.
* Created by Stan Reshetnyk on 23.12.2016.
*/
public class JsonNetConfig extends BaseNetConfig {
private static Logger logger = LoggerFactory.getLogger("general");
final BlockchainConfig initialBlockConfig = new FrontierConfig();
    /**
     * All string keys are converted to lowercase before processing.
     *
     * The Homestead block defaults to 0 if not specified.
     * If a Homestead block is specified, Frontier is used for block 0.
     *
     * @param config genesis JSON config to convert
     */
public JsonNetConfig(GenesisConfig config) throws RuntimeException {
final List<Pair<Integer, ? extends BlockchainConfig>> candidates = new ArrayList<>();
{
if (logger.isDebugEnabled())
logger.debug("Rendering net config from genesis {} ...", config);
Pair<Integer, ? extends BlockchainConfig> lastCandidate = Pair.of(0, initialBlockConfig);
if (logger.isDebugEnabled())
logger.debug("Block #{} => Frontier", lastCandidate.getLeft());
candidates.add(lastCandidate);
// homestead block assumed to be 0 by default
lastCandidate = Pair.of(config.homesteadBlock == null ? 0 : config.homesteadBlock, new HomesteadConfig());
if (logger.isDebugEnabled())
logger.debug("Block #{} => Homestead", lastCandidate.getLeft());
candidates.add(lastCandidate);
if (config.daoForkBlock != null) {
AbstractDaoConfig daoConfig = config.daoForkSupport ?
new DaoHFConfig(lastCandidate.getRight(), config.daoForkBlock) :
new DaoNoHFConfig(lastCandidate.getRight(), config.daoForkBlock);
lastCandidate = Pair.of(config.daoForkBlock, daoConfig);
if (logger.isDebugEnabled())
logger.debug("Block #{} => DaoForkSupport", lastCandidate.getLeft());
candidates.add(lastCandidate);
}
if (config.eip150Block != null) {
lastCandidate = Pair.of(config.eip150Block, new Eip150HFConfig(lastCandidate.getRight()));
if (logger.isDebugEnabled())
logger.debug("Block #{} => EIP150", lastCandidate.getLeft());
candidates.add(lastCandidate);
}
if (config.eip155Block != null || config.eip158Block != null) {
int block;
StringBuilder logLine = new StringBuilder();
if (config.eip155Block != null) {
if (config.eip158Block != null && !config.eip155Block.equals(config.eip158Block)) {
throw new RuntimeException("Unable to build config with different blocks for EIP155 (" + config.eip155Block + ") and EIP158 (" + config.eip158Block + ")");
}
block = config.eip155Block;
if (logger.isDebugEnabled())
logLine.append("Block #").append(block).append(" => EIP155");
} else {
block = config.eip158Block;
if (logger.isDebugEnabled())
logLine.append("Block #").append(block).append(" => EIP158");
}
if (config.chainId != null) {
final int chainId = config.chainId;
if (logger.isDebugEnabled())
logLine.append(", chainId: ").append(chainId);
lastCandidate = Pair.of(block, new Eip160HFConfig(lastCandidate.getRight()) {
@Override
public Integer getChainId() {
return chainId;
}
@Override
public String toString() {
return Eip160HFConfig.class.getSimpleName();
}
});
} else {
lastCandidate = Pair.of(block, new Eip160HFConfig(lastCandidate.getRight()));
}
if (logger.isDebugEnabled())
logger.debug(logLine.toString());
candidates.add(lastCandidate);
}
if (config.byzantiumBlock != null) {
StringBuilder logLine = new StringBuilder();
if (logger.isDebugEnabled())
logLine.append("Block #").append(config.byzantiumBlock).append(" => Byzantium");
if (config.chainId != null) {
final int chainId = config.chainId;
if (logger.isDebugEnabled())
logLine.append(", chainId: ").append(chainId);
lastCandidate = Pair.of(config.byzantiumBlock, new ByzantiumConfig(lastCandidate.getRight()) {
@Override
public Integer getChainId() {
return chainId;
}
@Override
public String toString() {
return ByzantiumConfig.class.getSimpleName();
}
});
} else {
lastCandidate = Pair.of(config.byzantiumBlock, new ByzantiumConfig(lastCandidate.getRight()));
}
if (logger.isDebugEnabled())
logger.debug(logLine.toString());
candidates.add(lastCandidate);
}
if (config.constantinopleBlock != null) {
StringBuilder logLine = new StringBuilder();
if (logger.isDebugEnabled())
logLine.append("Block #").append(config.constantinopleBlock).append(" => Constantinople");
if (config.chainId != null) {
final int chainId = config.chainId;
if (logger.isDebugEnabled())
logLine.append(", chainId: ").append(chainId);
lastCandidate = Pair.of(config.constantinopleBlock, new ConstantinopleConfig(lastCandidate.getRight()) {
@Override
public Integer getChainId() {
return chainId;
}
@Override
public String toString() {
return ConstantinopleConfig.class.getSimpleName();
}
});
} else {
lastCandidate = Pair.of(config.constantinopleBlock, new ConstantinopleConfig(lastCandidate.getRight()));
}
if (logger.isDebugEnabled())
logger.debug(logLine.toString());
candidates.add(lastCandidate);
}
if (config.petersburgBlock != null) {
StringBuilder logLine = new StringBuilder();
if (logger.isDebugEnabled())
logLine.append("Block #").append(config.petersburgBlock).append(" => Petersburg");
if (config.chainId != null) {
final int chainId = config.chainId;
if (logger.isDebugEnabled())
logLine.append(", chainId: ").append(chainId);
lastCandidate = Pair.of(config.petersburgBlock, new PetersburgConfig(lastCandidate.getRight()) {
@Override
public Integer getChainId() {
return chainId;
}
@Override
public String toString() {
return PetersburgConfig.class.getSimpleName();
}
});
} else {
lastCandidate = Pair.of(config.petersburgBlock, new PetersburgConfig(lastCandidate.getRight()));
}
if (logger.isDebugEnabled())
logger.debug(logLine.toString());
candidates.add(lastCandidate);
}
}
if (logger.isDebugEnabled())
logger.debug("Finished rendering net config from genesis {}", config);
{
            // add one config per block number (when several candidates share a block, the last one wins)
Pair<Integer, ? extends BlockchainConfig> last = candidates.remove(0);
for (Pair<Integer, ? extends BlockchainConfig> current : candidates) {
if (current.getLeft().compareTo(last.getLeft()) > 0) {
add(last.getLeft(), last.getRight());
}
last = current;
}
add(last.getLeft(), last.getRight());
}
}
}
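// Worked example (illustrative only, values assumed): for a genesis JSON with roughly
// Ropsten-like settings (homesteadBlock = 0, eip150Block = 10, eip155Block = eip158Block = 10,
// byzantiumBlock = 1_700_000, chainId = 3, no daoForkBlock), the constructor above
// collects the candidates
//   (0, Frontier), (0, Homestead), (10, EIP150), (10, EIP160 with chainId 3),
//   (1_700_000, Byzantium with chainId 3)
// and, since only the last candidate per block number is kept, ends up registering:
//   #0         => HomesteadConfig
//   #10        => Eip160HFConfig(Eip150HFConfig(HomesteadConfig)), chainId 3
//   #1_700_000 => ByzantiumConfig(...), chainId 3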
| 9,986
| 43.784753
| 179
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/manager/AdminInfo.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.manager;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
/**
* @author Roman Mandeleil
* @since 11.12.2014
*/
@Component
public class AdminInfo {
private static final int ExecTimeListLimit = 10000;
private long startupTimeStamp;
private boolean consensus = true;
private List<Long> blockExecTime = new LinkedList<>();
@PostConstruct
public void init() {
startupTimeStamp = System.currentTimeMillis();
}
public long getStartupTimeStamp() {
return startupTimeStamp;
}
public boolean isConsensus() {
return consensus;
}
public void lostConsensus() {
consensus = false;
}
public void addBlockExecTime(long time){
while (blockExecTime.size() > ExecTimeListLimit) {
blockExecTime.remove(0);
}
blockExecTime.add(time);
}
public Long getExecAvg(){
if (blockExecTime.isEmpty()) return 0L;
long sum = 0;
for (int i = 0; i < blockExecTime.size(); ++i){
sum += blockExecTime.get(i);
}
return sum / blockExecTime.size();
}
public List<Long> getBlockExecTime(){
return blockExecTime;
}
}
| 2,127
| 25.6
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/manager/BlockLoader.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.manager;
import org.apache.commons.lang3.ArrayUtils;
import org.ethereum.core.Block;
import org.ethereum.core.BlockHeader;
import org.ethereum.core.Blockchain;
import org.ethereum.core.ImportResult;
import org.ethereum.core.Transaction;
import org.ethereum.db.DbFlushManager;
import org.ethereum.util.ExecutorPipeline;
import org.ethereum.validator.BlockHeaderValidator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.spongycastle.util.encoders.Hex;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.io.Closeable;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Iterator;
import java.util.Scanner;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
@Component
public class BlockLoader {
public interface DumpWalker extends Iterable<byte[]>, Closeable {
@Override
default void close() throws IOException {
}
}
private final static Logger logger = LoggerFactory.getLogger("blockqueue");
private final static DateTimeFormatter df = DateTimeFormatter.ofPattern("HH:mm:ss.SSSS");
private final BlockHeaderValidator headerValidator;
private final Blockchain blockchain;
private final DbFlushManager dbFlushManager;
private ExecutorPipeline<Block, Block> exec1;
private ExecutorPipeline<Block, ?> exec2;
@Autowired
public BlockLoader(BlockHeaderValidator headerValidator, Blockchain blockchain, DbFlushManager dbFlushManager) {
this.headerValidator = headerValidator;
this.blockchain = blockchain;
this.dbFlushManager = dbFlushManager;
}
private void initPipelines() {
exec1 = new ExecutorPipeline(8, 1000, true, (Function<Block, Block>) b -> {
if (b.getNumber() >= blockchain.getBestBlock().getNumber()) {
for (Transaction tx : b.getTransactionsList()) {
tx.getSender();
}
}
return b;
}, throwable -> logger.error("Unhandled exception: ", throwable));
exec2 = exec1.add(1, 1000, block -> {
try {
blockWork(block);
} catch (Exception e) {
e.printStackTrace();
}
});
}
private void blockWork(Block block) {
if (block.getNumber() >= blockchain.getBestBlock().getNumber() || blockchain.getBlockByHash(block.getHash()) == null) {
if (block.getNumber() > 0 && !isValid(block.getHeader())) {
throw new RuntimeException();
}
long start = System.currentTimeMillis();
ImportResult result = blockchain.tryToConnect(block);
if (block.getNumber() % 10 == 0) {
LocalDateTime finish = LocalDateTime.now();
System.out.printf("%s Imported block %s: %s (prework: %d, work: %d, blocks: %d) in %d ms.\n",
finish.format(df),
block.getShortDescr(),
result,
exec1.getQueue().size(),
exec2.getQueue().size(),
exec1.getOrderMap().size(),
System.currentTimeMillis() - start);
}
} else if (block.getNumber() % 10000 == 0) {
System.out.println("Skipping block #" + block.getNumber());
}
}
    /**
     * Tries to import blocks from the specified dumps.
     *
     * @param walkerFactory {@link DumpWalker} factory which instantiates a new walker for each dump;
     * @param paths list of dumps to import;
     * @return <code>true</code> if all blocks within all dumps have been successfully imported, <code>false</code> otherwise.
     */
public boolean loadBlocks(Function<Path, DumpWalker> walkerFactory, Path... paths) {
if (ArrayUtils.isEmpty(paths)) {
logger.warn("There is nothing to import.");
return false;
}
initPipelines();
AtomicLong maxBlockNumber = new AtomicLong();
boolean allBlocksImported;
try {
for (Path dump : paths) {
try (DumpWalker walker = walkerFactory.apply(dump)) {
walker.forEach(rlp -> {
Block block = new Block(rlp);
if (maxBlockNumber.get() < block.getNumber()) {
maxBlockNumber.set(block.getNumber());
}
exec1.push(block);
});
}
}
exec1.join();
dbFlushManager.flushSync();
allBlocksImported = maxBlockNumber.get() == blockchain.getBestBlock().getNumber();
} catch (Exception e) {
e.printStackTrace();
allBlocksImported = false;
}
        if (allBlocksImported) {
            System.out.printf("All blocks up to #%s were successfully loaded.\n", maxBlockNumber);
        } else {
            System.out.println("Some blocks were lost during loading.");
        }
return allBlocksImported;
}
    /**
     * Tries to import blocks from the specified dumps using the default {@link DumpWalker}.
     *
     * @param paths list of dumps to import;
     * @return <code>true</code> if all blocks within all dumps have been successfully imported, <code>false</code> otherwise.
     */
public boolean loadBlocks(Path... paths) {
return loadBlocks(HexLineDumpWalker::new, paths);
}
private boolean isValid(BlockHeader header) {
return headerValidator.validateAndLog(header, logger);
}
private class HexLineDumpWalker implements DumpWalker {
private final Scanner scanner;
public HexLineDumpWalker(Path path) {
try {
System.out.println("Loading hex encoded blocks dump from: " + path);
this.scanner = new Scanner(Files.newInputStream(path), "UTF-8");
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public void close() throws IOException {
this.scanner.close();
}
@Override
public Iterator<byte[]> iterator() {
return new Iterator<byte[]>() {
@Override
public boolean hasNext() {
return scanner.hasNextLine();
}
@Override
public byte[] next() {
return Hex.decode(scanner.nextLine());
}
};
}
}
}
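// Usage sketch (illustrative only): BlockLoader is a Spring component, so it is normally
// obtained from the application context. The bean lookup and the dump file name below
// are assumptions of this example, not part of the class itself.
//
//   BlockLoader loader = ctx.getBean(BlockLoader.class);
//   boolean ok = loader.loadBlocks(Paths.get("blocks.dump"));
//   // each line of the dump must be a hex-encoded RLP block (see HexLineDumpWalker)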
| 7,580
| 33.935484
| 127
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/manager/WorldManager.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.manager;
import org.ethereum.config.SystemProperties;
import org.ethereum.core.*;
import org.ethereum.db.BlockStore;
import org.ethereum.db.DbFlushManager;
import org.ethereum.db.HeaderStore;
import org.ethereum.db.migrate.MigrateHeaderSourceTotalDiff;
import org.ethereum.listener.CompositeEthereumListener;
import org.ethereum.listener.EthereumListener;
import org.ethereum.net.client.PeerClient;
import org.ethereum.net.rlpx.discover.UDPListener;
import org.ethereum.sync.FastSyncManager;
import org.ethereum.sync.SyncManager;
import org.ethereum.net.rlpx.discover.NodeManager;
import org.ethereum.net.server.ChannelManager;
import org.ethereum.sync.SyncPool;
import org.ethereum.util.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.spongycastle.util.encoders.Hex;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationContext;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import static org.ethereum.crypto.HashUtil.EMPTY_TRIE_HASH;
import static org.ethereum.util.ByteUtil.toHexString;
/**
* WorldManager is a singleton containing references to different parts of the system.
*
* @author Roman Mandeleil
* @since 01.06.2014
*/
@Component
public class WorldManager {
private static final Logger logger = LoggerFactory.getLogger("general");
@Autowired
private PeerClient activePeer;
@Autowired
private ChannelManager channelManager;
@Autowired
private AdminInfo adminInfo;
@Autowired
private NodeManager nodeManager;
@Autowired
private SyncManager syncManager;
@Autowired
private SyncPool pool;
@Autowired
private PendingState pendingState;
@Autowired
private UDPListener discoveryUdpListener;
@Autowired
private EventDispatchThread eventDispatchThread;
@Autowired
private DbFlushManager dbFlushManager;
@Autowired
private ApplicationContext ctx;
private SystemProperties config;
private EthereumListener listener;
private Blockchain blockchain;
private Repository repository;
private BlockStore blockStore;
@Autowired
public WorldManager(final SystemProperties config, final Repository repository,
final EthereumListener listener, final Blockchain blockchain,
final BlockStore blockStore) {
this.listener = listener;
this.blockchain = blockchain;
this.repository = repository;
this.blockStore = blockStore;
this.config = config;
loadBlockchain();
}
@PostConstruct
private void init() {
fastSyncDbJobs();
syncManager.init(channelManager, pool);
}
public void addListener(EthereumListener listener) {
logger.info("Ethereum listener added");
((CompositeEthereumListener) this.listener).addListener(listener);
}
public void startPeerDiscovery() {
}
public void stopPeerDiscovery() {
discoveryUdpListener.close();
nodeManager.close();
}
public void initSyncing() {
config.setSyncEnabled(true);
syncManager.init(channelManager, pool);
}
public ChannelManager getChannelManager() {
return channelManager;
}
public EthereumListener getListener() {
return listener;
}
public org.ethereum.facade.Repository getRepository() {
return (org.ethereum.facade.Repository)repository;
}
public Blockchain getBlockchain() {
return blockchain;
}
public PeerClient getActivePeer() {
return activePeer;
}
public BlockStore getBlockStore() {
return blockStore;
}
public PendingState getPendingState() {
return pendingState;
}
public void loadBlockchain() {
if (!config.databaseReset() || config.databaseResetBlock() != 0)
blockStore.load();
if (blockStore.getBestBlock() == null) {
logger.info("DB is empty - adding Genesis");
Genesis genesis = Genesis.getInstance(config);
Genesis.populateRepository(repository, genesis);
// repository.commitBlock(genesis.getHeader());
repository.commit();
blockStore.saveBlock(Genesis.getInstance(config), Genesis.getInstance(config).getDifficultyBI(), true);
blockchain.setBestBlock(Genesis.getInstance(config));
blockchain.setTotalDifficulty(Genesis.getInstance(config).getDifficultyBI());
listener.onBlock(new BlockSummary(Genesis.getInstance(config), new HashMap<byte[], BigInteger>(), new ArrayList<TransactionReceipt>(), new ArrayList<TransactionExecutionSummary>()), true);
// repository.dumpState(Genesis.getInstance(config), 0, 0, null);
logger.info("Genesis block loaded");
} else {
if (!config.databaseReset() &&
!Arrays.equals(blockchain.getBlockByNumber(0).getHash(), config.getGenesis().getHash())) {
// fatal exit
Utils.showErrorAndExit("*** DB is incorrect, 0 block in DB doesn't match genesis");
}
Block bestBlock = blockStore.getBestBlock();
if (config.databaseReset() && config.databaseResetBlock() > 0) {
if (config.databaseResetBlock() > bestBlock.getNumber()) {
logger.error("*** Can't reset to block [{}] since block store is at block [{}].", config.databaseResetBlock(), bestBlock);
throw new RuntimeException("Reset block ahead of block store.");
}
bestBlock = blockStore.getChainBlockByNumber(config.databaseResetBlock());
Repository snapshot = repository.getSnapshotTo(bestBlock.getStateRoot());
                if (false) { // TODO: some way to tell if the snapshot hasn't been pruned
                    logger.error("*** Could not reset database to block [{}] with stateRoot [{}], since state information is " +
                            "unavailable. It might have been pruned from the database.",
                            config.databaseResetBlock(), toHexString(bestBlock.getStateRoot()));
throw new RuntimeException("State unavailable for reset block.");
}
}
blockchain.setBestBlock(bestBlock);
BigInteger totalDifficulty = blockStore.getTotalDifficultyForHash(bestBlock.getHash());
blockchain.setTotalDifficulty(totalDifficulty);
logger.info("*** Loaded up to block [{}] totalDifficulty [{}] with stateRoot [{}]",
blockchain.getBestBlock().getNumber(),
blockchain.getTotalDifficulty().toString(),
toHexString(blockchain.getBestBlock().getStateRoot()));
}
if (config.rootHashStart() != null) {
// update world state by dummy hash
byte[] rootHash = Hex.decode(config.rootHashStart());
logger.info("Loading root hash from property file: [{}]", config.rootHashStart());
this.repository.syncToRoot(rootHash);
} else {
// Update world state to latest loaded block from db
// if state is not generated from empty premine list
// todo this is just a workaround, move EMPTY_TRIE_HASH logic to Trie implementation
if (!Arrays.equals(blockchain.getBestBlock().getStateRoot(), EMPTY_TRIE_HASH)) {
this.repository.syncToRoot(blockchain.getBestBlock().getStateRoot());
}
}
/* todo: return it when there is no state conflicts on the chain
boolean dbValid = this.repository.getWorldState().validate() || bestBlock.isGenesis();
if (!dbValid){
logger.error("The DB is not valid for that blockchain");
System.exit(-1); // todo: reset the repository and blockchain
}
*/
}
    /**
     * After a Fast Sync run with skipHistory the BlockStore is incomplete,
     * so this method attaches an additional header storage to the Blockchain.
     */
private void fastSyncDbJobs() {
// checking if fast sync ran sometime ago with "skipHistory flag"
if (blockStore.getBestBlock().getNumber() > 0 &&
blockStore.getChainBlockByNumber(1) == null) {
FastSyncManager fastSyncManager = ctx.getBean(FastSyncManager.class);
if (fastSyncManager.isInProgress()) {
return;
}
logger.info("DB is filled using Fast Sync with skipHistory, adopting headerStore");
((BlockchainImpl) blockchain).setHeaderStore(ctx.getBean(HeaderStore.class));
}
MigrateHeaderSourceTotalDiff tempMigration = new MigrateHeaderSourceTotalDiff(ctx, blockStore, blockchain, config);
tempMigration.run();
}
public void close() {
logger.info("close: stopping peer discovery ...");
stopPeerDiscovery();
logger.info("close: stopping ChannelManager ...");
channelManager.close();
logger.info("close: stopping SyncManager ...");
syncManager.close();
logger.info("close: stopping PeerClient ...");
activePeer.close();
logger.info("close: shutting down event dispatch thread used by EventBus ...");
eventDispatchThread.shutdown();
logger.info("close: closing Blockchain instance ...");
blockchain.close();
logger.info("close: closing main repository ...");
repository.close();
logger.info("close: database flush manager ...");
dbFlushManager.close();
}
}
| 10,506
| 34.738095
| 200
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/TransactionStore.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import org.apache.commons.collections4.map.LRUMap;
import org.ethereum.datasource.*;
import org.ethereum.core.TransactionInfo;
import org.ethereum.util.FastByteComparisons;
import org.ethereum.util.RLP;
import org.ethereum.util.RLPList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import javax.annotation.PreDestroy;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * Storage (tx hash) => List of (block idx, tx idx, TransactionReceipt)
 *
 * Since a transaction can be included in blocks from different forks and
 * have different receipts, the class stores all of them (in the same manner
 * fork blocks are stored).
 *
 * NOTE: the returned TransactionInfo instances contain a TransactionReceipt
 * whose Transaction object is not initialized. If needed, use BlockStore to
 * retrieve and set up the Transaction instance.
 *
 * Created by Anton Nashatyrev on 07.04.2016.
 */
@Component
public class TransactionStore extends ObjectDataSource<List<TransactionInfo>> {
private static final Logger logger = LoggerFactory.getLogger("db");
private final LRUMap<ByteArrayWrapper, Object> lastSavedTxHash = new LRUMap<>(5000);
    private final Object object = new Object(); // dummy value: lastSavedTxHash is effectively an LRU set of recently stored tx hashes
private final static Serializer<List<TransactionInfo>, byte[]> serializer =
new Serializer<List<TransactionInfo>, byte[]>() {
@Override
public byte[] serialize(List<TransactionInfo> object) {
byte[][] txsRlp = new byte[object.size()][];
for (int i = 0; i < txsRlp.length; i++) {
txsRlp[i] = object.get(i).getEncoded();
}
return RLP.encodeList(txsRlp);
}
@Override
public List<TransactionInfo> deserialize(byte[] stream) {
try {
if (stream == null) return null;
RLPList infoList = RLP.unwrapList(stream);
List<TransactionInfo> ret = new ArrayList<>();
for (int i = 0; i < infoList.size(); i++) {
ret.add(new TransactionInfo(infoList.get(i).getRLPData()));
}
return ret;
} catch (Exception e) {
// fallback to previous DB version
return Collections.singletonList(new TransactionInfo(stream));
}
}
};
    /**
     * Adds a TransactionInfo to the store.
     * If entries for this transaction already exist, a new entry is added
     * only when no entry for the same block exists yet.
     * @return true if the TransactionInfo was added, false if it already exists
     */
public boolean put(TransactionInfo tx) {
byte[] txHash = tx.getReceipt().getTransaction().getHash();
List<TransactionInfo> existingInfos = null;
synchronized (lastSavedTxHash) {
if (lastSavedTxHash.put(new ByteArrayWrapper(txHash), object) != null || !lastSavedTxHash.isFull()) {
existingInfos = get(txHash);
}
}
// else it is highly unlikely that the transaction was included into another block
// earlier than 5000 transactions before with regard to regular block import process
if (existingInfos == null) {
existingInfos = new ArrayList<>();
} else {
for (TransactionInfo info : existingInfos) {
if (FastByteComparisons.equal(info.getBlockHash(), tx.getBlockHash())) {
return false;
}
}
}
existingInfos.add(tx);
put(txHash, existingInfos);
return true;
}
    public TransactionInfo get(byte[] txHash, byte[] blockHash) {
        List<TransactionInfo> existingInfos = get(txHash);
        if (existingInfos == null) return null; // unknown transaction
        for (TransactionInfo info : existingInfos) {
if (FastByteComparisons.equal(info.getBlockHash(), blockHash)) {
return info;
}
}
return null;
}
public TransactionStore(Source<byte[], byte[]> src) {
super(src, serializer, 256);
}
@PreDestroy
public void close() {
// try {
// logger.info("Closing TransactionStore...");
// super.close();
// } catch (Exception e) {
// logger.warn("Problems closing TransactionStore", e);
// }
}
}
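// Usage sketch (illustrative only): the backing in-memory source (HashMapDB) and the
// TransactionInfo instances below are assumptions of this example.
//
//   TransactionStore store = new TransactionStore(new HashMapDB<>());
//   store.put(infoInBlockA);                 // true: first entry for this tx hash
//   store.put(infoInBlockA);                 // false: same tx already stored for block A
//   store.put(infoInBlockB);                 // true: same tx included in a fork block B
//   TransactionInfo onB = store.get(txHash, blockHashB);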
| 5,166
| 36.172662
| 113
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/BlockStoreDummy.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import org.ethereum.core.Block;
import org.ethereum.core.BlockHeader;
import org.ethereum.crypto.HashUtil;
import java.math.BigInteger;
import java.util.List;
/**
* @author Roman Mandeleil
* @since 10.02.2015
*/
public class BlockStoreDummy implements BlockStore {
@Override
public byte[] getBlockHashByNumber(long blockNumber) {
byte[] data = String.valueOf(blockNumber).getBytes();
return HashUtil.sha3(data);
}
@Override
public byte[] getBlockHashByNumber(long blockNumber, byte[] branchBlockHash) {
return getBlockHashByNumber(blockNumber);
}
@Override
public Block getChainBlockByNumber(long blockNumber) {
return null;
}
@Override
public Block getBlockByHash(byte[] hash) {
return null;
}
@Override
public boolean isBlockExist(byte[] hash) {
return false;
}
@Override
public List<byte[]> getListHashesEndWith(byte[] hash, long qty) {
return null;
}
@Override
public List<BlockHeader> getListHeadersEndWith(byte[] hash, long qty) {
return null;
}
@Override
public List<Block> getListBlocksEndWith(byte[] hash, long qty) {
return null;
}
@Override
public void saveBlock(Block block, BigInteger totalDifficulty, boolean mainChain) {
}
@Override
public BigInteger getTotalDifficulty() {
return null;
}
@Override
public Block getBestBlock() {
return null;
}
@Override
public void flush() {
}
@Override
public void load() {
}
@Override
public long getMaxNumber() {
return 0;
}
@Override
public void reBranch(Block forkBlock) {
}
@Override
public BigInteger getTotalDifficultyForHash(byte[] hash) {
return null;
}
@Override
public void close() {}
}
| 2,697
| 21.483333
| 87
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/BlockStore.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import org.ethereum.core.Block;
import org.ethereum.core.BlockHeader;
import java.math.BigInteger;
import java.util.List;
/**
* @author Roman Mandeleil
* @since 08.01.2015
*/
public interface BlockStore {
byte[] getBlockHashByNumber(long blockNumber);
    /**
     * Gets the block hash by its index (block number).
     * When more than one block with the specified index exists (forks),
     * selects the block that is an ancestor of branchBlockHash.
     */
byte[] getBlockHashByNumber(long blockNumber, byte[] branchBlockHash);
Block getChainBlockByNumber(long blockNumber);
Block getBlockByHash(byte[] hash);
boolean isBlockExist(byte[] hash);
List<byte[]> getListHashesEndWith(byte[] hash, long qty);
List<BlockHeader> getListHeadersEndWith(byte[] hash, long qty);
List<Block> getListBlocksEndWith(byte[] hash, long qty);
void saveBlock(Block block, BigInteger totalDifficulty, boolean mainChain);
BigInteger getTotalDifficultyForHash(byte[] hash);
BigInteger getTotalDifficulty();
Block getBestBlock();
long getMaxNumber();
void flush();
void reBranch(Block forkBlock);
void load();
void close();
}
| 2,001
| 27.197183
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/RepositoryWrapper.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import org.ethereum.core.AccountState;
import org.ethereum.core.Block;
import org.ethereum.core.BlockchainImpl;
import org.ethereum.core.Repository;
import org.ethereum.vm.DataWord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import javax.annotation.Nullable;
import java.math.BigInteger;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
/**
 * Repository that delegates all calls to the blockchain's current Repository
 *
 * Created by Anton Nashatyrev on 22.12.2016.
 */
@Component
public class RepositoryWrapper implements Repository {
@Autowired
BlockchainImpl blockchain;
public RepositoryWrapper() {
}
@Override
public AccountState createAccount(byte[] addr) {
return blockchain.getRepository().createAccount(addr);
}
@Override
public boolean isExist(byte[] addr) {
return blockchain.getRepository().isExist(addr);
}
@Override
public AccountState getAccountState(byte[] addr) {
return blockchain.getRepository().getAccountState(addr);
}
@Override
public void delete(byte[] addr) {
blockchain.getRepository().delete(addr);
}
@Override
public BigInteger increaseNonce(byte[] addr) {
return blockchain.getRepository().increaseNonce(addr);
}
@Override
public BigInteger setNonce(byte[] addr, BigInteger nonce) {
return blockchain.getRepository().setNonce(addr, nonce);
}
@Override
public BigInteger getNonce(byte[] addr) {
return blockchain.getRepository().getNonce(addr);
}
@Override
public ContractDetails getContractDetails(byte[] addr) {
return blockchain.getRepository().getContractDetails(addr);
}
@Override
public boolean hasContractDetails(byte[] addr) {
return blockchain.getRepository().hasContractDetails(addr);
}
@Override
public void saveCode(byte[] addr, byte[] code) {
blockchain.getRepository().saveCode(addr, code);
}
@Override
public byte[] getCode(byte[] addr) {
return blockchain.getRepository().getCode(addr);
}
@Override
public byte[] getCodeHash(byte[] addr) {
return blockchain.getRepository().getCodeHash(addr);
}
@Override
public void addStorageRow(byte[] addr, DataWord key, DataWord value) {
blockchain.getRepository().addStorageRow(addr, key, value);
}
@Override
public DataWord getStorageValue(byte[] addr, DataWord key) {
return blockchain.getRepository().getStorageValue(addr, key);
}
@Override
public BigInteger getBalance(byte[] addr) {
return blockchain.getRepository().getBalance(addr);
}
@Override
public BigInteger addBalance(byte[] addr, BigInteger value) {
return blockchain.getRepository().addBalance(addr, value);
}
@Override
public Set<byte[]> getAccountsKeys() {
return blockchain.getRepository().getAccountsKeys();
}
@Override
public void dumpState(Block block, long gasUsed, int txNumber, byte[] txHash) {
blockchain.getRepository().dumpState(block, gasUsed, txNumber, txHash);
}
@Override
public Repository startTracking() {
return blockchain.getRepository().startTracking();
}
@Override
public void flush() {
blockchain.getRepository().flush();
}
@Override
public void flushNoReconnect() {
blockchain.getRepository().flushNoReconnect();
}
@Override
public void commit() {
blockchain.getRepository().commit();
}
@Override
public void rollback() {
blockchain.getRepository().rollback();
}
@Override
public void syncToRoot(byte[] root) {
blockchain.getRepository().syncToRoot(root);
}
@Override
public boolean isClosed() {
return blockchain.getRepository().isClosed();
}
@Override
public void close() {
blockchain.getRepository().close();
}
@Override
public void reset() {
blockchain.getRepository().reset();
}
@Override
    public void updateBatch(HashMap<ByteArrayWrapper, AccountState> accountStates, HashMap<ByteArrayWrapper, ContractDetails> contractDetails) {
        blockchain.getRepository().updateBatch(accountStates, contractDetails);
}
@Override
public byte[] getRoot() {
return blockchain.getRepository().getRoot();
}
@Override
public void loadAccount(byte[] addr, HashMap<ByteArrayWrapper, AccountState> cacheAccounts, HashMap<ByteArrayWrapper, ContractDetails> cacheDetails) {
blockchain.getRepository().loadAccount(addr, cacheAccounts, cacheDetails);
}
@Override
public Repository getSnapshotTo(byte[] root) {
return blockchain.getRepository().getSnapshotTo(root);
}
@Override
public Repository clone() {
return getSnapshotTo(getRoot());
}
@Override
public int getStorageSize(byte[] addr) {
return blockchain.getRepository().getStorageSize(addr);
}
@Override
public Set<DataWord> getStorageKeys(byte[] addr) {
return blockchain.getRepository().getStorageKeys(addr);
}
@Override
public Map<DataWord, DataWord> getStorage(byte[] addr, @Nullable Collection<DataWord> keys) {
return blockchain.getRepository().getStorage(addr, keys);
}
}
| 6,256
| 26.933036
| 154
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/StateSource.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import org.ethereum.config.CommonConfig;
import org.ethereum.config.SystemProperties;
import org.ethereum.datasource.*;
import org.springframework.beans.factory.annotation.Autowired;
/**
* Created by Anton Nashatyrev on 29.11.2016.
*/
public class StateSource extends SourceChainBox<byte[], byte[], byte[], byte[]>
implements HashedKeySource<byte[], byte[]> {
// for debug purposes
public static StateSource INST;
JournalSource<byte[]> journalSource;
NoDeleteSource<byte[], byte[]> noDeleteSource;
ReadCache<byte[], byte[]> readCache;
AbstractCachedSource<byte[], byte[]> writeCache;
public StateSource(Source<byte[], byte[]> src, boolean pruningEnabled) {
super(src);
INST = this;
add(readCache = new ReadCache.BytesKey<>(src).withMaxCapacity(16 * 1024 * 1024 / 512)); // 512 - approx size of a node
readCache.setFlushSource(true);
writeCache = new AsyncWriteCache<byte[], byte[]>(readCache) {
@Override
protected WriteCache<byte[], byte[]> createCache(Source<byte[], byte[]> source) {
WriteCache.BytesKey<byte[]> ret = new WriteCache.BytesKey<byte[]>(source, WriteCache.CacheType.SIMPLE);
ret.withSizeEstimators(MemSizeEstimator.ByteArrayEstimator, MemSizeEstimator.ByteArrayEstimator);
ret.setFlushSource(true);
return ret;
}
}.withName("state");
add(writeCache);
if (pruningEnabled) {
add(journalSource = new JournalSource<>(writeCache));
} else {
add(noDeleteSource = new NoDeleteSource<>(writeCache));
}
}
@Autowired
public void setConfig(SystemProperties config) {
int size = config.getConfig().getInt("cache.stateCacheSize");
readCache.withMaxCapacity(size * 1024 * 1024 / 512); // 512 - approx size of a node
}
@Autowired
public void setCommonConfig(CommonConfig commonConfig) {
if (journalSource != null) {
journalSource.setJournalStore(commonConfig.cachedDbSource("journal"));
}
}
public JournalSource<byte[]> getJournalSource() {
return journalSource;
}
/**
* Returns the source behind JournalSource
*/
public Source<byte[], byte[]> getNoJournalSource() {
return writeCache;
}
public AbstractCachedSource<byte[], byte[]> getWriteCache() {
return writeCache;
}
public ReadCache<byte[], byte[]> getReadCache() {
return readCache;
}
}
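// Note on the chain assembled in the constructor above (from the caller toward the database):
//   JournalSource (pruning enabled) or NoDeleteSource (pruning disabled)
//     -> AsyncWriteCache "state" (write-back cache, flushed in the background)
//       -> ReadCache (read-through cache; ~16 MB by default, resized from cache.stateCacheSize, ~512 bytes per node)
//         -> the backing Source<byte[], byte[]> passed in as src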
| 3,365
| 34.0625
| 126
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/HeaderStore.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import org.ethereum.core.BlockHeader;
import org.ethereum.datasource.DataSourceArray;
import org.ethereum.datasource.ObjectDataSource;
import org.ethereum.datasource.Serializers;
import org.ethereum.datasource.Source;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Block header store.
 * Assumes a single chain.
 * Maintains indexes by header hash and by block number.
 */
public class HeaderStore {
private static final Logger logger = LoggerFactory.getLogger("general");
Source<byte[], byte[]> indexDS;
DataSourceArray<byte[]> index;
Source<byte[], byte[]> headersDS;
ObjectDataSource<BlockHeader> headers;
public HeaderStore() {
}
public void init(Source<byte[], byte[]> index, Source<byte[], byte[]> headers) {
indexDS = index;
this.index = new DataSourceArray<>(
new ObjectDataSource<>(index,Serializers.AsIsSerializer, 2048));
this.headersDS = headers;
this.headers = new ObjectDataSource<>(headers, Serializers.BlockHeaderSerializer, 512);
}
public synchronized BlockHeader getBestHeader() {
long maxNumber = getMaxNumber();
if (maxNumber < 0) return null;
return getHeaderByNumber(maxNumber);
}
public synchronized void flush() {
headers.flush();
index.flush();
headersDS.flush();
indexDS.flush();
}
public synchronized void saveHeader(BlockHeader header) {
index.set((int) header.getNumber(), header.getHash());
headers.put(header.getHash(), header);
}
public synchronized BlockHeader getHeaderByNumber(long number) {
if (number < 0 || number >= index.size()) {
return null;
}
byte[] hash = index.get((int) number);
if (hash == null) {
return null;
}
return headers.get(hash);
}
public synchronized int size() {
return index.size();
}
public synchronized BlockHeader getHeaderByHash(byte[] hash) {
return headers.get(hash);
}
public synchronized long getMaxNumber(){
if (index.size() > 0) {
return (long) index.size() - 1;
} else {
return -1;
}
}
}
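// Usage sketch (illustrative only): the two in-memory sources and the header
// instance below are assumptions of this example.
//
//   HeaderStore store = new HeaderStore();
//   store.init(new HashMapDB<byte[]>(), new HashMapDB<byte[]>()); // number->hash index, hash->header store
//   store.saveHeader(header);                        // indexes by number and by hash
//   BlockHeader same = store.getHeaderByNumber(header.getNumber());
//   BlockHeader best = store.getBestHeader();        // header at getMaxNumber()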
| 3,041
| 28.25
| 95
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/PeerSource.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import org.apache.commons.lang3.tuple.Pair;
import org.ethereum.datasource.DataSourceArray;
import org.ethereum.datasource.DbSource;
import org.ethereum.datasource.ObjectDataSource;
import org.ethereum.datasource.Serializer;
import org.ethereum.datasource.Source;
import org.ethereum.net.rlpx.Node;
import org.ethereum.util.ByteUtil;
import org.ethereum.util.RLP;
import org.ethereum.util.RLPList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.math.BigInteger;
/**
 * Source for {@link org.ethereum.net.rlpx.Node} entries, also known as peers
 */
public class PeerSource {
private static final Logger logger = LoggerFactory.getLogger("db");
// for debug purposes
public static PeerSource INST;
private Source<byte[], byte[]> src;
DataSourceArray<Pair<Node, Integer>> nodes;
public static final Serializer<Pair<Node, Integer>, byte[]> NODE_SERIALIZER = new Serializer<Pair<Node, Integer>, byte[]>(){
@Override
public byte[] serialize(Pair<Node, Integer> value) {
byte[] nodeRlp = value.getLeft().getRLP();
byte[] nodeIsDiscovery = RLP.encodeByte(value.getLeft().isDiscoveryNode() ? (byte) 1 : 0);
byte[] savedReputation = RLP.encodeBigInteger(BigInteger.valueOf(value.getRight()));
return RLP.encodeList(nodeRlp, nodeIsDiscovery, savedReputation);
}
@Override
public Pair<Node, Integer> deserialize(byte[] bytes) {
if (bytes == null) return null;
RLPList nodeElement = (RLPList) RLP.decode2(bytes).get(0);
byte[] nodeRlp = nodeElement.get(0).getRLPData();
byte[] nodeIsDiscovery = nodeElement.get(1).getRLPData();
byte[] savedReputation = nodeElement.get(2).getRLPData();
Node node = new Node(nodeRlp);
node.setDiscoveryNode(nodeIsDiscovery != null);
return Pair.of(node, ByteUtil.byteArrayToInt(savedReputation));
}
};
public PeerSource(Source<byte[], byte[]> src) {
this.src = src;
INST = this;
this.nodes = new DataSourceArray<>(
new ObjectDataSource<>(src, NODE_SERIALIZER, 512));
}
public DataSourceArray<Pair<Node, Integer>> getNodes() {
return nodes;
}
public void clear() {
if (src instanceof DbSource) {
((DbSource) src).reset();
this.nodes = new DataSourceArray<>(
new ObjectDataSource<>(src, NODE_SERIALIZER, 512));
} else {
throw new RuntimeException("Not supported");
}
}
}
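// Serialization note: NODE_SERIALIZER stores each peer as the RLP list
//   [ node RLP, discovery flag (0 or 1), saved reputation ]
// so a Pair.of(node, 42) round-trips through serialize()/deserialize() with the
// node, its discovery flag and the reputation value 42 intact.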
| 3,401
| 35.191489
| 128
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/ContractDetails.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import org.ethereum.vm.DataWord;
import javax.annotation.Nullable;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
public interface ContractDetails {
void put(DataWord key, DataWord value);
DataWord get(DataWord key);
byte[] getCode();
byte[] getCode(byte[] codeHash);
void setCode(byte[] code);
byte[] getStorageHash();
void decode(byte[] rlpCode);
void setDirty(boolean dirty);
void setDeleted(boolean deleted);
boolean isDirty();
boolean isDeleted();
byte[] getEncoded();
int getStorageSize();
Set<DataWord> getStorageKeys();
// Removes all storage, key by key, if supported
void deleteStorage();
Map<DataWord,DataWord> getStorage(@Nullable Collection<DataWord> keys);
Map<DataWord, DataWord> getStorage();
void setStorage(List<DataWord> storageKeys, List<DataWord> storageValues);
void setStorage(Map<DataWord, DataWord> storage);
byte[] getAddress();
void setAddress(byte[] address);
ContractDetails clone();
String toString();
void syncStorage();
ContractDetails getSnapshotTo(byte[] hash);
}
| 1,994
| 23.62963
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/AbstractBlockstore.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import org.ethereum.core.Block;
/**
* Created by Anton Nashatyrev on 29.10.2015.
*/
public abstract class AbstractBlockstore implements BlockStore {
@Override
public byte[] getBlockHashByNumber(long blockNumber, byte[] branchBlockHash) {
Block branchBlock = getBlockByHash(branchBlockHash);
if (branchBlock.getNumber() < blockNumber) {
throw new IllegalArgumentException("Requested block number > branch hash number: " + blockNumber + " > " + branchBlock.getNumber());
}
while(branchBlock.getNumber() > blockNumber) {
branchBlock = getBlockByHash(branchBlock.getParentHash());
}
return branchBlock.getHash();
}
}
| 1,517
| 37.923077
| 144
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/PruneManager.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import org.ethereum.config.SystemProperties;
import org.ethereum.core.Block;
import org.ethereum.core.BlockHeader;
import org.ethereum.datasource.JournalSource;
import org.ethereum.datasource.Source;
import org.ethereum.db.prune.Segment;
import org.ethereum.db.prune.Pruner;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
* Manages state pruning part of block processing.
*
* <p>
* Constructs chain segments and prunes them when they are complete
*
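* <p>
* A minimal, illustrative wiring (the local variable names are placeholders; in a full node
* the manager is created and wired by Spring):
* <pre>{@code
* PruneManager pm = new PruneManager(blockStore, journalSource, pruneStorage, 192);
* pm.blockCommitted(block.getHeader()); // invoke after each imported block
* }</pre>
*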
* Created by Anton Nashatyrev on 10.11.2016.
*
* @see Segment
* @see Pruner
*/
public class PruneManager {
private static final int LONGEST_CHAIN = 192;
private JournalSource<?> journalSource;
@Autowired
private IndexedBlockStore blockStore;
private int pruneBlocksCnt;
private Segment segment;
private Pruner pruner;
@Autowired
private PruneManager(SystemProperties config) {
pruneBlocksCnt = config.databasePruneDepth();
}
public PruneManager(IndexedBlockStore blockStore, JournalSource<?> journalSource,
Source<byte[], ?> pruneStorage, int pruneBlocksCnt) {
this.blockStore = blockStore;
this.journalSource = journalSource;
this.pruneBlocksCnt = pruneBlocksCnt;
if (journalSource != null && pruneStorage != null)
this.pruner = new Pruner(journalSource.getJournal(), pruneStorage);
}
@Autowired
public void setStateSource(StateSource stateSource) {
journalSource = stateSource.getJournalSource();
if (journalSource != null)
pruner = new Pruner(journalSource.getJournal(), stateSource.getNoJournalSource());
}
public void blockCommitted(BlockHeader block) {
if (pruneBlocksCnt < 0) return; // pruning disabled
JournalSource.Update update = journalSource.commitUpdates(block.getHash());
pruner.feed(update);
long forkBlockNum = block.getNumber() - getForkBlocksCnt();
if (forkBlockNum < 0) return;
List<Block> pruneBlocks = blockStore.getBlocksByNumber(forkBlockNum);
Block chainBlock = blockStore.getChainBlockByNumber(forkBlockNum);
// reset segment and return
// if chainBlock is accidentally null
if (chainBlock == null) {
segment = null;
return;
}
if (segment == null) {
if (pruneBlocks.size() == 1) // wait for a single chain
segment = new Segment(chainBlock);
return;
}
Segment.Tracker tracker = segment.startTracking();
tracker.addMain(chainBlock);
tracker.addAll(pruneBlocks);
tracker.commit();
if (segment.isComplete()) {
if (!pruner.isReady()) {
List<byte[]> forkWindow = getAllChainsHashes(segment.getRootNumber() + 1, blockStore.getMaxNumber());
pruner.init(forkWindow, getForkBlocksCnt());
int mainChainWindowSize = pruneBlocksCnt - getForkBlocksCnt();
if (mainChainWindowSize > 0) {
List<byte[]> mainChainWindow = getMainChainHashes(Math.max(1, segment.getRootNumber() - mainChainWindowSize + 1),
segment.getRootNumber());
pruner.withSecondStep(mainChainWindow, mainChainWindowSize);
}
}
pruner.prune(segment);
segment = new Segment(chainBlock);
}
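// second step: the block at mainBlockNum has just left the main chain window, persist its postponed deletions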
long mainBlockNum = block.getNumber() - getMainBlocksCnt();
if (mainBlockNum < 0) return;
byte[] hash = blockStore.getBlockHashByNumber(mainBlockNum);
pruner.persist(hash);
}
private int getForkBlocksCnt() {
return Math.min(pruneBlocksCnt, 2 * LONGEST_CHAIN);
}
private int getMainBlocksCnt() {
if (pruneBlocksCnt <= 2 * LONGEST_CHAIN) {
return Integer.MAX_VALUE;
} else {
return pruneBlocksCnt;
}
}
private List<byte[]> getAllChainsHashes(long fromBlock, long toBlock) {
List<byte[]> ret = new ArrayList<>();
for (long num = fromBlock; num <= toBlock; num++) {
List<Block> blocks = blockStore.getBlocksByNumber(num);
List<byte[]> hashes = blocks.stream().map(Block::getHash).collect(Collectors.toList());
ret.addAll(hashes);
}
return ret;
}
private List<byte[]> getMainChainHashes(long fromBlock, long toBlock) {
List<byte[]> ret = new ArrayList<>();
for (long num = fromBlock; num <= toBlock; num++) {
byte[] hash = blockStore.getBlockHashByNumber(num);
ret.add(hash);
}
return ret;
}
}
| 5,594
| 33.115854
| 133
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/ByteArrayWrapper.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import org.ethereum.util.FastByteComparisons;
import java.io.Serializable;
import java.util.Arrays;
import static org.ethereum.util.ByteUtil.toHexString;
/**
* @author Roman Mandeleil
* @since 11.06.2014
*/
public class ByteArrayWrapper implements Comparable<ByteArrayWrapper>, Serializable {
private final byte[] data;
private int hashCode = 0;
public ByteArrayWrapper(byte[] data) {
if (data == null)
throw new NullPointerException("Data must not be null");
this.data = data;
this.hashCode = Arrays.hashCode(data);
}
public boolean equals(Object other) {
if (!(other instanceof ByteArrayWrapper))
return false;
byte[] otherData = ((ByteArrayWrapper) other).getData();
return FastByteComparisons.compareTo(
data, 0, data.length,
otherData, 0, otherData.length) == 0;
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public int compareTo(ByteArrayWrapper o) {
return FastByteComparisons.compareTo(
data, 0, data.length,
o.getData(), 0, o.getData().length);
}
public byte[] getData() {
return data;
}
@Override
public String toString() {
return toHexString(data);
}
}
| 2,148
| 28.438356
| 85
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/DbFlushManager.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.JdkFutureAdapters;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.ethereum.config.CommonConfig;
import org.ethereum.config.SystemProperties;
import org.ethereum.datasource.*;
import org.ethereum.listener.CompositeEthereumListener;
import org.ethereum.listener.EthereumListenerAdapter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.*;
/**
* Created by Anton Nashatyrev on 01.12.2016.
*/
public class DbFlushManager {
private static final Logger logger = LoggerFactory.getLogger("db");
List<AbstractCachedSource<byte[], ?>> writeCaches = new CopyOnWriteArrayList<>();
List<Source<byte[], ?>> sources = new CopyOnWriteArrayList<>();
Set<DbSource> dbSources = new HashSet<>();
AbstractCachedSource<byte[], byte[]> stateDbCache;
long sizeThreshold;
int commitsCountThreshold;
boolean syncDone = false;
boolean flushAfterSyncDone;
SystemProperties config;
int commitCount = 0;
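// single flush thread with a one-slot queue: at most one flush task can be pending behind the one in progress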
private final BlockingQueue<Runnable> executorQueue = new ArrayBlockingQueue<>(1);
private final ExecutorService flushThread = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
executorQueue, new ThreadFactoryBuilder().setNameFormat("DbFlushManagerThread-%d").build());
Future<Boolean> lastFlush = Futures.immediateFuture(false);
public DbFlushManager(SystemProperties config, Set<DbSource> dbSources, AbstractCachedSource<byte[], byte[]> stateDbCache) {
this.config = config;
this.dbSources = dbSources;
sizeThreshold = config.getConfig().getInt("cache.flush.writeCacheSize") * 1024 * 1024;
commitsCountThreshold = config.getConfig().getInt("cache.flush.blocks");
flushAfterSyncDone = config.getConfig().getBoolean("cache.flush.shortSyncFlush");
this.stateDbCache = stateDbCache;
}
@Autowired
public void setEthereumListener(CompositeEthereumListener listener) {
if (!flushAfterSyncDone) return;
listener.addListener(new EthereumListenerAdapter() {
@Override
public void onSyncDone(SyncState state) {
if (state == SyncState.COMPLETE) {
logger.info("DbFlushManager: long sync done, flushing each block now");
syncDone = true;
}
}
});
}
public void setSizeThreshold(long sizeThreshold) {
this.sizeThreshold = sizeThreshold;
}
public void addCache(AbstractCachedSource<byte[], ?> cache) {
writeCaches.add(cache);
}
public void addSource(Source<byte[], ?> src) {
sources.add(src);
}
public long getCacheSize() {
long ret = 0;
for (AbstractCachedSource<byte[], ?> writeCache : writeCaches) {
ret += writeCache.estimateCacheSize();
}
return ret;
}
public synchronized void commit(Runnable atomicUpdate) {
atomicUpdate.run();
commit();
}
public synchronized void commit() {
long cacheSize = getCacheSize();
if (sizeThreshold >= 0 && cacheSize >= sizeThreshold) {
logger.info("DbFlushManager: flushing db due to write cache size (" + cacheSize + ") reached threshold (" + sizeThreshold + ")");
flush();
} else if (commitsCountThreshold > 0 && commitCount >= commitsCountThreshold) {
logger.info("DbFlushManager: flushing db due to commits (" + commitCount + ") reached threshold (" + commitsCountThreshold + ")");
flush();
commitCount = 0;
} else if (flushAfterSyncDone && syncDone) {
logger.debug("DbFlushManager: flushing db due to short sync");
flush();
}
commitCount++;
}
public synchronized void flushSync() {
try {
flush().get();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public synchronized Future<Boolean> flush() {
if (!lastFlush.isDone()) {
logger.info("Waiting for previous flush to complete...");
try {
lastFlush.get();
} catch (Exception e) {
logger.error("Error during last flush", e);
}
}
logger.debug("Flipping async storages");
for (AbstractCachedSource<byte[], ?> writeCache : writeCaches) {
try {
if (writeCache instanceof AsyncFlushable) {
((AsyncFlushable) writeCache).flipStorage();
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
logger.debug("Submitting flush task");
return lastFlush = flushThread.submit(() -> {
boolean ret = false;
long s = System.nanoTime();
logger.info("Flush started");
sources.forEach(Source::flush);
for (AbstractCachedSource<byte[], ?> writeCache : writeCaches) {
if (writeCache instanceof AsyncFlushable) {
try {
ret |= ((AsyncFlushable) writeCache).flushAsync().get();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} else {
ret |= writeCache.flush();
}
}
if (stateDbCache != null) {
logger.debug("Flushing to DB");
stateDbCache.flush();
}
logger.info("Flush completed in " + (System.nanoTime() - s) / 1000000 + " ms");
return ret;
});
}
/**
* Flushes all caches and closes all databases
*/
public synchronized void close() {
logger.info("Flushing DBs...");
flushSync();
logger.info("Flush done.");
for (DbSource dbSource : dbSources) {
logger.info("Closing DB: {}", dbSource.getName());
try {
dbSource.close();
} catch (Exception ex) {
logger.error(String.format("Caught error while closing DB: %s", dbSource.getName()), ex);
}
}
}
}
| 7,352
| 35.221675
| 142
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/IndexedBlockStore.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import org.ethereum.core.Block;
import org.ethereum.core.BlockHeader;
import org.ethereum.datasource.DataSourceArray;
import org.ethereum.datasource.ObjectDataSource;
import org.ethereum.datasource.Serializer;
import org.ethereum.datasource.Source;
import org.ethereum.util.ByteUtil;
import org.ethereum.util.FastByteComparisons;
import org.ethereum.util.RLP;
import org.ethereum.util.RLPElement;
import org.ethereum.util.RLPList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.List;
import static java.math.BigInteger.ZERO;
import static org.ethereum.crypto.HashUtil.shortHash;
import static org.spongycastle.util.Arrays.areEqual;
public class IndexedBlockStore extends AbstractBlockstore{
private static final Logger logger = LoggerFactory.getLogger("general");
Source<byte[], byte[]> indexDS;
DataSourceArray<List<BlockInfo>> index;
Source<byte[], byte[]> blocksDS;
ObjectDataSource<Block> blocks;
public IndexedBlockStore(){
}
public void init(Source<byte[], byte[]> index, Source<byte[], byte[]> blocks) {
indexDS = index;
this.index = new DataSourceArray<>(
new ObjectDataSource<>(index, BLOCK_INFO_SERIALIZER, 512));
this.blocksDS = blocks;
this.blocks = new ObjectDataSource<>(blocks, new Serializer<Block, byte[]>() {
@Override
public byte[] serialize(Block block) {
return block.getEncoded();
}
@Override
public Block deserialize(byte[] bytes) {
return bytes == null ? null : new Block(bytes);
}
}, 256);
}
public synchronized Block getBestBlock(){
Long maxLevel = getMaxNumber();
if (maxLevel < 0) return null;
Block bestBlock = getChainBlockByNumber(maxLevel);
if (bestBlock != null) return bestBlock;
// That scenario can happen
// if there is a fork branch that is
// higher than the main branch but has
// less total difficulty (TD) than the main branch
while (bestBlock == null){
--maxLevel;
bestBlock = getChainBlockByNumber(maxLevel);
}
return bestBlock;
}
public synchronized byte[] getBlockHashByNumber(long blockNumber){
Block chainBlock = getChainBlockByNumber(blockNumber);
return chainBlock == null ? null : chainBlock.getHash(); // FIXME: can be improved by accessing the hash directly in the index
}
@Override
public synchronized void flush(){
blocks.flush();
index.flush();
blocksDS.flush();
indexDS.flush();
}
@Override
public synchronized void saveBlock(Block block, BigInteger totalDifficulty, boolean mainChain){
addInternalBlock(block, totalDifficulty, mainChain);
}
private void addInternalBlock(Block block, BigInteger totalDifficulty, boolean mainChain){
List<BlockInfo> blockInfos = block.getNumber() >= index.size() ? null : index.get((int) block.getNumber());
blockInfos = blockInfos == null ? new ArrayList<BlockInfo>() : blockInfos;
BlockInfo blockInfo = new BlockInfo();
blockInfo.setTotalDifficulty(totalDifficulty);
blockInfo.setHash(block.getHash());
blockInfo.setMainChain(mainChain); // FIXME: maybe here I should force reset main chain for all uncles on that level
putBlockInfo(blockInfos, blockInfo);
index.set((int) block.getNumber(), blockInfos);
blocks.put(block.getHash(), block);
}
private void putBlockInfo(List<BlockInfo> blockInfos, BlockInfo blockInfo) {
for (int i = 0; i < blockInfos.size(); i++) {
BlockInfo curBlockInfo = blockInfos.get(i);
if (FastByteComparisons.equal(curBlockInfo.getHash(), blockInfo.getHash())) {
blockInfos.set(i, blockInfo);
return;
}
}
blockInfos.add(blockInfo);
}
public synchronized List<Block> getBlocksByNumber(long number){
List<Block> result = new ArrayList<>();
if (number >= index.size()) {
return result;
}
List<BlockInfo> blockInfos = index.get((int) number);
if (blockInfos == null) {
return result;
}
for (BlockInfo blockInfo : blockInfos){
byte[] hash = blockInfo.getHash();
Block block = blocks.get(hash);
result.add(block);
}
return result;
}
@Override
public synchronized Block getChainBlockByNumber(long number){
if (number >= index.size()){
return null;
}
List<BlockInfo> blockInfos = index.get((int) number);
if (blockInfos == null) {
return null;
}
for (BlockInfo blockInfo : blockInfos){
if (blockInfo.isMainChain()){
byte[] hash = blockInfo.getHash();
return blocks.get(hash);
}
}
return null;
}
@Override
public synchronized Block getBlockByHash(byte[] hash) {
return blocks.get(hash);
}
@Override
public synchronized boolean isBlockExist(byte[] hash) {
return blocks.get(hash) != null;
}
@Override
public synchronized BigInteger getTotalDifficultyForHash(byte[] hash){
Block block = this.getBlockByHash(hash);
if (block == null) return ZERO;
Long level = block.getNumber();
List<BlockInfo> blockInfos = index.get(level.intValue());
for (BlockInfo blockInfo : blockInfos)
if (areEqual(blockInfo.getHash(), hash)) {
return blockInfo.totalDifficulty;
}
return ZERO;
}
@Override
public synchronized BigInteger getTotalDifficulty(){
long maxNumber = getMaxNumber();
List<BlockInfo> blockInfos = index.get((int) maxNumber);
for (BlockInfo blockInfo : blockInfos){
if (blockInfo.isMainChain()){
return blockInfo.getTotalDifficulty();
}
}
while (true){
--maxNumber;
List<BlockInfo> infos = getBlockInfoForLevel(maxNumber);
for (BlockInfo blockInfo : infos) {
if (blockInfo.isMainChain()) {
return blockInfo.getTotalDifficulty();
}
}
}
}
public synchronized void updateTotDifficulties(long index) {
List<BlockInfo> level = getBlockInfoForLevel(index);
for (BlockInfo blockInfo : level) {
Block block = getBlockByHash(blockInfo.getHash());
List<BlockInfo> parentInfos = getBlockInfoForLevel(index - 1);
BlockInfo parentInfo = getBlockInfoForHash(parentInfos, block.getParentHash());
blockInfo.setTotalDifficulty(parentInfo.getTotalDifficulty().add(block.getDifficultyBI()));
}
this.index.set((int) index, level);
}
@Override
public synchronized long getMaxNumber(){
Long bestIndex = 0L;
if (index.size() > 0){
bestIndex = (long) index.size();
}
return bestIndex - 1L;
}
@Override
public synchronized List<byte[]> getListHashesEndWith(byte[] hash, long number){
List<Block> blocks = getListBlocksEndWith(hash, number);
List<byte[]> hashes = new ArrayList<>(blocks.size());
for (Block b : blocks) {
hashes.add(b.getHash());
}
return hashes;
}
@Override
public synchronized List<BlockHeader> getListHeadersEndWith(byte[] hash, long qty) {
List<Block> blocks = getListBlocksEndWith(hash, qty);
List<BlockHeader> headers = new ArrayList<>(blocks.size());
for (Block b : blocks) {
headers.add(b.getHeader());
}
return headers;
}
@Override
public synchronized List<Block> getListBlocksEndWith(byte[] hash, long qty) {
return getListBlocksEndWithInner(hash, qty);
}
private List<Block> getListBlocksEndWithInner(byte[] hash, long qty) {
Block block = this.blocks.get(hash);
if (block == null) return new ArrayList<>();
List<Block> blocks = new ArrayList<>((int) qty);
for (int i = 0; i < qty; ++i) {
blocks.add(block);
block = this.blocks.get(block.getParentHash());
if (block == null) break;
}
return blocks;
}
@Override
public synchronized void reBranch(Block forkBlock){
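// Re-marks index entries so that the fork line becomes the main chain and the old best line is demoted,
// walking both lines down to their common ancestor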
Block bestBlock = getBestBlock();
long maxLevel = Math.max(bestBlock.getNumber(), forkBlock.getNumber());
// 1. First ensure that both lines are on the same level
long currentLevel = maxLevel;
Block forkLine = forkBlock;
if (forkBlock.getNumber() > bestBlock.getNumber()){
while(currentLevel > bestBlock.getNumber()){
List<BlockInfo> blocks = getBlockInfoForLevel(currentLevel);
BlockInfo blockInfo = getBlockInfoForHash(blocks, forkLine.getHash());
if (blockInfo != null) {
blockInfo.setMainChain(true);
setBlockInfoForLevel(currentLevel, blocks);
}
forkLine = getBlockByHash(forkLine.getParentHash());
--currentLevel;
}
}
Block bestLine = bestBlock;
if (bestBlock.getNumber() > forkBlock.getNumber()){
while(currentLevel > forkBlock.getNumber()){
List<BlockInfo> blocks = getBlockInfoForLevel(currentLevel);
BlockInfo blockInfo = getBlockInfoForHash(blocks, bestLine.getHash());
if (blockInfo != null) {
blockInfo.setMainChain(false);
setBlockInfoForLevel(currentLevel, blocks);
}
bestLine = getBlockByHash(bestLine.getParentHash());
--currentLevel;
}
}
// 2. Loop back on each level until common block
while( !bestLine.isEqual(forkLine) ) {
List<BlockInfo> levelBlocks = getBlockInfoForLevel(currentLevel);
BlockInfo bestInfo = getBlockInfoForHash(levelBlocks, bestLine.getHash());
if (bestInfo != null) {
bestInfo.setMainChain(false);
setBlockInfoForLevel(currentLevel, levelBlocks);
}
BlockInfo forkInfo = getBlockInfoForHash(levelBlocks, forkLine.getHash());
if (forkInfo != null) {
forkInfo.setMainChain(true);
setBlockInfoForLevel(currentLevel, levelBlocks);
}
bestLine = getBlockByHash(bestLine.getParentHash());
forkLine = getBlockByHash(forkLine.getParentHash());
--currentLevel;
}
}
public synchronized List<byte[]> getListHashesStartWith(long number, long maxBlocks){
List<byte[]> result = new ArrayList<>();
int i;
for ( i = 0; i < maxBlocks; ++i){
List<BlockInfo> blockInfos = index.get((int) number);
if (blockInfos == null) break;
for (BlockInfo blockInfo : blockInfos)
if (blockInfo.isMainChain()){
result.add(blockInfo.getHash());
break;
}
++number;
}
maxBlocks -= i;
return result;
}
public static class BlockInfo implements Serializable {
byte[] hash;
BigInteger totalDifficulty;
boolean mainChain;
public byte[] getHash() {
return hash;
}
public void setHash(byte[] hash) {
this.hash = hash;
}
public BigInteger getTotalDifficulty() {
return totalDifficulty;
}
public void setTotalDifficulty(BigInteger totalDifficulty) {
this.totalDifficulty = totalDifficulty;
}
public boolean isMainChain() {
return mainChain;
}
public void setMainChain(boolean mainChain) {
this.mainChain = mainChain;
}
}
public static final Serializer<List<BlockInfo>, byte[]> BLOCK_INFO_SERIALIZER = new Serializer<List<BlockInfo>, byte[]>(){
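// Each level is encoded as an RLP list of per-block [hash, totalDifficulty, isMainChain] lists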
@Override
public byte[] serialize(List<BlockInfo> value) {
List<byte[]> rlpBlockInfoList = new ArrayList<>();
for (BlockInfo blockInfo : value) {
byte[] hash = RLP.encodeElement(blockInfo.getHash());
// Encoding works correctly only with positive BigIntegers
if (blockInfo.getTotalDifficulty() == null || blockInfo.getTotalDifficulty().compareTo(BigInteger.ZERO) < 0) {
throw new RuntimeException("BlockInfo totalDifficulty should be positive BigInteger");
}
byte[] totalDiff = RLP.encodeBigInteger(blockInfo.getTotalDifficulty());
byte[] isMainChain = RLP.encodeInt(blockInfo.isMainChain() ? 1 : 0);
rlpBlockInfoList.add(RLP.encodeList(hash, totalDiff, isMainChain));
}
byte[][] elements = rlpBlockInfoList.toArray(new byte[rlpBlockInfoList.size()][]);
return RLP.encodeList(elements);
}
@Override
public List<BlockInfo> deserialize(byte[] bytes) {
if (bytes == null) return null;
List<BlockInfo> blockInfoList = new ArrayList<>();
RLPList list = (RLPList) RLP.decode2(bytes).get(0);
for (RLPElement element : list) {
RLPList rlpBlock = (RLPList) element;
BlockInfo blockInfo = new BlockInfo();
byte[] rlpHash = rlpBlock.get(0).getRLPData();
blockInfo.setHash(rlpHash == null ? new byte[0] : rlpHash);
byte[] rlpTotalDiff = rlpBlock.get(1).getRLPData();
blockInfo.setTotalDifficulty(rlpTotalDiff == null ? BigInteger.ZERO : ByteUtil.bytesToBigInteger(rlpTotalDiff));
blockInfo.setMainChain(ByteUtil.byteArrayToInt(rlpBlock.get(2).getRLPData()) == 1);
blockInfoList.add(blockInfo);
}
return blockInfoList;
}
};
public synchronized void printChain(){
Long number = getMaxNumber();
for (int i = 0; i < number; ++i){
List<BlockInfo> levelInfos = index.get(i);
if (levelInfos != null) {
System.out.print(i);
for (BlockInfo blockInfo : levelInfos){
if (blockInfo.isMainChain())
System.out.print(" [" + shortHash(blockInfo.getHash()) + "] ");
else
System.out.print(" " + shortHash(blockInfo.getHash()) + " ");
}
System.out.println();
}
}
}
private synchronized List<BlockInfo> getBlockInfoForLevel(long level){
return index.get((int) level);
}
private synchronized void setBlockInfoForLevel(long level, List<BlockInfo> infos){
index.set((int) level, infos);
}
private static BlockInfo getBlockInfoForHash(List<BlockInfo> blocks, byte[] hash){
for (BlockInfo blockInfo : blocks)
if (areEqual(hash, blockInfo.getHash())) return blockInfo;
return null;
}
@Override
public synchronized void load() {
}
@Override
public synchronized void close() {
// logger.info("Closing IndexedBlockStore...");
// try {
// indexDS.close();
// } catch (Exception e) {
// logger.warn("Problems closing indexDS", e);
// }
// try {
// blocksDS.close();
// } catch (Exception e) {
// logger.warn("Problems closing blocksDS", e);
// }
}
}
| 16,851
| 30.796226
| 134
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/RepositoryRoot.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import org.ethereum.core.AccountState;
import org.ethereum.core.Repository;
import org.ethereum.datasource.*;
import org.ethereum.trie.*;
import org.ethereum.vm.DataWord;
/**
* Created by Anton Nashatyrev on 07.10.2016.
*/
public class RepositoryRoot extends RepositoryImpl {
private static class StorageCache extends ReadWriteCache<DataWord, DataWord> {
Trie<byte[]> trie;
public StorageCache(Trie<byte[]> trie) {
super(new SourceCodec<>(trie, Serializers.StorageKeySerializer, Serializers.StorageValueSerializer), WriteCache.CacheType.SIMPLE);
this.trie = trie;
}
}
private class MultiStorageCache extends MultiCache<StorageCache> {
public MultiStorageCache() {
super(null);
}
@Override
protected synchronized StorageCache create(byte[] key, StorageCache srcCache) {
AccountState accountState = accountStateCache.get(key);
Serializer<byte[], byte[]> keyCompositor = new NodeKeyCompositor(key);
Source<byte[], byte[]> composingSrc = new SourceCodec.KeyOnly<>(trieCache, keyCompositor);
TrieImpl storageTrie = createTrie(composingSrc, accountState == null ? null : accountState.getStateRoot());
return new StorageCache(storageTrie);
}
@Override
protected synchronized boolean flushChild(byte[] key, StorageCache childCache) {
if (super.flushChild(key, childCache)) {
if (childCache != null) {
AccountState storageOwnerAcct = accountStateCache.get(key);
// need to update account storage root
childCache.trie.flush();
byte[] rootHash = childCache.trie.getRootHash();
accountStateCache.put(key, storageOwnerAcct.withStateRoot(rootHash));
return true;
} else {
// account was deleted
return true;
}
} else {
// no storage changes
return false;
}
}
}
private Source<byte[], byte[]> stateDS;
private CachedSource.BytesKey<byte[]> trieCache;
private Trie<byte[]> stateTrie;
public RepositoryRoot(Source<byte[], byte[]> stateDS) {
this(stateDS, null);
}
/**
* Building the following structure for snapshot Repository:
*
* stateDS --> trieCache --> stateTrie --> accountStateCodec --> accountStateCache
* \ \
* \ \-->>> storageKeyCompositor --> contractStorageTrie --> storageCodec --> storageCache
* \--> codeCache
*
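* A minimal, illustrative usage ({@code stateDS} and {@code addr} are placeholders:
* any byte[]-keyed Source and any account address):
* <pre>{@code
* RepositoryRoot repo = new RepositoryRoot(stateDS, null); // null root = start from an empty state
* Repository track = repo.startTracking();
* track.addBalance(addr, BigInteger.TEN);
* track.commit(); // flush the child caches into this repository
* repo.commit(); // flush the trie and its cache into stateDS
* Repository snapshot = repo.getSnapshotTo(repo.getRoot());
* }</pre>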
*
* @param stateDS
* @param root
*/
public RepositoryRoot(final Source<byte[], byte[]> stateDS, byte[] root) {
this.stateDS = stateDS;
trieCache = new WriteCache.BytesKey<>(stateDS, WriteCache.CacheType.COUNTING);
stateTrie = new SecureTrie(trieCache, root);
SourceCodec.BytesKey<AccountState, byte[]> accountStateCodec = new SourceCodec.BytesKey<>(stateTrie, Serializers.AccountStateSerializer);
final ReadWriteCache.BytesKey<AccountState> accountStateCache = new ReadWriteCache.BytesKey<>(accountStateCodec, WriteCache.CacheType.SIMPLE);
final MultiCache<StorageCache> storageCache = new MultiStorageCache();
// counting cache: there can be 2 contracts with the same code, and 1 of them can self-destruct
Source<byte[], byte[]> codeCache = new WriteCache.BytesKey<>(stateDS, WriteCache.CacheType.COUNTING);
init(accountStateCache, codeCache, storageCache);
}
@Override
public synchronized void commit() {
super.commit();
stateTrie.flush();
trieCache.flush();
}
@Override
public synchronized byte[] getRoot() {
storageCache.flush();
accountStateCache.flush();
return stateTrie.getRootHash();
}
@Override
public synchronized void flush() {
commit();
}
@Override
public Repository getSnapshotTo(byte[] root) {
return new RepositoryRoot(stateDS, root);
}
@Override
public Repository clone() {
return getSnapshotTo(getRoot());
}
@Override
public synchronized String dumpStateTrie() {
return ((TrieImpl) stateTrie).dumpTrie();
}
@Override
public synchronized void syncToRoot(byte[] root) {
stateTrie.setRoot(root);
}
protected TrieImpl createTrie(Source<byte[], byte[]> trieCache, byte[] root) {
return new SecureTrie(trieCache, root);
}
}
| 5,435
| 33.624204
| 150
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/RepositoryImpl.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db;
import org.ethereum.config.SystemProperties;
import org.ethereum.core.AccountState;
import org.ethereum.core.Block;
import org.ethereum.core.Repository;
import org.ethereum.crypto.HashUtil;
import org.ethereum.datasource.*;
import org.ethereum.util.ByteUtil;
import org.ethereum.util.FastByteComparisons;
import org.ethereum.vm.DataWord;
import org.springframework.beans.factory.annotation.Autowired;
import javax.annotation.Nullable;
import java.math.BigInteger;
import java.util.*;
/**
* Created by Anton Nashatyrev on 07.10.2016.
*/
public class RepositoryImpl implements Repository, org.ethereum.facade.Repository {
protected RepositoryImpl parent;
protected Source<byte[], AccountState> accountStateCache;
protected Source<byte[], byte[]> codeCache;
protected MultiCache<? extends CachedSource<DataWord, DataWord>> storageCache;
@Autowired
protected SystemProperties config = SystemProperties.getDefault();
protected RepositoryImpl() {
}
public RepositoryImpl(Source<byte[], AccountState> accountStateCache, Source<byte[], byte[]> codeCache,
MultiCache<? extends CachedSource<DataWord, DataWord>> storageCache) {
init(accountStateCache, codeCache, storageCache);
}
protected void init(Source<byte[], AccountState> accountStateCache, Source<byte[], byte[]> codeCache,
MultiCache<? extends CachedSource<DataWord, DataWord>> storageCache) {
this.accountStateCache = accountStateCache;
this.codeCache = codeCache;
this.storageCache = storageCache;
}
@Override
public synchronized AccountState createAccount(byte[] addr) {
AccountState state = new AccountState(config.getBlockchainConfig().getCommonConstants().getInitialNonce(),
BigInteger.ZERO);
accountStateCache.put(addr, state);
return state;
}
@Override
public synchronized boolean isExist(byte[] addr) {
return getAccountState(addr) != null;
}
@Override
public synchronized AccountState getAccountState(byte[] addr) {
return accountStateCache.get(addr);
}
synchronized AccountState getOrCreateAccountState(byte[] addr) {
AccountState ret = accountStateCache.get(addr);
if (ret == null) {
ret = createAccount(addr);
}
return ret;
}
@Override
public synchronized void delete(byte[] addr) {
accountStateCache.delete(addr);
storageCache.delete(addr);
}
@Override
public synchronized BigInteger increaseNonce(byte[] addr) {
AccountState accountState = getOrCreateAccountState(addr);
accountStateCache.put(addr, accountState.withIncrementedNonce());
return accountState.getNonce();
}
@Override
public synchronized BigInteger setNonce(byte[] addr, BigInteger nonce) {
AccountState accountState = getOrCreateAccountState(addr);
accountStateCache.put(addr, accountState.withNonce(nonce));
return accountState.getNonce();
}
@Override
public synchronized BigInteger getNonce(byte[] addr) {
AccountState accountState = getAccountState(addr);
return accountState == null ? config.getBlockchainConfig().getCommonConstants().getInitialNonce() :
accountState.getNonce();
}
@Override
public synchronized ContractDetails getContractDetails(byte[] addr) {
return new ContractDetailsImpl(addr);
}
@Override
public synchronized boolean hasContractDetails(byte[] addr) {
return getContractDetails(addr) != null;
}
@Override
public synchronized void saveCode(byte[] addr, byte[] code) {
byte[] codeHash = HashUtil.sha3(code);
codeCache.put(codeKey(codeHash, addr), code);
AccountState accountState = getOrCreateAccountState(addr);
accountStateCache.put(addr, accountState.withCodeHash(codeHash));
}
@Override
public synchronized byte[] getCode(byte[] addr) {
byte[] codeHash = getCodeHash(addr);
return codeHash == null || FastByteComparisons.equal(codeHash, HashUtil.EMPTY_DATA_HASH) ?
ByteUtil.EMPTY_BYTE_ARRAY : codeCache.get(codeKey(codeHash, addr));
}
// composing a key as there can be several contracts with the same code
private byte[] codeKey(byte[] codeHash, byte[] addr) {
return NodeKeyCompositor.compose(codeHash, addr);
}
@Override
public byte[] getCodeHash(byte[] addr) {
AccountState accountState = getAccountState(addr);
return accountState != null ? accountState.getCodeHash() : null;
}
@Override
public synchronized void addStorageRow(byte[] addr, DataWord key, DataWord value) {
getOrCreateAccountState(addr);
Source<DataWord, DataWord> contractStorage = storageCache.get(addr);
contractStorage.put(key, value.isZero() ? null : value);
}
@Override
public synchronized DataWord getStorageValue(byte[] addr, DataWord key) {
AccountState accountState = getAccountState(addr);
return accountState == null ? null : storageCache.get(addr).get(key);
}
@Override
public synchronized BigInteger getBalance(byte[] addr) {
AccountState accountState = getAccountState(addr);
return accountState == null ? BigInteger.ZERO : accountState.getBalance();
}
@Override
public synchronized BigInteger addBalance(byte[] addr, BigInteger value) {
AccountState accountState = getOrCreateAccountState(addr);
accountStateCache.put(addr, accountState.withBalanceIncrement(value));
return accountState.getBalance();
}
@Override
public synchronized RepositoryImpl startTracking() {
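// creates a child repository that buffers all changes in its own write caches;
// commit() flushes them into this repository, rollback() simply discards them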
Source<byte[], AccountState> trackAccountStateCache = new WriteCache.BytesKey<>(accountStateCache,
WriteCache.CacheType.SIMPLE);
Source<byte[], byte[]> trackCodeCache = new WriteCache.BytesKey<>(codeCache, WriteCache.CacheType.SIMPLE);
MultiCache<CachedSource<DataWord, DataWord>> trackStorageCache = new MultiCache(storageCache) {
@Override
protected CachedSource create(byte[] key, CachedSource srcCache) {
return new WriteCache<>(srcCache, WriteCache.CacheType.SIMPLE);
}
};
RepositoryImpl ret = new RepositoryImpl(trackAccountStateCache, trackCodeCache, trackStorageCache);
ret.parent = this;
return ret;
}
@Override
public synchronized Repository getSnapshotTo(byte[] root) {
return parent.getSnapshotTo(root);
}
@Override
public synchronized void commit() {
Repository parentSync = parent == null ? this : parent;
// need to synchronize on parent since, between flushes of the different caches,
// the parent repo would not be in a consistent state;
// when there is no parent just take this instance as a mock
synchronized (parentSync) {
storageCache.flush();
codeCache.flush();
accountStateCache.flush();
}
}
@Override
public synchronized void rollback() {
// nothing to do, will be GCed
}
@Override
public byte[] getRoot() {
throw new RuntimeException("Not supported");
}
public synchronized String getTrieDump() {
return dumpStateTrie();
}
public String dumpStateTrie() {
throw new RuntimeException("Not supported");
}
/**
* As a tests-only implementation, this hack is sufficient
*/
@Override
public Repository clone() {
return parent.startTracking();
}
class ContractDetailsImpl implements ContractDetails {
private byte[] address;
public ContractDetailsImpl(byte[] address) {
this.address = address;
}
@Override
public void put(DataWord key, DataWord value) {
RepositoryImpl.this.addStorageRow(address, key, value);
}
@Override
public DataWord get(DataWord key) {
return RepositoryImpl.this.getStorageValue(address, key);
}
@Override
public byte[] getCode() {
return RepositoryImpl.this.getCode(address);
}
@Override
public byte[] getCode(byte[] codeHash) {
throw new RuntimeException("Not supported");
}
@Override
public void setCode(byte[] code) {
RepositoryImpl.this.saveCode(address, code);
}
@Override
public byte[] getStorageHash() {
throw new RuntimeException("Not supported");
}
@Override
public void decode(byte[] rlpCode) {
throw new RuntimeException("Not supported");
}
@Override
public void setDirty(boolean dirty) {
throw new RuntimeException("Not supported");
}
@Override
public void setDeleted(boolean deleted) {
RepositoryImpl.this.delete(address);
}
@Override
public boolean isDirty() {
throw new RuntimeException("Not supported");
}
@Override
public boolean isDeleted() {
throw new RuntimeException("Not supported");
}
@Override
public byte[] getEncoded() {
throw new RuntimeException("Not supported");
}
@Override
public int getStorageSize() {
throw new RuntimeException("Not supported");
}
@Override
public Set<DataWord> getStorageKeys() {
throw new RuntimeException("Not supported");
}
@Override
public void deleteStorage() {
// do nothing as getStorageKeys() is not supported
}
@Override
public Map<DataWord, DataWord> getStorage(@Nullable Collection<DataWord> keys) {
throw new RuntimeException("Not supported");
}
@Override
public Map<DataWord, DataWord> getStorage() {
throw new RuntimeException("Not supported");
}
@Override
public void setStorage(List<DataWord> storageKeys, List<DataWord> storageValues) {
throw new RuntimeException("Not supported");
}
@Override
public void setStorage(Map<DataWord, DataWord> storage) {
throw new RuntimeException("Not supported");
}
@Override
public byte[] getAddress() {
return address;
}
@Override
public void setAddress(byte[] address) {
throw new RuntimeException("Not supported");
}
@Override
public ContractDetails clone() {
throw new RuntimeException("Not supported");
}
@Override
public void syncStorage() {
throw new RuntimeException("Not supported");
}
@Override
public ContractDetails getSnapshotTo(byte[] hash) {
throw new RuntimeException("Not supported");
}
}
@Override
public Set<byte[]> getAccountsKeys() {
throw new RuntimeException("Not supported");
}
@Override
public void dumpState(Block block, long gasUsed, int txNumber, byte[] txHash) {
throw new RuntimeException("Not supported");
}
@Override
public void flush() {
throw new RuntimeException("Not supported");
}
@Override
public void flushNoReconnect() {
throw new RuntimeException("Not supported");
}
@Override
public void syncToRoot(byte[] root) {
throw new RuntimeException("Not supported");
}
@Override
public boolean isClosed() {
throw new RuntimeException("Not supported");
}
@Override
public void close() {
}
@Override
public void reset() {
throw new RuntimeException("Not supported");
}
@Override
public int getStorageSize(byte[] addr) {
throw new RuntimeException("Not supported");
}
@Override
public Set<DataWord> getStorageKeys(byte[] addr) {
throw new RuntimeException("Not supported");
}
@Override
public Map<DataWord, DataWord> getStorage(byte[] addr, @Nullable Collection<DataWord> keys) {
throw new RuntimeException("Not supported");
}
@Override
public void updateBatch(HashMap<ByteArrayWrapper, AccountState> accountStates, HashMap<ByteArrayWrapper, ContractDetails> contractDetailes) {
for (Map.Entry<ByteArrayWrapper, AccountState> entry : accountStates.entrySet()) {
accountStateCache.put(entry.getKey().getData(), entry.getValue());
}
for (Map.Entry<ByteArrayWrapper, ContractDetails> entry : contractDetailes.entrySet()) {
ContractDetails details = getContractDetails(entry.getKey().getData());
for (DataWord key : entry.getValue().getStorageKeys()) {
details.put(key, entry.getValue().get(key));
}
byte[] code = entry.getValue().getCode();
if (code != null && code.length > 0) {
details.setCode(code);
}
}
}
@Override
public void loadAccount(byte[] addr, HashMap<ByteArrayWrapper, AccountState> cacheAccounts, HashMap<ByteArrayWrapper, ContractDetails> cacheDetails) {
throw new RuntimeException("Not supported");
}
}
| 14,252
| 30.814732
| 154
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/index/ArrayListIndex.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db.index;
import java.util.*;
/**
* @author Mikhail Kalinin
* @since 28.01.2016
*/
public class ArrayListIndex implements Index {
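// Keeps the numbers in an ArrayList sorted in ascending order: every mutation re-sorts the list,
// contains() relies on binary search, peek()/poll() address the smallest element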
private List<Long> index;
public ArrayListIndex(Collection<Long> numbers) {
index = new ArrayList<>(numbers);
sort();
}
@Override
public synchronized void addAll(Collection<Long> nums) {
index.addAll(nums);
sort();
}
@Override
public synchronized void add(Long num) {
index.add(num);
sort();
}
@Override
public synchronized Long peek() {
return index.get(0);
}
@Override
public synchronized Long poll() {
Long num = index.get(0);
index.remove(0);
return num;
}
@Override
public synchronized boolean contains(Long num) {
return Collections.binarySearch(index, num) >= 0;
}
@Override
public synchronized boolean isEmpty() {
return index.isEmpty();
}
@Override
public synchronized int size() {
return index.size();
}
@Override
public synchronized void clear() {
index.clear();
}
private void sort() {
Collections.sort(index);
}
@Override
public synchronized Iterator<Long> iterator() {
return new ArrayList<>(index).iterator();
}
public synchronized void removeAll(Collection<Long> indexes) {
index.removeAll(indexes);
}
@Override
public synchronized Long peekLast() {
if (index.isEmpty()) return null;
return index.get(index.size() - 1);
}
@Override
public synchronized void remove(Long num) {
index.remove(num);
}
}
| 2,496
| 23.242718
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/index/Index.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db.index;
import java.util.Collection;
/**
* @author Mikhail Kalinin
* @since 28.01.2016
*/
public interface Index extends Iterable<Long> {
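// Ordered index of longs (see ArrayListIndex): peek()/poll() address the smallest element, peekLast() the largest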
void addAll(Collection<Long> nums);
void add(Long num);
Long peek();
Long poll();
boolean contains(Long num);
boolean isEmpty();
int size();
void clear();
void removeAll(Collection<Long> indexes);
Long peekLast();
void remove(Long num);
}
| 1,250
| 24.02
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/prune/Chain.java
|
package org.ethereum.db.prune;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
* A single chain in a blockchain {@link Segment}.
* It can represent either a fork or the main chain.
*
* <p>
* A chain consists of a certain number of {@link ChainItem}s
* connected to each other via parent-child links
*
* @author Mikhail Kalinin
* @since 24.01.2018
*/
public class Chain {
static final Chain NULL = new Chain() {
@Override
boolean connect(ChainItem item) {
throw new RuntimeException("Not supported for null chain");
}
};
List<ChainItem> items = new ArrayList<>();
public List<byte[]> getHashes() {
return items.stream().map(item -> item.hash).collect(Collectors.toList());
}
private Chain() {
}
Chain(ChainItem item) {
this.items.add(item);
}
ChainItem top() {
return items.size() > 0 ? items.get(items.size() - 1) : null;
}
long topNumber() {
return top() != null ? top().number : 0;
}
long startNumber() {
return items.isEmpty() ? 0 : items.get(0).number;
}
boolean isHigher(Chain other) {
return other.topNumber() < this.topNumber();
}
boolean contains(ChainItem other) {
for (ChainItem item : items) {
if (item.equals(other))
return true;
}
return false;
}
boolean connect(ChainItem item) {
if (top().isParentOf(item)) {
items.add(item);
return true;
}
return false;
}
static Chain fromItems(ChainItem ... items) {
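// items are expected in parent-to-child order; an item that does not connect to the current top is silently dropped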
if (items.length == 0) {
return NULL;
}
Chain chain = null;
for (ChainItem item : items) {
if (chain == null) {
chain = new Chain(item);
} else {
chain.connect(item);
}
}
return chain;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Chain chain = (Chain) o;
return !(items != null ? !items.equals(chain.items) : chain.items != null);
}
@Override
public String toString() {
if (items.isEmpty()) {
return "(empty)";
}
return "[" + items.get(0) +
" ~> " + items.get(items.size() - 1) +
']';
}
}
| 2,495
| 21.486486
| 83
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/prune/Pruner.java
|
package org.ethereum.db.prune;
import org.ethereum.crypto.HashUtil;
import org.ethereum.datasource.CountingQuotientFilter;
import org.ethereum.datasource.JournalSource;
import org.ethereum.datasource.QuotientFilter;
import org.ethereum.datasource.Source;
import org.ethereum.util.ByteArraySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import static org.ethereum.util.ByteUtil.toHexString;
/**
* This class is responsible for state pruning.
*
* <p>
* Using the information supplied by {@link #journal} (check {@link JournalSource} for details)
* it removes unused nodes from the {@link #storage}.
* There are two types of unused nodes:
* nodes no longer referenced by the trie after N blocks from the current one, and
* nodes which were inserted in forks that were eventually not accepted
*
* <p>
* Each prune session uses a certain chain {@link Segment}
* which is going to be 'pruned'. To be confident that live nodes won't be removed,
* the pruner must be initialized with the top of the chain, see {@link #init(List, int)}.
* After that it must be fed with each newly processed block, see {@link #feed(JournalSource.Update)}.
* A {@link QuotientFilter} ({@link CountingQuotientFilter} implementation in particular) instance is used to
* efficiently keep upcoming inserts in memory and protect newly inserted nodes from being deleted during
* a prune session. The filter is constantly recycled in the {@link #prune(Segment)} method.
*
* <p>
* When the 'prune.maxDepth' param is quite big, it becomes inefficient to keep reverted nodes until the prune block number has come.
* Hence Pruner has a two-step mode to mitigate memory consumption; the second step is initiated by {@link #withSecondStep(List, int)}.
* In that mode nodes from non-accepted forks are deleted from storage immediately, while main chain deletions are
* postponed for the second step.
* The second step uses another instance of QuotientFilter with less memory impact, check {@link #instantiateFilter(int, int)}.
*
* <p>
* Basically, a prune session initiated by the {@link #prune(Segment)} method
* consists of 3 steps: first, it reverts forks, then it persists the main chain,
* and after that it recycles {@link #journal} by removing processed updates from it.
* During the session, reverted and deleted nodes are propagated to the {@link #storage} immediately.
*
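* <p>
* A minimal, illustrative call sequence (the window/segment variables are placeholders;
* in ethereumJ these calls are driven by PruneManager):
* <pre>{@code
* Pruner pruner = new Pruner(journalSource.getJournal(), storage);
* pruner.init(forkWindowHashes, forkWindowSizeInBlocks);
* pruner.feed(journalSource.commitUpdates(blockHash)); // after each imported block
* pruner.prune(segment); // once the segment is complete
* }</pre>
*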
* @author Mikhail Kalinin
* @since 25.01.2018
*/
public class Pruner {
private static final Logger logger = LoggerFactory.getLogger("prune");
Source<byte[], JournalSource.Update> journal;
Source<byte[], ?> storage;
QuotientFilter filter;
QuotientFilter distantFilter;
boolean ready = false;
private static class Stats {
int collisions = 0;
int deleted = 0;
double load = 0;
@Override
public String toString() {
return String.format("load %.4f, collisions %d, deleted %d", load, collisions, deleted);
}
}
Stats maxLoad = new Stats();
Stats maxCollisions = new Stats();
int maxKeysInMemory = 0;
int statsTracker = 0;
Stats distantMaxLoad = new Stats();
Stats distantMaxCollisions = new Stats();
public Pruner(Source<byte[], JournalSource.Update> journal, Source<byte[], ?> storage) {
this.storage = storage;
this.journal = journal;
}
public boolean isReady() {
return ready;
}
public boolean init(List<byte[]> forkWindow, int sizeInBlocks) {
if (ready) return true;
if (!forkWindow.isEmpty() && journal.get(forkWindow.get(0)) == null) {
logger.debug("pruner init aborted: can't fetch update " + toHexString(forkWindow.get(0)));
return false;
}
QuotientFilter filter = instantiateFilter(sizeInBlocks, FILTER_ENTRIES_FORK);
for (byte[] hash : forkWindow) {
JournalSource.Update update = journal.get(hash);
if (update == null) {
logger.debug("pruner init aborted: can't fetch update " + toHexString(hash));
return false;
}
update.getInsertedKeys().forEach(filter::insert);
}
this.filter = filter;
return ready = true;
}
public boolean withSecondStep() {
return distantFilter != null;
}
public void withSecondStep(List<byte[]> mainChainWindow, int sizeInBlocks) {
if (!ready) return;
QuotientFilter filter = instantiateFilter(sizeInBlocks, FILTER_ENTRIES_DISTANT);
if (!mainChainWindow.isEmpty()) {
int i = mainChainWindow.size() - 1;
for (; i >= 0; i--) {
byte[] hash = mainChainWindow.get(i);
JournalSource.Update update = journal.get(hash);
if (update == null) {
break;
}
update.getInsertedKeys().forEach(filter::insert);
}
logger.debug("distant filter initialized with set of " + (i < 0 ? mainChainWindow.size() : mainChainWindow.size() - i) +
" hashes, last hash " + toHexString(mainChainWindow.get(i < 0 ? 0 : i)));
} else {
logger.debug("distant filter initialized with empty set");
}
this.distantFilter = filter;
}
private static final int FILTER_ENTRIES_FORK = 1 << 13; // approximate number of nodes per block
private static final int FILTER_ENTRIES_DISTANT = 1 << 11;
private static final int FILTER_MAX_SIZE = Integer.MAX_VALUE >> 1; // that filter will consume ~3g of mem
private QuotientFilter instantiateFilter(int blocksCnt, int entries) {
int size = Math.min(entries * blocksCnt, FILTER_MAX_SIZE);
return CountingQuotientFilter.create(size, size);
}
public boolean init(byte[] ... upcoming) {
return init(Arrays.asList(upcoming), 192);
}
public void feed(JournalSource.Update update) {
if (ready)
update.getInsertedKeys().forEach(filter::insert);
}
public void prune(Segment segment) {
if (!ready) return;
assert segment.isComplete();
logger.trace("prune " + segment);
long t = System.currentTimeMillis();
Pruning pruning = new Pruning();
// important for fork management, check Pruning#insertedInMainChain and Pruning#insertedInForks for details
segment.forks.sort((f1, f2) -> Long.compare(f1.startNumber(), f2.startNumber()));
segment.forks.forEach(pruning::revert);
// delete updates
for (Chain chain : segment.forks) {
chain.getHashes().forEach(journal::delete);
}
int nodesPostponed = 0;
if (withSecondStep()) {
nodesPostponed = postpone(segment.main);
} else {
pruning.nodesDeleted += persist(segment.main);
segment.main.getHashes().forEach(journal::delete);
}
if (logger.isTraceEnabled()) logger.trace("nodes {}, keys in mem: {}, filter load: {}/{}: {}, distinct collisions: {}",
(withSecondStep() ? "postponed: " + nodesPostponed : "deleted: " + pruning.nodesDeleted),
pruning.insertedInForks.size() + pruning.insertedInMainChain.size(),
((CountingQuotientFilter) filter).getEntryNumber(), ((CountingQuotientFilter) filter).getMaxInsertions(),
String.format("%.4f", (double) ((CountingQuotientFilter) filter).getEntryNumber() /
((CountingQuotientFilter) filter).getMaxInsertions()),
((CountingQuotientFilter) filter).getCollisionNumber());
if (logger.isDebugEnabled()) {
int collisions = ((CountingQuotientFilter) filter).getCollisionNumber();
double load = (double) ((CountingQuotientFilter) filter).getEntryNumber() /
((CountingQuotientFilter) filter).getMaxInsertions();
if (collisions > maxCollisions.collisions) {
maxCollisions.collisions = collisions;
maxCollisions.load = load;
maxCollisions.deleted = pruning.nodesDeleted;
}
if (load > maxLoad.load) {
maxLoad.load = load;
maxLoad.collisions = collisions;
maxLoad.deleted = pruning.nodesDeleted;
}
maxKeysInMemory = Math.max(maxKeysInMemory, pruning.insertedInForks.size() + pruning.insertedInMainChain.size());
if (++statsTracker % 100 == 0) {
logger.debug("fork filter: max load: " + maxLoad);
logger.debug("fork filter: max collisions: " + maxCollisions);
logger.debug("fork filter: max keys in mem: " + maxKeysInMemory);
}
}
logger.trace(segment + " pruned in {}ms", System.currentTimeMillis() - t);
}
public void persist(byte[] hash) {
if (!ready || !withSecondStep()) return;
logger.trace("persist [{}]", toHexString(hash));
long t = System.currentTimeMillis();
JournalSource.Update update = journal.get(hash);
if (update == null) {
logger.debug("skip [{}]: can't fetch update", HashUtil.shortHash(hash));
return;
}
// persist deleted keys
int nodesDeleted = 0;
for (byte[] key : update.getDeletedKeys()) {
if (!filter.maybeContains(key) && !distantFilter.maybeContains(key)) {
++nodesDeleted;
storage.delete(key);
}
}
// clean up filter
update.getInsertedKeys().forEach(distantFilter::remove);
// delete update
journal.delete(hash);
if (logger.isDebugEnabled()) {
int collisions = ((CountingQuotientFilter) distantFilter).getCollisionNumber();
double load = (double) ((CountingQuotientFilter) distantFilter).getEntryNumber() /
((CountingQuotientFilter) distantFilter).getMaxInsertions();
if (collisions > distantMaxCollisions.collisions) {
distantMaxCollisions.collisions = collisions;
distantMaxCollisions.load = load;
distantMaxCollisions.deleted = nodesDeleted;
}
if (load > distantMaxLoad.load) {
distantMaxLoad.load = load;
distantMaxLoad.collisions = collisions;
distantMaxLoad.deleted = nodesDeleted;
}
if (statsTracker % 100 == 0) {
logger.debug("distant filter: max load: " + distantMaxLoad);
logger.debug("distant filter: max collisions: " + distantMaxCollisions);
}
}
if (logger.isTraceEnabled()) logger.trace("[{}] persisted in {}ms: {}/{} ({}%) nodes deleted, filter load: {}/{}: {}, distinct collisions: {}",
HashUtil.shortHash(hash), System.currentTimeMillis() - t, nodesDeleted, update.getDeletedKeys().size(),
nodesDeleted * 100 / update.getDeletedKeys().size(),
((CountingQuotientFilter) distantFilter).getEntryNumber(),
((CountingQuotientFilter) distantFilter).getMaxInsertions(),
String.format("%.4f", (double) ((CountingQuotientFilter) distantFilter).getEntryNumber() /
((CountingQuotientFilter) distantFilter).getMaxInsertions()),
((CountingQuotientFilter) distantFilter).getCollisionNumber());
}
private int postpone(Chain chain) {
if (logger.isTraceEnabled())
logger.trace("<~ postponing " + chain + ": " + strSample(chain.getHashes()));
int nodesPostponed = 0;
for (byte[] hash : chain.getHashes()) {
JournalSource.Update update = journal.get(hash);
if (update == null) {
logger.debug("postponing: can't fetch update " + toHexString(hash));
continue;
}
// feed distant filter
update.getInsertedKeys().forEach(distantFilter::insert);
// clean up fork filter
update.getInsertedKeys().forEach(filter::remove);
nodesPostponed += update.getDeletedKeys().size();
}
return nodesPostponed;
}
private int persist(Chain chain) {
if (logger.isTraceEnabled())
logger.trace("<~ persisting " + chain + ": " + strSample(chain.getHashes()));
int nodesDeleted = 0;
for (byte[] hash : chain.getHashes()) {
JournalSource.Update update = journal.get(hash);
if (update == null) {
logger.debug("pruning aborted: can't fetch update of main chain " + toHexString(hash));
return 0;
}
// persist deleted keys
for (byte[] key : update.getDeletedKeys()) {
if (!filter.maybeContains(key)) {
++nodesDeleted;
storage.delete(key);
}
}
// clean up filter
update.getInsertedKeys().forEach(filter::remove);
}
return nodesDeleted;
}
private String strSample(Collection<byte[]> hashes) {
String sample = hashes.stream().limit(3)
.map(HashUtil::shortHash).collect(Collectors.joining(", "));
if (hashes.size() > 3) {
sample += ", ... (" + hashes.size() + " total)";
}
return sample;
}
private class Pruning {
// track nodes inserted and deleted in forks
// to avoid deletion of those nodes which were originally inserted in the main chain
Set<byte[]> insertedInMainChain = new ByteArraySet();
Set<byte[]> insertedInForks = new ByteArraySet();
int nodesDeleted = 0;
private void revert(Chain chain) {
if (logger.isTraceEnabled())
logger.trace("<~ reverting " + chain + ": " + strSample(chain.getHashes()));
for (byte[] hash : chain.getHashes()) {
JournalSource.Update update = journal.get(hash);
if (update == null) {
logger.debug("reverting chain " + chain + " aborted: can't fetch update " + toHexString(hash));
return;
}
// clean up filter
update.getInsertedKeys().forEach(filter::remove);
// a node deleted in a fork is treated as a node that had earlier been inserted in the main chain
update.getDeletedKeys().forEach(key -> {
if (!insertedInForks.contains(key)) {
insertedInMainChain.add(key);
}
});
update.getInsertedKeys().forEach(key -> {
if (!insertedInMainChain.contains(key)) {
insertedInForks.add(key);
}
});
// revert inserted keys
for (byte[] key : update.getInsertedKeys()) {
if (!filter.maybeContains(key) && !insertedInMainChain.contains(key)) {
++nodesDeleted;
storage.delete(key);
}
}
}
}
}
}
| 15,429
| 40.478495
| 151
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/prune/Segment.java
|
package org.ethereum.db.prune;
import org.ethereum.core.Block;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
* Provides an interface for building and tracking chain segment.
*
* <p>
* A chain segment is a fragment of the blockchain; it includes both forks and the main chain.
* A segment always has a 'root' item which must belong to the main chain;
* however, the 'root' item itself is not treated as part of the segment.
*
* <p>
* A segment is complete when its main chain top item is the highest one (fork tops have lower numbers).
* Whether a segment is complete can be checked by a call to {@link #isComplete()}.
*
* <p>
* Segment has a {@link Tracker} class which helps to update the segment with new blocks.
* Its usage is simple: add all blocks with {@link Tracker#addAll(List)},
* add main chain blocks with {@link Tracker#addMain(Block)},
* then, when all blocks are added, fire {@link Tracker#commit()}
* to connect the added blocks to the segment, as shown in the sketch below.
*
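* An illustrative sketch (the block variables are assumptions, not part of this class):
* <pre>{@code
* Segment segment = new Segment(rootBlock);
* Segment.Tracker tracker = segment.startTracking();
* tracker.addAll(newBlocks);        // all blocks, forks included
* tracker.addMain(mainChainBlock);  // blocks known to be on the main chain
* tracker.commit();                 // connect the added blocks to the segment
* if (segment.isComplete()) {
*     // ready to be pruned
* }
* }</pre>
*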
* @author Mikhail Kalinin
* @since 24.01.2018
*
* @see Chain
* @see ChainItem
*/
public class Segment {
List<Chain> forks = new ArrayList<>();
Chain main = Chain.NULL;
ChainItem root;
public Segment(Block root) {
this.root = new ChainItem(root);
}
public Segment(long number, byte[] hash, byte[] parentHash) {
this.root = new ChainItem(number, hash, parentHash);
}
public boolean isComplete() {
if (main == Chain.NULL)
return false;
for (Chain fork : forks) {
if (!main.isHigher(fork))
return false;
}
return true;
}
public long getRootNumber() {
return root.number;
}
public long getMaxNumber() {
return main.topNumber();
}
public Tracker startTracking() {
return new Tracker(this);
}
public int size() {
return main.items.size();
}
private void branch(ChainItem item) {
forks.add(new Chain(item));
}
private void connectMain(ChainItem item) {
if (main == Chain.NULL) {
if (root.isParentOf(item))
main = new Chain(item); // start new
} else {
main.connect(item);
}
}
private void connectFork(ChainItem item) {
for (Chain fork : forks) {
if (fork.contains(item))
return;
}
if (root.isParentOf(item)) {
branch(item);
} else {
for (ChainItem mainItem : main.items) {
if (mainItem.isParentOf(item)) {
branch(item);
}
}
for (Chain fork : forks) {
if (fork.connect(item)) {
return;
}
}
List<Chain> branchedForks = new ArrayList<>();
for (Chain fork : forks) {
for (ChainItem forkItem : fork.items) {
if (forkItem.isParentOf(item)) {
branchedForks.add(new Chain(item));
}
}
}
forks.addAll(branchedForks);
}
}
@Override
public String toString() {
return "" + main;
}
public static final class Tracker {
Segment segment;
List<ChainItem> main = new ArrayList<>();
List<ChainItem> items = new ArrayList<>();
Tracker(Segment segment) {
this.segment = segment;
}
public void addMain(Block block) {
main.add(new ChainItem(block));
}
public void addAll(List<Block> blocks) {
items.addAll(blocks.stream()
.map(ChainItem::new)
.collect(Collectors.toList()));
}
public Tracker addMain(long number, byte[] hash, byte[] parentHash) {
main.add(new ChainItem(number, hash, parentHash));
return this;
}
public Tracker addItem(long number, byte[] hash, byte[] parentHash) {
items.add(new ChainItem(number, hash, parentHash));
return this;
}
public void commit() {
items.removeAll(main);
main.sort((i1, i2) -> Long.compare(i1.number, i2.number));
items.sort((i1, i2) -> Long.compare(i1.number, i2.number));
main.forEach(segment::connectMain);
items.forEach(segment::connectFork);
}
}
}
| 4,540
| 25.869822
| 102
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/prune/ChainItem.java
|
package org.ethereum.db.prune;
import org.ethereum.core.Block;
import org.ethereum.util.FastByteComparisons;
import java.util.Arrays;
/**
* Represents a block in the {@link Chain}
*
* @author Mikhail Kalinin
* @since 26.01.2018
*/
class ChainItem {
long number;
byte[] hash;
byte[] parentHash;
ChainItem(Block block) {
this.number = block.getNumber();
this.hash = block.getHash();
this.parentHash = block.getParentHash();
}
ChainItem(long number, byte[] hash, byte[] parentHash) {
this.number = number;
this.hash = hash;
this.parentHash = parentHash;
}
boolean isParentOf(ChainItem that) {
return FastByteComparisons.equal(hash, that.parentHash);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ChainItem that = (ChainItem) o;
return FastByteComparisons.equal(hash, that.hash);
}
@Override
public int hashCode() {
return hash != null ? Arrays.hashCode(hash) : 0;
}
@Override
public String toString() {
return String.valueOf(number);
}
}
| 1,212
| 21.886792
| 66
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/db/migrate/MigrateHeaderSourceTotalDiff.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.db.migrate;
import org.ethereum.config.SystemProperties;
import org.ethereum.core.Block;
import org.ethereum.core.BlockHeader;
import org.ethereum.core.Blockchain;
import org.ethereum.core.BlockchainImpl;
import org.ethereum.datasource.DataSourceArray;
import org.ethereum.datasource.DbSource;
import org.ethereum.datasource.ObjectDataSource;
import org.ethereum.datasource.Serializers;
import org.ethereum.db.BlockStore;
import org.ethereum.db.DbFlushManager;
import org.ethereum.db.HeaderStore;
import org.ethereum.sync.FastSyncManager;
import org.ethereum.util.FileUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationContext;
import java.math.BigInteger;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
/**
* @deprecated
* TODO: Remove after a few versions (current: 1.7.3) or with a DB version update
* TODO: Make {@link FastSyncManager#removeHeadersDb(Logger)} private after removing this migration;
* also remove CommonConfig.headerSource with it, as it is no longer used
*
* - Repairs the headers DB after a FastSync with skipHistory so that it remains usable:
*   a) updates the incorrect total difficulty
*   b) migrates headers stored without an index to a usable indexed scheme
* - Otherwise removes the headers DB, as it is not needed
*/
@Deprecated
public class MigrateHeaderSourceTotalDiff implements Runnable {
private static final Logger logger = LoggerFactory.getLogger("general");
private ApplicationContext ctx;
private BlockStore blockStore;
private Blockchain blockchain;
private SystemProperties systemProperties;
public MigrateHeaderSourceTotalDiff(ApplicationContext ctx, BlockStore blockStore,
Blockchain blockchain, SystemProperties systemProperties) {
this.ctx = ctx;
this.blockStore = blockStore;
this.blockchain = blockchain;
this.systemProperties = systemProperties;
}
@Override
public void run() {
// checking whether we should do any kind of migration:
if (!systemProperties.isFastSyncEnabled()) {
return;
}
FastSyncManager fastSyncManager = ctx.getBean(FastSyncManager.class);
if (fastSyncManager.isInProgress() || blockStore.getBestBlock().getNumber() == 0) { // Fast sync is not over
return;
}
logger.info("Fast Sync was used. Checking if migration required.");
boolean dbRemoved = fastSyncManager.removeHeadersDb(logger);
if (dbRemoved) {
logger.info("Migration finished.");
return;
}
if (blockStore.getBestBlock().getNumber() > 0 && blockStore.getChainBlockByNumber(1) == null) {
// Maybe migration of headerStore and totalDifficulty is required?
HeaderStore headerStore = ctx.getBean(HeaderStore.class);
if (headerStore.getHeaderByNumber(1) != null) {
logger.info("No migration required.");
return;
}
logger.info("Migration required. Updating total difficulty.");
logger.info("=== Don't stop or exit from application, migration could not be resumed ===");
long firstFullBlockNum = blockStore.getMaxNumber();
while (blockStore.getChainBlockByNumber(firstFullBlockNum - 1) != null) {
--firstFullBlockNum;
}
Block firstFullBlock = blockStore.getChainBlockByNumber(firstFullBlockNum);
DbSource<byte[]> headerDbSource = (DbSource<byte[]>) ctx.getBean("headerSource");
ObjectDataSource<BlockHeader> objectDataSource = new ObjectDataSource<>(headerDbSource, Serializers.BlockHeaderSerializer, 0);
DataSourceArray<BlockHeader> headerSource = new DataSourceArray<>(objectDataSource);
BigInteger totalDifficulty = blockStore.getChainBlockByNumber(0).getDifficultyBI();
for (int i = 1; i < firstFullBlockNum; ++i) {
totalDifficulty = totalDifficulty.add(headerSource.get(i).getDifficultyBI());
}
blockStore.saveBlock(firstFullBlock, totalDifficulty.add(firstFullBlock.getDifficultyBI()), true);
((BlockchainImpl) blockchain).updateBlockTotDifficulties(firstFullBlockNum + 1);
logger.info("Total difficulty updated");
logger.info("Migrating headerStore");
int maxHeaderNumber = headerSource.size() - 1;
DbFlushManager flushManager = ctx.getBean(DbFlushManager.class);
for (int i = 1; i < headerSource.size(); ++i) {
BlockHeader curHeader = headerSource.get(i);
headerStore.saveHeader(curHeader);
headerSource.set(i, null);
if (i % 10000 == 0) {
logger.info("#{} of {} headers moved. Flushing...", i, maxHeaderNumber);
flushManager.commit();
flushManager.flush();
}
}
flushManager.commit();
flushManager.flush();
logger.info("headerStore migration finished. No more migrations required");
} else {
logger.info("No migration required.");
}
}
}
| 6,033
| 42.410072
| 138
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/MinMaxMap.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;
/**
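* A {@link TreeMap} with {@code Long} keys that can drop all entries strictly above
* ({@link #clearAllAfter(long)}) or strictly below ({@link #clearAllBefore(long)}) a given key.
* <p>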
* Created by Anton Nashatyrev on 08.12.2016.
*/
public class MinMaxMap<V> extends TreeMap<Long, V> {
public void clearAllAfter(long key) {
if (isEmpty()) return;
navigableKeySet().subSet(key, false, getMax(), true).clear();
}
public void clearAllBefore(long key) {
if (isEmpty()) return;
descendingKeySet().subSet(key, false, getMin(), true).clear();
}
public Long getMin() {
return isEmpty() ? null : firstKey();
}
public Long getMax() {
return isEmpty() ? null : lastKey();
}
}
| 1,463
| 30.148936
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/CopyOnWriteMap.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.ethereum.util;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
/**
* A thread-safe version of {@link Map} in which all operations that change the
* Map are implemented by making a new copy of the underlying Map.
*
* While the creation of a new Map can be expensive, this class is designed for
* cases in which the primary function is to read data from the Map, not to
* modify the Map. Therefore the operations that do not cause a change to this
* class happen quickly and concurrently.
*
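* An illustrative sketch (the names below are examples only):
* <pre>{@code
* Map<String, String> settings = new CopyOnWriteMap<>();
* settings.put("mode", "fast");         // writers copy the underlying map
* String mode = settings.get("mode");   // readers see the current snapshot without locking
* }</pre>
*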
* @author The Apache MINA Project (dev@mina.apache.org)
* @version $Rev$, $Date$
*/
public class CopyOnWriteMap<K, V> implements Map<K, V>, Cloneable {
private volatile Map<K, V> internalMap;
/**
* Creates a new instance of CopyOnWriteMap.
*
*/
public CopyOnWriteMap() {
internalMap = new HashMap<K, V>();
}
/**
* Creates a new instance of CopyOnWriteMap with the specified initial size
*
* @param initialCapacity
* The initial size of the Map.
*/
public CopyOnWriteMap(int initialCapacity) {
internalMap = new HashMap<K, V>(initialCapacity);
}
/**
* Creates a new instance of CopyOnWriteMap in which the
* initial data being held by this map is contained in
* the supplied map.
*
* @param data
* A Map containing the initial contents to be placed into
* this class.
*/
public CopyOnWriteMap(Map<K, V> data) {
internalMap = new HashMap<K, V>(data);
}
/**
* Adds the provided key and value to this map.
*
* @see java.util.Map#put(java.lang.Object, java.lang.Object)
*/
public V put(K key, V value) {
synchronized (this) {
Map<K, V> newMap = new HashMap<K, V>(internalMap);
V val = newMap.put(key, value);
internalMap = newMap;
return val;
}
}
/**
* Removes the value and key from this map based on the
* provided key.
*
* @see java.util.Map#remove(java.lang.Object)
*/
public V remove(Object key) {
synchronized (this) {
Map<K, V> newMap = new HashMap<K, V>(internalMap);
V val = newMap.remove(key);
internalMap = newMap;
return val;
}
}
/**
* Inserts all the keys and values contained in the
* provided map to this map.
*
* @see java.util.Map#putAll(java.util.Map)
*/
public void putAll(Map<? extends K, ? extends V> newData) {
synchronized (this) {
Map<K, V> newMap = new HashMap<K, V>(internalMap);
newMap.putAll(newData);
internalMap = newMap;
}
}
/**
* Removes all entries in this map.
*
* @see java.util.Map#clear()
*/
public void clear() {
synchronized (this) {
internalMap = new HashMap<K, V>();
}
}
//
// Below are methods that do not modify
// the internal Maps
/**
* Returns the number of key/value pairs in this map.
*
* @see java.util.Map#size()
*/
public int size() {
return internalMap.size();
}
/**
* Returns true if this map is empty, otherwise false.
*
* @see java.util.Map#isEmpty()
*/
public boolean isEmpty() {
return internalMap.isEmpty();
}
/**
* Returns true if this map contains the provided key, otherwise
* this method return false.
*
* @see java.util.Map#containsKey(java.lang.Object)
*/
public boolean containsKey(Object key) {
return internalMap.containsKey(key);
}
/**
* Returns true if this map contains the provided value, otherwise
* this method returns false.
*
* @see java.util.Map#containsValue(java.lang.Object)
*/
public boolean containsValue(Object value) {
return internalMap.containsValue(value);
}
/**
* Returns the value associated with the provided key from this
* map.
*
* @see java.util.Map#get(java.lang.Object)
*/
public V get(Object key) {
return internalMap.get(key);
}
/**
* This method will return a read-only {@link Set}.
*/
public Set<K> keySet() {
return internalMap.keySet();
}
/**
* This method will return a read-only {@link Collection}.
*/
public Collection<V> values() {
return internalMap.values();
}
/**
* This method will return a read-only {@link Set}.
*/
public Set<Entry<K, V>> entrySet() {
return internalMap.entrySet();
}
@Override
public Object clone() {
try {
return super.clone();
} catch (CloneNotSupportedException e) {
throw new InternalError();
}
}
}
| 6,516
| 28.09375
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/RLPList.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import java.util.ArrayList;
/**
* @author Roman Mandeleil
* @since 21.04.14
*/
public class RLPList extends ArrayList<RLPElement> implements RLPElement {
byte[] rlpData;
public void setRLPData(byte[] rlpData) {
this.rlpData = rlpData;
}
public byte[] getRLPData() {
return rlpData;
}
public static void recursivePrint(RLPElement element) {
if (element == null)
throw new RuntimeException("RLPElement object can't be null");
if (element instanceof RLPList) {
RLPList rlpList = (RLPList) element;
System.out.print("[");
for (RLPElement singleElement : rlpList)
recursivePrint(singleElement);
System.out.print("]");
} else {
String hex = ByteUtil.toHexString(element.getRLPData());
System.out.print(hex + ", ");
}
}
}
| 1,719
| 30.272727
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/ByteArraySet.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import org.ethereum.db.ByteArrayWrapper;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Created by Anton Nashatyrev on 06.10.2016.
*/
public class ByteArraySet implements Set<byte[]> {
Set<ByteArrayWrapper> delegate;
public ByteArraySet() {
this(new HashSet<ByteArrayWrapper>());
}
ByteArraySet(Set<ByteArrayWrapper> delegate) {
this.delegate = delegate;
}
@Override
public int size() {
return delegate.size();
}
@Override
public boolean isEmpty() {
return delegate.isEmpty();
}
@Override
public boolean contains(Object o) {
return delegate.contains(new ByteArrayWrapper((byte[]) o));
}
@Override
public Iterator<byte[]> iterator() {
return new Iterator<byte[]>() {
Iterator<ByteArrayWrapper> it = delegate.iterator();
@Override
public boolean hasNext() {
return it.hasNext();
}
@Override
public byte[] next() {
return it.next().getData();
}
@Override
public void remove() {
it.remove();
}
};
}
@Override
public Object[] toArray() {
byte[][] ret = new byte[size()][];
ByteArrayWrapper[] arr = delegate.toArray(new ByteArrayWrapper[size()]);
for (int i = 0; i < arr.length; i++) {
ret[i] = arr[i].getData();
}
return ret;
}
@Override
public <T> T[] toArray(T[] a) {
return (T[]) toArray();
}
@Override
public boolean add(byte[] bytes) {
return delegate.add(new ByteArrayWrapper(bytes));
}
@Override
public boolean remove(Object o) {
return delegate.remove(new ByteArrayWrapper((byte[]) o));
}
@Override
public boolean containsAll(Collection<?> c) {
throw new RuntimeException("Not implemented");
}
@Override
public boolean addAll(Collection<? extends byte[]> c) {
boolean ret = false;
for (byte[] bytes : c) {
ret |= add(bytes);
}
return ret;
}
@Override
public boolean retainAll(Collection<?> c) {
throw new RuntimeException("Not implemented");
}
@Override
public boolean removeAll(Collection<?> c) {
boolean changed = false;
for (Object el : c) {
changed |= remove(el);
}
return changed;
}
@Override
public void clear() {
delegate.clear();
}
@Override
public boolean equals(Object o) {
throw new RuntimeException("Not implemented");
}
@Override
public int hashCode() {
throw new RuntimeException("Not implemented");
}
}
| 3,705
| 24.040541
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/Value.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import com.cedarsoftware.util.DeepEquals;
import org.ethereum.crypto.HashUtil;
import org.spongycastle.util.encoders.Hex;
import java.math.BigInteger;
import java.util.Arrays;
import java.util.List;
import static org.ethereum.util.ByteUtil.toHexString;
/**
* Class to encapsulate an object and provide utilities for conversion
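* <p>
* A minimal illustration (assuming {@code rlpBytes} holds RLP-encoded data):
* <pre>{@code
* Value v = Value.fromRlpEncoded(rlpBytes);
* if (v != null && v.isList()) {
*     Value first = v.get(0);   // wraps the first decoded element
* }
* }</pre>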
*/
public class Value {
private Object value;
private byte[] rlp;
private byte[] sha3;
private boolean decoded = false;
public static Value fromRlpEncoded(byte[] data) {
if (data != null && data.length != 0) {
Value v = new Value();
v.init(data);
return v;
}
return null;
}
public Value(){
}
public void init(byte[] rlp){
this.rlp = rlp;
}
public Value(Object obj) {
this.decoded = true;
if (obj == null) return;
if (obj instanceof Value) {
this.value = ((Value) obj).asObj();
} else {
this.value = obj;
}
}
public Value withHash(byte[] hash) {
sha3 = hash;
return this;
}
/* *****************
* Convert
* *****************/
public Object asObj() {
decode();
return value;
}
public List<Object> asList() {
decode();
Object[] valueArray = (Object[]) value;
return Arrays.asList(valueArray);
}
public int asInt() {
decode();
if (isInt()) {
return (Integer) value;
} else if (isBytes()) {
return new BigInteger(1, asBytes()).intValue();
}
return 0;
}
public long asLong() {
decode();
if (isLong()) {
return (Long) value;
} else if (isBytes()) {
return new BigInteger(1, asBytes()).longValue();
}
return 0;
}
public BigInteger asBigInt() {
decode();
return (BigInteger) value;
}
public String asString() {
decode();
if (isBytes()) {
return new String((byte[]) value);
} else if (isString()) {
return (String) value;
}
return "";
}
public byte[] asBytes() {
decode();
if (isBytes()) {
return (byte[]) value;
} else if (isString()) {
return asString().getBytes();
}
return ByteUtil.EMPTY_BYTE_ARRAY;
}
public String getHex(){
return Hex.toHexString(this.encode());
}
public byte[] getData(){
return this.encode();
}
public int[] asSlice() {
return (int[]) value;
}
public Value get(int index) {
if (isList()) {
// Guard for OutOfBounds
if (asList().size() <= index) {
return new Value(null);
}
if (index < 0) {
throw new RuntimeException("Negative index not allowed");
}
return new Value(asList().get(index));
}
// If this wasn't a slice you probably shouldn't be using this function
return new Value(null);
}
/* *****************
* Utility
* *****************/
public void decode(){
if (!this.decoded) {
this.value = RLP.decode(rlp, 0).getDecoded();
this.decoded = true;
}
}
public byte[] encode() {
if (rlp == null)
rlp = RLP.encode(value);
return rlp;
}
public byte[] hash(){
if (sha3 == null)
sha3 = HashUtil.sha3(encode());
return sha3;
}
public boolean cmp(Value o) {
return DeepEquals.deepEquals(this, o);
}
/* *****************
* Checks
* *****************/
public boolean isList() {
decode();
return value != null && value.getClass().isArray() && !value.getClass().getComponentType().isPrimitive();
}
public boolean isString() {
decode();
return value instanceof String;
}
public boolean isInt() {
decode();
return value instanceof Integer;
}
public boolean isLong() {
decode();
return value instanceof Long;
}
public boolean isBigInt() {
decode();
return value instanceof BigInteger;
}
public boolean isBytes() {
decode();
return value instanceof byte[];
}
// only meaningful if isBytes() == true
public boolean isReadableString() {
decode();
int readableChars = 0;
byte[] data = (byte[]) value;
if (data.length == 1 && data[0] > 31 && data[0] < 126) {
return true;
}
for (byte aData : data) {
if (aData > 32 && aData < 126) ++readableChars;
}
return (double) readableChars / (double) data.length > 0.55;
}
// only meaningful if isBytes() == true
public boolean isHexString() {
decode();
int hexChars = 0;
byte[] data = (byte[]) value;
for (byte aData : data) {
if ((aData >= 48 && aData <= 57)
|| (aData >= 97 && aData <= 102))
++hexChars;
}
return (double) hexChars / (double) data.length > 0.9;
}
public boolean isHashCode() {
decode();
return this.asBytes().length == 32;
}
public boolean isNull() {
decode();
return value == null;
}
public boolean isEmpty() {
decode();
if (isNull()) return true;
if (isBytes() && asBytes().length == 0) return true;
if (isList() && asList().isEmpty()) return true;
if (isString() && asString().isEmpty()) return true;
return false;
}
public int length() {
decode();
if (isList()) {
return asList().size();
} else if (isBytes()) {
return asBytes().length;
} else if (isString()) {
return asString().length();
}
return 0;
}
public String toString() {
decode();
StringBuilder stringBuilder = new StringBuilder();
if (isList()) {
Object[] list = (Object[]) value;
// special case - key/value node
if (list.length == 2) {
stringBuilder.append("[ ");
Value key = new Value(list[0]);
byte[] keyNibbles = CompactEncoder.binToNibblesNoTerminator(key.asBytes());
String keyString = ByteUtil.nibblesToPrettyString(keyNibbles);
stringBuilder.append(keyString);
stringBuilder.append(",");
Value val = new Value(list[1]);
stringBuilder.append(val.toString());
stringBuilder.append(" ]");
return stringBuilder.toString();
}
stringBuilder.append(" [");
for (int i = 0; i < list.length; ++i) {
Value val = new Value(list[i]);
if (val.isString() || val.isEmpty()) {
stringBuilder.append("'").append(val.toString()).append("'");
} else {
stringBuilder.append(val.toString());
}
if (i < list.length - 1)
stringBuilder.append(", ");
}
stringBuilder.append("] ");
return stringBuilder.toString();
} else if (isEmpty()) {
return "";
} else if (isBytes()) {
StringBuilder output = new StringBuilder();
if (isHashCode()) {
output.append(toHexString(asBytes()));
} else if (isReadableString()) {
output.append("'");
for (byte oneByte : asBytes()) {
if (oneByte < 16) {
output.append("\\x").append(ByteUtil.oneByteToHexString(oneByte));
} else {
output.append(Character.valueOf((char) oneByte));
}
}
output.append("'");
return output.toString();
}
return toHexString(this.asBytes());
} else if (isString()) {
return asString();
}
return "Unexpected type";
}
public int countBranchNodes() {
decode();
if (this.isList()) {
List<Object> objList = this.asList();
int i = 0;
for (Object obj : objList) {
i += (new Value(obj)).countBranchNodes();
}
return i;
} else if (this.isBytes()) {
this.asBytes();
}
return 0;
}
}
| 9,552
| 24.339523
| 113
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/BIUtil.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import org.ethereum.core.Repository;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
public class BIUtil {
/**
* @param value - not null
* @return true - if the param is zero
*/
public static boolean isZero(BigInteger value){
return value.compareTo(BigInteger.ZERO) == 0;
}
/**
* @param valueA - not null
* @param valueB - not null
* @return true - if valueA is equal to valueB
*/
public static boolean isEqual(BigInteger valueA, BigInteger valueB){
return valueA.compareTo(valueB) == 0;
}
/**
* @param valueA - not null
* @param valueB - not null
* @return true - if valueA is not equal to valueB
*/
public static boolean isNotEqual(BigInteger valueA, BigInteger valueB){
return !isEqual(valueA, valueB);
}
/**
* @param valueA - not null
* @param valueB - not null
* @return true - if valueA is less than valueB
*/
public static boolean isLessThan(BigInteger valueA, BigInteger valueB){
return valueA.compareTo(valueB) < 0;
}
/**
* @param valueA - not null
* @param valueB - not null
* @return true - if valueA is greater than valueB
*/
public static boolean isMoreThan(BigInteger valueA, BigInteger valueB){
return valueA.compareTo(valueB) > 0;
}
/**
* @param valueA - not null
* @param valueB - not null
* @return sum - valueA + valueB
*/
public static BigInteger sum(BigInteger valueA, BigInteger valueB){
return valueA.add(valueB);
}
/**
* @param data - not null
* @return new positive BigInteger
*/
public static BigInteger toBI(byte[] data){
return new BigInteger(1, data);
}
/**
* @param data - not null
* @return new positive BigInteger
*/
public static BigInteger toBI(long data){
return BigInteger.valueOf(data);
}
public static boolean isPositive(BigInteger value){
return value.signum() > 0;
}
public static boolean isCovers(BigInteger covers, BigInteger value){
return !isNotCovers(covers, value);
}
public static boolean isNotCovers(BigInteger covers, BigInteger value){
return covers.compareTo(value) < 0;
}
public static void transfer(Repository repository, byte[] fromAddr, byte[] toAddr, BigInteger value){
repository.addBalance(fromAddr, value.negate());
repository.addBalance(toAddr, value);
}
public static boolean exitLong(BigInteger value){
return (value.compareTo(new BigInteger(Long.MAX_VALUE + ""))) > -1;
}
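/**
* Returns true if {@code second} does not exceed {@code first} by more than 20%,
* e.g. (illustrative) first = 100 allows any second up to 120.
*/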
public static boolean isIn20PercentRange(BigInteger first, BigInteger second) {
BigInteger five = BigInteger.valueOf(5);
BigInteger limit = first.add(first.divide(five));
return !isMoreThan(second, limit);
}
public static BigInteger max(BigInteger first, BigInteger second) {
return first.compareTo(second) < 0 ? second : first;
}
/**
* Returns the result of a safe addition of two {@code int} values;
* {@code Integer.MAX_VALUE} is returned if the sum exceeds it
*/
public static int addSafely(int a, int b) {
long res = (long) a + (long) b;
return res > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) res;
}
}
| 4,250
| 28.727273
| 105
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/Utils.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import org.ethereum.datasource.DbSource;
import org.ethereum.db.ByteArrayWrapper;
import org.ethereum.vm.DataWord;
import org.slf4j.LoggerFactory;
import org.spongycastle.util.encoders.DecoderException;
import org.spongycastle.util.encoders.Hex;
import java.lang.reflect.Array;
import java.math.BigInteger;
import java.net.URL;
import java.security.SecureRandom;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.regex.Pattern;
import javax.swing.*;
public class Utils {
private static final DataWord DIVISOR = DataWord.of(64);
private static SecureRandom random = new SecureRandom();
/**
* @param number decimal string, or hex string in the form '0x34fabd34....'
* @return BigInteger value of the given number
*/
public static BigInteger unifiedNumericToBigInteger(String number) {
boolean match = Pattern.matches("0[xX][0-9a-fA-F]+", number);
if (!match)
return (new BigInteger(number));
else{
number = number.substring(2);
number = number.length() % 2 != 0 ? "0".concat(number) : number;
byte[] numberBytes = Hex.decode(number);
return (new BigInteger(1, numberBytes));
}
}
/**
* Return formatted Date String: yyyy.MM.dd HH:mm:ss
* Based on Unix's time() input in seconds
*
* @param timestamp seconds since start of Unix-time
* @return String formatted as - yyyy.MM.dd HH:mm:ss
*/
public static String longToDateTime(long timestamp) {
Date date = new Date(timestamp * 1000);
DateFormat formatter = new SimpleDateFormat("yyyy.MM.dd HH:mm:ss");
return formatter.format(date);
}
public static String longToTimePeriod(long msec) {
if (msec < 1000) return msec + "ms";
if (msec < 3000) return String.format("%.2fs", msec / 1000d);
if (msec < 60 * 1000) return (msec / 1000) + "s";
long sec = msec / 1000;
if (sec < 5 * 60) return (sec / 60) + "m" + (sec % 60) + "s";
long min = sec / 60;
if (min < 60) return min + "m";
long hour = min / 60;
if (min < 24 * 60) return hour + "h" + (min % 60) + "m";
long day = hour / 24;
return day + "d" + (hour % 24) + "h";
}
public static ImageIcon getImageIcon(String resource) {
URL imageURL = ClassLoader.getSystemResource(resource);
ImageIcon image = new ImageIcon(imageURL);
return image;
}
static BigInteger _1000_ = new BigInteger("1000");
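/**
* Shortens a big number to a power-of-ten notation, e.g. (illustrative) 1234 -> "1·(10^3)".
*/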
public static String getValueShortString(BigInteger number) {
BigInteger result = number;
int pow = 0;
while (result.compareTo(_1000_) == 1 || result.compareTo(_1000_) == 0) {
result = result.divide(_1000_);
pow += 3;
}
return result.toString() + "\u00b7(" + "10^" + pow + ")";
}
/**
* Decodes a hex string to address bytes and checks validity
*
* @param hex - a hex string of the address, e.g., 6c386a4b26f73c802f34673f7248bb118f97424a
* @return - decode and validated address byte[]
*/
public static byte[] addressStringToBytes(String hex) {
final byte[] addr;
try {
addr = Hex.decode(hex);
} catch (DecoderException addressIsNotValid) {
return null;
}
if (isValidAddress(addr))
return addr;
return null;
}
public static boolean isValidAddress(byte[] addr) {
return addr != null && addr.length == 20;
}
/**
* @param addr length should be 20
* @return short string representation, e.g. 1f21c...
*/
public static String getAddressShortString(byte[] addr) {
if (!isValidAddress(addr)) throw new Error("not an address");
String addrShort = Hex.toHexString(addr, 0, 3);
StringBuffer sb = new StringBuffer();
sb.append(addrShort);
sb.append("...");
return sb.toString();
}
public static SecureRandom getRandom() {
return random;
}
public static double JAVA_VERSION = getJavaVersion();
static double getJavaVersion() {
String version = System.getProperty("java.version");
// on Android this property is "0"
if (version.equals("0")) return 0;
int pos = 0, count = 0;
for (; pos < version.length() && count < 2; pos++) {
if (version.charAt(pos) == '.') count++;
}
return Double.parseDouble(version.substring(0, pos - 1));
}
public static String getHashListShort(List<byte[]> blockHashes) {
if (blockHashes.isEmpty()) return "[]";
StringBuilder sb = new StringBuilder();
String firstHash = Hex.toHexString(blockHashes.get(0));
String lastHash = Hex.toHexString(blockHashes.get(blockHashes.size() - 1));
return sb.append(" ").append(firstHash).append("...").append(lastHash).toString();
}
public static String getNodeIdShort(String nodeId) {
return nodeId == null ? "<null>" : nodeId.substring(0, 8);
}
public static long toUnixTime(long javaTime) {
return javaTime / 1000;
}
public static long fromUnixTime(long unixTime) {
return unixTime * 1000;
}
public static <T> T[] mergeArrays(T[] ... arr) {
int size = 0;
for (T[] ts : arr) {
size += ts.length;
}
T[] ret = (T[]) Array.newInstance(arr[0].getClass().getComponentType(), size);
int off = 0;
for (T[] ts : arr) {
System.arraycopy(ts, 0, ret, off, ts.length);
off += ts.length;
}
return ret;
}
public static String align(String s, char fillChar, int targetLen, boolean alignRight) {
if (targetLen <= s.length()) return s;
String alignString = repeat("" + fillChar, targetLen - s.length());
return alignRight ? alignString + s : s + alignString;
}
public static String repeat(String s, int n) {
if (s.length() == 1) {
byte[] bb = new byte[n];
Arrays.fill(bb, s.getBytes()[0]);
return new String(bb);
} else {
StringBuilder ret = new StringBuilder();
for (int i = 0; i < n; i++) ret.append(s);
return ret.toString();
}
}
public static List<ByteArrayWrapper> dumpKeys(DbSource<byte[]> ds) {
ArrayList<ByteArrayWrapper> keys = new ArrayList<>();
for (byte[] key : ds.keys()) {
keys.add(ByteUtil.wrap(key));
}
Collections.sort(keys);
return keys;
}
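/**
* Returns {@code dw - dw / 64}, i.e. "all but one 64th" of the given value.
*/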
public static DataWord allButOne64th(DataWord dw) {
DataWord divResult = dw.div(DIVISOR);
return dw.sub(divResult);
}
/**
* Show std err messages in red and throw RuntimeException to stop execution.
*/
public static void showErrorAndExit(String message, String... messages) {
LoggerFactory.getLogger("general").error(message);
final String ANSI_RED = "\u001B[31m";
final String ANSI_RESET = "\u001B[0m";
System.err.println(ANSI_RED);
System.err.println("");
System.err.println(" " + message);
for (String msg : messages) {
System.err.println(" " + msg);
}
System.err.println("");
System.err.println(ANSI_RESET);
throw new RuntimeException(message);
}
/**
* Show std warning messages in red.
*/
public static void showWarn(String message, String... messages) {
LoggerFactory.getLogger("general").warn(message);
final String ANSI_RED = "\u001B[31m";
final String ANSI_RESET = "\u001B[0m";
System.err.println(ANSI_RED);
System.err.println("");
System.err.println(" " + message);
for (String msg : messages) {
System.err.println(" " + msg);
}
System.err.println("");
System.err.println(ANSI_RESET);
}
public static String sizeToStr(long size) {
if (size < 2 * (1L << 10)) return size + "b";
if (size < 2 * (1L << 20)) return String.format("%dKb", size / (1L << 10));
if (size < 2 * (1L << 30)) return String.format("%dMb", size / (1L << 20));
return String.format("%dGb", size / (1L << 30));
}
public static void sleep(long ms) {
try {
Thread.sleep(ms);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
public static boolean isHexEncoded(String value) {
if (value == null) return false;
if ("".equals(value)) return true;
try {
//noinspection ResultOfMethodCallIgnored
new BigInteger(value, 16);
return true;
} catch (NumberFormatException e) {
return false;
}
}
}
| 9,701
| 31.125828
| 95
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/CollectionUtils.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import java.util.*;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.Function;
import java.util.function.Predicate;
/**
* @author Mikhail Kalinin
* @since 14.07.2015
*/
public class CollectionUtils {
public static <T> List<T> truncate(final List<T> items, int limit) {
if(limit > items.size()) {
return new ArrayList<>(items);
}
List<T> truncated = new ArrayList<>(limit);
for(T item : items) {
truncated.add(item);
if(truncated.size() == limit) {
break;
}
}
return truncated;
}
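/**
* Returns at most {@code limit} randomly selected elements of {@code items};
* if {@code limit} exceeds the list size, a copy of the whole list is returned.
*/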
public static <T> List<T> truncateRand(final List<T> items, int limit) {
if(limit > items.size()) {
return new ArrayList<>(items);
}
List<T> truncated = new ArrayList<>(limit);
LinkedList<Integer> index = new LinkedList<>();
for (int i = 0; i < items.size(); ++i) {
index.add(i);
}
if (limit * 2 < items.size()) {
// Limit is very small compared to items.size()
Set<Integer> smallIndex = new HashSet<>();
for (int i = 0; i < limit; ++i) {
int randomNum = ThreadLocalRandom.current().nextInt(0, index.size());
smallIndex.add(index.remove(randomNum));
}
smallIndex.forEach(i -> truncated.add(items.get(i)));
} else {
// Limit is comparable to items.size()
for (int i = 0; i < items.size() - limit; ++i) {
int randomNum = ThreadLocalRandom.current().nextInt(0, index.size());
index.remove(randomNum);
}
index.forEach(i -> truncated.add(items.get(i)));
}
return truncated;
}
}
| 2,605
| 33.746667
| 85
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/BuildInfo.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
public class BuildInfo {
private static final Logger logger = LoggerFactory.getLogger("general");
public static String buildHash;
public static String buildTime;
public static String buildBranch;
static {
try {
Properties props = new Properties();
InputStream is = BuildInfo.class.getResourceAsStream("/build-info.properties");
if (is != null) {
props.load(is);
buildHash = props.getProperty("build.hash");
buildTime = props.getProperty("build.time");
buildBranch = props.getProperty("build.branch");
} else {
logger.warn("File not found `build-info.properties`. Run `gradle build` to generate it");
}
} catch (IOException e) {
logger.error("Error reading /build-info.properties", e);
}
}
public static void printInfo(){
logger.info("git.hash: [{}]", buildHash);
logger.info("build.time: {}", buildTime);
}
}
| 2,001
| 32.932203
| 105
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/ExecutorPipeline.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
import java.util.function.Function;
/**
* Queues execution tasks into a single pipeline where some tasks can be executed in parallel
* while preserving message order, so the next stage processes messages on a single thread in
* the same order they were added to the previous executor.
*
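* An illustrative sketch ({@code decodeBlock}, {@code importBlock} and the sizes used below
* are assumptions, not part of this class):
* <pre>{@code
* ExecutorPipeline<byte[], Block> decodeStage =
*         new ExecutorPipeline<>(4, 100, true, bytes -> decodeBlock(bytes), err -> err.printStackTrace());
* decodeStage.add(1, 100, block -> importBlock(block)); // single thread, keeps decode order
* decodeStage.push(rlpBytes);
* decodeStage.join();                                   // wait for all submitted work
* }</pre>
*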
* Created by Anton Nashatyrev on 23.02.2016.
*/
public class ExecutorPipeline <In, Out>{
private BlockingQueue<Runnable> queue;
private ThreadPoolExecutor exec;
private boolean preserveOrder = false;
private Function<In, Out> processor;
private Consumer<Throwable> exceptionHandler;
private ExecutorPipeline <Out, ?> next;
private AtomicLong orderCounter = new AtomicLong();
private long nextOutTaskNumber = 0;
private Map<Long, Out> orderMap = new HashMap<>();
private ReentrantLock lock = new ReentrantLock();
private String threadPoolName;
private static AtomicInteger pipeNumber = new AtomicInteger(1);
private AtomicInteger threadNumber = new AtomicInteger(1);
public ExecutorPipeline(int threads, int queueSize, boolean preserveOrder, Function<In, Out> processor,
Consumer<Throwable> exceptionHandler) {
queue = new LimitedQueue<>(queueSize);
exec = new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS, queue, r ->
new Thread(r, threadPoolName + "-" + threadNumber.getAndIncrement())
);
this.preserveOrder = preserveOrder;
this.processor = processor;
this.exceptionHandler = exceptionHandler;
this.threadPoolName = "pipe-" + pipeNumber.getAndIncrement();
}
public ExecutorPipeline<Out, Void> add(int threads, int queueSize, final Consumer<Out> consumer) {
return add(threads, queueSize, false, out -> {
consumer.accept(out);
return null;
});
}
public <NextOut> ExecutorPipeline<Out, NextOut> add(int threads, int queueSize, boolean preserveOrder,
Function<Out, NextOut> processor) {
ExecutorPipeline<Out, NextOut> ret = new ExecutorPipeline<>(threads, queueSize, preserveOrder, processor, exceptionHandler);
next = ret;
return ret;
}
private void pushNext(long order, Out res) {
if (next != null) {
if (!preserveOrder) {
next.push(res);
} else {
lock.lock();
try {
if (order == nextOutTaskNumber) {
next.push(res);
while(true) {
nextOutTaskNumber++;
Out out = orderMap.remove(nextOutTaskNumber);
if (out == null) break;
next.push(out);
}
} else {
orderMap.put(order, res);
}
} finally {
lock.unlock();
}
}
}
}
public void push(final In in) {
final long order = orderCounter.getAndIncrement();
exec.execute(() -> {
try {
pushNext(order, processor.apply(in));
} catch (Throwable e) {
exceptionHandler.accept(e);
}
});
}
public void pushAll(final List<In> list) {
for (In in : list) {
push(in);
}
}
public ExecutorPipeline<In, Out> setThreadPoolName(String threadPoolName) {
this.threadPoolName = threadPoolName;
return this;
}
public BlockingQueue<Runnable> getQueue() {
return queue;
}
public Map<Long, Out> getOrderMap() {
return orderMap;
}
public void shutdown() {
try {
exec.shutdown();
} catch (Exception e) {}
if (next != null) {
next.shutdown(); // propagate shutdown to the next stage
}
}
public boolean isShutdown() {
return exec.isShutdown();
}
/**
* Shutdowns executors and waits until all pipeline
* submitted tasks complete
* @throws InterruptedException
*/
public void join() throws InterruptedException {
exec.shutdown();
exec.awaitTermination(10, TimeUnit.MINUTES);
if (next != null) next.join();
}
private static class LimitedQueue<E> extends LinkedBlockingQueue<E> {
public LimitedQueue(int maxSize) {
super(maxSize);
}
@Override
public boolean offer(E e) {
            // turn offer() and add() into blocking calls (unless interrupted)
try {
put(e);
return true;
} catch(InterruptedException ie) {
Thread.currentThread().interrupt();
}
return false;
}
}
}
| 5,972
| 32.55618
| 132
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/ByteUtil.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import org.ethereum.db.ByteArrayWrapper;
import org.spongycastle.util.encoders.Hex;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
public class ByteUtil {
public static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
public static final byte[] ZERO_BYTE_ARRAY = new byte[]{0};
/**
* Creates a copy of bytes and appends b to the end of it
*/
public static byte[] appendByte(byte[] bytes, byte b) {
byte[] result = Arrays.copyOf(bytes, bytes.length + 1);
result[result.length - 1] = b;
return result;
}
/**
* The regular {@link java.math.BigInteger#toByteArray()} method isn't quite what we often need:
* it appends a leading zero to indicate that the number is positive and may need padding.
*
* @param b the integer to format into a byte array
* @param numBytes the desired size of the resulting byte array
* @return numBytes byte long array.
*/
public static byte[] bigIntegerToBytes(BigInteger b, int numBytes) {
if (b == null)
return null;
byte[] bytes = new byte[numBytes];
byte[] biBytes = b.toByteArray();
int start = (biBytes.length == numBytes + 1) ? 1 : 0;
int length = Math.min(biBytes.length, numBytes);
System.arraycopy(biBytes, start, bytes, numBytes - length, length);
return bytes;
}
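    /*
     * Illustrative example (not part of the original source): the value is
     * left-padded with zeros to the requested size, e.g.
     *
     *   bigIntegerToBytes(BigInteger.valueOf(255), 4)  ->  { 0x00, 0x00, 0x00, (byte) 0xFF }
     */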
public static byte[] bigIntegerToBytesSigned(BigInteger b, int numBytes) {
if (b == null)
return null;
byte[] bytes = new byte[numBytes];
Arrays.fill(bytes, b.signum() < 0 ? (byte) 0xFF : 0x00);
byte[] biBytes = b.toByteArray();
int start = (biBytes.length == numBytes + 1) ? 1 : 0;
int length = Math.min(biBytes.length, numBytes);
System.arraycopy(biBytes, start, bytes, numBytes - length, length);
return bytes;
}
/**
* Omitting sign indication byte.
* <br><br>
* Instead of {@link org.spongycastle.util.BigIntegers#asUnsignedByteArray(BigInteger)}
* <br>we use this custom method to avoid an empty array in case of BigInteger.ZERO
*
* @param value - any big integer number. A <code>null</code>-value will return <code>null</code>
* @return A byte array without a leading zero byte if present in the signed encoding.
* BigInteger.ZERO will return an array with length 1 and byte-value 0.
*/
public static byte[] bigIntegerToBytes(BigInteger value) {
if (value == null)
return null;
byte[] data = value.toByteArray();
if (data.length != 1 && data[0] == 0) {
byte[] tmp = new byte[data.length - 1];
System.arraycopy(data, 1, tmp, 0, tmp.length);
data = tmp;
}
return data;
}
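    /*
     * Illustrative example (not part of the original source): the sign byte that
     * BigInteger.toByteArray() may prepend is dropped, while zero keeps one byte.
     *
     *   bigIntegerToBytes(BigInteger.valueOf(255))  ->  { (byte) 0xFF }   (toByteArray() gives { 0x00, 0xFF })
     *   bigIntegerToBytes(BigInteger.ZERO)          ->  { 0x00 }
     */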
/**
     * Converts a big-endian byte array into an unsigned BigInteger.
     * null is treated like an empty array
     *
     * @param bb byte array containing the value
     * @return unsigned non-negative BigInteger value.
*/
public static BigInteger bytesToBigInteger(byte[] bb) {
return (bb == null || bb.length == 0) ? BigInteger.ZERO : new BigInteger(1, bb);
}
/**
     * Returns the number of leading positions at which the two arrays match.
     * The result is never larger than the length of the shorter input.
     *
     * @param a - first input
     * @param b - second input
     * @return number of matching leading bytes (nibbles when nibble arrays are passed)
*/
public static int matchingNibbleLength(byte[] a, byte[] b) {
int i = 0;
int length = a.length < b.length ? a.length : b.length;
while (i < length) {
if (a[i] != b[i])
return i;
i++;
}
return i;
}
/**
* Converts a long value into a byte array.
*
* @param val - long value to convert
* @return <code>byte[]</code> of length 8, representing the long value
*/
public static byte[] longToBytes(long val) {
return ByteBuffer.allocate(Long.BYTES).putLong(val).array();
}
/**
* Converts a long value into a byte array.
*
* @param val - long value to convert
     * @return byte array representation of the value with leading zero bytes stripped
*/
public static byte[] longToBytesNoLeadZeroes(long val) {
        // todo: improve performance by stripping bytes in a loop until (val >>> 8 == 0)
if (val == 0) return EMPTY_BYTE_ARRAY;
byte[] data = ByteBuffer.allocate(Long.BYTES).putLong(val).array();
return stripLeadingZeroes(data);
}
/**
* Converts int value into a byte array.
*
* @param val - int value to convert
* @return <code>byte[]</code> of length 4, representing the int value
*/
public static byte[] intToBytes(int val){
return ByteBuffer.allocate(Integer.BYTES).putInt(val).array();
}
/**
* Converts a int value into a byte array.
*
* @param val - int value to convert
     * @return byte array representation of the value with leading zero bytes stripped
*/
public static byte[] intToBytesNoLeadZeroes(int val){
if (val == 0) return EMPTY_BYTE_ARRAY;
        int length = 0;
        int tmpVal = val;
        while (tmpVal != 0){
            tmpVal = tmpVal >>> 8;
            ++length;
        }
        byte[] result = new byte[length];
int index = result.length - 1;
while(val != 0){
result[index] = (byte)(val & 0xFF);
val = val >>> 8;
index -= 1;
}
return result;
}
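    /*
     * Illustrative comparison (not part of the original source) of the fixed-size
     * and stripped encodings:
     *
     *   intToBytes(0xABCD)             ->  { 0x00, 0x00, (byte) 0xAB, (byte) 0xCD }
     *   intToBytesNoLeadZeroes(0xABCD) ->  { (byte) 0xAB, (byte) 0xCD }
     *   intToBytesNoLeadZeroes(0)      ->  EMPTY_BYTE_ARRAY
     */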
/**
* Convert a byte-array into a hex String.<br>
* Works similar to {@link Hex#toHexString}
* but allows for <code>null</code>
*
* @param data - byte-array to convert to a hex-string
* @return hex representation of the data.<br>
* Returns an empty String if the input is <code>null</code>
*
* @see Hex#toHexString
*/
public static String toHexString(byte[] data) {
return data == null ? "" : Hex.toHexString(data);
}
/**
* Calculate packet length
*
* @param msg byte[]
* @return byte-array with 4 elements
*/
public static byte[] calcPacketLength(byte[] msg) {
int msgLen = msg.length;
return new byte[]{
(byte) ((msgLen >> 24) & 0xFF),
(byte) ((msgLen >> 16) & 0xFF),
(byte) ((msgLen >> 8) & 0xFF),
(byte) ((msgLen) & 0xFF)};
}
/**
     * Converts a big-endian byte array into an int.
     * null is treated like an empty array
     *
     * Limited to Integer.MAX_VALUE: 2^31-1 (4 bytes)
     *
     * @param b array containing the value
     * @return int value of the unsigned big-endian bytes.
*/
public static int byteArrayToInt(byte[] b) {
if (b == null || b.length == 0)
return 0;
return new BigInteger(1, b).intValue();
}
/**
     * Converts a big-endian byte array into a long.
     * null is treated like an empty array
     *
     * Limited to Long.MAX_VALUE: 2<sup>63</sup>-1 (8 bytes)
     *
     * @param b array containing the value
     * @return long value of the unsigned big-endian bytes.
*/
public static long byteArrayToLong(byte[] b) {
if (b == null || b.length == 0)
return 0;
return new BigInteger(1, b).longValue();
}
/**
* Turn nibbles to a pretty looking output string
*
     * Example. [ 1, 2, 3, 4, 5 ] becomes '\x01\x02\x03\x04\x05'
     *
     * @param nibbles - array of nibbles; each byte of data [ 04 ] is rendered
     *                  as its '\x04' representation
     * @return pretty string of nibbles
*/
public static String nibblesToPrettyString(byte[] nibbles) {
StringBuilder builder = new StringBuilder();
for (byte nibble : nibbles) {
final String nibbleString = oneByteToHexString(nibble);
builder.append("\\x").append(nibbleString);
}
return builder.toString();
}
public static String oneByteToHexString(byte value) {
String retVal = Integer.toString(value & 0xFF, 16);
if (retVal.length() == 1) retVal = "0" + retVal;
return retVal;
}
/**
* Calculate the number of bytes need
* to encode the number
*
* @param val - number
* @return number of min bytes used to encode the number
*/
public static int numBytes(String val) {
BigInteger bInt = new BigInteger(val);
int bytes = 0;
while (!bInt.equals(BigInteger.ZERO)) {
bInt = bInt.shiftRight(8);
++bytes;
}
if (bytes == 0) ++bytes;
return bytes;
}
/**
     * @param arg - value whose encoding is not more than 32 bytes
     * @return - bytes of the value left-padded with zeroes to a total of 32 bytes
*/
public static byte[] encodeValFor32Bits(Object arg) {
byte[] data;
// check if the string is numeric
if (arg.toString().trim().matches("-?\\d+(\\.\\d+)?"))
data = new BigInteger(arg.toString().trim()).toByteArray();
// check if it's hex number
else if (arg.toString().trim().matches("0[xX][0-9a-fA-F]+"))
data = new BigInteger(arg.toString().trim().substring(2), 16).toByteArray();
else
data = arg.toString().trim().getBytes();
if (data.length > 32)
            throw new RuntimeException("values can't be more than 32 bytes");
byte[] val = new byte[32];
int j = 0;
for (int i = data.length; i > 0; --i) {
val[31 - j] = data[i - 1];
++j;
}
return val;
}
/**
* encode the values and concatenate together
*
* @param args Object
* @return byte[]
*/
public static byte[] encodeDataList(Object... args) {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
for (Object arg : args) {
byte[] val = encodeValFor32Bits(arg);
try {
baos.write(val);
} catch (IOException e) {
                throw new Error("Something happened that should never happen", e);
}
}
return baos.toByteArray();
}
public static int firstNonZeroByte(byte[] data) {
for (int i = 0; i < data.length; ++i) {
if (data[i] != 0) {
return i;
}
}
return -1;
}
public static byte[] stripLeadingZeroes(byte[] data) {
if (data == null)
return null;
final int firstNonZero = firstNonZeroByte(data);
switch (firstNonZero) {
case -1:
return ZERO_BYTE_ARRAY;
case 0:
return data;
default:
byte[] result = new byte[data.length - firstNonZero];
System.arraycopy(data, firstNonZero, result, 0, data.length - firstNonZero);
return result;
}
}
/**
* increment byte array as a number until max is reached
*
* @param bytes byte[]
* @return boolean
*/
public static boolean increment(byte[] bytes) {
final int startIndex = 0;
int i;
for (i = bytes.length - 1; i >= startIndex; i--) {
bytes[i]++;
if (bytes[i] != 0)
break;
}
// we return false when all bytes are 0 again
return (i >= startIndex || bytes[startIndex] != 0);
}
/**
     * Converts the given BigInteger into a fixed 32-byte array.
     * If the unsigned byte representation is shorter than 32 bytes,
     * the result is left-padded with zeros.
     *
     * @param value - a BigInteger with a maximum value of 2^256-1
     * @return 32-byte array containing a copy of the unsigned value
*/
public static byte[] copyToArray(BigInteger value) {
byte[] src = ByteUtil.bigIntegerToBytes(value);
byte[] dest = ByteBuffer.allocate(32).array();
System.arraycopy(src, 0, dest, dest.length - src.length, src.length);
return dest;
}
public static ByteArrayWrapper wrap(byte[] data) {
return new ByteArrayWrapper(data);
}
public static byte[] setBit(byte[] data, int pos, int val) {
if ((data.length * 8) - 1 < pos)
throw new Error("outside byte array limit, pos: " + pos);
int posByte = data.length - 1 - (pos) / 8;
int posBit = (pos) % 8;
byte setter = (byte) (1 << (posBit));
byte toBeSet = data[posByte];
byte result;
if (val == 1)
result = (byte) (toBeSet | setter);
else
result = (byte) (toBeSet & ~setter);
data[posByte] = result;
return data;
}
public static int getBit(byte[] data, int pos) {
if ((data.length * 8) - 1 < pos)
throw new Error("outside byte array limit, pos: " + pos);
int posByte = data.length - 1 - pos / 8;
int posBit = pos % 8;
byte dataByte = data[posByte];
return Math.min(1, (dataByte & (1 << (posBit))));
}
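    /*
     * Illustrative example (not part of the original source): bit positions are
     * counted from the least significant bit of the last byte.
     *
     *   byte[] d = new byte[2];     // { 0x00, 0x00 }
     *   setBit(d, 0, 1);            // { 0x00, 0x01 }
     *   setBit(d, 9, 1);            // { 0x02, 0x01 }
     *   getBit(d, 9);               // 1
     */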
public static byte[] and(byte[] b1, byte[] b2) {
if (b1.length != b2.length) throw new RuntimeException("Array sizes differ");
byte[] ret = new byte[b1.length];
for (int i = 0; i < ret.length; i++) {
ret[i] = (byte) (b1[i] & b2[i]);
}
return ret;
}
public static byte[] or(byte[] b1, byte[] b2) {
if (b1.length != b2.length) throw new RuntimeException("Array sizes differ");
byte[] ret = new byte[b1.length];
for (int i = 0; i < ret.length; i++) {
ret[i] = (byte) (b1[i] | b2[i]);
}
return ret;
}
public static byte[] xor(byte[] b1, byte[] b2) {
if (b1.length != b2.length) throw new RuntimeException("Array sizes differ");
byte[] ret = new byte[b1.length];
for (int i = 0; i < ret.length; i++) {
ret[i] = (byte) (b1[i] ^ b2[i]);
}
return ret;
}
/**
* XORs byte arrays of different lengths by aligning length of the shortest via adding zeros at beginning
*/
public static byte[] xorAlignRight(byte[] b1, byte[] b2) {
if (b1.length > b2.length) {
byte[] b2_ = new byte[b1.length];
System.arraycopy(b2, 0, b2_, b1.length - b2.length, b2.length);
b2 = b2_;
} else if (b2.length > b1.length) {
byte[] b1_ = new byte[b2.length];
System.arraycopy(b1, 0, b1_, b2.length - b1.length, b1.length);
b1 = b1_;
}
return xor(b1, b2);
}
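    /*
     * Illustrative example (not part of the original source): the shorter array is
     * left-padded with zeros before the XOR.
     *
     *   xorAlignRight(new byte[]{0x0F, 0x0F}, new byte[]{0x01})  ->  { 0x0F, 0x0E }
     */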
/**
* @param arrays - arrays to merge
* @return - merged array
*/
public static byte[] merge(byte[]... arrays)
{
int count = 0;
for (byte[] array: arrays)
{
count += array.length;
}
// Create new array and copy all array contents
byte[] mergedArray = new byte[count];
int start = 0;
for (byte[] array: arrays) {
System.arraycopy(array, 0, mergedArray, start, array.length);
start += array.length;
}
return mergedArray;
}
public static boolean isNullOrZeroArray(byte[] array){
return (array == null) || (array.length == 0);
}
public static boolean isSingleZero(byte[] array){
return (array.length == 1 && array[0] == 0);
}
public static Set<byte[]> difference(Set<byte[]> setA, Set<byte[]> setB){
Set<byte[]> result = new HashSet<>();
for (byte[] elementA : setA){
boolean found = false;
for (byte[] elementB : setB){
if (Arrays.equals(elementA, elementB)){
found = true;
break;
}
}
if (!found) result.add(elementA);
}
return result;
}
public static int length(byte[]... bytes) {
int result = 0;
for (byte[] array : bytes) {
result += (array == null) ? 0 : array.length;
}
return result;
}
public static byte[] intsToBytes(int[] arr, boolean bigEndian) {
byte[] ret = new byte[arr.length * 4];
intsToBytes(arr, ret, bigEndian);
return ret;
}
public static int[] bytesToInts(byte[] arr, boolean bigEndian) {
int[] ret = new int[arr.length / 4];
bytesToInts(arr, ret, bigEndian);
return ret;
}
public static void bytesToInts(byte[] b, int[] arr, boolean bigEndian) {
if (!bigEndian) {
int off = 0;
for (int i = 0; i < arr.length; i++) {
int ii = b[off++] & 0x000000FF;
ii |= (b[off++] << 8) & 0x0000FF00;
ii |= (b[off++] << 16) & 0x00FF0000;
ii |= (b[off++] << 24);
arr[i] = ii;
}
} else {
int off = 0;
for (int i = 0; i < arr.length; i++) {
int ii = b[off++] << 24;
ii |= (b[off++] << 16) & 0x00FF0000;
ii |= (b[off++] << 8) & 0x0000FF00;
ii |= b[off++] & 0x000000FF;
arr[i] = ii;
}
}
}
public static void intsToBytes(int[] arr, byte[] b, boolean bigEndian) {
if (!bigEndian) {
int off = 0;
for (int i = 0; i < arr.length; i++) {
int ii = arr[i];
b[off++] = (byte) (ii & 0xFF);
b[off++] = (byte) ((ii >> 8) & 0xFF);
b[off++] = (byte) ((ii >> 16) & 0xFF);
b[off++] = (byte) ((ii >> 24) & 0xFF);
}
} else {
int off = 0;
for (int i = 0; i < arr.length; i++) {
int ii = arr[i];
b[off++] = (byte) ((ii >> 24) & 0xFF);
b[off++] = (byte) ((ii >> 16) & 0xFF);
b[off++] = (byte) ((ii >> 8) & 0xFF);
b[off++] = (byte) (ii & 0xFF);
}
}
}
public static short bigEndianToShort(byte[] bs) {
return bigEndianToShort(bs, 0);
}
public static short bigEndianToShort(byte[] bs, int off) {
int n = bs[off] << 8;
++off;
n |= bs[off] & 0xFF;
return (short) n;
}
public static byte[] shortToBytes(short n) {
return ByteBuffer.allocate(2).putShort(n).array();
}
/**
* Converts string hex representation to data bytes
* Accepts following hex:
* - with or without 0x prefix
* - with no leading 0, like 0xabc -> 0x0abc
* @param data String like '0xa5e..' or just 'a5e..'
* @return decoded bytes array
*/
public static byte[] hexStringToBytes(String data) {
if (data == null) return EMPTY_BYTE_ARRAY;
if (data.startsWith("0x")) data = data.substring(2);
if (data.length() % 2 == 1) data = "0" + data;
return Hex.decode(data);
}
/**
* Converts string representation of host/ip to 4-bytes byte[] IPv4
*/
public static byte[] hostToBytes(String ip) {
byte[] bytesIp;
try {
bytesIp = InetAddress.getByName(ip).getAddress();
} catch (UnknownHostException e) {
bytesIp = new byte[4]; // fall back to invalid 0.0.0.0 address
}
return bytesIp;
}
/**
* Converts 4 bytes IPv4 IP to String representation
*/
public static String bytesToIp(byte[] bytesIp) {
StringBuilder sb = new StringBuilder();
sb.append(bytesIp[0] & 0xFF);
sb.append(".");
sb.append(bytesIp[1] & 0xFF);
sb.append(".");
sb.append(bytesIp[2] & 0xFF);
sb.append(".");
sb.append(bytesIp[3] & 0xFF);
String ip = sb.toString();
return ip;
}
/**
* Returns a number of zero bits preceding the highest-order ("leftmost") one-bit
* interpreting input array as a big-endian integer value
*/
public static int numberOfLeadingZeros(byte[] bytes) {
int i = firstNonZeroByte(bytes);
if (i == -1) {
return bytes.length * 8;
} else {
int byteLeadingZeros = Integer.numberOfLeadingZeros((int)bytes[i] & 0xff) - 24;
return i * 8 + byteLeadingZeros;
}
}
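    /*
     * Illustrative example (not part of the original source):
     *
     *   numberOfLeadingZeros(new byte[]{0x00, 0x10})  ->  11   (8 bits of the first byte + 3 leading bits of 0x10)
     *   numberOfLeadingZeros(new byte[]{0x00, 0x00})  ->  16
     */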
/**
     * Parses a fixed number of bytes starting from {@code offset} in the {@code input} array.
     * If {@code input} does not have enough bytes, the returned array is right-padded with zero bytes.
     * I.e. if {@code offset} is higher than {@code input.length} then a zero byte array of length {@code len} is returned
*/
public static byte[] parseBytes(byte[] input, int offset, int len) {
if (offset >= input.length || len == 0)
return EMPTY_BYTE_ARRAY;
byte[] bytes = new byte[len];
System.arraycopy(input, offset, bytes, 0, Math.min(input.length - offset, len));
return bytes;
}
/**
* Parses 32-bytes word from given input.
* Uses {@link #parseBytes(byte[], int, int)} method,
     * thus, the result will be right-padded with zero bytes if there are not enough bytes in {@code input}
*
* @param idx an index of the word starting from {@code 0}
*/
public static byte[] parseWord(byte[] input, int idx) {
return parseBytes(input, 32 * idx, 32);
}
/**
* Parses 32-bytes word from given input.
* Uses {@link #parseBytes(byte[], int, int)} method,
     * thus, the result will be right-padded with zero bytes if there are not enough bytes in {@code input}
*
* @param idx an index of the word starting from {@code 0}
* @param offset an offset in {@code input} array to start parsing from
*/
public static byte[] parseWord(byte[] input, int offset, int idx) {
return parseBytes(input, offset + 32 * idx, 32);
}
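    /*
     * Illustrative example (not part of the original source; "input" is a hypothetical
     * byte array): missing input is padded with zeros on the right.
     *
     *   parseBytes(new byte[]{1, 2, 3}, 1, 4)  ->  { 2, 3, 0, 0 }
     *   parseWord(input, 1)                    ->  bytes 32..63 of input, zero-padded if shorter
     */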
}
| 22,846
| 30.29726
| 125
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/RLPElement.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import java.io.Serializable;
/**
* Wrapper class for decoded elements from an RLP encoded byte array.
*
* @author Roman Mandeleil
* @since 01.04.2014
*/
public interface RLPElement extends Serializable {
byte[] getRLPData();
}
| 1,056
| 32.03125
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/SetAdapter.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
/**
* Created by Anton Nashatyrev on 06.10.2016.
*/
public class SetAdapter<E> implements Set<E> {
private static final Object DummyValue = new Object();
Map<E, Object> delegate;
public SetAdapter(Map<E, ?> delegate) {
this.delegate = (Map<E, Object>) delegate;
}
@Override
public int size() {
return delegate.size();
}
@Override
public boolean isEmpty() {
return delegate.isEmpty();
}
@Override
public boolean contains(Object o) {
return delegate.containsKey(o);
}
@Override
public Iterator<E> iterator() {
return delegate.keySet().iterator();
}
@Override
public Object[] toArray() {
return delegate.keySet().toArray();
}
@Override
public <T> T[] toArray(T[] a) {
return delegate.keySet().toArray(a);
}
@Override
public boolean add(E e) {
return delegate.put(e, DummyValue) == null;
}
@Override
public boolean remove(Object o) {
return delegate.remove(o) != null;
}
@Override
public boolean containsAll(Collection<?> c) {
return delegate.keySet().containsAll(c);
}
@Override
public boolean addAll(Collection<? extends E> c) {
boolean ret = false;
for (E e : c) {
ret |= add(e);
}
return ret;
}
@Override
public boolean retainAll(Collection<?> c) {
throw new RuntimeException("Not implemented"); // TODO add later if required
}
@Override
public boolean removeAll(Collection<?> c) {
boolean ret = false;
for (Object e : c) {
ret |= remove(e);
}
return ret;
}
@Override
public void clear() {
delegate.clear();
}
}
| 2,703
| 23.807339
| 84
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/RLP.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import org.ethereum.db.ByteArrayWrapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.spongycastle.util.encoders.Hex;
import java.math.BigInteger;
import java.util.*;
import static java.util.Arrays.copyOfRange;
import static org.ethereum.util.ByteUtil.*;
import static org.spongycastle.util.Arrays.concatenate;
import static org.spongycastle.util.BigIntegers.asUnsignedByteArray;
/**
* Recursive Length Prefix (RLP) encoding.
* <p>
* The purpose of RLP is to encode arbitrarily nested arrays of binary data, and
* RLP is the main encoding method used to serialize objects in Ethereum. The
* only purpose of RLP is to encode structure; encoding specific atomic data
* types (eg. strings, integers, floats) is left up to higher-order protocols; in
* Ethereum the standard is that integers are represented in big endian binary
* form. If one wishes to use RLP to encode a dictionary, the two suggested
* canonical forms are to either use [[k1,v1],[k2,v2]...] with keys in
* lexicographic order or to use the higher-level Patricia Tree encoding as
* Ethereum does.
* <p>
* The RLP encoding function takes in an item. An item is defined as follows:
* <p>
* - A string (ie. byte array) is an item - A list of items is an item
* <p>
* For example, an empty string is an item, as is the string containing the word
* "cat", a list containing any number of strings, as well as more complex data
* structures like ["cat",["puppy","cow"],"horse",[[]],"pig",[""],"sheep"]. Note
* that in the context of the rest of this article, "string" will be used as a
* synonym for "a certain number of bytes of binary data"; no special encodings
* are used and no knowledge about the content of the strings is implied.
* <p>
* See: https://github.com/ethereum/wiki/wiki/%5BEnglish%5D-RLP
*
* @author Roman Mandeleil
* @since 01.04.2014
*/
public class RLP {
private static final Logger logger = LoggerFactory.getLogger("rlp");
public static final byte[] EMPTY_ELEMENT_RLP = encodeElement(new byte[0]);
private static final int MAX_DEPTH = 16;
/**
* Allow for content up to size of 2^64 bytes *
*/
private static final double MAX_ITEM_LENGTH = Math.pow(256, 8);
/**
* Reason for threshold according to Vitalik Buterin:
* - 56 bytes maximizes the benefit of both options
* - if we went with 60 then we would have only had 4 slots for long strings
* so RLP would not have been able to store objects above 4gb
* - if we went with 48 then RLP would be fine for 2^128 space, but that's way too much
* - so 56 and 2^64 space seems like the right place to put the cutoff
     * - also, that's where Bitcoin's varint does the cutoff
*/
private static final int SIZE_THRESHOLD = 56;
/** RLP encoding rules are defined as follows: */
/*
* For a single byte whose value is in the [0x00, 0x7f] range, that byte is
* its own RLP encoding.
*/
/**
* [0x80]
* If a string is 0-55 bytes long, the RLP encoding consists of a single
* byte with value 0x80 plus the length of the string followed by the
* string. The range of the first byte is thus [0x80, 0xb7].
*/
private static final int OFFSET_SHORT_ITEM = 0x80;
/**
* [0xb7]
* If a string is more than 55 bytes long, the RLP encoding consists of a
* single byte with value 0xb7 plus the length of the length of the string
* in binary form, followed by the length of the string, followed by the
* string. For example, a length-1024 string would be encoded as
* \xb9\x04\x00 followed by the string. The range of the first byte is thus
* [0xb8, 0xbf].
*/
private static final int OFFSET_LONG_ITEM = 0xb7;
/**
* [0xc0]
* If the total payload of a list (i.e. the combined length of all its
* items) is 0-55 bytes long, the RLP encoding consists of a single byte
* with value 0xc0 plus the length of the list followed by the concatenation
* of the RLP encodings of the items. The range of the first byte is thus
* [0xc0, 0xf7].
*/
private static final int OFFSET_SHORT_LIST = 0xc0;
/**
* [0xf7]
* If the total payload of a list is more than 55 bytes long, the RLP
* encoding consists of a single byte with value 0xf7 plus the length of the
* length of the list in binary form, followed by the length of the list,
* followed by the concatenation of the RLP encodings of the items. The
* range of the first byte is thus [0xf8, 0xff].
*/
private static final int OFFSET_LONG_LIST = 0xf7;
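    /*
     * Illustrative encodings under the rules above (standard RLP examples, not part
     * of the original source):
     *
     *   "dog"                  ->  0x83 'd' 'o' 'g'                           (short item, length 3)
     *   empty byte array       ->  0x80                                       (zero-length item)
     *   list of "cat", "dog"   ->  0xc8 0x83 'c' 'a' 't' 0x83 'd' 'o' 'g'     (short list, payload 8 bytes)
     *   a 1024-byte string     ->  0xb9 0x04 0x00 followed by the string      (long item)
     */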
/* ******************************************************
* DECODING *
* ******************************************************/
private static byte decodeOneByteItem(byte[] data, int index) {
// null item
if ((data[index] & 0xFF) == OFFSET_SHORT_ITEM) {
return (byte) (data[index] - OFFSET_SHORT_ITEM);
}
// single byte item
if ((data[index] & 0xFF) < OFFSET_SHORT_ITEM) {
return data[index];
}
// single byte item
if ((data[index] & 0xFF) == OFFSET_SHORT_ITEM + 1) {
return data[index + 1];
}
return 0;
}
public static int decodeInt(byte[] data, int index) {
int value = 0;
// NOTE: From RLP doc:
// Ethereum integers must be represented in big endian binary form
// with no leading zeroes (thus making the integer value zero be
// equivalent to the empty byte array)
if (data[index] == 0x00) {
throw new RuntimeException("not a number");
} else if ((data[index] & 0xFF) < OFFSET_SHORT_ITEM) {
return data[index];
} else if ((data[index] & 0xFF) <= OFFSET_SHORT_ITEM + Integer.BYTES) {
byte length = (byte) (data[index] - OFFSET_SHORT_ITEM);
byte pow = (byte) (length - 1);
for (int i = 1; i <= length; ++i) {
// << (8 * pow) == bit shift to 0 (*1), 8 (*256) , 16 (*65..)..
value += (data[index + i] & 0xFF) << (8 * pow);
pow--;
}
} else {
// If there are more than 4 bytes, it is not going
// to decode properly into an int.
throw new RuntimeException("wrong decode attempt");
}
return value;
}
static short decodeShort(byte[] data, int index) {
short value = 0;
if (data[index] == 0x00) {
throw new RuntimeException("not a number");
} else if ((data[index] & 0xFF) < OFFSET_SHORT_ITEM) {
return data[index];
} else if ((data[index] & 0xFF) <= OFFSET_SHORT_ITEM + Short.BYTES) {
byte length = (byte) (data[index] - OFFSET_SHORT_ITEM);
byte pow = (byte) (length - 1);
for (int i = 1; i <= length; ++i) {
// << (8 * pow) == bit shift to 0 (*1), 8 (*256) , 16 (*65..)
value += (data[index + i] & 0xFF) << (8 * pow);
pow--;
}
} else {
// If there are more than 2 bytes, it is not going
// to decode properly into a short.
throw new RuntimeException("wrong decode attempt");
}
return value;
}
public static long decodeLong(byte[] data, int index) {
long value = 0;
if (data[index] == 0x00) {
throw new RuntimeException("not a number");
} else if ((data[index] & 0xFF) < OFFSET_SHORT_ITEM) {
return data[index];
} else if ((data[index] & 0xFF) <= OFFSET_SHORT_ITEM + Long.BYTES) {
byte length = (byte) (data[index] - OFFSET_SHORT_ITEM);
byte pow = (byte) (length - 1);
for (int i = 1; i <= length; ++i) {
// << (8 * pow) == bit shift to 0 (*1), 8 (*256) , 16 (*65..)..
value += (long) (data[index + i] & 0xFF) << (8 * pow);
pow--;
}
} else {
// If there are more than 8 bytes, it is not going
// to decode properly into a long.
throw new RuntimeException("wrong decode attempt");
}
return value;
}
private static String decodeStringItem(byte[] data, int index) {
final byte[] valueBytes = decodeItemBytes(data, index);
if (valueBytes.length == 0) {
// shortcut
return "";
} else {
return new String(valueBytes);
}
}
public static BigInteger decodeBigInteger(byte[] data, int index) {
final byte[] valueBytes = decodeItemBytes(data, index);
if (valueBytes.length == 0) {
// shortcut
return BigInteger.ZERO;
} else {
BigInteger res = new BigInteger(1, valueBytes);
return res;
}
}
private static byte[] decodeByteArray(byte[] data, int index) {
return decodeItemBytes(data, index);
}
private static int nextItemLength(byte[] data, int index) {
if (index >= data.length)
return -1;
// [0xf8, 0xff]
if ((data[index] & 0xFF) > OFFSET_LONG_LIST) {
byte lengthOfLength = (byte) (data[index] - OFFSET_LONG_LIST);
return calcLength(lengthOfLength, data, index);
}
// [0xc0, 0xf7]
if ((data[index] & 0xFF) >= OFFSET_SHORT_LIST
&& (data[index] & 0xFF) <= OFFSET_LONG_LIST) {
return (byte) ((data[index] & 0xFF) - OFFSET_SHORT_LIST);
}
// [0xb8, 0xbf]
if ((data[index] & 0xFF) > OFFSET_LONG_ITEM
&& (data[index] & 0xFF) < OFFSET_SHORT_LIST) {
byte lengthOfLength = (byte) (data[index] - OFFSET_LONG_ITEM);
return calcLength(lengthOfLength, data, index);
}
// [0x81, 0xb7]
if ((data[index] & 0xFF) > OFFSET_SHORT_ITEM
&& (data[index] & 0xFF) <= OFFSET_LONG_ITEM) {
return (byte) ((data[index] & 0xFF) - OFFSET_SHORT_ITEM);
}
// [0x00, 0x80]
if ((data[index] & 0xFF) <= OFFSET_SHORT_ITEM) {
return 1;
}
return -1;
}
public static byte[] decodeIP4Bytes(byte[] data, int index) {
int offset = 1;
final byte[] result = new byte[4];
for (int i = 0; i < 4; i++) {
result[i] = decodeOneByteItem(data, index + offset);
if ((data[index + offset] & 0xFF) > OFFSET_SHORT_ITEM)
offset += 2;
else
offset += 1;
}
// return IP address
return result;
}
public static int getFirstListElement(byte[] payload, int pos) {
if (pos >= payload.length)
return -1;
// [0xf8, 0xff]
if ((payload[pos] & 0xFF) > OFFSET_LONG_LIST) {
byte lengthOfLength = (byte) (payload[pos] - OFFSET_LONG_LIST);
return pos + lengthOfLength + 1;
}
// [0xc0, 0xf7]
if ((payload[pos] & 0xFF) >= OFFSET_SHORT_LIST
&& (payload[pos] & 0xFF) <= OFFSET_LONG_LIST) {
return pos + 1;
}
// [0xb8, 0xbf]
if ((payload[pos] & 0xFF) > OFFSET_LONG_ITEM
&& (payload[pos] & 0xFF) < OFFSET_SHORT_LIST) {
byte lengthOfLength = (byte) (payload[pos] - OFFSET_LONG_ITEM);
return pos + lengthOfLength + 1;
}
return -1;
}
public static int getNextElementIndex(byte[] payload, int pos) {
if (pos >= payload.length)
return -1;
// [0xf8, 0xff]
if ((payload[pos] & 0xFF) > OFFSET_LONG_LIST) {
byte lengthOfLength = (byte) (payload[pos] - OFFSET_LONG_LIST);
int length = calcLength(lengthOfLength, payload, pos);
return pos + lengthOfLength + length + 1;
}
// [0xc0, 0xf7]
if ((payload[pos] & 0xFF) >= OFFSET_SHORT_LIST
&& (payload[pos] & 0xFF) <= OFFSET_LONG_LIST) {
byte length = (byte) ((payload[pos] & 0xFF) - OFFSET_SHORT_LIST);
return pos + 1 + length;
}
// [0xb8, 0xbf]
if ((payload[pos] & 0xFF) > OFFSET_LONG_ITEM
&& (payload[pos] & 0xFF) < OFFSET_SHORT_LIST) {
byte lengthOfLength = (byte) (payload[pos] - OFFSET_LONG_ITEM);
int length = calcLength(lengthOfLength, payload, pos);
return pos + lengthOfLength + length + 1;
}
// [0x81, 0xb7]
if ((payload[pos] & 0xFF) > OFFSET_SHORT_ITEM
&& (payload[pos] & 0xFF) <= OFFSET_LONG_ITEM) {
byte length = (byte) ((payload[pos] & 0xFF) - OFFSET_SHORT_ITEM);
return pos + 1 + length;
}
        // [0x80]
if ((payload[pos] & 0xFF) == OFFSET_SHORT_ITEM) {
return pos + 1;
}
// [0x00, 0x7f]
if ((payload[pos] & 0xFF) < OFFSET_SHORT_ITEM) {
return pos + 1;
}
return -1;
}
/**
* Parse length of long item or list.
* RLP supports lengths with up to 8 bytes long,
* but due to java limitation it returns either encoded length
* or {@link Integer#MAX_VALUE} in case if encoded length is greater
*
* @param lengthOfLength length of length in bytes
* @param msgData message
* @param pos position to parse from
*
* @return calculated length
*/
private static int calcLength(int lengthOfLength, byte[] msgData, int pos) {
byte pow = (byte) (lengthOfLength - 1);
int length = 0;
for (int i = 1; i <= lengthOfLength; ++i) {
int bt = msgData[pos + i] & 0xFF;
int shift = 8 * pow;
// no leading zeros are acceptable
if (bt == 0 && length == 0) {
throw new RuntimeException("RLP length contains leading zeros");
}
// return MAX_VALUE if index of highest bit is more than 31
if (32 - Integer.numberOfLeadingZeros(bt) + shift > 31) {
return Integer.MAX_VALUE;
}
length += bt << shift;
pow--;
}
// check that length is in payload bounds
verifyLength(length, msgData.length - pos - lengthOfLength);
return length;
}
public static byte getCommandCode(byte[] data) {
int index = getFirstListElement(data, 0);
final byte command = data[index];
return ((command & 0xFF) == OFFSET_SHORT_ITEM) ? 0 : command;
}
/**
* Parse wire byte[] message into RLP elements
*
* @param msgData - raw RLP data
* @param depthLimit - limits depth of decoding
* @return rlpList
* - outcome of recursive RLP structure
*/
public static RLPList decode2(byte[] msgData, int depthLimit) {
if (depthLimit < 1) {
throw new RuntimeException("Depth limit should be 1 or higher");
}
RLPList rlpList = new RLPList();
fullTraverse(msgData, 0, 0, msgData.length, rlpList, depthLimit);
return rlpList;
}
/**
* Parse wire byte[] message into RLP elements
*
* @param msgData - raw RLP data
* @return rlpList
* - outcome of recursive RLP structure
*/
public static RLPList decode2(byte[] msgData) {
RLPList rlpList = new RLPList();
fullTraverse(msgData, 0, 0, msgData.length, rlpList, Integer.MAX_VALUE);
return rlpList;
}
/**
* Decodes RLP with list without going deep after 1st level list
* (actually, 2nd as 1st level is wrap only)
*
* So assuming you've packed several byte[] with {@link #encodeList(byte[]...)},
* you could use this method to unpack them,
* getting RLPList with RLPItem's holding byte[] inside
* @param msgData rlp data
* @return list of RLPItems
*/
public static RLPList unwrapList(byte[] msgData) {
return (RLPList) decode2(msgData, 2).get(0);
}
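    /*
     * Illustrative round trip (not part of the original source; the payload names
     * are hypothetical):
     *
     *   byte[] packed = RLP.wrapList(payloadA, payloadB);
     *   RLPList items = RLP.unwrapList(packed);
     *   byte[] a = items.get(0).getRLPData();   // payloadA again
     */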
public static RLPElement decode2OneItem(byte[] msgData, int startPos) {
RLPList rlpList = new RLPList();
fullTraverse(msgData, 0, startPos, startPos + 1, rlpList, Integer.MAX_VALUE);
return rlpList.get(0);
}
/**
* Get exactly one message payload
*/
static void fullTraverse(byte[] msgData, int level, int startPos,
int endPos, RLPList rlpList, int depth) {
if (level > MAX_DEPTH) {
throw new RuntimeException(String.format("Error: Traversing over max RLP depth (%s)", MAX_DEPTH));
}
try {
if (msgData == null || msgData.length == 0)
return;
int pos = startPos;
while (pos < endPos) {
logger.debug("fullTraverse: level: " + level + " startPos: " + pos + " endPos: " + endPos);
// It's a list with a payload more than 55 bytes
// data[0] - 0xF7 = how many next bytes allocated
// for the length of the list
if ((msgData[pos] & 0xFF) > OFFSET_LONG_LIST) {
byte lengthOfLength = (byte) (msgData[pos] - OFFSET_LONG_LIST);
int length = calcLength(lengthOfLength, msgData, pos);
if (length < SIZE_THRESHOLD) {
throw new RuntimeException("Short list has been encoded as long list");
}
// check that length is in payload bounds
verifyLength(length, msgData.length - pos - lengthOfLength);
byte[] rlpData = new byte[lengthOfLength + length + 1];
System.arraycopy(msgData, pos, rlpData, 0, lengthOfLength
+ length + 1);
if(level + 1 < depth) {
RLPList newLevelList = new RLPList();
newLevelList.setRLPData(rlpData);
fullTraverse(msgData, level + 1, pos + lengthOfLength + 1,
pos + lengthOfLength + length + 1, newLevelList, depth);
rlpList.add(newLevelList);
} else {
rlpList.add(new RLPItem(rlpData));
}
pos += lengthOfLength + length + 1;
continue;
}
// It's a list with a payload less than 55 bytes
if ((msgData[pos] & 0xFF) >= OFFSET_SHORT_LIST
&& (msgData[pos] & 0xFF) <= OFFSET_LONG_LIST) {
byte length = (byte) ((msgData[pos] & 0xFF) - OFFSET_SHORT_LIST);
byte[] rlpData = new byte[length + 1];
System.arraycopy(msgData, pos, rlpData, 0, length + 1);
if(level + 1 < depth) {
RLPList newLevelList = new RLPList();
newLevelList.setRLPData(rlpData);
if (length > 0)
fullTraverse(msgData, level + 1, pos + 1, pos + length + 1, newLevelList, depth);
rlpList.add(newLevelList);
} else {
rlpList.add(new RLPItem(rlpData));
}
pos += 1 + length;
continue;
}
// It's an item with a payload more than 55 bytes
                // data[0] - 0xB7 = how many next bytes allocated for
// the length of the string
if ((msgData[pos] & 0xFF) > OFFSET_LONG_ITEM
&& (msgData[pos] & 0xFF) < OFFSET_SHORT_LIST) {
byte lengthOfLength = (byte) (msgData[pos] - OFFSET_LONG_ITEM);
int length = calcLength(lengthOfLength, msgData, pos);
if (length < SIZE_THRESHOLD) {
throw new RuntimeException("Short item has been encoded as long item");
}
// check that length is in payload bounds
verifyLength(length, msgData.length - pos - lengthOfLength);
// now we can parse an item for data[1]..data[length]
byte[] item = new byte[length];
System.arraycopy(msgData, pos + lengthOfLength + 1, item,
0, length);
RLPItem rlpItem = new RLPItem(item);
rlpList.add(rlpItem);
pos += lengthOfLength + length + 1;
continue;
}
// It's an item less than 55 bytes long,
// data[0] - 0x80 == length of the item
if ((msgData[pos] & 0xFF) > OFFSET_SHORT_ITEM
&& (msgData[pos] & 0xFF) <= OFFSET_LONG_ITEM) {
byte length = (byte) ((msgData[pos] & 0xFF) - OFFSET_SHORT_ITEM);
byte[] item = new byte[length];
System.arraycopy(msgData, pos + 1, item, 0, length);
if (length == 1 && (item[0] & 0xFF) < OFFSET_SHORT_ITEM) {
throw new RuntimeException("Single byte has been encoded as byte string");
}
RLPItem rlpItem = new RLPItem(item);
rlpList.add(rlpItem);
pos += 1 + length;
continue;
}
// null item
if ((msgData[pos] & 0xFF) == OFFSET_SHORT_ITEM) {
byte[] item = ByteUtil.EMPTY_BYTE_ARRAY;
RLPItem rlpItem = new RLPItem(item);
rlpList.add(rlpItem);
pos += 1;
continue;
}
// single byte item
if ((msgData[pos] & 0xFF) < OFFSET_SHORT_ITEM) {
byte[] item = {(byte) (msgData[pos] & 0xFF)};
RLPItem rlpItem = new RLPItem(item);
rlpList.add(rlpItem);
pos += 1;
}
}
} catch (Exception e) {
throw new RuntimeException("RLP wrong encoding (" + Hex.toHexString(msgData, startPos, endPos - startPos) + ")", e);
} catch (OutOfMemoryError e) {
throw new RuntimeException("Invalid RLP (excessive mem allocation while parsing) (" + Hex.toHexString(msgData, startPos, endPos - startPos) + ")", e);
}
}
/**
* Compares supplied length information with maximum possible
* @param suppliedLength Length info from header
* @param availableLength Length of remaining object
* @throws RuntimeException if supplied length is bigger than available
*/
private static void verifyLength(int suppliedLength, int availableLength) {
if (suppliedLength > availableLength) {
throw new RuntimeException(String.format("Length parsed from RLP (%s bytes) is greater " +
"than possible size of data (%s bytes)", suppliedLength, availableLength));
}
}
/**
* Reads any RLP encoded byte-array and returns all objects as byte-array or list of byte-arrays
*
* @param data RLP encoded byte-array
* @param pos position in the array to start reading
* @return DecodeResult encapsulates the decoded items as a single Object and the final read position
*/
public static DecodeResult decode(byte[] data, int pos) {
if (data == null || data.length < 1) {
return null;
}
int prefix = data[pos] & 0xFF;
if (prefix == OFFSET_SHORT_ITEM) { // 0x80
return new DecodeResult(pos + 1, ""); // means no length or 0
} else if (prefix < OFFSET_SHORT_ITEM) { // [0x00, 0x7f]
return new DecodeResult(pos + 1, new byte[]{data[pos]}); // byte is its own RLP encoding
} else if (prefix <= OFFSET_LONG_ITEM) { // [0x81, 0xb7]
int len = prefix - OFFSET_SHORT_ITEM; // length of the encoded bytes
return new DecodeResult(pos + 1 + len, copyOfRange(data, pos + 1, pos + 1 + len));
} else if (prefix < OFFSET_SHORT_LIST) { // [0xb8, 0xbf]
int lenlen = prefix - OFFSET_LONG_ITEM; // length of length the encoded bytes
int lenbytes = byteArrayToInt(copyOfRange(data, pos + 1, pos + 1 + lenlen)); // length of encoded bytes
// check that length is in payload bounds
verifyLength(lenbytes, data.length - pos - 1 - lenlen);
return new DecodeResult(pos + 1 + lenlen + lenbytes, copyOfRange(data, pos + 1 + lenlen, pos + 1 + lenlen
+ lenbytes));
} else if (prefix <= OFFSET_LONG_LIST) { // [0xc0, 0xf7]
int len = prefix - OFFSET_SHORT_LIST; // length of the encoded list
int prevPos = pos;
pos++;
return decodeList(data, pos, prevPos, len);
} else if (prefix <= 0xFF) { // [0xf8, 0xff]
int lenlen = prefix - OFFSET_LONG_LIST; // length of length the encoded list
int lenlist = byteArrayToInt(copyOfRange(data, pos + 1, pos + 1 + lenlen)); // length of encoded bytes
pos = pos + lenlen + 1; // start at position of first element in list
int prevPos = lenlist;
return decodeList(data, pos, prevPos, lenlist);
} else {
throw new RuntimeException("Only byte values between 0x00 and 0xFF are supported, but got: " + prefix);
}
}
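    /*
     * Illustrative example (not part of the original source) of reading one item
     * together with its end position:
     *
     *   DecodeResult r = RLP.decode(RLP.encodeString("cat"), 0);
     *   byte[] payload = (byte[]) r.getDecoded();   // the bytes of "cat"
     *   int next = r.getPos();                      // 4, index right after the item
     */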
public static final class LList {
private final byte[] rlp;
private final int[] offsets = new int[32];
private final int[] lens = new int[32];
private int cnt;
public LList(byte[] rlp) {
this.rlp = rlp;
}
public byte[] getEncoded() {
byte encoded[][] = new byte[cnt][];
for (int i = 0; i < cnt; i++) {
encoded[i] = encodeElement(getBytes(i));
}
return encodeList(encoded);
}
public void add(int off, int len, boolean isList) {
offsets[cnt] = off;
lens[cnt] = isList ? (-1 - len) : len;
cnt++;
}
public byte[] getBytes(int idx) {
int len = lens[idx];
len = len < 0 ? (-len - 1) : len;
byte[] ret = new byte[len];
System.arraycopy(rlp, offsets[idx], ret, 0, len);
return ret;
}
public LList getList(int idx) {
return decodeLazyList(rlp, offsets[idx], -lens[idx] - 1);
}
public boolean isList(int idx) {
return lens[idx] < 0;
}
public int size() {
return cnt;
}
}
public static LList decodeLazyList(byte[] data) {
return decodeLazyList(data, 0, data.length).getList(0);
}
public static LList decodeLazyList(byte[] data, int pos, int length) {
if (data == null || data.length < 1) {
return null;
}
LList ret = new LList(data);
int end = pos + length;
while(pos < end) {
int prefix = data[pos] & 0xFF;
if (prefix == OFFSET_SHORT_ITEM) { // 0x80
ret.add(pos, 0, false); // means no length or 0
pos++;
} else if (prefix < OFFSET_SHORT_ITEM) { // [0x00, 0x7f]
ret.add(pos, 1, false); // means no length or 0
pos++;
} else if (prefix <= OFFSET_LONG_ITEM) { // [0x81, 0xb7]
int len = prefix - OFFSET_SHORT_ITEM; // length of the encoded bytes
ret.add(pos + 1, len, false);
pos += len + 1;
} else if (prefix < OFFSET_SHORT_LIST) { // [0xb8, 0xbf]
int lenlen = prefix - OFFSET_LONG_ITEM; // length of length the encoded bytes
int lenbytes = byteArrayToInt(copyOfRange(data, pos + 1, pos + 1 + lenlen)); // length of encoded bytes
// check that length is in payload bounds
verifyLength(lenbytes, data.length - pos - 1 - lenlen);
ret.add(pos + 1 + lenlen, lenbytes, false);
pos += 1 + lenlen + lenbytes;
} else if (prefix <= OFFSET_LONG_LIST) { // [0xc0, 0xf7]
int len = prefix - OFFSET_SHORT_LIST; // length of the encoded list
ret.add(pos + 1, len, true);
pos += 1 + len;
} else if (prefix <= 0xFF) { // [0xf8, 0xff]
int lenlen = prefix - OFFSET_LONG_LIST; // length of length the encoded list
int lenlist = byteArrayToInt(copyOfRange(data, pos + 1, pos + 1 + lenlen)); // length of encoded bytes
// check that length is in payload bounds
verifyLength(lenlist, data.length - pos - 1 - lenlen);
ret.add(pos + 1 + lenlen, lenlist, true);
pos += 1 + lenlen + lenlist; // start at position of first element in list
} else {
throw new RuntimeException("Only byte values between 0x00 and 0xFF are supported, but got: " + prefix);
}
}
return ret;
}
private static DecodeResult decodeList(byte[] data, int pos, int prevPos, int len) {
// check that length is in payload bounds
verifyLength(len, data.length - pos);
List<Object> slice = new ArrayList<>();
for (int i = 0; i < len; ) {
// Get the next item in the data list and append it
DecodeResult result = decode(data, pos);
slice.add(result.getDecoded());
// Increment pos by the amount bytes in the previous read
prevPos = result.getPos();
i += (prevPos - pos);
pos = prevPos;
}
return new DecodeResult(pos, slice.toArray());
}
/* ******************************************************
* ENCODING *
* ******************************************************/
/**
* Turn Object into its RLP encoded equivalent of a byte-array
* Support for String, Integer, BigInteger and Lists of any of these types.
*
* @param input as object or List of objects
* @return byte[] RLP encoded
*/
public static byte[] encode(Object input) {
Value val = new Value(input);
if (val.isList()) {
List<Object> inputArray = val.asList();
if (inputArray.isEmpty()) {
return encodeLength(inputArray.size(), OFFSET_SHORT_LIST);
}
byte[] output = ByteUtil.EMPTY_BYTE_ARRAY;
for (Object object : inputArray) {
output = concatenate(output, encode(object));
}
byte[] prefix = encodeLength(output.length, OFFSET_SHORT_LIST);
return concatenate(prefix, output);
} else {
byte[] inputAsBytes = toBytes(input);
if (inputAsBytes.length == 1 && (inputAsBytes[0] & 0xff) <= 0x80) {
return inputAsBytes;
} else {
byte[] firstByte = encodeLength(inputAsBytes.length, OFFSET_SHORT_ITEM);
return concatenate(firstByte, inputAsBytes);
}
}
}
/**
* Integer limitation goes up to 2^31-1 so length can never be bigger than MAX_ITEM_LENGTH
*/
public static byte[] encodeLength(int length, int offset) {
if (length < SIZE_THRESHOLD) {
byte firstByte = (byte) (length + offset);
return new byte[]{firstByte};
} else if (length < MAX_ITEM_LENGTH) {
byte[] binaryLength;
if (length > 0xFF)
binaryLength = intToBytesNoLeadZeroes(length);
else
binaryLength = new byte[]{(byte) length};
byte firstByte = (byte) (binaryLength.length + offset + SIZE_THRESHOLD - 1);
return concatenate(new byte[]{firstByte}, binaryLength);
} else {
throw new RuntimeException("Input too long");
}
}
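    /*
     * Illustrative examples (not part of the original source), with 0x80 as the
     * item offset:
     *
     *   encodeLength(3, 0x80)     ->  { (byte) 0x83 }                 (short form)
     *   encodeLength(1024, 0x80)  ->  { (byte) 0xB9, 0x04, 0x00 }     (long form: 0xb7 + 2 length bytes)
     */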
public static byte[] encodeByte(byte singleByte) {
if ((singleByte & 0xFF) == 0) {
return new byte[]{(byte) OFFSET_SHORT_ITEM};
} else if ((singleByte & 0xFF) <= 0x7F) {
return new byte[]{singleByte};
} else {
return new byte[]{(byte) (OFFSET_SHORT_ITEM + 1), singleByte};
}
}
public static byte[] encodeShort(short singleShort) {
if ((singleShort & 0xFF) == singleShort)
return encodeByte((byte) singleShort);
else {
return new byte[]{(byte) (OFFSET_SHORT_ITEM + 2),
(byte) (singleShort >> 8 & 0xFF),
(byte) (singleShort >> 0 & 0xFF)};
}
}
public static byte[] encodeInt(int singleInt) {
if ((singleInt & 0xFF) == singleInt)
return encodeByte((byte) singleInt);
else if ((singleInt & 0xFFFF) == singleInt)
return encodeShort((short) singleInt);
else if ((singleInt & 0xFFFFFF) == singleInt)
return new byte[]{(byte) (OFFSET_SHORT_ITEM + 3),
(byte) (singleInt >>> 16),
(byte) (singleInt >>> 8),
(byte) singleInt};
else {
return new byte[]{(byte) (OFFSET_SHORT_ITEM + 4),
(byte) (singleInt >>> 24),
(byte) (singleInt >>> 16),
(byte) (singleInt >>> 8),
(byte) singleInt};
}
}
public static byte[] encodeString(String srcString) {
return encodeElement(srcString.getBytes());
}
public static byte[] encodeBigInteger(BigInteger srcBigInteger) {
if (srcBigInteger.compareTo(BigInteger.ZERO) < 0) throw new RuntimeException("negative numbers are not allowed");
if (srcBigInteger.equals(BigInteger.ZERO))
return encodeByte((byte) 0);
else
return encodeElement(asUnsignedByteArray(srcBigInteger));
}
public static byte[] encodeElement(byte[] srcData) {
// [0x80]
if (isNullOrZeroArray(srcData)) {
return new byte[]{(byte) OFFSET_SHORT_ITEM};
// [0x00]
} else if (isSingleZero(srcData)) {
return srcData;
// [0x01, 0x7f] - single byte, that byte is its own RLP encoding
} else if (srcData.length == 1 && (srcData[0] & 0xFF) < 0x80) {
return srcData;
// [0x80, 0xb7], 0 - 55 bytes
} else if (srcData.length < SIZE_THRESHOLD) {
// length = 8X
byte length = (byte) (OFFSET_SHORT_ITEM + srcData.length);
byte[] data = Arrays.copyOf(srcData, srcData.length + 1);
System.arraycopy(data, 0, data, 1, srcData.length);
data[0] = length;
return data;
// [0xb8, 0xbf], 56+ bytes
} else {
// length of length = BX
// prefix = [BX, [length]]
int tmpLength = srcData.length;
byte lengthOfLength = 0;
while (tmpLength != 0) {
++lengthOfLength;
tmpLength = tmpLength >> 8;
}
// set length Of length at first byte
byte[] data = new byte[1 + lengthOfLength + srcData.length];
data[0] = (byte) (OFFSET_LONG_ITEM + lengthOfLength);
// copy length after first byte
tmpLength = srcData.length;
for (int i = lengthOfLength; i > 0; --i) {
data[i] = (byte) (tmpLength & 0xFF);
tmpLength = tmpLength >> 8;
}
// at last copy the number bytes after its length
System.arraycopy(srcData, 0, data, 1 + lengthOfLength, srcData.length);
return data;
}
}
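    /*
     * Illustrative examples (not part of the original source):
     *
     *   encodeElement(new byte[]{0x7E})         ->  { 0x7E }                      (single byte below 0x80 encodes itself)
     *   encodeElement(new byte[]{(byte) 0x80})  ->  { (byte) 0x81, (byte) 0x80 }  (single byte >= 0x80 gets a length prefix)
     *   encodeElement(new byte[0])              ->  { (byte) 0x80 }               (empty item)
     *   a 60-byte array                         ->  { (byte) 0xB8, 0x3C, ...payload... }
     */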
public static int calcElementPrefixSize(byte[] srcData) {
if (isNullOrZeroArray(srcData))
return 0;
else if (isSingleZero(srcData))
return 0;
else if (srcData.length == 1 && (srcData[0] & 0xFF) < 0x80) {
return 0;
} else if (srcData.length < SIZE_THRESHOLD) {
return 1;
} else {
// length of length = BX
// prefix = [BX, [length]]
int tmpLength = srcData.length;
byte byteNum = 0;
while (tmpLength != 0) {
++byteNum;
tmpLength = tmpLength >> 8;
}
return 1 + byteNum;
}
}
public static byte[] encodeListHeader(int size) {
if (size == 0) {
return new byte[]{(byte) OFFSET_SHORT_LIST};
}
int totalLength = size;
byte[] header;
if (totalLength < SIZE_THRESHOLD) {
header = new byte[1];
header[0] = (byte) (OFFSET_SHORT_LIST + totalLength);
} else {
// length of length = BX
// prefix = [BX, [length]]
int tmpLength = totalLength;
byte byteNum = 0;
while (tmpLength != 0) {
++byteNum;
tmpLength = tmpLength >> 8;
}
tmpLength = totalLength;
byte[] lenBytes = new byte[byteNum];
for (int i = 0; i < byteNum; ++i) {
lenBytes[byteNum - 1 - i] = (byte) ((tmpLength >> (8 * i)) & 0xFF);
}
// first byte = F7 + bytes.length
header = new byte[1 + lenBytes.length];
header[0] = (byte) (OFFSET_LONG_LIST + byteNum);
System.arraycopy(lenBytes, 0, header, 1, lenBytes.length);
}
return header;
}
public static byte[] encodeLongElementHeader(int length) {
if (length < SIZE_THRESHOLD) {
if (length == 0)
return new byte[]{(byte) 0x80};
else
return new byte[]{(byte) (0x80 + length)};
} else {
// length of length = BX
// prefix = [BX, [length]]
int tmpLength = length;
byte byteNum = 0;
while (tmpLength != 0) {
++byteNum;
tmpLength = tmpLength >> 8;
}
byte[] lenBytes = new byte[byteNum];
for (int i = 0; i < byteNum; ++i) {
lenBytes[byteNum - 1 - i] = (byte) ((length >> (8 * i)) & 0xFF);
}
// first byte = F7 + bytes.length
byte[] header = new byte[1 + lenBytes.length];
header[0] = (byte) (OFFSET_LONG_ITEM + byteNum);
System.arraycopy(lenBytes, 0, header, 1, lenBytes.length);
return header;
}
}
public static byte[] encodeSet(Set<ByteArrayWrapper> data) {
int dataLength = 0;
Set<byte[]> encodedElements = new HashSet<>();
for (ByteArrayWrapper element : data) {
byte[] encodedElement = RLP.encodeElement(element.getData());
dataLength += encodedElement.length;
encodedElements.add(encodedElement);
}
byte[] listHeader = encodeListHeader(dataLength);
byte[] output = new byte[listHeader.length + dataLength];
System.arraycopy(listHeader, 0, output, 0, listHeader.length);
int cummStart = listHeader.length;
for (byte[] element : encodedElements) {
System.arraycopy(element, 0, output, cummStart, element.length);
cummStart += element.length;
}
return output;
}
/**
* A handy shortcut for {@link #encodeElement(byte[])} + {@link #encodeList(byte[]...)}
* <p>
* Encodes each data element and wraps them all into a list.
*/
public static byte[] wrapList(byte[] ... data) {
byte[][] elements = new byte[data.length][];
for (int i = 0; i < data.length; i++) {
elements[i] = encodeElement(data[i]);
}
return encodeList(elements);
}
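    /*
     * Illustrative equivalence (not part of the original source): wrapList is a
     * shortcut for encoding each element and then the list, i.e. for hypothetical
     * payloads a and b,
     *
     *   RLP.wrapList(a, b)
     *
     * produces the same bytes as
     *
     *   RLP.encodeList(RLP.encodeElement(a), RLP.encodeElement(b));
     */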
public static byte[] encodeList(byte[]... elements) {
if (elements == null) {
return new byte[]{(byte) OFFSET_SHORT_LIST};
}
int totalLength = 0;
for (byte[] element1 : elements) {
totalLength += element1.length;
}
byte[] data;
int copyPos;
if (totalLength < SIZE_THRESHOLD) {
data = new byte[1 + totalLength];
data[0] = (byte) (OFFSET_SHORT_LIST + totalLength);
copyPos = 1;
} else {
// length of length = BX
// prefix = [BX, [length]]
int tmpLength = totalLength;
byte byteNum = 0;
while (tmpLength != 0) {
++byteNum;
tmpLength = tmpLength >> 8;
}
tmpLength = totalLength;
byte[] lenBytes = new byte[byteNum];
for (int i = 0; i < byteNum; ++i) {
lenBytes[byteNum - 1 - i] = (byte) ((tmpLength >> (8 * i)) & 0xFF);
}
// first byte = F7 + bytes.length
data = new byte[1 + lenBytes.length + totalLength];
data[0] = (byte) (OFFSET_LONG_LIST + byteNum);
System.arraycopy(lenBytes, 0, data, 1, lenBytes.length);
copyPos = lenBytes.length + 1;
}
for (byte[] element : elements) {
System.arraycopy(element, 0, data, copyPos, element.length);
copyPos += element.length;
}
return data;
}
/*
* Utility function to convert Objects into byte arrays
*/
private static byte[] toBytes(Object input) {
if (input instanceof byte[]) {
return (byte[]) input;
} else if (input instanceof String) {
String inputString = (String) input;
return inputString.getBytes();
} else if (input instanceof Long) {
Long inputLong = (Long) input;
return (inputLong == 0) ? ByteUtil.EMPTY_BYTE_ARRAY : asUnsignedByteArray(BigInteger.valueOf(inputLong));
} else if (input instanceof Integer) {
Integer inputInt = (Integer) input;
return (inputInt == 0) ? ByteUtil.EMPTY_BYTE_ARRAY : asUnsignedByteArray(BigInteger.valueOf(inputInt));
} else if (input instanceof BigInteger) {
BigInteger inputBigInt = (BigInteger) input;
return (inputBigInt.equals(BigInteger.ZERO)) ? ByteUtil.EMPTY_BYTE_ARRAY : asUnsignedByteArray(inputBigInt);
} else if (input instanceof Value) {
Value val = (Value) input;
return toBytes(val.asObj());
}
throw new RuntimeException("Unsupported type: Only accepting String, Integer and BigInteger for now");
}
private static byte[] decodeItemBytes(byte[] data, int index) {
final int length = calculateItemLength(data, index);
// [0x80]
if (length == 0) {
return new byte[0];
// [0x00, 0x7f] - single byte with item
} else if ((data[index] & 0xFF) < OFFSET_SHORT_ITEM) {
byte[] valueBytes = new byte[1];
System.arraycopy(data, index, valueBytes, 0, 1);
return valueBytes;
        // [0x81, 0xb7] - 1-55 bytes item
} else if ((data[index] & 0xFF) <= OFFSET_LONG_ITEM) {
byte[] valueBytes = new byte[length];
System.arraycopy(data, index+1, valueBytes, 0, length);
return valueBytes;
// [0xb8, 0xbf] - 56+ bytes item
} else if ((data[index] & 0xFF) > OFFSET_LONG_ITEM
&& (data[index] & 0xFF) < OFFSET_SHORT_LIST) {
byte lengthOfLength = (byte) (data[index] - OFFSET_LONG_ITEM);
byte[] valueBytes = new byte[length];
System.arraycopy(data, index + 1 + lengthOfLength, valueBytes, 0, length);
return valueBytes;
} else {
throw new RuntimeException("wrong decode attempt");
}
}
private static int calculateItemLength(byte[] data, int index) {
// [0xb8, 0xbf] - 56+ bytes item
if ((data[index] & 0xFF) > OFFSET_LONG_ITEM
&& (data[index] & 0xFF) < OFFSET_SHORT_LIST) {
byte lengthOfLength = (byte) (data[index] - OFFSET_LONG_ITEM);
return calcLength(lengthOfLength, data, index);
        // [0x81, 0xb7] - 1-55 bytes item
} else if ((data[index] & 0xFF) > OFFSET_SHORT_ITEM
&& (data[index] & 0xFF) <= OFFSET_LONG_ITEM) {
return (byte) (data[index] - OFFSET_SHORT_ITEM);
        // [0x80] - zero-length item
} else if ((data[index] & 0xFF) == OFFSET_SHORT_ITEM) {
return (byte) 0;
// [0x00, 0x7f] - 1 byte item, no separate length representation
} else if ((data[index] & 0xFF) < OFFSET_SHORT_ITEM) {
return (byte) 1;
} else {
throw new RuntimeException("wrong decode attempt");
}
}
}
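// Hedged usage sketch, not part of the original file: shows that wrapList is just
// encodeElement + encodeList, and how the list header switches from the short form
// (0xc0 + payload length) to the long form (0xf7 + length-of-length) once the encoded
// payload exceeds 55 bytes. The class name RLPListEncodingExample is hypothetical.
class RLPListEncodingExample {
    public static void main(String[] args) {
        byte[] cat = "cat".getBytes();
        byte[] dog = "dog".getBytes();
        byte[] viaWrap = RLP.wrapList(cat, dog);
        byte[] viaManual = RLP.encodeList(RLP.encodeElement(cat), RLP.encodeElement(dog));
        System.out.println(java.util.Arrays.equals(viaWrap, viaManual)); // true
        // 8-byte payload -> single header byte 0xc0 + 8 = 0xc8
        System.out.println(Integer.toHexString(viaWrap[0] & 0xFF));      // c8
        // a single 60-byte element encodes to 62 bytes, so the list payload exceeds 55
        // bytes and the header becomes 0xf7 + 1 = 0xf8 followed by the length byte 0x3e
        byte[] big = RLP.wrapList(new byte[60]);
        System.out.println(Integer.toHexString(big[0] & 0xFF));          // f8
        System.out.println(Integer.toHexString(big[1] & 0xFF));          // 3e
    }
}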
| 45,922
| 36.154531
| 162
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/DecodeResult.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import org.spongycastle.util.encoders.Hex;
import java.io.Serializable;
@SuppressWarnings("serial")
public class DecodeResult implements Serializable {
private int pos;
private Object decoded;
public DecodeResult(int pos, Object decoded) {
this.pos = pos;
this.decoded = decoded;
}
public int getPos() {
return pos;
}
public Object getDecoded() {
return decoded;
}
public String toString() {
return asString(this.decoded);
}
private String asString(Object decoded) {
if (decoded instanceof String) {
return (String) decoded;
} else if (decoded instanceof byte[]) {
return Hex.toHexString((byte[]) decoded);
} else if (decoded instanceof Object[]) {
StringBuilder result = new StringBuilder();
for (Object item : (Object[]) decoded) {
result.append(asString(item));
}
return result.toString();
}
throw new RuntimeException("Not a valid type. Should not occur");
}
}
| 1,907
| 29.774194
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/ByteArrayMap.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import org.ethereum.db.ByteArrayWrapper;
import java.util.*;
/**
* Created by Anton Nashatyrev on 06.10.2016.
*/
public class ByteArrayMap<V> implements Map<byte[], V> {
private final Map<ByteArrayWrapper, V> delegate;
public ByteArrayMap() {
this(new HashMap<ByteArrayWrapper, V>());
}
public ByteArrayMap(Map<ByteArrayWrapper, V> delegate) {
this.delegate = delegate;
}
@Override
public int size() {
return delegate.size();
}
@Override
public boolean isEmpty() {
return delegate.isEmpty();
}
@Override
public boolean containsKey(Object key) {
return delegate.containsKey(new ByteArrayWrapper((byte[]) key));
}
@Override
public boolean containsValue(Object value) {
return delegate.containsValue(value);
}
@Override
public V get(Object key) {
return delegate.get(new ByteArrayWrapper((byte[]) key));
}
@Override
public V put(byte[] key, V value) {
return delegate.put(new ByteArrayWrapper(key), value);
}
@Override
public V remove(Object key) {
return delegate.remove(new ByteArrayWrapper((byte[]) key));
}
@Override
public void putAll(Map<? extends byte[], ? extends V> m) {
for (Entry<? extends byte[], ? extends V> entry : m.entrySet()) {
delegate.put(new ByteArrayWrapper(entry.getKey()), entry.getValue());
}
}
@Override
public void clear() {
delegate.clear();
}
@Override
public Set<byte[]> keySet() {
return new ByteArraySet(new SetAdapter<>(delegate));
}
@Override
public Collection<V> values() {
return delegate.values();
}
@Override
public Set<Entry<byte[], V>> entrySet() {
return new MapEntrySet(delegate.entrySet());
}
@Override
public boolean equals(Object o) {
return delegate.equals(o);
}
@Override
public int hashCode() {
return delegate.hashCode();
}
@Override
public String toString() {
return delegate.toString();
}
private class MapEntrySet implements Set<Map.Entry<byte[], V>> {
private final Set<Map.Entry<ByteArrayWrapper, V>> delegate;
private MapEntrySet(Set<Entry<ByteArrayWrapper, V>> delegate) {
this.delegate = delegate;
}
@Override
public int size() {
return delegate.size();
}
@Override
public boolean isEmpty() {
return delegate.isEmpty();
}
@Override
public boolean contains(Object o) {
throw new RuntimeException("Not implemented");
}
@Override
public Iterator<Entry<byte[], V>> iterator() {
final Iterator<Entry<ByteArrayWrapper, V>> it = delegate.iterator();
return new Iterator<Entry<byte[], V>>() {
@Override
public boolean hasNext() {
return it.hasNext();
}
@Override
public Entry<byte[], V> next() {
Entry<ByteArrayWrapper, V> next = it.next();
                    return new AbstractMap.SimpleImmutableEntry<>(next.getKey().getData(), next.getValue());
}
@Override
public void remove() {
it.remove();
}
};
}
@Override
public Object[] toArray() {
throw new RuntimeException("Not implemented");
}
@Override
public <T> T[] toArray(T[] a) {
throw new RuntimeException("Not implemented");
}
@Override
public boolean add(Entry<byte[], V> vEntry) {
throw new RuntimeException("Not implemented");
}
@Override
public boolean remove(Object o) {
throw new RuntimeException("Not implemented");
}
@Override
public boolean containsAll(Collection<?> c) {
throw new RuntimeException("Not implemented");
}
@Override
public boolean addAll(Collection<? extends Entry<byte[], V>> c) {
throw new RuntimeException("Not implemented");
}
@Override
public boolean retainAll(Collection<?> c) {
throw new RuntimeException("Not implemented");
}
@Override
public boolean removeAll(Collection<?> c) {
throw new RuntimeException("Not implemented");
}
@Override
public void clear() {
throw new RuntimeException("Not implemented");
}
}
}
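// Hedged usage sketch, not part of the original file: a plain HashMap<byte[], V> compares
// keys by array identity, so a second array with the same content misses; ByteArrayMap
// wraps keys in ByteArrayWrapper and compares by content. The class name
// ByteArrayMapExample is hypothetical.
class ByteArrayMapExample {
    public static void main(String[] args) {
        Map<byte[], String> plain = new HashMap<>();
        plain.put(new byte[]{1, 2, 3}, "value");
        System.out.println(plain.get(new byte[]{1, 2, 3}));   // null - identity equality
        Map<byte[], String> wrapped = new ByteArrayMap<>();
        wrapped.put(new byte[]{1, 2, 3}, "value");
        System.out.println(wrapped.get(new byte[]{1, 2, 3})); // value - content equality
    }
}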
| 5,500
| 25.574879
| 106
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/TimeUtils.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
/**
* @author Mikhail Kalinin
* @since 10.08.2015
*/
public class TimeUtils {
/**
* Converts minutes to millis
*
* @param minutes time in minutes
* @return corresponding millis value
*/
public static long minutesToMillis(long minutes) {
return minutes * 60 * 1000;
}
/**
* Converts seconds to millis
*
* @param seconds time in seconds
* @return corresponding millis value
*/
public static long secondsToMillis(long seconds) {
return seconds * 1000;
}
/**
* Converts millis to minutes
*
* @param millis time in millis
* @return time in minutes
*/
public static long millisToMinutes(long millis) {
return Math.round(millis / 60.0 / 1000.0);
}
/**
* Converts millis to seconds
*
* @param millis time in millis
* @return time in seconds
*/
public static long millisToSeconds(long millis) {
return Math.round(millis / 1000.0);
}
/**
* Returns timestamp in the future after some millis passed from now
*
* @param millis millis count
* @return future timestamp
*/
public static long timeAfterMillis(long millis) {
return System.currentTimeMillis() + millis;
}
}
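// Hedged usage sketch, not part of the original file: the millis-to-X conversions round
// to the nearest unit, e.g. 90_500 ms rounds to 91 seconds and 2 minutes. The class name
// TimeUtilsExample is hypothetical.
class TimeUtilsExample {
    public static void main(String[] args) {
        System.out.println(TimeUtils.minutesToMillis(2));        // 120000
        System.out.println(TimeUtils.millisToSeconds(90_500));   // 91 (rounded)
        System.out.println(TimeUtils.millisToMinutes(90_500));   // 2 (rounded)
    }
}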
| 2,106
| 26.723684
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/ALock.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import java.util.concurrent.locks.Lock;
/**
* AutoClosable Lock wrapper. Use case:
*
* try (ALock l = wLock.lock()) {
* // do smth under lock
* }
*
* Created by Anton Nashatyrev on 27.01.2017.
*/
public final class ALock implements AutoCloseable {
private final Lock lock;
public ALock(Lock l) {
this.lock = l;
}
public final ALock lock() {
this.lock.lock();
return this;
}
public final void close() {
this.lock.unlock();
}
}
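// Hedged usage sketch, not part of the original file: wrapping both sides of a
// ReentrantReadWriteLock so they can be taken with try-with-resources. The class name
// ALockExample is hypothetical; fully qualified names are used to avoid extra imports.
class ALockExample {
    private final java.util.concurrent.locks.ReadWriteLock rwLock =
            new java.util.concurrent.locks.ReentrantReadWriteLock();
    private final ALock readLock = new ALock(rwLock.readLock());
    private final ALock writeLock = new ALock(rwLock.writeLock());
    private int counter;
    public void increment() {
        try (ALock l = writeLock.lock()) {
            counter++;
        } // unlocked automatically by close()
    }
    public int read() {
        try (ALock l = readLock.lock()) {
            return counter;
        }
    }
}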
| 1,320
| 27.106383
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/FastByteComparisons.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ethereum.util;
import com.google.common.primitives.UnsignedBytes;
/**
* Utility code to do optimized byte-array comparison.
* This is borrowed and slightly modified from Guava's {@link UnsignedBytes}
* class to be able to compare arrays that start at non-zero offsets.
*/
@SuppressWarnings("restriction")
public abstract class FastByteComparisons {
public static boolean equal(byte[] b1, byte[] b2) {
return b1.length == b2.length && compareTo(b1, 0, b1.length, b2, 0, b2.length) == 0;
}
/**
* Lexicographically compare two byte arrays.
*
* @param b1 buffer1
* @param s1 offset1
* @param l1 length1
* @param b2 buffer2
* @param s2 offset2
* @param l2 length2
* @return int
*/
public static int compareTo(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
return LexicographicalComparerHolder.BEST_COMPARER.compareTo(
b1, s1, l1, b2, s2, l2);
}
private interface Comparer<T> {
int compareTo(T buffer1, int offset1, int length1,
T buffer2, int offset2, int length2);
}
private static Comparer<byte[]> lexicographicalComparerJavaImpl() {
return LexicographicalComparerHolder.PureJavaComparer.INSTANCE;
}
/**
*
* <p>Uses reflection to gracefully fall back to the Java implementation if
* {@code Unsafe} isn't available.
*/
private static class LexicographicalComparerHolder {
static final String UNSAFE_COMPARER_NAME =
LexicographicalComparerHolder.class.getName() + "$UnsafeComparer";
static final Comparer<byte[]> BEST_COMPARER = getBestComparer();
/**
* Returns the Unsafe-using Comparer, or falls back to the pure-Java
* implementation if unable to do so.
*/
static Comparer<byte[]> getBestComparer() {
try {
Class<?> theClass = Class.forName(UNSAFE_COMPARER_NAME);
// yes, UnsafeComparer does implement Comparer<byte[]>
@SuppressWarnings("unchecked")
Comparer<byte[]> comparer =
(Comparer<byte[]>) theClass.getEnumConstants()[0];
return comparer;
} catch (Throwable t) { // ensure we really catch *everything*
return lexicographicalComparerJavaImpl();
}
}
private enum PureJavaComparer implements Comparer<byte[]> {
INSTANCE;
@Override
public int compareTo(byte[] buffer1, int offset1, int length1,
byte[] buffer2, int offset2, int length2) {
// Short circuit equal case
if (buffer1 == buffer2 &&
offset1 == offset2 &&
length1 == length2) {
return 0;
}
int end1 = offset1 + length1;
int end2 = offset2 + length2;
for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) {
int a = (buffer1[i] & 0xff);
int b = (buffer2[j] & 0xff);
if (a != b) {
return a - b;
}
}
return length1 - length2;
}
}
}
}
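// Hedged usage sketch, not part of the original file: compareTo orders byte arrays
// lexicographically treating bytes as unsigned, and the offset/length parameters allow
// comparing sub-ranges. The class name FastByteComparisonsExample is hypothetical.
class FastByteComparisonsExample {
    public static void main(String[] args) {
        byte[] a = {0x01, 0x7f};
        byte[] b = {0x01, (byte) 0x80};
        // 0x80 is treated as 128 (unsigned), so a sorts before b
        System.out.println(FastByteComparisons.compareTo(a, 0, a.length, b, 0, b.length) < 0); // true
        System.out.println(FastByteComparisons.equal(a, new byte[]{0x01, 0x7f}));              // true
        // compare only the first byte of each array
        System.out.println(FastByteComparisons.compareTo(a, 0, 1, b, 0, 1) == 0);              // true
    }
}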
| 4,964
| 36.052239
| 92
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/FileUtil.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import java.io.File;
import java.io.IOException;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.List;
public class FileUtil {
public static List<String> recursiveList(String path) throws IOException {
final List<String> files = new ArrayList<>();
Files.walkFileTree(Paths.get(path), new FileVisitor<Path>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
files.add(file.toString());
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
return FileVisitResult.CONTINUE;
}
});
return files;
}
public static boolean recursiveDelete(String fileName) {
File file = new File(fileName);
if (file.exists()) {
//check if the file is a directory
if (file.isDirectory()) {
                if (file.list().length > 0) {
                    for (String s : file.list()) {
//call deletion of file individually
recursiveDelete(fileName + System.getProperty("file.separator") + s);
}
}
}
file.setWritable(true);
boolean result = file.delete();
return result;
} else {
return false;
}
}
}
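// Hedged usage sketch, not part of the original file: list a small directory tree and then
// delete it recursively. The class name FileUtilExample and the temp path are hypothetical.
class FileUtilExample {
    public static void main(String[] args) throws IOException {
        String dir = System.getProperty("java.io.tmpdir") + "/fileutil-example";
        new File(dir + "/sub").mkdirs();
        new File(dir + "/sub/a.txt").createNewFile();
        for (String f : FileUtil.recursiveList(dir)) {
            System.out.println(f);                         // prints .../sub/a.txt
        }
        System.out.println(FileUtil.recursiveDelete(dir)); // true - tree removed
    }
}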
| 2,760
| 33.08642
| 110
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/CompactEncoder.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import static java.util.Arrays.*;
import static org.ethereum.util.ByteUtil.appendByte;
import static org.spongycastle.util.Arrays.concatenate;
import static org.spongycastle.util.encoders.Hex.encode;
/**
* Compact encoding of hex sequence with optional terminator
*
* The traditional compact way of encoding a hex string is to convert it into binary
* - that is, a string like 0f1248 would become three bytes 15, 18, 72. However,
* this approach has one slight problem: what if the length of the hex string is odd?
* In that case, there is no way to distinguish between, say, 0f1248 and f1248.
*
* Additionally, our application in the Merkle Patricia tree requires the additional feature
* that a hex string can also have a special "terminator symbol" at the end (denoted by the 'T').
* A terminator symbol can occur only once, and only at the end.
*
 * An alternative way of thinking about this is not to think of there being a terminator symbol,
 * but instead to treat the bit specifying the existence of the terminator symbol as a bit specifying
 * that the given node encodes a final node, where the value is an actual value, rather than
 * the hash of yet another node.
*
* To solve both of these issues, we force the first nibble of the final byte-stream to encode
* two flags, specifying oddness of length (ignoring the 'T' symbol) and terminator status;
* these are placed, respectively, into the two lowest significant bits of the first nibble.
* In the case of an even-length hex string, we must introduce a second nibble (of value zero)
* to ensure the hex-string is even in length and thus is representable by a whole number of bytes.
*
* Examples:
* > [ 1, 2, 3, 4, 5 ]
* '\x11\x23\x45'
* > [ 0, 1, 2, 3, 4, 5 ]
* '\x00\x01\x23\x45'
* > [ 0, 15, 1, 12, 11, 8, T ]
* '\x20\x0f\x1c\xb8'
* > [ 15, 1, 12, 11, 8, T ]
* '\x3f\x1c\xb8'
*/
public class CompactEncoder {
private final static byte TERMINATOR = 16;
private final static Map<Character, Byte> hexMap = new HashMap<>();
static {
hexMap.put('0', (byte) 0x0);
hexMap.put('1', (byte) 0x1);
hexMap.put('2', (byte) 0x2);
hexMap.put('3', (byte) 0x3);
hexMap.put('4', (byte) 0x4);
hexMap.put('5', (byte) 0x5);
hexMap.put('6', (byte) 0x6);
hexMap.put('7', (byte) 0x7);
hexMap.put('8', (byte) 0x8);
hexMap.put('9', (byte) 0x9);
hexMap.put('a', (byte) 0xa);
hexMap.put('b', (byte) 0xb);
hexMap.put('c', (byte) 0xc);
hexMap.put('d', (byte) 0xd);
hexMap.put('e', (byte) 0xe);
hexMap.put('f', (byte) 0xf);
}
/**
* Pack nibbles to binary
*
     * @param nibbles nibble sequence; may end with a terminator
     * @return packed byte array with the flag nibble(s) prepended
*/
public static byte[] packNibbles(byte[] nibbles) {
int terminator = 0;
if (nibbles[nibbles.length - 1] == TERMINATOR) {
terminator = 1;
nibbles = copyOf(nibbles, nibbles.length - 1);
}
int oddlen = nibbles.length % 2;
int flag = 2 * terminator + oddlen;
if (oddlen != 0) {
byte[] flags = new byte[]{(byte) flag};
nibbles = concatenate(flags, nibbles);
} else {
byte[] flags = new byte[]{(byte) flag, 0};
nibbles = concatenate(flags, nibbles);
}
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
for (int i = 0; i < nibbles.length; i += 2) {
buffer.write(16 * nibbles[i] + nibbles[i + 1]);
}
return buffer.toByteArray();
}
public static boolean hasTerminator(byte[] packedKey) {
return ((packedKey[0] >> 4) & 2) != 0;
}
/**
* Unpack a binary string to its nibbles equivalent
*
     * @param str packed binary data
* @return array of nibbles in byte-format
*/
public static byte[] unpackToNibbles(byte[] str) {
byte[] base = binToNibbles(str);
base = copyOf(base, base.length - 1);
if (base[0] >= 2) {
base = appendByte(base, TERMINATOR);
}
if (base[0] % 2 == 1) {
base = copyOfRange(base, 1, base.length);
} else {
base = copyOfRange(base, 2, base.length);
}
return base;
}
/**
     * Transforms a binary array into its nibble representation, appending a terminator
     *
     * @param str byte[] input
     * @return array of nibbles with a terminator appended at the end
*/
public static byte[] binToNibbles(byte[] str) {
byte[] hexEncoded = encode(str);
byte[] hexEncodedTerminated = Arrays.copyOf(hexEncoded, hexEncoded.length + 1);
for (int i = 0; i < hexEncoded.length; ++i){
byte b = hexEncodedTerminated[i];
hexEncodedTerminated[i] = hexMap.get((char) b);
}
hexEncodedTerminated[hexEncodedTerminated.length - 1] = TERMINATOR;
return hexEncodedTerminated;
}
public static byte[] binToNibblesNoTerminator(byte[] str) {
byte[] hexEncoded = encode(str);
for (int i = 0; i < hexEncoded.length; ++i){
byte b = hexEncoded[i];
hexEncoded[i] = hexMap.get((char) b);
}
return hexEncoded;
}
}
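// Hedged usage sketch, not part of the original file: round-trips the examples from the
// class comment above. The class name CompactEncoderExample is hypothetical; Hex is
// referenced fully qualified to avoid adding imports.
class CompactEncoderExample {
    public static void main(String[] args) {
        // odd-length path without terminator: [1,2,3,4,5] -> 0x11 0x23 0x45
        byte[] packed = CompactEncoder.packNibbles(new byte[]{1, 2, 3, 4, 5});
        System.out.println(org.spongycastle.util.encoders.Hex.toHexString(packed));     // 112345
        System.out.println(CompactEncoder.hasTerminator(packed));                       // false
        // even-length path with terminator: [0,15,1,12,11,8,T] -> 0x20 0x0f 0x1c 0xb8
        byte[] terminated = CompactEncoder.packNibbles(new byte[]{0, 15, 1, 12, 11, 8, 16});
        System.out.println(org.spongycastle.util.encoders.Hex.toHexString(terminated)); // 200f1cb8
        System.out.println(CompactEncoder.hasTerminator(terminated));                   // true
        // unpackToNibbles restores the nibble sequence, terminator (16) included
        System.out.println(Arrays.toString(CompactEncoder.unpackToNibbles(terminated))); // [0, 15, 1, 12, 11, 8, 16]
    }
}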
| 6,227
| 34.386364
| 99
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/AdvancedDeviceUtils.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.util.ContextInitializer;
import ch.qos.logback.core.joran.spi.JoranException;
import org.ethereum.config.SystemProperties;
import org.slf4j.LoggerFactory;
import java.net.URL;
/**
* @author Roman Mandeleil
* @since 25.07.2014
*/
public class AdvancedDeviceUtils {
public static void adjustDetailedTracing(SystemProperties config, long blockNum) {
        // here we can turn on detailed tracing in the middle of the chain
if (blockNum >= config.traceStartBlock() && config.traceStartBlock() != -1) {
final URL configFile = ClassLoader.getSystemResource("logback-detailed.xml");
final LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
final ContextInitializer ci = new ContextInitializer(loggerContext);
loggerContext.reset();
try {
ci.configureByResource(configFile);
} catch (Exception e) {
System.out.println("Error applying new config " + e.getMessage());
}
}
}
}
| 1,940
| 37.82
| 98
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/RLPItem.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util;
/**
* @author Roman Mandeleil
* @since 21.04.14
*/
public class RLPItem implements RLPElement {
private final byte[] rlpData;
public RLPItem(byte[] rlpData) {
this.rlpData = rlpData;
}
public byte[] getRLPData() {
if (rlpData.length == 0)
return null;
return rlpData;
}
}
| 1,152
| 29.342105
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/blockchain/EtherUtil.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util.blockchain;
import java.math.BigInteger;
/**
* Created by Anton Nashatyrev on 22.06.2016.
*/
public class EtherUtil {
public enum Unit {
WEI(BigInteger.valueOf(1)),
GWEI(BigInteger.valueOf(1_000_000_000)),
SZABO(BigInteger.valueOf(1_000_000_000_000L)),
FINNEY(BigInteger.valueOf(1_000_000_000_000_000L)),
ETHER(BigInteger.valueOf(1_000_000_000_000_000_000L));
BigInteger i;
Unit(BigInteger i) {
this.i = i;
}
}
public static BigInteger convert(long amount, Unit unit) {
return BigInteger.valueOf(amount).multiply(unit.i);
}
}
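// Hedged usage sketch, not part of the original file: converting amounts in larger units
// to wei. The class name EtherUtilExample is hypothetical.
class EtherUtilExample {
    public static void main(String[] args) {
        System.out.println(EtherUtil.convert(1, EtherUtil.Unit.ETHER));     // 1000000000000000000
        System.out.println(EtherUtil.convert(21000, EtherUtil.Unit.GWEI));  // 21000000000000
        // combine units, e.g. 2.5 ether expressed as 2 ether + 500 finney
        BigInteger value = EtherUtil.convert(2, EtherUtil.Unit.ETHER)
                .add(EtherUtil.convert(500, EtherUtil.Unit.FINNEY));
        System.out.println(value);                                          // 2500000000000000000
    }
}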
| 1,448
| 32.697674
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/blockchain/SolidityStorage.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util.blockchain;
/**
* The class is intended for accessing contract storage with respect to
* Solidity members layout.
 * E.g. an individual member of a structure or a mapping element can be accessed via
 * names and/or indexes
*
* Created by Anton Nashatyrev on 24.03.2016.
*/
public interface SolidityStorage extends ContractStorage {
// TODO: to be implemented
}
| 1,185
| 37.258065
| 82
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/blockchain/SolidityContract.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util.blockchain;
import org.ethereum.core.Block;
import java.math.BigInteger;
/**
* Interface to Ethereum contract compiled with Solidity with
* respect to language function signatures encoding and
* storage layout
*
 * Below is the Java <=> Solidity type mapping:
*
* Input arguments Java -> Solidity mapping is the following:
* Number, BigInteger, String (hex) -> any integer type
* byte[], String (hex) -> bytesN, byte[]
* String -> string
* Java array of the above types -> Solidity dynamic array of the corresponding type
*
* Output arguments Solidity -> Java mapping:
* any integer type -> BigInteger
* string -> String
* bytesN, byte[] -> byte[]
* Solidity dynamic array -> Java array
*
* Created by Anton Nashatyrev on 23.03.2016.
*/
public interface SolidityContract extends Contract {
/**
* Submits the transaction which invokes the specified contract function
* with corresponding arguments
*
     * TODO: either return the pending transaction execution result,
     * or return a Future which completes once a block includes the transaction,
     * or combine both approaches
*/
SolidityCallResult callFunction(String functionName, Object ... args);
/**
* Submits the transaction which invokes the specified contract function
* with corresponding arguments and sends the specified value to the contract
*/
default SolidityCallResult callFunction(long value, String functionName, Object ... args) {
return callFunction(BigInteger.valueOf(value), functionName, args);
}
/**
* Submits the transaction which invokes the specified contract function
* with corresponding arguments and sends the specified value to the contract
*/
SolidityCallResult callFunction(BigInteger value, String functionName, Object ... args);
/**
* Call the function without submitting a transaction and without
* modifying the contract state.
* Synchronously returns function execution result
* (see output argument mapping in class doc)
*/
Object[] callConstFunction(String functionName, Object ... args);
/**
* Call the function without submitting a transaction and without
* modifying the contract state. The function is executed with the
* contract state actual after including the specified block.
*
* Synchronously returns function execution result
* (see output argument mapping in class doc)
*/
Object[] callConstFunction(Block callBlock, String functionName, Object... args);
/**
* Gets the contract function. This object can be passed as a call argument for another
* function with a function type parameter
*/
SolidityFunction getFunction(String name);
/**
* Returns the Solidity JSON ABI (Application Binary Interface)
*/
String getABI();
}
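// Hedged usage sketch, not part of the original file: exercises the type mapping described
// above against a trivial contract deployed on a StandaloneBlockchain from this package.
// The class name, the contract source, and whether that source compiles with the bundled
// solc version are all assumptions for illustration.
class SolidityContractTypeMappingExample {
    public static void main(String[] args) {
        StandaloneBlockchain bc = new StandaloneBlockchain().withAutoblock(true);
        SolidityContract adder = bc.submitNewContract(
                "contract Adder { function add(uint a, uint b) constant returns (uint) { return a + b; } }");
        // constant call: Java Number arguments map to uint, the uint result comes back as BigInteger
        Object[] ret = adder.callConstFunction("add", 2, 40);
        System.out.println(ret[0]); // 42
    }
}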
| 3,692
| 35.93
| 95
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/blockchain/StandaloneBlockchain.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util.blockchain;
import org.ethereum.config.BlockchainConfig;
import org.ethereum.config.BlockchainNetConfig;
import org.ethereum.config.SystemProperties;
import org.ethereum.config.blockchain.ByzantiumConfig;
import org.ethereum.config.blockchain.DaoHFConfig;
import org.ethereum.config.blockchain.DaoNoHFConfig;
import org.ethereum.config.blockchain.FrontierConfig;
import org.ethereum.config.blockchain.HomesteadConfig;
import org.ethereum.config.blockchain.PetersburgConfig;
import org.ethereum.core.*;
import org.ethereum.core.genesis.GenesisLoader;
import org.ethereum.crypto.ECKey;
import org.ethereum.datasource.*;
import org.ethereum.datasource.inmem.HashMapDB;
import org.ethereum.db.PruneManager;
import org.ethereum.db.RepositoryRoot;
import org.ethereum.db.ByteArrayWrapper;
import org.ethereum.db.IndexedBlockStore;
import org.ethereum.listener.CompositeEthereumListener;
import org.ethereum.listener.EthereumListener;
import org.ethereum.listener.EthereumListenerAdapter;
import org.ethereum.mine.Ethash;
import org.ethereum.solidity.compiler.CompilationResult;
import org.ethereum.solidity.compiler.CompilationResult.ContractMetadata;
import org.ethereum.solidity.compiler.SolidityCompiler;
import org.ethereum.sync.SyncManager;
import org.ethereum.util.ByteUtil;
import org.ethereum.util.FastByteComparisons;
import org.ethereum.validator.DependentBlockHeaderRuleAdapter;
import org.ethereum.vm.DataWord;
import org.ethereum.vm.LogInfo;
import org.ethereum.vm.hook.VMHook;
import org.ethereum.vm.program.invoke.ProgramInvokeFactoryImpl;
import org.iq80.leveldb.DBException;
import org.spongycastle.util.encoders.Hex;
import java.io.IOException;
import java.math.BigInteger;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutionException;
import static org.ethereum.util.ByteUtil.wrap;
/**
* Created by Anton Nashatyrev on 23.03.2016.
*/
public class StandaloneBlockchain implements LocalBlockchain {
Genesis genesis;
byte[] coinbase;
BlockchainImpl blockchain;
PendingStateImpl pendingState;
CompositeEthereumListener listener;
ECKey txSender;
long gasPrice;
long gasLimit;
boolean autoBlock;
long dbDelay = 0;
long totalDbHits = 0;
BlockchainNetConfig netConfig;
int blockGasIncreasePercent = 0;
long time = 0;
long timeIncrement = 13;
private HashMapDB<byte[]> stateDS;
JournalSource<byte[]> pruningStateDS;
PruneManager pruneManager;
private BlockSummary lastSummary;
private VMHook vmHook = VMHook.EMPTY;
class PendingTx {
ECKey sender;
byte[] toAddress;
BigInteger value;
byte[] data;
SolidityContractImpl createdContract;
SolidityContractImpl targetContract;
Transaction customTx;
TransactionResult txResult = new TransactionResult();
public PendingTx(byte[] toAddress, BigInteger value, byte[] data) {
this.sender = txSender;
this.toAddress = toAddress;
this.value = value;
this.data = data;
}
public PendingTx(byte[] toAddress, BigInteger value, byte[] data,
SolidityContractImpl createdContract, SolidityContractImpl targetContract, TransactionResult res) {
this.sender = txSender;
this.toAddress = toAddress;
this.value = value;
this.data = data;
this.createdContract = createdContract;
this.targetContract = targetContract;
this.txResult = res;
}
public PendingTx(Transaction customTx) {
this.customTx = customTx;
}
}
List<PendingTx> submittedTxes = new CopyOnWriteArrayList<>();
public StandaloneBlockchain() {
Genesis genesis = GenesisLoader.loadGenesis(
getClass().getResourceAsStream("/genesis/genesis-light-sb.json"));
withGenesis(genesis);
withGasPrice(50_000_000_000L);
withGasLimit(5_000_000L);
withMinerCoinbase(Hex.decode("ffffffffffffffffffffffffffffffffffffffff"));
setSender(ECKey.fromPrivate(Hex.decode("3ec771c31cac8c0dba77a69e503765701d3c2bb62435888d4ffa38fed60c445c")));
// withAccountBalance(txSender.getAddress(), new BigInteger("100000000000000000000000000"));
}
public StandaloneBlockchain withGenesis(Genesis genesis) {
this.genesis = genesis;
return this;
}
public StandaloneBlockchain withMinerCoinbase(byte[] coinbase) {
this.coinbase = coinbase;
return this;
}
public StandaloneBlockchain withNetConfig(BlockchainNetConfig netConfig) {
this.netConfig = netConfig;
return this;
}
public StandaloneBlockchain withAccountBalance(byte[] address, BigInteger weis) {
AccountState state = new AccountState(BigInteger.ZERO, weis);
genesis.addPremine(wrap(address), state);
genesis.setStateRoot(GenesisLoader.generateRootHash(genesis.getPremine()));
return this;
}
public StandaloneBlockchain withGasPrice(long gasPrice) {
this.gasPrice = gasPrice;
return this;
}
public StandaloneBlockchain withGasLimit(long gasLimit) {
this.gasLimit = gasLimit;
return this;
}
public StandaloneBlockchain withAutoblock(boolean autoblock) {
this.autoBlock = autoblock;
return this;
}
public StandaloneBlockchain withCurrentTime(Date date) {
this.time = date.getTime() / 1000;
return this;
}
/**
     * blockGasIncreasePercent must be within [-100, 100]:
* 0 - the same block gas limit as parent
* 100 - max available increase from parent gas limit
* -100 - max available decrease from parent gas limit
*/
public StandaloneBlockchain withBlockGasIncrease(int blockGasIncreasePercent) {
this.blockGasIncreasePercent = blockGasIncreasePercent;
return this;
}
public StandaloneBlockchain withDbDelay(long dbDelay) {
this.dbDelay = dbDelay;
return this;
}
public StandaloneBlockchain withVmHook(VMHook vmHook) {
this.vmHook = vmHook;
return this;
}
private Map<PendingTx, Transaction> createTransactions(Block parent) {
Map<PendingTx, Transaction> txes = new LinkedHashMap<>();
Map<ByteArrayWrapper, Long> nonces = new HashMap<>();
Repository repoSnapshot = getBlockchain().getRepository().getSnapshotTo(parent.getStateRoot());
for (PendingTx tx : submittedTxes) {
Transaction transaction;
if (tx.customTx == null) {
ByteArrayWrapper senderW = new ByteArrayWrapper(tx.sender.getAddress());
Long nonce = nonces.get(senderW);
if (nonce == null) {
BigInteger bcNonce = repoSnapshot.getNonce(tx.sender.getAddress());
nonce = bcNonce.longValue();
}
nonces.put(senderW, nonce + 1);
byte[] toAddress = tx.targetContract != null ? tx.targetContract.getAddress() : tx.toAddress;
transaction = createTransaction(tx.sender, nonce, toAddress, tx.value, tx.data);
if (tx.createdContract != null) {
tx.createdContract.setAddress(transaction.getContractAddress());
}
} else {
transaction = tx.customTx;
}
txes.put(tx, transaction);
}
return txes;
}
public PendingStateImpl getPendingState() {
return pendingState;
}
public void generatePendingTransactions() {
pendingState.addPendingTransactions(new ArrayList<>(createTransactions(getBlockchain().getBestBlock()).values()));
}
@Override
public Block createBlock() {
return createForkBlock(getBlockchain().getBestBlock());
}
@Override
public Block createForkBlock(Block parent) {
try {
Map<PendingTx, Transaction> txes = createTransactions(parent);
time += timeIncrement;
Block b = getBlockchain().createNewBlock(parent, new ArrayList<>(txes.values()), Collections.EMPTY_LIST, time);
int GAS_LIMIT_BOUND_DIVISOR = SystemProperties.getDefault().getBlockchainConfig().
getCommonConstants().getGAS_LIMIT_BOUND_DIVISOR();
BigInteger newGas = ByteUtil.bytesToBigInteger(parent.getGasLimit())
.multiply(BigInteger.valueOf(GAS_LIMIT_BOUND_DIVISOR * 100 + blockGasIncreasePercent))
.divide(BigInteger.valueOf(GAS_LIMIT_BOUND_DIVISOR * 100));
b.getHeader().setGasLimit(ByteUtil.bigIntegerToBytes(newGas));
Ethash.getForBlock(SystemProperties.getDefault(), b.getNumber()).mineLight(b).get();
ImportResult importResult = getBlockchain().tryToConnect(b);
if (importResult != ImportResult.IMPORTED_BEST && importResult != ImportResult.IMPORTED_NOT_BEST) {
throw new RuntimeException("Invalid block import result " + importResult + " for block " + b);
}
List<PendingTx> pendingTxes = new ArrayList<>(txes.keySet());
for (int i = 0; i < lastSummary.getReceipts().size(); i++) {
pendingTxes.get(i).txResult.receipt = lastSummary.getReceipts().get(i);
pendingTxes.get(i).txResult.executionSummary = getTxSummary(lastSummary, i);
}
submittedTxes.clear();
return b;
} catch (InterruptedException|ExecutionException e) {
throw new RuntimeException(e);
}
}
private TransactionExecutionSummary getTxSummary(BlockSummary bs, int idx) {
TransactionReceipt txReceipt = bs.getReceipts().get(idx);
for (TransactionExecutionSummary summary : bs.getSummaries()) {
if (FastByteComparisons.equal(txReceipt.getTransaction().getHash(), summary.getTransaction().getHash())) {
return summary;
}
}
return null;
}
public Transaction createTransaction(long nonce, byte[] toAddress, long value, byte[] data) {
return createTransaction(getSender(), nonce, toAddress, BigInteger.valueOf(value), data);
}
public Transaction createTransaction(ECKey sender, long nonce, byte[] toAddress, BigInteger value, byte[] data) {
Transaction transaction = new Transaction(ByteUtil.longToBytesNoLeadZeroes(nonce),
ByteUtil.longToBytesNoLeadZeroes(gasPrice),
ByteUtil.longToBytesNoLeadZeroes(gasLimit),
toAddress, ByteUtil.bigIntegerToBytes(value),
data,
null);
transaction.sign(sender);
return transaction;
}
public void resetSubmittedTransactions() {
submittedTxes.clear();
}
@Override
public void setSender(ECKey senderPrivateKey) {
txSender = senderPrivateKey;
// if (!getBlockchain().getRepository().isExist(senderPrivateKey.getAddress())) {
// Repository repository = getBlockchain().getRepository();
// Repository track = repository.startTracking();
// track.createAccount(senderPrivateKey.getAddress());
// track.commit();
// }
}
public ECKey getSender() {
return txSender;
}
@Override
public void sendEther(byte[] toAddress, BigInteger weis) {
submitNewTx(new PendingTx(toAddress, weis, new byte[0]));
}
public void submitTransaction(Transaction tx) {
submitNewTx(new PendingTx(tx));
}
@Override
public SolidityContract submitNewContract(String soliditySrc, Object... constructorArgs) {
return submitNewContract(soliditySrc, null, constructorArgs);
}
@Override
public SolidityContract submitNewContract(String soliditySrc, String contractName, Object... constructorArgs) {
SolidityContractImpl contract = createContract(soliditySrc, contractName);
return submitNewContract(contract, constructorArgs);
}
@Override
public SolidityContract submitNewContractFromJson(String json, Object... constructorArgs) {
return submitNewContractFromJson(json, null, constructorArgs);
}
@Override
public SolidityContract submitNewContractFromJson(String json, String contractName, Object... constructorArgs) {
SolidityContractImpl contract;
try {
contract = createContractFromJson(contractName, json);
return submitNewContract(contract, constructorArgs);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public SolidityContract submitNewContract(ContractMetadata contractMetaData, Object... constructorArgs) {
SolidityContractImpl contract = createContract(contractMetaData);
return submitNewContract(contract, constructorArgs);
}
private SolidityContract submitNewContract(SolidityContractImpl contract, Object... constructorArgs) {
CallTransaction.Function constructor = contract.contract.getConstructor();
if (constructor == null && constructorArgs.length > 0) {
throw new RuntimeException("No constructor with params found");
}
byte[] argsEncoded = constructor == null ? new byte[0] : constructor.encodeArguments(constructorArgs);
submitNewTx(new PendingTx(new byte[0], BigInteger.ZERO,
ByteUtil.merge(Hex.decode(contract.getBinary()), argsEncoded), contract, null,
new TransactionResult()));
return contract;
}
private SolidityContractImpl createContract(String soliditySrc, String contractName) {
try {
SolidityCompiler.Result compileRes = SolidityCompiler.compile(soliditySrc.getBytes(), true, SolidityCompiler.Options.ABI, SolidityCompiler.Options.BIN);
if (compileRes.isFailed()) throw new RuntimeException("Compile result: " + compileRes.errors);
return createContractFromJson(contractName, compileRes.output);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private SolidityContractImpl createContractFromJson(String contractName, String json) throws IOException {
CompilationResult result = CompilationResult.parse(json);
if (contractName == null) {
contractName = result.getContractName();
}
return createContract(contractName, result);
}
/**
     * @param contractName name of the contract to select from the compilation result
     * @param result parsed Solidity compilation result
     * @return contract wrapper bound to the selected contract metadata
*/
private SolidityContractImpl createContract(String contractName, CompilationResult result) {
ContractMetadata cMetaData = result.getContract(contractName);
SolidityContractImpl contract = new SolidityContractImpl(cMetaData);
for (CompilationResult.ContractMetadata metadata : result.getContracts()) {
contract.addRelatedContract(metadata.abi);
}
return contract;
}
private SolidityContractImpl createContract(ContractMetadata contractData) {
SolidityContractImpl contract = new SolidityContractImpl(contractData);
contract.addRelatedContract(contractData.abi);
return contract;
}
@Override
public SolidityContract createExistingContractFromSrc(String soliditySrc, String contractName, byte[] contractAddress) {
SolidityContractImpl contract = createContract(soliditySrc, contractName);
contract.setAddress(contractAddress);
return contract;
}
@Override
public SolidityContract createExistingContractFromSrc(String soliditySrc, byte[] contractAddress) {
return createExistingContractFromSrc(soliditySrc, null, contractAddress);
}
@Override
public SolidityContract createExistingContractFromABI(String ABI, byte[] contractAddress) {
SolidityContractImpl contract = new SolidityContractImpl(ABI);
contract.setAddress(contractAddress);
return contract;
}
@Override
public BlockchainImpl getBlockchain() {
if (blockchain == null) {
blockchain = createBlockchain(genesis);
blockchain.setMinerCoinbase(coinbase);
addEthereumListener(new EthereumListenerAdapter() {
@Override
public void onBlock(BlockSummary blockSummary) {
lastSummary = blockSummary;
}
});
}
return blockchain;
}
public void addEthereumListener(EthereumListener listener) {
getBlockchain();
this.listener.addListener(listener);
}
private void submitNewTx(PendingTx tx) {
getBlockchain();
submittedTxes.add(tx);
if (autoBlock) {
createBlock();
}
}
public HashMapDB<byte[]> getStateDS() {
return stateDS;
}
public Source<byte[], byte[]> getPruningStateDS() {
return pruningStateDS;
}
public long getTotalDbHits() {
return totalDbHits;
}
private BlockchainImpl createBlockchain(Genesis genesis) {
SystemProperties.getDefault().setBlockchainConfig(netConfig != null ? netConfig : getEasyMiningConfig());
IndexedBlockStore blockStore = new IndexedBlockStore();
blockStore.init(new HashMapDB<byte[]>(), new HashMapDB<byte[]>());
stateDS = new HashMapDB<>();
pruningStateDS = new JournalSource<>(stateDS);
pruneManager = new PruneManager(blockStore, pruningStateDS,
stateDS, SystemProperties.getDefault().databasePruneDepth());
final RepositoryRoot repository = new RepositoryRoot(pruningStateDS);
ProgramInvokeFactoryImpl programInvokeFactory = new ProgramInvokeFactoryImpl();
listener = new CompositeEthereumListener();
BlockchainImpl blockchain = new BlockchainImpl(blockStore, repository)
.withEthereumListener(listener)
.withSyncManager(new SyncManager())
.withVmHook(vmHook);
blockchain.setParentHeaderValidator(new DependentBlockHeaderRuleAdapter());
blockchain.setProgramInvokeFactory(programInvokeFactory);
blockchain.setPruneManager(pruneManager);
blockchain.byTest = true;
pendingState = new PendingStateImpl(listener);
pendingState.setBlockchain(blockchain);
blockchain.setPendingState(pendingState);
Genesis.populateRepository(repository, genesis);
repository.commit();
blockStore.saveBlock(genesis, genesis.getDifficultyBI(), true);
blockchain.setBestBlock(genesis);
blockchain.setTotalDifficulty(genesis.getDifficultyBI());
pruneManager.blockCommitted(genesis.getHeader());
return blockchain;
}
public class SolidityFunctionImpl implements SolidityFunction {
SolidityContractImpl contract;
CallTransaction.Function abi;
public SolidityFunctionImpl(SolidityContractImpl contract, CallTransaction.Function abi) {
this.contract = contract;
this.abi = abi;
}
@Override
public SolidityContract getContract() {
return contract;
}
@Override
public CallTransaction.Function getInterface() {
return abi;
}
}
public class SolidityContractImpl implements SolidityContract {
byte[] address;
public CompilationResult.ContractMetadata compiled;
public CallTransaction.Contract contract;
public List<CallTransaction.Contract> relatedContracts = new ArrayList<>();
public SolidityContractImpl(String abi) {
contract = new CallTransaction.Contract(abi);
}
public SolidityContractImpl(CompilationResult.ContractMetadata result) {
this(result.abi);
compiled = result;
}
public void addRelatedContract(String abi) {
CallTransaction.Contract c = new CallTransaction.Contract(abi);
relatedContracts.add(c);
}
void setAddress(byte[] address) {
this.address = address;
}
@Override
public byte[] getAddress() {
if (address == null) {
throw new RuntimeException("Contract address will be assigned only after block inclusion. Call createBlock() first.");
}
return address;
}
@Override
public SolidityCallResult callFunction(String functionName, Object... args) {
return callFunction(0, functionName, args);
}
@Override
public SolidityCallResult callFunction(BigInteger value, String functionName, Object... args) {
CallTransaction.Function function = contract.getByName(functionName);
byte[] data = function.encode(convertArgs(args));
SolidityCallResult res = new SolidityCallResultImpl(this, function);
submitNewTx(new PendingTx(null, value, data, null, this, res));
return res;
}
@Override
public Object[] callConstFunction(String functionName, Object... args) {
return callConstFunction(getBlockchain().getBestBlock(), functionName, args);
}
@Override
public Object[] callConstFunction(Block callBlock, String functionName, Object... args) {
CallTransaction.Function func = contract.getByName(functionName);
if (func == null) throw new RuntimeException("No function with name '" + functionName + "'");
Transaction tx = CallTransaction.createCallTransaction(0, 0, 100000000000000L,
Hex.toHexString(getAddress()), 0, func, convertArgs(args));
tx.sign(ECKey.DUMMY);
Repository repository = getBlockchain().getRepository().getSnapshotTo(callBlock.getStateRoot()).startTracking();
try {
org.ethereum.core.TransactionExecutor executor = new org.ethereum.core.TransactionExecutor
(tx, callBlock.getCoinbase(), repository, getBlockchain().getBlockStore(),
getBlockchain().getProgramInvokeFactory(), callBlock)
.setLocalCall(true);
executor.init();
executor.execute();
executor.go();
executor.finalization();
return func.decodeResult(executor.getResult().getHReturn());
} finally {
repository.rollback();
}
}
private Object[] convertArgs(Object[] args) {
Object[] ret = new Object[args.length];
for (int i = 0; i < args.length; i++) {
if (args[i] instanceof SolidityFunction) {
SolidityFunction f = (SolidityFunction) args[i];
ret[i] = ByteUtil.merge(f.getContract().getAddress(), f.getInterface().encodeSignature());
} else {
ret[i] = args[i];
}
}
return ret;
}
@Override
public SolidityStorage getStorage() {
return new SolidityStorageImpl(getAddress());
}
@Override
public String getABI() {
return compiled.abi;
}
@Override
public String getBinary() {
return compiled.bin;
}
@Override
public void call(byte[] callData) {
            // for this we need a cleaner separation of EasyBlockchain into
            // abstract and Solidity-specific parts
throw new UnsupportedOperationException();
}
@Override
public SolidityFunction getFunction(String name) {
return new SolidityFunctionImpl(this, contract.getByName(name));
}
}
public class SolidityCallResultImpl extends SolidityCallResult {
SolidityContractImpl contract;
CallTransaction.Function function;
SolidityCallResultImpl(SolidityContractImpl contract, CallTransaction.Function function) {
this.contract = contract;
this.function = function;
}
@Override
public CallTransaction.Function getFunction() {
return function;
}
public List<CallTransaction.Invocation> getEvents() {
List<CallTransaction.Invocation> ret = new ArrayList<>();
for (LogInfo logInfo : getReceipt().getLogInfoList()) {
for (CallTransaction.Contract c : contract.relatedContracts) {
CallTransaction.Invocation event = c.parseEvent(logInfo);
if (event != null) {
ret.add(event);
break;
}
}
}
return ret;
}
@Override
public String toString() {
String ret = "SolidityCallResult{" +
function + ": " +
(isIncluded() ? "EXECUTED" : "PENDING") + ", ";
if (isIncluded()) {
ret += isSuccessful() ? "SUCCESS" : ("ERR (" + getReceipt().getError() + ")");
ret += ", ";
if (isSuccessful()) {
ret += "Ret: " + Arrays.toString(getReturnValues()) + ", ";
ret += "Events: " + getEvents() + ", ";
}
}
return ret + "}";
}
}
class SolidityStorageImpl implements SolidityStorage {
byte[] contractAddr;
public SolidityStorageImpl(byte[] contractAddr) {
this.contractAddr = contractAddr;
}
@Override
public byte[] getStorageSlot(long slot) {
return getStorageSlot(DataWord.of(slot).getData());
}
@Override
public byte[] getStorageSlot(byte[] slot) {
DataWord ret = getBlockchain().getRepository().getContractDetails(contractAddr).get(DataWord.of(slot));
return ret.getData();
}
}
class SlowHashMapDB extends HashMapDB<byte[]> {
private void sleep(int cnt) {
totalDbHits += cnt;
if (dbDelay == 0) return;
try {
Thread.sleep(dbDelay * cnt);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
@Override
public synchronized void delete(byte[] arg0) throws DBException {
super.delete(arg0);
sleep(1);
}
@Override
public synchronized byte[] get(byte[] arg0) throws DBException {
sleep(1);
return super.get(arg0);
}
@Override
public synchronized void put(byte[] key, byte[] value) throws DBException {
sleep(1);
super.put(key, value);
}
@Override
public synchronized void updateBatch(Map<byte[], byte[]> rows) {
sleep(rows.size() / 2);
super.updateBatch(rows);
}
}
// Override blockchain net config for fast mining
public static PetersburgConfig getEasyMiningConfig() {
return new PetersburgConfig(new DaoNoHFConfig(new HomesteadConfig(new HomesteadConfig.HomesteadConstants() {
@Override
public BigInteger getMINIMUM_DIFFICULTY() {
return BigInteger.ONE;
}
}), 0));
}
}
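// Hedged usage sketch, not part of the original file: a minimal session with the standalone
// chain - fund transfer plus manual block creation under the easy-mining config. The class
// name and the generated keys/addresses are hypothetical.
class StandaloneBlockchainExample {
    public static void main(String[] args) {
        StandaloneBlockchain bc = new StandaloneBlockchain();
        ECKey alice = new ECKey();
        bc.withAccountBalance(alice.getAddress(), EtherUtil.convert(100, EtherUtil.Unit.ETHER));
        bc.setSender(alice);
        byte[] bob = new ECKey().getAddress();
        bc.sendEther(bob, EtherUtil.convert(1, EtherUtil.Unit.ETHER));
        bc.createBlock(); // mines the pending transaction (minimum difficulty = 1)
        System.out.println(bc.getBlockchain().getRepository().getBalance(bob)); // 1000000000000000000
    }
}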
| 28,011
| 35.005141
| 164
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/blockchain/Contract.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util.blockchain;
/**
* Abstract Ethereum contract
*
* Created by Anton Nashatyrev on 01.04.2016.
*/
public interface Contract {
/**
* Address of the contract. If the contract creation transaction is
* still in pending state (not included to a block) the address can be missed
*/
byte[] getAddress();
/**
* Submits contract invocation transaction
*/
void call(byte[] callData);
/**
* Returns the interface for accessing contract storage
*/
ContractStorage getStorage();
/**
* Returns the contract code binary Hex encoded
*/
String getBinary();
}
| 1,442
| 29.0625
| 81
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/blockchain/EasyBlockchain.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util.blockchain;
import org.ethereum.core.Blockchain;
import org.ethereum.crypto.ECKey;
import org.ethereum.solidity.compiler.CompilationResult.ContractMetadata;
import java.math.BigInteger;
/**
* Interface for easy blockchain interaction
*
* Created by Anton Nashatyrev on 23.03.2016.
*/
public interface EasyBlockchain {
/**
     * Sets the current sender key, which all transactions (value transfers or
     * contract creations/invocations) will be signed with.
     * The sender should have a sufficient balance.
*/
void setSender(ECKey senderPrivateKey);
/**
* Sends the value from the current sender to the specified recipient address
*/
void sendEther(byte[] toAddress, BigInteger weis);
/**
     * Creates and sends the transaction with the Solidity contract creation code.
     * If the soliditySrc contains more than one contract, the {@link #submitNewContract(String, String, Object[])}
     * method should be used instead; this method throws an exception in that case.
*/
SolidityContract submitNewContract(String soliditySrc, Object... constructorArgs);
/**
* Creates and sends the transaction with the Solidity contract creation code
* The contract name is specified when the soliditySrc has more than one contract
*/
SolidityContract submitNewContract(String soliditySrc, String contractName, Object... constructorArgs);
/**
* Creates and sends the transaction with the Solidity contract creation code from a compiled json.
     * If the compiled json contains more than one contract, the {@link #submitNewContractFromJson(String, String, Object[])}
     * method should be used instead; this method throws an exception in that case.
*/
SolidityContract submitNewContractFromJson(String json, Object... constructorArgs);
/**
* Creates and sends the transaction with the Solidity contract creation code from a compiled json.
* The contract name is specified when the soliditySrc has more than one contract
*/
SolidityContract submitNewContractFromJson(String json, String contractName, Object... constructorArgs);
/**
* Creates and sends the transaction with the Solidity contract creation code from the contractMetaData.
*/
SolidityContract submitNewContract(ContractMetadata contractMetaData, Object... constructorArgs);
/**
* Creates an interface to the Solidity contract already existing on the blockchain.
* The contract source in that case is required only as an interface
* @param soliditySrc Source which describes the existing contract interface
* This could be an abstract contract without function implementations
* @param contractAddress The address of the existing contract
*/
SolidityContract createExistingContractFromSrc(String soliditySrc, byte[] contractAddress);
/**
     * The same as the previous method, but with the exact contract specified by name
     * in the Solidity source
*/
SolidityContract createExistingContractFromSrc(String soliditySrc, String contractName, byte[] contractAddress);
/**
* Creates an interface to the Solidity contract already existing on the blockchain.
* @param ABI Contract JSON ABI string
* @param contractAddress The address of the existing contract
*/
SolidityContract createExistingContractFromABI(String ABI, byte[] contractAddress);
/**
* Returns underlying Blockchain instance
*/
Blockchain getBlockchain();
}
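// A minimal usage sketch, assuming an EasyBlockchain implementation such as StandaloneBlockchain,
// a sender key that already holds enough ether, and that SolidityContract.callFunction(...)
// behaves as in ethereumj; illustrative only.
class EasyBlockchainUsageSketch {
    static void run(EasyBlockchain bc, ECKey fundedKey, byte[] recipient) {
        bc.setSender(fundedKey);                                // the key must hold a sufficient balance
        bc.sendEther(recipient, BigInteger.valueOf(1_000_000)); // plain value transfer, amount in weis
        SolidityContract calc = bc.submitNewContract(
                "contract Calc { uint public sum; function add(uint v) { sum += v; } }");
        calc.callFunction("add", 5);                            // contract invocation transaction
    }
}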
| 4,315
| 40.902913
| 116
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/blockchain/TransactionResult.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util.blockchain;
import org.ethereum.core.TransactionExecutionSummary;
import org.ethereum.core.TransactionReceipt;
/**
* Created by Anton Nashatyrev on 26.07.2016.
*/
public class TransactionResult {
TransactionReceipt receipt;
TransactionExecutionSummary executionSummary;
public boolean isIncluded() {
return receipt != null;
}
public TransactionReceipt getReceipt() {
return receipt;
}
public TransactionExecutionSummary getExecutionSummary() {
return executionSummary;
}
}
| 1,352
| 31.214286
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/blockchain/ContractStorage.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util.blockchain;
/**
* Represents the contract storage which is effectively the
* mapping( uint256 => uint256 )
*
* Created by Anton Nashatyrev on 23.03.2016.
*/
public interface ContractStorage {
byte[] getStorageSlot(long slot);
byte[] getStorageSlot(byte[] slot);
}
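// A minimal sketch of reading a raw storage slot, assuming a contract whose first declared
// "uint public" state variable sits in slot 0 (the standard Solidity layout) and that
// ethereumj's org.ethereum.util.ByteUtil helper is available; illustrative only.
class ContractStorageSketch {
    static java.math.BigInteger readFirstSlot(Contract contract) {
        byte[] raw = contract.getStorage().getStorageSlot(0);     // 32-byte big-endian storage word
        return org.ethereum.util.ByteUtil.bytesToBigInteger(raw); // decode as an unsigned integer
    }
}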
| 1,094
| 35.5
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/blockchain/SolidityFunction.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util.blockchain;
import org.ethereum.core.CallTransaction;
/**
* Created by Anton Nashatyrev on 02.03.2017.
*/
public interface SolidityFunction {
SolidityContract getContract();
CallTransaction.Function getInterface();
}
| 1,047
| 32.806452
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/blockchain/LocalBlockchain.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util.blockchain;
import org.ethereum.core.Block;
/**
* This interface is implemented by the locally created blockchain
* where block issuance can be controlled.
*
* All the pending transactions submitted via EasyBlockchain are
* buffered and become part of the blockchain as soon as
* a new block is generated
*
* Created by Anton Nashatyrev on 24.03.2016.
*/
public interface LocalBlockchain extends EasyBlockchain {
/**
     * Creates a new block which includes all the transactions
     * submitted via EasyBlockchain since the last created block.
     * The pending transaction list is cleared.
     * The current best block on the chain becomes the parent of the
     * created block.
*/
Block createBlock();
/**
     * The same as the previous method, but the parent block is specified explicitly.
     * This is handy for tests/experiments with chain fork branches.
*/
Block createForkBlock(Block parent);
}
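// A minimal usage sketch of block and fork creation, assuming a LocalBlockchain implementation
// such as StandaloneBlockchain; illustrative only.
class LocalBlockchainForkSketch {
    static void run(LocalBlockchain bc) {
        Block b1 = bc.createBlock();         // includes all pending transactions, child of the best block
        Block b2 = bc.createBlock();         // child of b1, still on the main branch
        Block b2f = bc.createForkBlock(b1);  // start a side branch sharing the parent b1
        Block b3f = bc.createForkBlock(b2f); // the fork branch is now longer and may become the best chain
    }
}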
| 1,748
| 34.693878
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/util/blockchain/SolidityCallResult.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.util.blockchain;
import org.ethereum.core.CallTransaction;
import java.util.Arrays;
import java.util.List;
/**
* Created by Anton Nashatyrev on 26.07.2016.
*/
public abstract class SolidityCallResult extends TransactionResult {
public Object getReturnValue() {
Object[] returnValues = getReturnValues();
return isIncluded() && returnValues.length > 0 ? returnValues[0] : null;
}
public Object[] getReturnValues() {
if (!isIncluded()) return null;
byte[] executionResult = getReceipt().getExecutionResult();
return getFunction().decodeResult(executionResult);
}
public abstract CallTransaction.Function getFunction();
public boolean isSuccessful() {
return isIncluded() && getReceipt().isSuccessful();
}
public abstract List<CallTransaction.Invocation> getEvents();
@Override
public String toString() {
String ret = "SolidityCallResult{" +
getFunction() + ": " +
(isIncluded() ? "EXECUTED" : "PENDING") + ", ";
if (isIncluded()) {
ret += isSuccessful() ? "SUCCESS" : ("ERR (" + getReceipt().getError() + ")");
ret += ", ";
if (isSuccessful()) {
ret += "Ret: " + Arrays.toString(getReturnValues()) + ", ";
ret += "Events: " + getEvents() + ", ";
}
}
return ret + "}";
}
}
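// A minimal sketch of inspecting a call result, assuming it was produced by a SolidityContract
// whose callFunction(...) returns a SolidityCallResult as defined above; only accessors declared
// in this class hierarchy are used.
class SolidityCallResultSketch {
    static String describe(SolidityCallResult res) {
        if (!res.isIncluded()) return "pending";                   // not yet included in a block
        if (!res.isSuccessful()) return "error: " + res.getReceipt().getError();
        List<CallTransaction.Invocation> events = res.getEvents(); // decoded log events
        return "ok, ret=" + res.getReturnValue() + ", events=" + events;
    }
}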
| 2,227
| 33.8125
| 90
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/sync/PeerState.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.sync;
/**
* @author Mikhail Kalinin
* @since 14.07.2015
*/
public enum PeerState {
// Common
IDLE,
HEADER_RETRIEVING,
BLOCK_RETRIEVING,
NODE_RETRIEVING,
RECEIPT_RETRIEVING,
// Peer
DONE_HASH_RETRIEVING
}
| 1,053
| 28.277778
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/sync/FastSyncManager.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.sync;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import org.apache.commons.lang3.tuple.Pair;
import org.ethereum.config.SystemProperties;
import org.ethereum.core.*;
import org.ethereum.crypto.HashUtil;
import org.ethereum.datasource.DbSource;
import org.ethereum.datasource.NodeKeyCompositor;
import org.ethereum.datasource.rocksdb.RocksDbDataSource;
import org.ethereum.db.DbFlushManager;
import org.ethereum.db.HeaderStore;
import org.ethereum.db.IndexedBlockStore;
import org.ethereum.db.StateSource;
import org.ethereum.facade.SyncStatus;
import org.ethereum.listener.CompositeEthereumListener;
import org.ethereum.listener.EthereumListener;
import org.ethereum.listener.EthereumListenerAdapter;
import org.ethereum.net.client.Capability;
import org.ethereum.net.eth.handler.Eth63;
import org.ethereum.net.message.ReasonCode;
import org.ethereum.net.server.Channel;
import org.ethereum.trie.TrieKey;
import org.ethereum.util.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.ApplicationContext;
import org.springframework.stereotype.Component;
import java.math.BigInteger;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.concurrent.*;
import java.util.stream.Collectors;
import static org.ethereum.listener.EthereumListener.SyncState.COMPLETE;
import static org.ethereum.listener.EthereumListener.SyncState.SECURE;
import static org.ethereum.listener.EthereumListener.SyncState.UNSECURE;
import static org.ethereum.trie.TrieKey.fromPacked;
import static org.ethereum.util.CompactEncoder.hasTerminator;
import static org.ethereum.util.ByteUtil.toHexString;
/**
* Created by Anton Nashatyrev on 24.10.2016.
*/
@Component
public class FastSyncManager {
private final static Logger logger = LoggerFactory.getLogger("sync");
private final static long REQUEST_TIMEOUT = 5 * 1000;
private final static int REQUEST_MAX_NODES = 384;
private final static int NODE_QUEUE_BEST_SIZE = 100_000;
private final static int MIN_PEERS_FOR_PIVOT_SELECTION = 5;
private final static int FORCE_SYNC_TIMEOUT = 60 * 1000;
private final static int PIVOT_DISTANCE_FROM_HEAD = 1024;
    private final static int MAX_DB_QUEUE_SIZE = 20000;
private static final Capability ETH63_CAPABILITY = new Capability(Capability.ETH, (byte) 63);
public static final byte[] FASTSYNC_DB_KEY_SYNC_STAGE = HashUtil.sha3("Key in state DB indicating fastsync stage in progress".getBytes());
public static final byte[] FASTSYNC_DB_KEY_PIVOT = HashUtil.sha3("Key in state DB with encoded selected pivot block".getBytes());
@Autowired
private SystemProperties config;
@Autowired
private SyncPool pool;
@Autowired
private BlockchainImpl blockchain;
@Autowired
private IndexedBlockStore blockStore;
@Autowired
private SyncManager syncManager;
@Autowired
@Qualifier("blockchainDB")
DbSource<byte[]> blockchainDB;
@Autowired
private StateSource stateSource;
@Autowired
DbFlushManager dbFlushManager;
@Autowired
CompositeEthereumListener listener;
@Autowired
ApplicationContext applicationContext;
int nodesInserted = 0;
private boolean fastSyncInProgress = false;
private BlockingQueue<TrieNodeRequest> dbWriteQueue = new LinkedBlockingQueue<>();
private Thread dbWriterThread;
private Thread fastSyncThread;
private int dbQueueSizeMonitor = -1;
private BlockHeader pivot;
private HeadersDownloader headersDownloader;
private BlockBodiesDownloader blockBodiesDownloader;
private ReceiptsDownloader receiptsDownloader;
private long forceSyncRemains;
private void waitDbQueueSizeBelow(int size) {
synchronized (this) {
try {
dbQueueSizeMonitor = size;
while (dbWriteQueue.size() > size) wait();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} finally {
dbQueueSizeMonitor = -1;
}
}
}
void init() {
dbWriterThread = new Thread(() -> {
try {
while (!Thread.currentThread().isInterrupted()) {
synchronized (FastSyncManager.this) {
if (dbQueueSizeMonitor >= 0 && dbWriteQueue.size() <= dbQueueSizeMonitor) {
FastSyncManager.this.notifyAll();
}
}
TrieNodeRequest request = dbWriteQueue.take();
nodesInserted++;
request.storageHashes().forEach(hash -> stateSource.getNoJournalSource().put(hash, request.response));
if (nodesInserted % 1000 == 0) {
dbFlushManager.commit();
logger.debug("FastSyncDBWriter: commit: dbWriteQueue.size = " + dbWriteQueue.size());
}
}
} catch (InterruptedException e) {
} catch (Exception e) {
logger.error("Fatal FastSync error while writing data", e);
}
}, "FastSyncDBWriter");
dbWriterThread.start();
fastSyncThread = new Thread(() -> {
try {
main();
} catch (Exception e) {
logger.error("Fatal FastSync loop error", e);
}
}, "FastSyncLoop");
fastSyncThread.start();
}
public SyncStatus getSyncState() {
if (!isFastSyncInProgress()) return new SyncStatus(SyncStatus.SyncStage.Complete, 0, 0);
if (pivot == null) {
return new SyncStatus(SyncStatus.SyncStage.PivotBlock,
(FORCE_SYNC_TIMEOUT - forceSyncRemains) / 1000, FORCE_SYNC_TIMEOUT / 1000);
}
EthereumListener.SyncState syncStage = getSyncStage();
switch (syncStage) {
case UNSECURE:
return new SyncStatus(SyncStatus.SyncStage.StateNodes, nodesInserted,
nodesQueue.size() + pendingNodes.size() + nodesInserted);
case SECURE:
if (headersDownloader != null) {
return new SyncStatus(SyncStatus.SyncStage.Headers, headersDownloader.getHeadersLoaded(),
pivot.getNumber());
} else {
return new SyncStatus(SyncStatus.SyncStage.Headers, pivot.getNumber(), pivot.getNumber());
}
case COMPLETE:
if (receiptsDownloader != null) {
return new SyncStatus(SyncStatus.SyncStage.Receipts,
receiptsDownloader.getDownloadedBlocksCount(), pivot.getNumber());
} else if (blockBodiesDownloader != null) {
return new SyncStatus(SyncStatus.SyncStage.BlockBodies,
blockBodiesDownloader.getDownloadedCount(), pivot.getNumber());
} else {
return new SyncStatus(SyncStatus.SyncStage.Receipts, pivot.getNumber(), pivot.getNumber());
}
}
return new SyncStatus(SyncStatus.SyncStage.Complete, 0, 0);
}
enum TrieNodeType {
STATE,
STORAGE,
CODE
}
int stateNodesCnt = 0;
int codeNodesCnt = 0;
int storageNodesCnt = 0;
private class TrieNodeRequest {
TrieNodeType type;
byte[] nodeHash;
byte[] response;
final Map<Long, Long> requestSent = new HashMap<>();
TrieKey nodePath = TrieKey.empty(false);
private final Set<byte[]> accounts = new ByteArraySet();
TrieNodeRequest(TrieNodeType type, byte[] nodeHash) {
this.type = type;
this.nodeHash = nodeHash;
switch (type) {
case STATE: stateNodesCnt++; break;
case CODE: codeNodesCnt++; break;
case STORAGE: storageNodesCnt++; break;
}
}
TrieNodeRequest(TrieNodeType type, byte[] nodeHash, byte[] accountKey) {
this(type, nodeHash);
this.accounts.add(accountKey);
}
TrieNodeRequest(TrieNodeType type, byte[] nodeHash, TrieKey nodePath, Set<byte[]> accounts) {
this(type, nodeHash);
this.nodePath = nodePath;
this.accounts.addAll(accounts);
}
List<TrieNodeRequest> createChildRequests() {
if (type == TrieNodeType.CODE) {
return Collections.emptyList();
}
List<Object> node = Value.fromRlpEncoded(response).asList();
List<TrieNodeRequest> ret = new ArrayList<>();
if (type == TrieNodeType.STATE) {
if (node.size() == 2 && hasTerminator((byte[]) node.get(0))) {
byte[] nodeValue = (byte[]) node.get(1);
AccountState state = new AccountState(nodeValue);
TrieKey accountKey = nodePath.concat(fromPacked((byte[]) node.get(0)));
if (!FastByteComparisons.equal(HashUtil.EMPTY_DATA_HASH, state.getCodeHash())) {
ret.add(new TrieNodeRequest(TrieNodeType.CODE, state.getCodeHash(), accountKey.toNormal()));
}
if (!FastByteComparisons.equal(HashUtil.EMPTY_TRIE_HASH, state.getStateRoot())) {
ret.add(new TrieNodeRequest(TrieNodeType.STORAGE, state.getStateRoot(), accountKey.toNormal()));
}
return ret;
}
}
if (node.size() == 2) {
Value val = new Value(node.get(1));
if (val.isHashCode() && !hasTerminator((byte[]) node.get(0))) {
TrieKey childPath = nodePath.concat(fromPacked((byte[]) node.get(0)));
ret.add(new TrieNodeRequest(type, val.asBytes(), childPath, accountsSnapshot()));
}
} else {
for (int j = 0; j < 16; ++j) {
Value val = new Value(node.get(j));
if (val.isHashCode()) {
TrieKey childPath = nodePath.concat(TrieKey.singleHex(j));
ret.add(new TrieNodeRequest(type, val.asBytes(), childPath, accountsSnapshot()));
}
}
}
return ret;
}
public void reqSent(Long requestId) {
synchronized (FastSyncManager.this) {
Long timestamp = System.currentTimeMillis();
requestSent.put(requestId, timestamp);
}
}
public Set<Long> requestIdsSnapshot() {
synchronized (FastSyncManager.this) {
return new HashSet<Long>(requestSent.keySet());
}
}
public List<byte[]> storageHashes() {
if (type == TrieNodeType.STATE) {
return Collections.singletonList(nodeHash);
} else {
return accountsSnapshot().stream().map(key -> NodeKeyCompositor.compose(nodeHash, key))
.collect(Collectors.toList());
}
}
public Set<byte[]> accountsSnapshot() {
synchronized (FastSyncManager.this) {
return new HashSet<>(accounts);
}
}
public void merge(TrieNodeRequest other) {
synchronized (FastSyncManager.this) {
accounts.addAll(other.accounts);
}
}
@Override
public String toString() {
return "TrieNodeRequest{" +
"type=" + type +
", nodeHash=" + toHexString(nodeHash) +
", nodePath=" + nodePath +
'}';
}
}
Deque<TrieNodeRequest> nodesQueue = new LinkedBlockingDeque<>();
ByteArrayMap<TrieNodeRequest> pendingNodes = new ByteArrayMap<>();
Long requestId = 0L;
private synchronized void purgePending(byte[] hash) {
TrieNodeRequest request = pendingNodes.get(hash);
if (request.requestSent.isEmpty()) pendingNodes.remove(hash);
}
synchronized void processTimeouts() {
long cur = System.currentTimeMillis();
for (TrieNodeRequest request : new ArrayList<>(pendingNodes.values())) {
Iterator<Map.Entry<Long, Long>> reqIterator = request.requestSent.entrySet().iterator();
while (reqIterator.hasNext()) {
Map.Entry<Long, Long> requestEntry = reqIterator.next();
if (cur - requestEntry.getValue() > REQUEST_TIMEOUT) {
reqIterator.remove();
purgePending(request.nodeHash);
nodesQueue.addFirst(request);
}
}
}
}
synchronized void processResponse(TrieNodeRequest req) {
dbWriteQueue.add(req);
for (TrieNodeRequest childRequest : req.createChildRequests()) {
if (nodesQueue.size() > NODE_QUEUE_BEST_SIZE) {
// reducing queue by traversing tree depth-first
nodesQueue.addFirst(childRequest);
} else {
// enlarging queue by traversing tree breadth-first
nodesQueue.add(childRequest);
}
}
}
boolean requestNextNodes(int cnt) {
final Channel idle = pool.getAnyIdle();
if (idle != null) {
final List<byte[]> hashes = new ArrayList<>();
final List<TrieNodeRequest> requestsSent = new ArrayList<>();
final Set<Long> sentRequestIds = new HashSet<>();
synchronized (this) {
for (int i = 0; i < cnt && !nodesQueue.isEmpty(); i++) {
TrieNodeRequest req = nodesQueue.poll();
hashes.add(req.nodeHash);
TrieNodeRequest request = pendingNodes.get(req.nodeHash);
if (request == null) {
pendingNodes.put(req.nodeHash, req);
request = req;
} else {
request.merge(req);
}
sentRequestIds.add(requestId);
request.reqSent(requestId);
requestId++;
requestsSent.add(request);
}
}
if (hashes.size() > 0) {
logger.trace("Requesting " + hashes.size() + " nodes from peer: " + idle);
ListenableFuture<List<Pair<byte[], byte[]>>> nodes = ((Eth63) idle.getEthHandler()).requestTrieNodes(hashes);
final long reqTime = System.currentTimeMillis();
Futures.addCallback(nodes, new FutureCallback<List<Pair<byte[], byte[]>>>() {
@Override
public void onSuccess(List<Pair<byte[], byte[]>> result) {
try {
synchronized (FastSyncManager.this) {
logger.trace("Received " + result.size() + " nodes (of " + hashes.size() + ") from peer: " + idle);
idle.getNodeStatistics().eth63NodesRequested.add(hashes.size());
idle.getNodeStatistics().eth63NodesRetrieveTime.add(System.currentTimeMillis() - reqTime);
for (Pair<byte[], byte[]> pair : result) {
TrieNodeRequest request = pendingNodes.get(pair.getKey());
if (request == null) {
logger.debug("Received node which was not requested: " + toHexString(pair.getKey()) + " from " + idle);
idle.disconnect(ReasonCode.TOO_MANY_PEERS); // We need better peers for this stage
return;
}
Set<Long> intersection = request.requestIdsSnapshot();
intersection.retainAll(sentRequestIds);
if (!intersection.isEmpty()) {
Long inter = intersection.iterator().next();
request.requestSent.remove(inter);
purgePending(pair.getKey());
request.response = pair.getValue();
processResponse(request);
}
}
FastSyncManager.this.notifyAll();
idle.getNodeStatistics().eth63NodesReceived.add(result.size());
}
} catch (Exception e) {
logger.error("Unexpected error processing nodes", e);
}
}
@Override
public void onFailure(Throwable t) {
logger.warn("Error with Trie Node request: " + t);
idle.getNodeStatistics().eth63NodesRequested.add(hashes.size());
idle.getNodeStatistics().eth63NodesRetrieveTime.add(System.currentTimeMillis() - reqTime);
synchronized (FastSyncManager.this) {
for (byte[] hash : hashes) {
final TrieNodeRequest request = pendingNodes.get(hash);
if (request == null) continue;
Set<Long> intersection = request.requestIdsSnapshot();
intersection.retainAll(sentRequestIds);
if (!intersection.isEmpty()) {
Long inter = intersection.iterator().next();
request.requestSent.remove(inter);
nodesQueue.addFirst(request);
purgePending(hash);
}
}
FastSyncManager.this.notifyAll();
}
}
});
return true;
} else {
// idle.getEthHandler().setStatus(SyncState.IDLE);
return false;
}
} else {
return false;
}
}
void retrieveLoop() {
try {
while (!nodesQueue.isEmpty() || !pendingNodes.isEmpty()) {
try {
processTimeouts();
while (requestNextNodes(REQUEST_MAX_NODES)) ;
synchronized (this) {
wait(10);
}
                    waitDbQueueSizeBelow(MAX_DB_QUEUE_SIZE);
logStat();
} catch (InterruptedException e) {
throw e;
} catch (Throwable t) {
logger.error("Error", t);
}
}
waitDbQueueSizeBelow(0);
dbWriterThread.interrupt();
} catch (InterruptedException e) {
logger.warn("Main fast sync loop was interrupted", e);
}
}
long last = 0;
long lastNodeCount = 0;
private void logStat() {
long cur = System.currentTimeMillis();
if (cur - last > 5000) {
logger.info("FastSync: received: " + nodesInserted + ", known: " + nodesQueue.size() + ", pending: " + pendingNodes.size()
+ String.format(", nodes/sec: %1$.2f", 1000d * (nodesInserted - lastNodeCount) / (cur - last)));
last = cur;
lastNodeCount = nodesInserted;
}
}
private void setSyncStage(EthereumListener.SyncState stage) {
if (stage == null) {
blockchainDB.delete(FASTSYNC_DB_KEY_SYNC_STAGE);
} else {
blockchainDB.put(FASTSYNC_DB_KEY_SYNC_STAGE, new byte[]{(byte) stage.ordinal()});
}
}
private EthereumListener.SyncState getSyncStage() {
byte[] bytes = blockchainDB.get(FASTSYNC_DB_KEY_SYNC_STAGE);
if (bytes == null) return UNSECURE;
return EthereumListener.SyncState.values()[bytes[0]];
}
private void syncUnsecure(BlockHeader pivot) {
byte[] pivotStateRoot = pivot.getStateRoot();
TrieNodeRequest request = new TrieNodeRequest(TrieNodeType.STATE, pivotStateRoot);
nodesQueue.add(request);
logger.info("FastSync: downloading state trie at pivot block: " + pivot.getShortDescr());
setSyncStage(UNSECURE);
retrieveLoop();
logger.info("FastSync: state trie download complete! (Nodes count: state: " + stateNodesCnt + ", storage: " +storageNodesCnt + ", code: " +codeNodesCnt + ")");
last = 0;
logStat();
logger.info("FastSync: downloading 256 blocks prior to pivot block (" + pivot.getShortDescr() + ")");
FastSyncDownloader downloader = applicationContext.getBean(FastSyncDownloader.class);
downloader.startImporting(pivot, 260);
downloader.waitForStop();
logger.info("FastSync: complete downloading 256 blocks prior to pivot block (" + pivot.getShortDescr() + ")");
blockchain.setBestBlock(blockStore.getBlockByHash(pivot.getHash()));
logger.info("FastSync: proceeding to regular sync...");
final CountDownLatch syncDoneLatch = new CountDownLatch(1);
listener.addListener(new EthereumListenerAdapter() {
@Override
public void onSyncDone(SyncState state) {
syncDoneLatch.countDown();
}
});
syncManager.initRegularSync(UNSECURE);
logger.info("FastSync: waiting for regular sync to reach the blockchain head...");
// try {
// syncDoneLatch.await();
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
blockchainDB.put(FASTSYNC_DB_KEY_PIVOT, pivot.getEncoded());
dbFlushManager.commit();
dbFlushManager.flush();
logger.info("FastSync: regular sync reached the blockchain head.");
}
private void syncSecure() {
pivot = new BlockHeader(blockchainDB.get(FASTSYNC_DB_KEY_PIVOT));
logger.info("FastSync: downloading headers from pivot down to genesis block for ensure pivot block (" + pivot.getShortDescr() + ") is secure...");
headersDownloader = applicationContext.getBean(HeadersDownloader.class);
headersDownloader.init(pivot.getHash());
setSyncStage(EthereumListener.SyncState.SECURE);
if (config.fastSyncBackupState()) {
if (blockchainDB instanceof RocksDbDataSource) {
dbFlushManager.flushSync();
((RocksDbDataSource) blockchainDB).backup();
}
}
headersDownloader.waitForStop();
if (!FastByteComparisons.equal(headersDownloader.getGenesisHash(), config.getGenesis().getHash())) {
logger.error("FASTSYNC FATAL ERROR: after downloading header chain starting from the pivot block (" +
pivot.getShortDescr() + ") obtained genesis block doesn't match ours: " + toHexString(headersDownloader.getGenesisHash()));
logger.error("Can't recover and exiting now. You need to restart from scratch (all DBs will be reset)");
System.exit(-666);
}
dbFlushManager.commit();
dbFlushManager.flush();
headersDownloader = null;
logger.info("FastSync: all headers downloaded. The state is SECURE now.");
}
private void syncBlocksReceipts() {
pivot = new BlockHeader(blockchainDB.get(FASTSYNC_DB_KEY_PIVOT));
if (!config.fastSyncSkipHistory()) {
logger.info("FastSync: Downloading Block bodies up to pivot block (" + pivot.getShortDescr() + ")...");
blockBodiesDownloader = applicationContext.getBean(BlockBodiesDownloader.class);
setSyncStage(EthereumListener.SyncState.COMPLETE);
blockBodiesDownloader.startImporting();
blockBodiesDownloader.waitForStop();
blockBodiesDownloader = null;
logger.info("FastSync: Block bodies downloaded");
} else {
logger.info("FastSync: skip bodies downloading");
logger.info("Fixing total difficulty which is usually updated during block bodies download");
fixTotalDiff();
logger.info("Total difficulty fixed for full blocks");
blockchain.setHeaderStore(applicationContext.getBean(HeaderStore.class));
}
if (!config.fastSyncSkipHistory()) {
logger.info("FastSync: Downloading receipts...");
            receiptsDownloader = applicationContext.getBean(
                    ReceiptsDownloader.class, 1, pivot.getNumber() + 1);
receiptsDownloader.startImporting();
receiptsDownloader.waitForStop();
receiptsDownloader = null;
logger.info("FastSync: receipts downloaded");
} else {
logger.info("FastSync: skip receipts downloading");
}
logger.info("FastSync: updating totDifficulties starting from the pivot block...");
blockchain.updateBlockTotDifficulties(pivot.getNumber());
synchronized (blockchain) {
Block bestBlock = blockchain.getBestBlock();
BigInteger totalDifficulty = blockchain.getTotalDifficulty();
logger.info("FastSync: totDifficulties updated: bestBlock: " + bestBlock.getShortDescr() + ", totDiff: " + totalDifficulty);
}
setSyncStage(null);
blockchainDB.delete(FASTSYNC_DB_KEY_PIVOT);
dbFlushManager.commit();
dbFlushManager.flush();
removeHeadersDb(logger);
}
/**
* Fixing total difficulty which is usually updated during block bodies download
* Executed if {@link BlockBodiesDownloader} stage is skipped
*/
private void fixTotalDiff() {
long firstFullBlockNum = pivot.getNumber();
while (blockStore.getChainBlockByNumber(firstFullBlockNum - 1) != null) {
--firstFullBlockNum;
}
Block firstFullBlock = blockStore.getChainBlockByNumber(firstFullBlockNum);
HeaderStore headersStore = applicationContext.getBean(HeaderStore.class);
BigInteger totalDifficulty = blockStore.getChainBlockByNumber(0).getDifficultyBI();
for (int i = 1; i < firstFullBlockNum; ++i) {
totalDifficulty = totalDifficulty.add(headersStore.getHeaderByNumber(i).getDifficultyBI());
}
blockStore.saveBlock(firstFullBlock, totalDifficulty.add(firstFullBlock.getDifficultyBI()), true);
blockchain.updateBlockTotDifficulties(firstFullBlockNum + 1);
}
/**
* Physically removes headers DB if fast sync was performed without skipHistory
*/
public boolean removeHeadersDb(Logger logger) {
if (blockStore.getBestBlock().getNumber() > 0 &&
blockStore.getChainBlockByNumber(1) != null) {
            // Full blocks are already in place, so the headers DB left over from fast sync can be removed
Path headersDbPath = Paths.get(config.databaseDir(), "headers");
if (Files.exists(headersDbPath)) {
logger.info("Headers DB was used during FastSync but not required any more. Removing.");
DbSource<byte[]> headerSource = (DbSource<byte[]>) applicationContext.getBean("headerSource");
headerSource.close();
FileUtil.recursiveDelete(headersDbPath.toString());
logger.info("Headers DB removed.");
return true;
}
}
return false;
}
public void main() {
if (blockchain.getBestBlock().getNumber() == 0 || getSyncStage() == SECURE || getSyncStage() == COMPLETE) {
            // either no DB at all (fresh sync, or the DB was deleted due to an unfinished UNSECURE stage while initializing)
            // or we have an incomplete headers/blocks/receipts download
fastSyncInProgress = true;
pool.setNodesSelector(handler -> handler.getNodeStatistics().capabilities.contains(ETH63_CAPABILITY));
try {
EthereumListener.SyncState origSyncStage = getSyncStage();
switch (origSyncStage) {
case UNSECURE:
pivot = getPivotBlock();
if (pivot.getNumber() == 0) {
logger.info("FastSync: too short blockchain, proceeding with regular sync...");
syncManager.initRegularSync(EthereumListener.SyncState.COMPLETE);
return;
}
syncUnsecure(pivot); // regularSync should be inited here
case SECURE:
if (origSyncStage == SECURE) {
logger.info("FastSync: UNSECURE sync was completed prior to this run, proceeding with next stage...");
logger.info("Initializing regular sync");
syncManager.initRegularSync(EthereumListener.SyncState.UNSECURE);
}
syncSecure();
fireSyncDone(SECURE);
case COMPLETE:
if (origSyncStage == COMPLETE) {
logger.info("FastSync: SECURE sync was completed prior to this run, proceeding with next stage...");
logger.info("Initializing regular sync");
syncManager.initRegularSync(EthereumListener.SyncState.SECURE);
}
syncBlocksReceipts();
fireSyncDone(COMPLETE);
}
logger.info("FastSync: Full sync done.");
} catch (InterruptedException ex) {
logger.info("Shutting down due to interruption");
} finally {
fastSyncInProgress = false;
pool.setNodesSelector(null);
}
} else {
logger.info("FastSync: fast sync was completed, best block: (" + blockchain.getBestBlock().getShortDescr() + "). " +
"Continue with regular sync...");
syncManager.initRegularSync(EthereumListener.SyncState.COMPLETE);
}
}
private void fireSyncDone(EthereumListener.SyncState state) {
// prevent early state notification when sync is not yet done
syncManager.setSyncDoneType(state);
if (syncManager.isSyncDone()) {
listener.onSyncDone(state);
}
}
public boolean isFastSyncInProgress() {
return fastSyncInProgress;
}
private BlockHeader getPivotBlock() throws InterruptedException {
byte[] pivotBlockHash = config.getFastSyncPivotBlockHash();
long pivotBlockNumber = 0;
long start = System.currentTimeMillis();
long s = start;
if (pivotBlockHash != null) {
logger.info("FastSync: fetching trusted pivot block with hash " + toHexString(pivotBlockHash));
} else {
logger.info("FastSync: looking for best block number...");
BlockIdentifier bestKnownBlock;
while (true) {
List<Channel> allIdle = pool.getAllIdle();
forceSyncRemains = FORCE_SYNC_TIMEOUT - (System.currentTimeMillis() - start);
if (allIdle.size() >= MIN_PEERS_FOR_PIVOT_SELECTION || forceSyncRemains < 0 && !allIdle.isEmpty()) {
Channel bestPeer = allIdle.get(0);
for (Channel channel : allIdle) {
if (bestPeer.getEthHandler().getBestKnownBlock().getNumber() < channel.getEthHandler().getBestKnownBlock().getNumber()) {
bestPeer = channel;
}
}
bestKnownBlock = bestPeer.getEthHandler().getBestKnownBlock();
if (bestKnownBlock.getNumber() > 1000) {
logger.info("FastSync: best block " + bestKnownBlock + " found with peer " + bestPeer);
break;
}
}
long t = System.currentTimeMillis();
if (t - s > 5000) {
logger.info("FastSync: waiting for at least " + MIN_PEERS_FOR_PIVOT_SELECTION + " peers or " + forceSyncRemains / 1000 + " sec to select pivot block... ("
+ allIdle.size() + " peers so far)");
s = t;
}
Thread.sleep(500);
}
pivotBlockNumber = Math.max(bestKnownBlock.getNumber() - PIVOT_DISTANCE_FROM_HEAD, 0);
logger.info("FastSync: fetching pivot block #" + pivotBlockNumber);
}
try {
while (true) {
BlockHeader result = null;
if (pivotBlockHash != null) {
result = getPivotHeaderByHash(pivotBlockHash);
} else {
Pair<BlockHeader, Long> pivotResult = getPivotHeaderByNumber(pivotBlockNumber);
if (pivotResult != null) {
if (pivotResult.getRight() != null) {
pivotBlockNumber = pivotResult.getRight();
if (pivotBlockNumber == 0) {
throw new RuntimeException("Cannot fastsync with current set of peers");
}
} else {
result = pivotResult.getLeft();
}
}
}
if (result != null) return result;
long t = System.currentTimeMillis();
if (t - s > 5000) {
logger.info("FastSync: waiting for a peer to fetch pivot block...");
s = t;
}
Thread.sleep(500);
}
} catch (InterruptedException e) {
throw e;
} catch (Exception e) {
logger.error("Unexpected", e);
throw new RuntimeException(e);
}
}
private BlockHeader getPivotHeaderByHash(byte[] pivotBlockHash) throws Exception {
Channel bestIdle = pool.getAnyIdle();
if (bestIdle != null) {
try {
ListenableFuture<List<BlockHeader>> future =
bestIdle.getEthHandler().sendGetBlockHeaders(pivotBlockHash, 1, 0, false);
List<BlockHeader> blockHeaders = future.get(3, TimeUnit.SECONDS);
if (!blockHeaders.isEmpty()) {
BlockHeader ret = blockHeaders.get(0);
if (FastByteComparisons.equal(pivotBlockHash, ret.getHash())) {
logger.info("Pivot header fetched: " + ret.getShortDescr());
return ret;
}
logger.warn("Peer " + bestIdle + " returned pivot block with another hash: " +
toHexString(ret.getHash()) + " Dropping the peer.");
bestIdle.disconnect(ReasonCode.USELESS_PEER);
} else {
logger.warn("Peer " + bestIdle + " doesn't returned correct pivot block. Dropping the peer.");
bestIdle.getNodeStatistics().wrongFork = true;
bestIdle.disconnect(ReasonCode.USELESS_PEER);
}
} catch (TimeoutException e) {
logger.debug("Timeout waiting for answer", e);
}
}
return null;
}
/**
     * 1. Requests the header at pivotBlockNumber from all idle peers
     * 2. Ensures that the same pivot header is returned by 50% + 1 of the peers
     * 3. Otherwise proposes a new (stepped back) pivotBlockNumber
     * @param pivotBlockNumber Pivot block number
     * @return null                        - if no peers are available
     *         (null, newPivotBlockNumber) - if it's better to try another pivot block number
     *         (BlockHeader, null)         - if the pivot was successfully fetched and confirmed by the majority of peers
*/
private Pair<BlockHeader, Long> getPivotHeaderByNumber(long pivotBlockNumber) throws Exception {
List<Channel> allIdle = pool.getAllIdle();
if (!allIdle.isEmpty()) {
try {
List<ListenableFuture<List<BlockHeader>>> result = new ArrayList<>();
for (Channel channel : allIdle) {
ListenableFuture<List<BlockHeader>> future =
channel.getEthHandler().sendGetBlockHeaders(pivotBlockNumber, 1, false);
result.add(future);
}
ListenableFuture<List<List<BlockHeader>>> successfulRequests = Futures.successfulAsList(result);
List<List<BlockHeader>> results = successfulRequests.get(3, TimeUnit.SECONDS);
Map<BlockHeader, Integer> pivotMap = new HashMap<>();
for (List<BlockHeader> blockHeaders : results) {
if (!blockHeaders.isEmpty()) {
BlockHeader currentHeader = blockHeaders.get(0);
if (pivotMap.containsKey(currentHeader)) {
pivotMap.put(currentHeader, pivotMap.get(currentHeader) + 1);
} else {
pivotMap.put(currentHeader, 1);
}
}
}
int peerCount = allIdle.size();
for (Map.Entry<BlockHeader, Integer> pivotEntry : pivotMap.entrySet()) {
// Require 50% + 1 peer to trust pivot
if (pivotEntry.getValue() * 2 > peerCount) {
logger.info("Pivot header fetched: " + pivotEntry.getKey().getShortDescr());
return Pair.of(pivotEntry.getKey(), null);
}
}
Long newPivotBlockNumber = Math.max(0, pivotBlockNumber - 1000);
logger.info("Current pivot candidate not verified by majority of peers, " +
"stepping back to block #{}", newPivotBlockNumber);
return Pair.of(null, newPivotBlockNumber);
} catch (TimeoutException e) {
logger.debug("Timeout waiting for answer", e);
}
}
return null;
}
public boolean isInProgress() {
return blockchainDB.get(FASTSYNC_DB_KEY_PIVOT) != null;
}
public void close() {
logger.info("Closing FastSyncManager");
try {
fastSyncThread.interrupt();
fastSyncInProgress = false;
dbWriterThread.interrupt();
dbFlushManager.commit();
dbFlushManager.flushSync();
fastSyncThread.join(10 * 1000);
dbWriterThread.join(10 * 1000);
} catch (Exception e) {
logger.warn("Problems closing FastSyncManager", e);
}
}
}
| 40,435
| 41.385744
| 174
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/sync/SyncManager.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.sync;
import org.ethereum.config.SystemProperties;
import org.ethereum.core.*;
import org.ethereum.core.Blockchain;
import org.ethereum.facade.SyncStatus;
import org.ethereum.listener.CompositeEthereumListener;
import org.ethereum.listener.EthereumListener;
import org.ethereum.net.server.Channel;
import org.ethereum.net.server.ChannelManager;
import org.ethereum.util.ExecutorPipeline;
import org.ethereum.validator.BlockHeaderValidator;
import org.ethereum.validator.DependentBlockHeaderRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
import static java.lang.Math.max;
import static java.util.Collections.singletonList;
import static org.ethereum.core.ImportResult.*;
import static org.ethereum.util.Utils.longToTimePeriod;
import static org.ethereum.util.ByteUtil.toHexString;
/**
* @author Mikhail Kalinin
* @since 14.07.2015
*/
@Component
public class SyncManager extends BlockDownloader {
private final static Logger logger = LoggerFactory.getLogger("sync");
    // Transaction.getSender() is quite a heavy operation, so we prefetch this value on several threads
    // to offload the main block import cycle
private ExecutorPipeline<BlockWrapper,BlockWrapper> exec1 = new ExecutorPipeline<>
(4, 1000, true, blockWrapper -> {
for (Transaction tx : blockWrapper.getBlock().getTransactionsList()) {
tx.getSender();
}
return blockWrapper;
}, throwable -> logger.error("Unexpected exception: ", throwable));
private ExecutorPipeline<BlockWrapper, Void> exec2 = exec1.add(1, 1, new Consumer<BlockWrapper>() {
@Override
public void accept(BlockWrapper blockWrapper) {
blockQueue.add(blockWrapper);
estimateBlockSize(blockWrapper);
}
});
/**
* Queue with validated blocks to be added to the blockchain
*/
private BlockingQueue<BlockWrapper> blockQueue = new LinkedBlockingQueue<>();
@Autowired
private Blockchain blockchain;
@Autowired
private CompositeEthereumListener compositeEthereumListener;
@Autowired
private FastSyncManager fastSyncManager;
@Autowired
private DependentBlockHeaderRule parentHeaderValidator;
ChannelManager channelManager;
private SystemProperties config;
private SyncPool pool;
private SyncQueueImpl syncQueue;
private Thread syncQueueThread;
private long blockBytesLimit = 32 * 1024 * 1024;
private long lastKnownBlockNumber = 0;
private boolean syncDone = false;
private AtomicLong importIdleTime = new AtomicLong();
private long importStart;
private EthereumListener.SyncState syncDoneType = EthereumListener.SyncState.COMPLETE;
private ScheduledExecutorService logExecutor = Executors.newSingleThreadScheduledExecutor();
private LocalDateTime initRegularTime;
private AtomicInteger blocksInMem = new AtomicInteger(0);
public SyncManager() {
super(null);
}
@Autowired
public SyncManager(final SystemProperties config, BlockHeaderValidator validator) {
super(validator);
this.config = config;
blockBytesLimit = config.blockQueueSize();
setHeaderQueueLimit(config.headerQueueSize() / BlockHeader.MAX_HEADER_SIZE);
}
public void init(final ChannelManager channelManager, final SyncPool pool) {
if (this.channelManager == null) { // First init
this.pool = pool;
this.channelManager = channelManager;
logExecutor.scheduleAtFixedRate(() -> {
try {
logger.info("Sync state: " + getSyncStatus() +
(isSyncDone() || importStart == 0 ? "" : "; Import idle time " +
longToTimePeriod(importIdleTime.get()) + " of total " + longToTimePeriod(System.currentTimeMillis() - importStart)));
} catch (Exception e) {
logger.error("Unexpected", e);
}
}, 10, 10, TimeUnit.SECONDS);
}
if (!config.isSyncEnabled()) {
logger.info("Sync Manager: OFF");
return;
}
logger.info("Sync Manager: ON");
        if (pool.getChannelManager() == null) { // We have never reached this stage of init before
logger.info("Initializing SyncManager.");
pool.init(channelManager, blockchain);
if (config.isFastSyncEnabled()) {
fastSyncManager.init();
} else {
initRegularSync(EthereumListener.SyncState.COMPLETE);
}
}
}
void initRegularSync(EthereumListener.SyncState syncDoneType) {
logger.info("Initializing SyncManager regular sync.");
this.syncDoneType = syncDoneType;
syncQueue = new SyncQueueImpl(blockchain)
.withParentHeaderValidator(parentHeaderValidator);
super.init(syncQueue, pool, "RegularSync");
Runnable queueProducer = this::produceQueue;
syncQueueThread = new Thread (queueProducer, "SyncQueueThread");
syncQueueThread.start();
if (config.makeDoneByTimeout() >= 0) {
logger.info("Custom long sync done timeout set to {} second(s)", config.makeDoneByTimeout());
this.initRegularTime = LocalDateTime.now();
ScheduledExecutorService shortSyncAwait = Executors.newSingleThreadScheduledExecutor();
shortSyncAwait.scheduleAtFixedRate(() -> {
try {
if (LocalDateTime.now().minusSeconds(config.makeDoneByTimeout()).isAfter(initRegularTime) &&
getLastKnownBlockNumber() == blockchain.getBestBlock().getNumber()) {
logger.info("Sync done triggered by timeout");
makeSyncDone();
shortSyncAwait.shutdown();
} else if (syncDone) {
shortSyncAwait.shutdown();
}
} catch (Exception e) {
logger.error("Unexpected", e);
}
}, 0, 2, TimeUnit.SECONDS);
}
}
void setSyncDoneType(EthereumListener.SyncState syncDoneType) {
this.syncDoneType = syncDoneType;
}
public SyncStatus getSyncStatus() {
if (config.isFastSyncEnabled()) {
SyncStatus syncStatus = fastSyncManager.getSyncState();
if (syncStatus.getStage() == SyncStatus.SyncStage.Complete) {
return getSyncStateImpl();
} else {
return new SyncStatus(syncStatus, blockchain.getBestBlock().getNumber(), getLastKnownBlockNumber());
}
} else {
return getSyncStateImpl();
}
}
private SyncStatus getSyncStateImpl() {
if (!config.isSyncEnabled())
return new SyncStatus(SyncStatus.SyncStage.Off, 0, 0, blockchain.getBestBlock().getNumber(),
blockchain.getBestBlock().getNumber());
return new SyncStatus(isSyncDone() ? SyncStatus.SyncStage.Complete : SyncStatus.SyncStage.Regular,
0, 0, blockchain.getBestBlock().getNumber(), getLastKnownBlockNumber());
}
@Override
protected void pushBlocks(List<BlockWrapper> blockWrappers) {
if (!exec1.isShutdown()) {
exec1.pushAll(blockWrappers);
blocksInMem.addAndGet(blockWrappers.size());
}
}
@Override
protected void pushHeaders(List<BlockHeaderWrapper> headers) {}
@Override
protected int getBlockQueueFreeSize() {
return getBlockQueueLimit();
}
@Override
protected int getMaxHeadersInQueue() {
if (getEstimatedBlockSize() == 0) {
            // no block size estimate yet: probe the network cautiously
if (syncQueue.getHeadersCount() < 2 * MAX_IN_REQUEST) {
return 2 * MAX_IN_REQUEST;
} else {
return 0;
}
}
int inMem = blocksInMem.get();
int slotsLeft = Math.max(0, (int) (blockBytesLimit / getEstimatedBlockSize()) - inMem);
if (slotsLeft + inMem < MAX_IN_REQUEST) {
slotsLeft = MAX_IN_REQUEST;
}
// adding 2 * MAX_IN_REQUEST to overcome dark zone buffer
return Math.min(slotsLeft + 2 * MAX_IN_REQUEST, getHeaderQueueLimit());
}
/**
     * Processes the queue, adding blocks to the chain.
*/
private void produceQueue() {
DecimalFormat timeFormat = new DecimalFormat("0.000");
timeFormat.setDecimalFormatSymbols(DecimalFormatSymbols.getInstance(Locale.US));
while (!Thread.currentThread().isInterrupted()) {
BlockWrapper wrapper = null;
try {
long stale = !isSyncDone() && importStart > 0 && blockQueue.isEmpty() ? System.nanoTime() : 0;
wrapper = blockQueue.take();
blocksInMem.decrementAndGet();
if (stale > 0) {
importIdleTime.addAndGet((System.nanoTime() - stale) / 1_000_000);
}
if (importStart == 0) importStart = System.currentTimeMillis();
logger.debug("BlockQueue size: {}, headers queue size: {}, blocks in mem: {} (~{}mb)",
blockQueue.size(), syncQueue.getHeadersCount(), blocksInMem.get(),
blocksInMem.get() * getEstimatedBlockSize() / 1024 / 1024);
long s = System.nanoTime();
long sl;
ImportResult importResult;
synchronized (blockchain) {
sl = System.nanoTime();
importResult = blockchain.tryToConnect(wrapper.getBlock());
}
long f = System.nanoTime();
long t = (f - s) / 1_000_000;
String ts = timeFormat.format(t / 1000d) + "s";
t = (sl - s) / 1_000_000;
ts += t < 10 ? "" : " (lock: " + timeFormat.format(t / 1000d) + "s)";
if (importResult == IMPORTED_BEST) {
logger.info("Success importing BEST: block.number: {}, block.hash: {}, tx.size: {}, time: {}",
wrapper.getNumber(), wrapper.getBlock().getShortHash(),
wrapper.getBlock().getTransactionsList().size(), ts);
if (wrapper.isNewBlock() && !syncDone) {
makeSyncDone();
}
}
if (importResult == IMPORTED_NOT_BEST)
logger.info("Success importing NOT_BEST: block.number: {}, block.hash: {}, tx.size: {}, time: {}",
wrapper.getNumber(), wrapper.getBlock().getShortHash(),
wrapper.getBlock().getTransactionsList().size(), ts);
if (syncDone && (importResult == IMPORTED_BEST || importResult == IMPORTED_NOT_BEST)) {
if (logger.isDebugEnabled()) logger.debug("Block dump: " + toHexString(wrapper.getBlock().getEncoded()));
                    // Asynchronously propagate the block to the network after a successful import
if (wrapper.isNewBlock()) channelManager.onNewForeignBlock(wrapper);
}
                // If we don't have the parent on the chain the block can't be connected yet;
                // log it and wait for more blocks to come.
if (importResult == NO_PARENT) {
logger.error("No parent on the chain for block.number: {} block.hash: {}",
wrapper.getNumber(), wrapper.getBlock().getShortHash());
}
} catch (InterruptedException e) {
break;
} catch (Throwable e) {
if (wrapper != null) {
logger.error("Error processing block {}: ", wrapper.getBlock().getShortDescr(), e);
logger.error("Block dump: {}", toHexString(wrapper.getBlock().getEncoded()));
} else {
logger.error("Error processing unknown block", e);
}
}
}
}
private synchronized void makeSyncDone() {
if (syncDone) return;
syncDone = true;
channelManager.onSyncDone(true);
compositeEthereumListener.onSyncDone(syncDoneType);
}
public CompletableFuture<Void> switchToShortSync() {
final CompletableFuture<Void> syncDoneF = new CompletableFuture<>();
if(!syncDone && config.isSyncEnabled()) {
new Thread(() -> {
while(!blockQueue.isEmpty() && !syncDone) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
syncDoneF.completeExceptionally(e);
}
}
makeSyncDone();
syncDoneF.complete(null);
}).start();
} else {
syncDoneF.complete(null);
}
return syncDoneF;
}
/**
* Adds NEW block to the queue
*
* @param block new block
* @param nodeId nodeId of the remote peer which this block is received from
*
* @return true if block passed validations and was added to the queue,
* otherwise it returns false
*/
public boolean validateAndAddNewBlock(Block block, byte[] nodeId) {
if (syncQueue == null) return true;
// run basic checks
if (!isValid(block.getHeader())) {
return false;
}
lastKnownBlockNumber = block.getNumber();
// skip too distant blocks
if (block.getNumber() > syncQueue.maxNum + MAX_IN_REQUEST * 2) {
return true;
}
// skip if memory limit is already hit
if ((blocksInMem.get() * getEstimatedBlockSize()) > blockBytesLimit) {
return true;
}
logger.debug("Adding new block to sync queue: " + block.getShortDescr());
SyncQueueIfc.ValidatedHeaders res = syncQueue.addHeadersAndValidate(
singletonList(new BlockHeaderWrapper(block.getHeader(), nodeId)));
dropIfValidationFailed(res);
synchronized (this) {
List<Block> newBlocks = syncQueue.addBlocks(singletonList(block));
List<BlockWrapper> wrappers = new ArrayList<>();
for (Block b : newBlocks) {
boolean newBlock = Arrays.equals(block.getHash(), b.getHash());
BlockWrapper wrapper = new BlockWrapper(b, newBlock, nodeId);
wrapper.setReceivedAt(System.currentTimeMillis());
wrappers.add(wrapper);
}
logger.debug("Pushing " + wrappers.size() + " new blocks to import queue: " + (wrappers.isEmpty() ? "" :
wrappers.get(0).getBlock().getShortDescr() + " ... " + wrappers.get(wrappers.size() - 1).getBlock().getShortDescr()));
pushBlocks(wrappers);
}
logger.debug("Blocks waiting to be proceed: queue.size: [{}] lastBlock.number: [{}]",
blockQueue.size(),
block.getNumber());
return true;
}
public boolean isSyncDone() {
return syncDone;
}
public boolean isFastSyncRunning() {
return fastSyncManager.isFastSyncInProgress();
}
public long getLastKnownBlockNumber() {
long ret = max(blockchain.getBestBlock().getNumber(), lastKnownBlockNumber);
for (Channel channel : pool.getActivePeers()) {
BlockIdentifier bestKnownBlock = channel.getEthHandler().getBestKnownBlock();
if (bestKnownBlock != null) {
ret = max(bestKnownBlock.getNumber(), ret);
}
}
return ret;
}
public void close() {
try {
logger.info("Shutting down SyncManager");
exec1.shutdown();
exec1.join();
logExecutor.shutdown();
pool.close();
if (syncQueueThread != null) {
syncQueueThread.interrupt();
syncQueueThread.join(10 * 1000);
}
if (config.isFastSyncEnabled()) fastSyncManager.close();
} catch (Exception e) {
logger.warn("Problems closing SyncManager", e);
}
super.close();
}
}
| 17,646
| 36.950538
| 145
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/sync/SyncQueueImpl.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.sync;
import org.ethereum.core.Block;
import org.ethereum.core.BlockHeader;
import org.ethereum.core.BlockHeaderWrapper;
import org.ethereum.core.Blockchain;
import org.ethereum.db.ByteArrayWrapper;
import org.ethereum.util.ByteArrayMap;
import org.ethereum.validator.DependentBlockHeaderRule;
import java.util.*;
import java.util.function.Function;
import static java.lang.Math.min;
import static org.ethereum.sync.BlockDownloader.MAX_IN_REQUEST;
import static org.ethereum.util.ByteUtil.toHexString;
/**
* Created by Anton Nashatyrev on 27.05.2016.
*/
public class SyncQueueImpl implements SyncQueueIfc {
static int MAX_CHAIN_LEN = MAX_IN_REQUEST;
static class HeadersRequestImpl implements HeadersRequest {
public HeadersRequestImpl(long start, int count, boolean reverse) {
this.start = start;
this.count = count;
this.reverse = reverse;
}
public HeadersRequestImpl(byte[] hash, int count, boolean reverse) {
this.hash = hash;
this.count = count;
this.reverse = reverse;
}
public HeadersRequestImpl(byte[] hash, int count, boolean reverse, int step) {
this.hash = hash;
this.count = count;
this.reverse = reverse;
this.step = step;
}
private long start;
private byte[] hash;
private int count;
private boolean reverse;
private int step = 0;
@Override
public List<HeadersRequest> split(int maxCount) {
if (this.hash != null) return Collections.<HeadersRequest>singletonList(this);
List<HeadersRequest> ret = new ArrayList<>();
int remaining = count;
while(remaining > 0) {
int reqSize = min(maxCount, remaining);
ret.add(new HeadersRequestImpl(start, reqSize, reverse));
remaining -= reqSize;
start = reverse ? start - reqSize : start + reqSize;
}
return ret;
}
@Override
public String toString() {
return "HeadersRequest{" +
(hash == null ? "start=" + getStart() : "hash=" + toHexString(hash).substring(0, 8))+
", count=" + getCount() +
", reverse=" + isReverse() +
", step=" + getStep() +
'}';
}
@Override
public long getStart() {
return start;
}
public long getEnd() { return getStart() + getCount(); }
@Override
public byte[] getHash() {
return hash;
}
@Override
public int getCount() {
return count;
}
@Override
public boolean isReverse() {
return reverse;
}
@Override
public int getStep() {
return step;
}
}
static class BlocksRequestImpl implements BlocksRequest {
private List<BlockHeaderWrapper> blockHeaders = new ArrayList<>();
public BlocksRequestImpl() {
}
public BlocksRequestImpl(List<BlockHeaderWrapper> blockHeaders) {
this.blockHeaders = blockHeaders;
}
@Override
public List<BlocksRequest> split(int count) {
List<BlocksRequest> ret = new ArrayList<>();
int start = 0;
while(start < getBlockHeaders().size()) {
count = min(getBlockHeaders().size() - start, count);
ret.add(new BlocksRequestImpl(getBlockHeaders().subList(start, start + count)));
start += count;
}
return ret;
}
@Override
public List<BlockHeaderWrapper> getBlockHeaders() {
return blockHeaders;
}
}
class HeaderElement {
BlockHeaderWrapper header;
Block block;
boolean exported;
public HeaderElement(BlockHeaderWrapper header) {
this.header = header;
}
public HeaderElement getParent() {
Map<ByteArrayWrapper, HeaderElement> genHeaders = headers.get(header.getNumber() - 1);
if (genHeaders == null) return null;
return genHeaders.get(new ByteArrayWrapper(header.getHeader().getParentHash()));
}
public List<HeaderElement> getChildren() {
List<HeaderElement> ret = new ArrayList<>();
Map<ByteArrayWrapper, HeaderElement> childGenHeaders = headers.get(header.getNumber() + 1);
if (childGenHeaders != null) {
for (HeaderElement child : childGenHeaders.values()) {
if (Arrays.equals(child.header.getHeader().getParentHash(), header.getHash())) {
ret.add(child);
}
}
}
return ret;
}
}
Map<Long, Map<ByteArrayWrapper, HeaderElement>> headers = new HashMap<>();
long minNum = Integer.MAX_VALUE;
long maxNum = 0;
long darkZoneNum = 0;
Long endBlockNumber = null;
Random rnd = new Random(); // ;)
DependentBlockHeaderRule parentHeaderValidator = null;
public SyncQueueImpl(List<Block> initBlocks) {
init(initBlocks);
}
public SyncQueueImpl(Blockchain bc) {
Block bestBlock = bc.getBestBlock();
long start = bestBlock.getNumber() - MAX_CHAIN_LEN + 1;
start = start < 0 ? 0 : start;
List<Block> initBlocks = new ArrayList<>();
for (long i = start; i <= bestBlock.getNumber(); i++) {
initBlocks.add(bc.getBlockByNumber(i));
}
init(initBlocks);
}
/**
     * Init with blockchain and download until endBlockNumber (inclusive)
* @param bc Blockchain
* @param endBlockNumber last block to download
*/
public SyncQueueImpl(Blockchain bc, Long endBlockNumber) {
this(bc);
this.endBlockNumber = endBlockNumber;
}
private void init(List<Block> initBlocks) {
if (initBlocks.size() < MAX_CHAIN_LEN && initBlocks.get(0).getNumber() != 0) {
throw new RuntimeException("Queue should be initialized with a chain of at least " + MAX_CHAIN_LEN + " size or with the first genesis block");
}
for (Block block : initBlocks) {
addHeaderPriv(new BlockHeaderWrapper(block.getHeader(), null));
addBlock(block).exported = true;
}
darkZoneNum = initBlocks.get(0).getNumber();
}
private void putGenHeaders(long num, Map<ByteArrayWrapper, HeaderElement> genHeaders) {
minNum = min(minNum, num);
maxNum = Math.max(maxNum, num);
headers.put(num, genHeaders);
}
List<HeaderElement> getLongestChain() {
Map<ByteArrayWrapper, HeaderElement> lastValidatedGen = headers.get(darkZoneNum);
assert lastValidatedGen.size() == 1;
HeaderElement lastHeader = lastValidatedGen.values().iterator().next();
Map<byte[], HeaderElement> chainedParents = new ByteArrayMap<>();
chainedParents.put(lastHeader.header.getHash(), lastHeader);
for(long curNum = darkZoneNum + 1; ; curNum++) {
            // keep track of blocks chained to lastHeader until there are no more children
Map<byte[], HeaderElement> chainedBlocks = new ByteArrayMap<>();
Map<ByteArrayWrapper, HeaderElement> curLevel = headers.get(curNum);
if (curLevel == null) break;
for (HeaderElement element : curLevel.values()) {
if (chainedParents.containsKey(element.header.getHeader().getParentHash())) {
chainedBlocks.put(element.header.getHash(), element);
}
}
if (chainedBlocks.isEmpty()) break;
chainedParents = chainedBlocks;
}
// reconstruct the chain back from the last block in the longest path
List<HeaderElement> ret = new ArrayList<>();
for (HeaderElement el = chainedParents.values().iterator().next(); el != lastHeader.getParent(); el = el.getParent()) {
ret.add(0, el);
}
return ret;
}
private boolean hasGaps() {
List<HeaderElement> longestChain = getLongestChain();
return longestChain.get(longestChain.size() - 1).header.getNumber() < maxNum;
}
private void trimChain() {
List<HeaderElement> longestChain = getLongestChain();
trimChainImpl(longestChain);
}
private void trimChainImpl(List<HeaderElement> longestChain) {
if (longestChain.size() > MAX_CHAIN_LEN) {
long newTrimNum = getLongestChain().get(longestChain.size() - MAX_CHAIN_LEN).header.getNumber();
for (int i = 0; darkZoneNum < newTrimNum; darkZoneNum++, i++) {
ByteArrayWrapper wHash = new ByteArrayWrapper(longestChain.get(i).header.getHash());
putGenHeaders(darkZoneNum, Collections.singletonMap(wHash, longestChain.get(i)));
}
darkZoneNum--;
}
}
private void trimExported() {
for (; minNum < darkZoneNum; minNum++) {
Map<ByteArrayWrapper, HeaderElement> genHeaders = headers.get(minNum);
assert genHeaders.size() == 1;
HeaderElement headerElement = genHeaders.values().iterator().next();
if (headerElement.exported) {
headers.remove(minNum);
} else {
break;
}
}
}
private boolean addHeader(BlockHeaderWrapper header) {
long num = header.getNumber();
if (num <= darkZoneNum || num > maxNum + MAX_CHAIN_LEN * 128) {
// dropping too distant headers
return false;
}
return addHeaderPriv(header);
}
private boolean addHeaderPriv(BlockHeaderWrapper header) {
long num = header.getNumber();
Map<ByteArrayWrapper, HeaderElement> genHeaders = headers.get(num);
if (genHeaders == null) {
genHeaders = new HashMap<>();
putGenHeaders(num, genHeaders);
}
ByteArrayWrapper wHash = new ByteArrayWrapper(header.getHash());
HeaderElement headerElement = genHeaders.get(wHash);
if (headerElement != null) return false;
headerElement = new HeaderElement(header);
genHeaders.put(wHash, headerElement);
return true;
}
@Override
public synchronized List<HeadersRequest> requestHeaders(int maxSize, int maxRequests, int maxTotalHeaders) {
return requestHeadersImpl(maxSize, maxRequests, maxTotalHeaders);
}
private List<HeadersRequest> requestHeadersImpl(int count, int maxRequests, int maxTotHeaderCount) {
List<HeadersRequest> ret = new ArrayList<>();
long startNumber;
if (hasGaps()) {
List<HeaderElement> longestChain = getLongestChain();
startNumber = longestChain.get(longestChain.size() - 1).header.getNumber();
boolean reverse = rnd.nextBoolean();
ret.add(new HeadersRequestImpl(startNumber, MAX_CHAIN_LEN, reverse));
startNumber += reverse ? 1 : MAX_CHAIN_LEN;
// if (maxNum - startNumber > 2000) return ret;
} else {
startNumber = maxNum + 1;
}
while (ret.size() <= maxRequests && getHeadersCount() <= maxTotHeaderCount) {
HeadersRequestImpl nextReq = getNextReq(startNumber, count);
if (nextReq.getEnd() > minNum + maxTotHeaderCount) break;
ret.add(nextReq);
startNumber = nextReq.getEnd();
}
return ret;
}
private HeadersRequestImpl getNextReq(long startFrom, int maxCount) {
while(headers.containsKey(startFrom)) startFrom++;
if (endBlockNumber != null && maxCount > endBlockNumber - startFrom + 1) {
maxCount = (int) (endBlockNumber - startFrom + 1);
}
return new HeadersRequestImpl(startFrom, maxCount, false);
}
@Override
public synchronized List<BlockHeaderWrapper> addHeaders(Collection<BlockHeaderWrapper> headers) {
for (BlockHeaderWrapper header : headers) {
addHeader(header);
}
trimChain();
return null;
}
@Override
public synchronized ValidatedHeaders addHeadersAndValidate(Collection<BlockHeaderWrapper> headers) {
for (BlockHeaderWrapper header : headers) {
addHeader(header);
}
List<HeaderElement> longestChain = getLongestChain();
        // do not run validation if the chain is too short
if (longestChain.size() > MAX_CHAIN_LEN) {
ValidatedHeaders result = validateChain(longestChain);
if (result.isValid()) {
trimChainImpl(longestChain);
} else {
// erase chain starting from first invalid header
eraseChain(longestChain, result.getHeaders().get(0).getNumber());
}
return result;
}
return ValidatedHeaders.Empty;
}
/**
     * Runs parent header validation and returns after the first occurrence of an invalid header
*/
ValidatedHeaders validateChain(List<HeaderElement> chain) {
if (parentHeaderValidator == null)
return ValidatedHeaders.Empty;
for (int i = 1; i < chain.size(); i++) {
BlockHeaderWrapper parent = chain.get(i - 1).header;
BlockHeaderWrapper header = chain.get(i).header;
if (!parentHeaderValidator.validate(header.getHeader(), parent.getHeader())) {
return new ValidatedHeaders(Collections.singletonList(header), false,
parentHeaderValidator.getErrors().isEmpty() ? "" : parentHeaderValidator.getErrors().get(0));
}
}
return ValidatedHeaders.Empty;
}
void eraseChain(List<HeaderElement> chain, long startFrom) {
if (chain.isEmpty())
return;
// prevent from going beyond dark zone
startFrom = Math.max(darkZoneNum + 1, startFrom);
HeaderElement head = chain.get(chain.size() - 1);
for (int i = chain.size() - 1; i >= 0; i--) {
HeaderElement el = chain.get(i);
if (el.header.getNumber() < startFrom) break; // erase up to startFrom number
Map<ByteArrayWrapper, HeaderElement> gen = headers.get(el.header.getNumber());
gen.remove(new ByteArrayWrapper(el.header.getHash()));
// clean empty gens
if (gen.isEmpty()) {
headers.remove(el.header.getNumber());
}
}
// adjust maxNum
if (head.header.getNumber() == maxNum) {
Map<ByteArrayWrapper, HeaderElement> lastValidatedGen = headers.get(darkZoneNum);
assert lastValidatedGen.size() == 1;
long maxNotEmptyGen = lastValidatedGen.values().iterator().next().header.getNumber();
// find new maxNum after chain has been erased
for (long num = head.header.getNumber(); num >= darkZoneNum; num--) {
Map<ByteArrayWrapper, HeaderElement> gen = headers.get(num);
if (gen != null && !gen.isEmpty() && num > maxNotEmptyGen) {
maxNotEmptyGen = num;
break;
}
}
maxNum = maxNotEmptyGen;
}
}
@Override
public synchronized int getHeadersCount() {
return (int) (maxNum - minNum);
}
@Override
public synchronized BlocksRequest requestBlocks(int maxSize) {
BlocksRequest ret = new BlocksRequestImpl();
outer:
for (long i = minNum; i <= maxNum; i++) {
Map<ByteArrayWrapper, HeaderElement> gen = headers.get(i);
if (gen != null) {
for (HeaderElement element : gen.values()) {
if (element.block == null) {
ret.getBlockHeaders().add(element.header);
if (ret.getBlockHeaders().size() >= maxSize) break outer;
}
}
}
}
return ret;
}
HeaderElement findHeaderElement(BlockHeader bh) {
Map<ByteArrayWrapper, HeaderElement> genHeaders = headers.get(bh.getNumber());
if (genHeaders == null) return null;
return genHeaders.get(new ByteArrayWrapper(bh.getHash()));
}
private HeaderElement addBlock(Block block) {
HeaderElement headerElement = findHeaderElement(block.getHeader());
if (headerElement != null) {
headerElement.block = block;
}
return headerElement;
}
@Override
public synchronized List<Block> addBlocks(Collection<Block> blocks) {
for (Block block : blocks) {
addBlock(block);
}
return exportBlocks();
}
private List<Block> exportBlocks() {
List<Block> ret = new ArrayList<>();
for (long i = minNum; i <= maxNum; i++) {
Map<ByteArrayWrapper, HeaderElement> gen = headers.get(i);
if (gen == null) break;
boolean hasAny = false;
for (HeaderElement element : gen.values()) {
HeaderElement parent = element.getParent();
if (element.block != null && (i == minNum || parent != null && parent.exported)) {
if (!element.exported) {
exportNewBlock(element.block);
ret.add(element.block);
element.exported = true;
}
hasAny = true;
}
}
if (!hasAny) break;
}
trimExported();
return ret;
}
protected void exportNewBlock(Block block) {
}
public synchronized List<Block> pollBlocks() {
return null;
}
public SyncQueueImpl withParentHeaderValidator(DependentBlockHeaderRule validator) {
this.parentHeaderValidator = validator;
return this;
}
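    /*
     * Illustrative construction sketch, not part of the original source; "blockchain" and
     * "parentValidator" are hypothetical names for a Blockchain instance and a configured
     * DependentBlockHeaderRule.
     *
     *   SyncQueueIfc queue = new SyncQueueImpl(blockchain)
     *           .withParentHeaderValidator(parentValidator);
     *
     * With a validator attached, addHeadersAndValidate() checks each header of the longest
     * chain against its parent before the chain is trimmed, and erases the chain from the
     * first invalid header when validation fails.
     */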
interface Visitor<T> {
T visit(HeaderElement el, List<T> childrenRes);
}
class ChildVisitor<T> {
private Visitor<T> handler;
boolean downUp = true;
public ChildVisitor(Function<HeaderElement, List<T>> handler) {
// this.handler = handler;
}
public T traverse(HeaderElement el) {
List<T> childrenRet = new ArrayList<>();
for (HeaderElement child : el.getChildren()) {
T res = traverse(child);
childrenRet.add(res);
}
return handler.visit(el, childrenRet);
}
}
}
| 19,500
| 34.073741
| 154
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/sync/SyncQueueIfc.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.sync;
import org.ethereum.core.Block;
import org.ethereum.core.BlockHeader;
import org.ethereum.core.BlockHeaderWrapper;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
/**
* Created by Anton Nashatyrev on 27.05.2016.
*/
public interface SyncQueueIfc {
/**
* Wanted headers
*/
interface HeadersRequest {
long getStart();
byte[] getHash();
int getCount();
boolean isReverse();
List<HeadersRequest> split(int maxCount);
int getStep();
}
/**
* Wanted blocks
*/
interface BlocksRequest {
List<BlocksRequest> split(int count);
List<BlockHeaderWrapper> getBlockHeaders();
}
/**
* Handles result of {@link #addHeadersAndValidate(Collection)} invocation.
*
* <p>
* If {@code valid} is true then validation passed successfully
* and {@code headers} list contains the same result as if {@link #addHeaders(Collection)} was called.
* Otherwise, the list contains invalid headers.
*/
class ValidatedHeaders {
public static final ValidatedHeaders Empty = new ValidatedHeaders(Collections.emptyList(), true);
private final List<BlockHeaderWrapper> headers;
private final boolean valid;
private final String reason;
public ValidatedHeaders(List<BlockHeaderWrapper> headers, boolean valid, String reason) {
this.headers = headers;
this.valid = valid;
this.reason = reason;
}
public ValidatedHeaders(List<BlockHeaderWrapper> headers, boolean valid) {
this(headers, valid, "");
}
public boolean isValid() {
return valid;
}
@Nonnull
public List<BlockHeaderWrapper> getHeaders() {
return headers;
}
public String getReason() {
return reason;
}
@Nullable
public byte[] getNodeId() {
if (headers.isEmpty()) return null;
return headers.get(0).getNodeId();
}
@Nullable
public BlockHeader getHeader() {
if (headers.isEmpty()) return null;
return headers.get(0).getHeader();
}
}
/**
* Returns wanted headers requests
     * @param maxSize Maximum number of headers in a single request
* @param maxRequests Maximum number of requests
* @param maxTotalHeaders The total maximum of cached headers in the implementation
     * @return null if the end of headers is reached (e.g. when download is limited to a block number),
     * an empty list if there are no headers to request for now (e.g. the max allowed number of cached headers is reached)
*/
List<HeadersRequest> requestHeaders(int maxSize, int maxRequests, int maxTotalHeaders);
/**
* Adds received headers.
     * Headers themselves need to be verified beforehand (except for the parent hash).
     * The list can be in any order and need not correspond to a prior headers request
     * @return If this is a 'header-only' SyncQueue then the next chain of headers
     * is popped from the SyncQueue and returned.
     * The reverse implementation should return headers in reverse order (N, N-1, ...)
     * If this instance is for headers+blocks downloading then null is returned
*/
List<BlockHeaderWrapper> addHeaders(Collection<BlockHeaderWrapper> headers);
/**
     * In general, does the same work as {@link #addHeaders(Collection)}.
     * But before trimming, the longest chain is checked with the parent header validator.
     * If validation fails, the chain is erased from the queue.
     *
     * <p>
     * <b>Note:</b> the reverse queue falls back to an {@link #addHeaders(Collection)} invocation
*
* @return check {@link ValidatedHeaders} for details
*/
ValidatedHeaders addHeadersAndValidate(Collection<BlockHeaderWrapper> headers);
/**
* Returns wanted blocks hashes
*/
BlocksRequest requestBlocks(int maxSize);
/**
* Adds new received blocks to the queue
     * The blocks need to be verified but can be passed in any order and need not correspond
     * to a prior returned block request (see the usage sketch after this interface)
* @return blocks ready to be imported in the valid import order.
*/
List<Block> addBlocks(Collection<Block> blocks);
/**
     * Returns the approximate count of headers waiting for their blocks
*/
int getHeadersCount();
}
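/*
 * Illustrative (hypothetical) download-loop sketch showing how the methods above are meant
 * to interact; "queue", "peer", the request sizes, and the fetch/wrap helpers are assumptions,
 * and error handling is omitted. Not part of the original source.
 *
 *   for (SyncQueueIfc.HeadersRequest req : queue.requestHeaders(192, 32, 10_000)) {
 *       List<BlockHeaderWrapper> received = fetchAndWrapHeaders(peer, req); // network call (assumed helper)
 *       queue.addHeaders(received);
 *   }
 *   SyncQueueIfc.BlocksRequest blockReq = queue.requestBlocks(1024);
 *   List<Block> ready = queue.addBlocks(fetchBodies(peer, blockReq));      // ready for import, in order
 */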
| 5,317
| 31.426829
| 110
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/sync/BlockBodiesDownloader.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.sync;
import org.ethereum.config.SystemProperties;
import org.ethereum.core.*;
import org.ethereum.crypto.HashUtil;
import org.ethereum.db.DbFlushManager;
import org.ethereum.db.HeaderStore;
import org.ethereum.db.IndexedBlockStore;
import org.ethereum.net.server.Channel;
import org.ethereum.util.FastByteComparisons;
import org.ethereum.validator.BlockHeaderValidator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* Created by Anton Nashatyrev on 27.10.2016.
*/
@Component
@Scope("prototype")
public class BlockBodiesDownloader extends BlockDownloader {
private final static Logger logger = LoggerFactory.getLogger("sync");
public final static byte[] EMPTY_BODY = new byte[] {-62, -64, -64};
@Autowired
SyncPool syncPool;
@Autowired
IndexedBlockStore blockStore;
@Autowired
HeaderStore headerStore;
@Autowired
DbFlushManager dbFlushManager;
long t;
SyncQueueIfc syncQueue;
int curBlockIdx = 1;
BigInteger curTotalDiff;
Thread headersThread;
int downloadCnt = 0;
private long blockBytesLimit = 32 * 1024 * 1024;
@Autowired
public BlockBodiesDownloader(final SystemProperties config, BlockHeaderValidator headerValidator) {
super(headerValidator);
blockBytesLimit = config.blockQueueSize();
}
public void startImporting() {
Block genesis = blockStore.getChainBlockByNumber(0);
syncQueue = new SyncQueueImpl(Collections.singletonList(genesis));
curTotalDiff = genesis.getDifficultyBI();
headersThread = new Thread(this::headerLoop, "FastsyncHeadersFetchThread");
headersThread.start();
setHeadersDownload(false);
init(syncQueue, syncPool, "BlockBodiesDownloader");
}
private void headerLoop() {
while (curBlockIdx < headerStore.size() && !Thread.currentThread().isInterrupted()) {
List<BlockHeaderWrapper> wrappers = new ArrayList<>();
List<BlockHeader> emptyBodyHeaders = new ArrayList<>();
for (int i = 0; i < getMaxHeadersInQueue() - syncQueue.getHeadersCount() && curBlockIdx < headerStore.size(); i++) {
BlockHeader header = headerStore.getHeaderByNumber(curBlockIdx);
++curBlockIdx;
wrappers.add(new BlockHeaderWrapper(header, new byte[0]));
                // Skip body download for blocks with an empty body
boolean emptyBody = FastByteComparisons.equal(header.getTxTrieRoot(), HashUtil.EMPTY_TRIE_HASH);
emptyBody &= FastByteComparisons.equal(header.getUnclesHash(), HashUtil.EMPTY_LIST_HASH);
if (emptyBody) emptyBodyHeaders.add(header);
}
synchronized (this) {
syncQueue.addHeaders(wrappers);
if (!emptyBodyHeaders.isEmpty()) {
addEmptyBodyBlocks(emptyBodyHeaders);
}
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
break;
}
}
headersDownloadComplete = true;
}
private void addEmptyBodyBlocks(List<BlockHeader> blockHeaders) {
logger.debug("Adding {} empty body blocks to sync queue: {} ... {}", blockHeaders.size(),
blockHeaders.get(0).getShortDescr(), blockHeaders.get(blockHeaders.size() - 1).getShortDescr());
List<Block> finishedBlocks = new ArrayList<>();
for (BlockHeader header : blockHeaders) {
Block block = new Block.Builder()
.withHeader(header)
.withBody(EMPTY_BODY)
.create();
finishedBlocks.add(block);
}
List<Block> startTrimmedBlocks = syncQueue.addBlocks(finishedBlocks);
List<BlockWrapper> trimmedBlockWrappers = new ArrayList<>();
for (Block b : startTrimmedBlocks) {
trimmedBlockWrappers.add(new BlockWrapper(b, null));
}
pushBlocks(trimmedBlockWrappers);
}
@Override
protected void pushBlocks(List<BlockWrapper> blockWrappers) {
if (!blockWrappers.isEmpty()) {
for (BlockWrapper blockWrapper : blockWrappers) {
curTotalDiff = curTotalDiff.add(blockWrapper.getBlock().getDifficultyBI());
blockStore.saveBlock(blockWrapper.getBlock(), curTotalDiff, true);
downloadCnt++;
}
dbFlushManager.commit();
estimateBlockSize(blockWrappers);
logger.debug("{}: header queue size {} (~{}mb)", name, syncQueue.getHeadersCount(),
syncQueue.getHeadersCount() * getEstimatedBlockSize() / 1024 / 1024);
long c = System.currentTimeMillis();
if (c - t > 5000) {
t = c;
logger.info("FastSync: downloaded blocks. Last: " + blockWrappers.get(blockWrappers.size() - 1).getBlock().getShortDescr());
}
}
}
/**
     * Download could block chain synchronization by occupying all peers.
     * Prevents this by leaving one peer without work.
     * Falls back to any peer when only a few active peers are available.
*/
@Override
Channel getAnyPeer() {
return syncPool.getActivePeersCount() > 2 ? syncPool.getNotLastIdle() : syncPool.getAnyIdle();
}
@Override
protected void pushHeaders(List<BlockHeaderWrapper> headers) {}
@Override
protected int getBlockQueueFreeSize() {
return Integer.MAX_VALUE;
}
@Override
protected int getMaxHeadersInQueue() {
if (getEstimatedBlockSize() == 0) {
return getHeaderQueueLimit();
}
int slotsLeft = Math.max((int) (blockBytesLimit / getEstimatedBlockSize()), MAX_IN_REQUEST);
return Math.min(slotsLeft + MAX_IN_REQUEST, getHeaderQueueLimit());
}
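    /*
     * Worked example with illustrative numbers (not from the source): with the default
     * 32 MiB blockBytesLimit and an estimated block size of ~20 KiB,
     * slotsLeft = max(33_554_432 / 20_480, 192) = 1638, so the header queue is capped at
     * min(1638 + 192, getHeaderQueueLimit()) = 1830 headers.
     */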
public int getDownloadedCount() {
return downloadCnt;
}
@Override
public void stop() {
headersThread.interrupt();
super.stop();
}
@Override
protected void finishDownload() {
stop();
}
}
| 7,205
| 33.151659
| 140
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/sync/FastSyncDownloader.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.sync;
import org.ethereum.config.SystemProperties;
import org.ethereum.core.BlockHeader;
import org.ethereum.core.BlockHeaderWrapper;
import org.ethereum.core.BlockWrapper;
import org.ethereum.db.IndexedBlockStore;
import org.ethereum.validator.BlockHeaderValidator;
import org.ethereum.validator.EthashRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;
import java.math.BigInteger;
import java.util.List;
/**
* Created by Anton Nashatyrev on 27.10.2016.
*/
@Component
@Scope("prototype")
public class FastSyncDownloader extends BlockDownloader {
private final static Logger logger = LoggerFactory.getLogger("sync");
@Autowired
SyncPool syncPool;
@Autowired
IndexedBlockStore blockStore;
private SyncQueueReverseImpl syncQueueReverse;
private EthashRule reverseEthashRule;
int counter;
int maxCount;
long t;
@Autowired
public FastSyncDownloader(BlockHeaderValidator headerValidator, SystemProperties systemProperties) {
super(headerValidator);
reverseEthashRule = EthashRule.createReverse(systemProperties);
}
public void startImporting(BlockHeader start, int count) {
this.maxCount = count <= 0 ? Integer.MAX_VALUE : count;
setHeaderQueueLimit(maxCount);
setBlockQueueLimit(maxCount);
syncQueueReverse = new SyncQueueReverseImpl(start.getHash(), start.getNumber() - count);
init(syncQueueReverse, syncPool, "FastSync");
}
@Override
protected void pushBlocks(List<BlockWrapper> blockWrappers) {
if (!blockWrappers.isEmpty()) {
for (BlockWrapper blockWrapper : blockWrappers) {
blockStore.saveBlock(blockWrapper.getBlock(), BigInteger.ZERO, true);
counter++;
if (counter >= maxCount) {
logger.info("FastSync: All requested " + counter + " blocks are downloaded. (last " +
blockWrapper.getBlock().getShortDescr() + ")");
stop();
break;
}
}
long c = System.currentTimeMillis();
if (c - t > 5000) {
t = c;
logger.info("FastSync: downloaded " + counter + " blocks so far. Last: " +
blockWrappers.get(blockWrappers.size() - 1).getBlock().getShortDescr());
blockStore.flush();
}
}
}
@Override
protected void pushHeaders(List<BlockHeaderWrapper> headers) {}
@Override
protected int getBlockQueueFreeSize() {
return Math.max(maxCount - counter, MAX_IN_REQUEST);
}
@Override
protected int getMaxHeadersInQueue() {
return Math.max(maxCount - syncQueueReverse.getValidatedHeadersCount(), 0);
}
// TODO: receipts loading here
public int getDownloadedBlocksCount() {
return counter;
}
@Override
protected void finishDownload() {
blockStore.flush();
}
@Override
protected boolean isValid(BlockHeader header) {
return super.isValid(header) && reverseEthashRule.validateAndLog(header, logger);
}
}
| 4,128
| 31.511811
| 105
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/sync/ReceiptsDownloader.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.sync;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import org.apache.commons.collections4.queue.CircularFifoQueue;
import org.ethereum.config.SystemProperties;
import org.ethereum.core.*;
import org.ethereum.crypto.HashUtil;
import org.ethereum.db.ByteArrayWrapper;
import org.ethereum.db.DbFlushManager;
import org.ethereum.db.IndexedBlockStore;
import org.ethereum.db.TransactionStore;
import org.ethereum.net.eth.handler.Eth63;
import org.ethereum.net.server.Channel;
import org.ethereum.util.FastByteComparisons;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import static java.lang.Math.max;
import static java.lang.Math.min;
/**
* Created by Anton Nashatyrev on 27.10.2016.
*/
@Component
@Scope("prototype")
public class ReceiptsDownloader {
private final static Logger logger = LoggerFactory.getLogger("sync");
private static final long REQUEST_TIMEOUT = 5 * 1000;
private static final int MAX_IN_REQUEST = 100;
private static final int MIN_IN_REQUEST = 10;
private int requestLimit = 2000;
@Autowired
SyncPool syncPool;
@Autowired
IndexedBlockStore blockStore;
@Autowired
DbFlushManager dbFlushManager;
@Autowired
TransactionStore txStore;
long fromBlock, toBlock;
LinkedHashMap<ByteArrayWrapper, QueuedBlock> queuedBlocks = new LinkedHashMap<>();
AtomicInteger blocksInMem = new AtomicInteger(0);
long t;
int cnt;
Thread retrieveThread;
private CountDownLatch stopLatch = new CountDownLatch(1);
private long blockBytesLimit = 32 * 1024 * 1024;
private long estimatedBlockSize = 0;
private final CircularFifoQueue<Long> lastBlockSizes = new CircularFifoQueue<>(requestLimit);
public ReceiptsDownloader(long fromBlock, long toBlock) {
this.fromBlock = fromBlock;
this.toBlock = toBlock;
}
public void startImporting() {
retrieveThread = new Thread(this::retrieveLoop, "FastsyncReceiptsFetchThread");
retrieveThread.start();
}
private synchronized List<byte[]> getHashesForRequest(int maxSize) {
List<byte[]> ret = new ArrayList<>();
for (; fromBlock < toBlock && maxSize > 0; fromBlock++) {
BlockHeader header = blockStore.getChainBlockByNumber(fromBlock).getHeader();
// Skipping download for blocks with no transactions
if (FastByteComparisons.equal(header.getReceiptsRoot(), HashUtil.EMPTY_TRIE_HASH)) {
finalizeBlock();
continue;
}
ret.add(header.getHash());
maxSize--;
}
return ret;
}
private synchronized void processQueue() {
Iterator<QueuedBlock> it = queuedBlocks.values().iterator();
while (it.hasNext()) {
QueuedBlock queuedBlock = it.next();
List<TransactionReceipt> receipts = queuedBlock.receipts;
if (receipts != null) {
Block block = blockStore.getBlockByHash(queuedBlock.hash);
if (validate(block, receipts)) {
for (int i = 0; i < queuedBlock.receipts.size(); i++) {
TransactionReceipt receipt = receipts.get(i);
TransactionInfo txInfo = new TransactionInfo(receipt, block.getHash(), i);
txInfo.setTransaction(block.getTransactionsList().get(i));
txStore.put(txInfo);
}
estimateBlockSize(receipts, block.getNumber());
it.remove();
blocksInMem.decrementAndGet();
finalizeBlock();
} else {
queuedBlock.reset();
}
}
}
}
private synchronized void processDownloaded(byte[] blockHash, List<TransactionReceipt> receipts) {
QueuedBlock block = queuedBlocks.get(new ByteArrayWrapper(blockHash));
if (block != null) {
block.receipts = receipts;
}
}
private void finalizeBlock() {
synchronized (this) {
if (fromBlock >= toBlock && queuedBlocks.isEmpty())
finishDownload();
cnt++;
if (cnt % 1000 == 0) logger.info("FastSync: downloaded receipts for " + cnt + " blocks.");
}
dbFlushManager.commit();
}
private boolean validate(Block block, List<TransactionReceipt> receipts) {
byte[] receiptsRoot = BlockchainImpl.calcReceiptsTrie(receipts);
return FastByteComparisons.equal(receiptsRoot, block.getReceiptsRoot());
}
private void retrieveLoop() {
List<List<byte[]>> toDownload = Collections.emptyList();
long t = 0;
while (!Thread.currentThread().isInterrupted()) {
try {
if (toDownload.isEmpty()) {
if (fillBlockQueue() > 0 || System.currentTimeMillis() - t > REQUEST_TIMEOUT) {
toDownload = getToDownload();
t = System.currentTimeMillis();
}
}
Channel idle = getAnyPeer();
if (idle != null && !toDownload.isEmpty()) {
List<byte[]> list = toDownload.remove(0);
ListenableFuture<List<List<TransactionReceipt>>> future =
((Eth63) idle.getEthHandler()).requestReceipts(list);
if (future != null) {
Futures.addCallback(future, new FutureCallback<List<List<TransactionReceipt>>>() {
@Override
public void onSuccess(List<List<TransactionReceipt>> result) {
for (int i = 0; i < result.size(); i++) {
processDownloaded(list.get(i), result.get(i));
}
processQueue();
}
@Override
public void onFailure(Throwable t) {}
});
}
} else {
try {
Thread.sleep(200);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
} catch (Exception e) {
logger.warn("Unexpected during receipts downloading", e);
}
}
}
private List<List<byte[]>> getToDownload() {
List<List<byte[]>> ret = new ArrayList<>();
int reqSize = getRequestSize();
synchronized (this) {
List<byte[]> req = new ArrayList<>();
for (QueuedBlock b : queuedBlocks.values()) {
if (!b.hasResponse()) {
req.add(b.hash);
if (req.size() >= reqSize) {
ret.add(req);
req = new ArrayList<>();
}
}
}
if (!req.isEmpty()) {
ret.add(req);
}
}
logger.debug("ReceiptsDownloader: queue broke down to {} requests, {} blocks in each", ret.size(), reqSize);
return ret;
}
private int getRequestSize() {
int reqCnt = max(syncPool.getActivePeersCount() * 3 / 4, 1);
int optimalReqSz = queuedBlocks.size() / reqCnt;
if (optimalReqSz <= MIN_IN_REQUEST) {
return MIN_IN_REQUEST;
} else if (optimalReqSz >= MAX_IN_REQUEST) {
return MAX_IN_REQUEST;
} else {
return optimalReqSz;
}
}
private int fillBlockQueue() {
int blocksToAdd = getTargetBlocksInMem() - blocksInMem.get();
if (blocksToAdd < MAX_IN_REQUEST)
return 0;
List<byte[]> blockHashes = getHashesForRequest(blocksToAdd);
synchronized (this) {
blockHashes.forEach(hash -> queuedBlocks.put(new ByteArrayWrapper(hash), new QueuedBlock(hash)));
}
blocksInMem.addAndGet(blockHashes.size());
logger.debug("ReceiptsDownloader: blocks added {}, in queue {}, in memory {} (~{}mb)",
blockHashes.size(), queuedBlocks.size(), blocksInMem.get(),
blocksInMem.get() * estimatedBlockSize / 1024 / 1024);
return blockHashes.size();
}
private int getTargetBlocksInMem() {
if (estimatedBlockSize == 0) {
return requestLimit;
}
int slotsInMem = max((int) (blockBytesLimit / estimatedBlockSize), MAX_IN_REQUEST);
return min(slotsInMem, requestLimit);
}
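    /*
     * Worked example with illustrative numbers (not from the source): with 8 active peers,
     * getRequestSize() computes reqCnt = max(8 * 3 / 4, 1) = 6, so 1200 queued blocks give an
     * optimal request size of 200, clipped to MAX_IN_REQUEST = 100. With an estimated block
     * size of ~16 KiB, getTargetBlocksInMem() = min(max(33_554_432 / 16_384, 100), 2000) = 2000.
     */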
/**
     * Download could block chain synchronization by occupying all peers.
     * Prevents this by leaving one peer without work.
     * Falls back to any peer when only a few active peers are available.
*/
Channel getAnyPeer() {
return syncPool.getActivePeersCount() > 2 ? syncPool.getNotLastIdle() : syncPool.getAnyIdle();
}
public int getDownloadedBlocksCount() {
return cnt;
}
public void stop() {
retrieveThread.interrupt();
stopLatch.countDown();
}
public void waitForStop() {
try {
stopLatch.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
protected void finishDownload() {
stop();
}
private void estimateBlockSize(List<TransactionReceipt> receipts, long number) {
if (receipts.isEmpty())
return;
long blockSize = receipts.stream().mapToLong(TransactionReceipt::estimateMemSize).sum();
synchronized (lastBlockSizes) {
lastBlockSizes.add(blockSize);
estimatedBlockSize = lastBlockSizes.stream().mapToLong(Long::longValue).sum() / lastBlockSizes.size();
}
if (number % 1000 == 0)
logger.debug("ReceiptsDownloader: estimated block size: {}", estimatedBlockSize);
}
@Autowired
public void setSystemProperties(final SystemProperties config) {
this.blockBytesLimit = config.blockQueueSize();
}
private static class QueuedBlock {
byte[] hash;
List<TransactionReceipt> receipts;
public QueuedBlock(byte[] hash) {
this.hash = hash;
}
public boolean hasResponse() {
return receipts != null;
}
public void reset() {
receipts = null;
}
}
}
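/*
 * Illustrative (hypothetical) usage sketch, not part of the original source: the receipts
 * downloader is a prototype-scoped bean, so "ctx" (a Spring ApplicationContext) and the block
 * range are assumptions for illustration.
 *
 *   ReceiptsDownloader receipts = ctx.getBean(ReceiptsDownloader.class, 1L, bestBlockNumber + 1);
 *   receipts.startImporting();
 *   receipts.waitForStop();   // blocks until receipts for [fromBlock, toBlock) are stored
 */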
| 11,715
| 33.765579
| 116
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/sync/BlockDownloader.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.sync;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import org.apache.commons.collections4.queue.CircularFifoQueue;
import org.ethereum.core.*;
import org.ethereum.crypto.HashUtil;
import org.ethereum.net.server.Channel;
import org.ethereum.validator.BlockHeaderValidator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.spongycastle.util.encoders.Hex;
import java.util.*;
import java.util.concurrent.*;
import static java.lang.Math.max;
import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.ethereum.util.ByteUtil.toHexString;
/**
* Created by Anton Nashatyrev on 27.10.2016.
*/
public abstract class BlockDownloader {
private final static Logger logger = LoggerFactory.getLogger("sync");
private int blockQueueLimit = 2000;
private int headerQueueLimit = 10000;
// Max number of Blocks / Headers in one request
public static int MAX_IN_REQUEST = 192;
private static int REQUESTS = 32;
private BlockHeaderValidator headerValidator;
private SyncPool pool;
private SyncQueueIfc syncQueue;
private boolean headersDownload = true;
private boolean blockBodiesDownload = true;
private CountDownLatch receivedHeadersLatch = new CountDownLatch(0);
private CountDownLatch receivedBlocksLatch = new CountDownLatch(0);
private Thread getHeadersThread;
private Thread getBodiesThread;
protected boolean headersDownloadComplete;
private boolean downloadComplete;
private CountDownLatch stopLatch = new CountDownLatch(1);
protected String name = "BlockDownloader";
private long estimatedBlockSize = 0;
private final CircularFifoQueue<Long> lastBlockSizes = new CircularFifoQueue<>(10 * MAX_IN_REQUEST);
public BlockDownloader(BlockHeaderValidator headerValidator) {
this.headerValidator = headerValidator;
}
protected abstract void pushBlocks(List<BlockWrapper> blockWrappers);
protected abstract void pushHeaders(List<BlockHeaderWrapper> headers);
protected abstract int getBlockQueueFreeSize();
protected abstract int getMaxHeadersInQueue();
protected void finishDownload() {}
public boolean isDownloadComplete() {
return downloadComplete;
}
public void setBlockBodiesDownload(boolean blockBodiesDownload) {
this.blockBodiesDownload = blockBodiesDownload;
}
public void setHeadersDownload(boolean headersDownload) {
this.headersDownload = headersDownload;
}
public void init(SyncQueueIfc syncQueue, final SyncPool pool, String name) {
this.syncQueue = syncQueue;
this.pool = pool;
this.name = name;
logger.info("{}: Initializing BlockDownloader.", name);
if (headersDownload) {
getHeadersThread = new Thread(this::headerRetrieveLoop, "SyncThreadHeaders");
getHeadersThread.start();
}
if (blockBodiesDownload) {
getBodiesThread = new Thread(this::blockRetrieveLoop, "SyncThreadBlocks");
getBodiesThread.start();
}
}
public void stop() {
if (getHeadersThread != null) getHeadersThread.interrupt();
if (getBodiesThread != null) getBodiesThread.interrupt();
stopLatch.countDown();
}
public void waitForStop() {
try {
stopLatch.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
public void setHeaderQueueLimit(int headerQueueLimit) {
this.headerQueueLimit = headerQueueLimit;
}
public int getBlockQueueLimit() {
return blockQueueLimit;
}
public int getHeaderQueueLimit() {
return headerQueueLimit;
}
public void setBlockQueueLimit(int blockQueueLimit) {
this.blockQueueLimit = blockQueueLimit;
}
private void headerRetrieveLoop() {
List<SyncQueueIfc.HeadersRequest> hReq = emptyList();
while(!Thread.currentThread().isInterrupted()) {
try {
if (hReq.isEmpty()) {
synchronized (this) {
hReq = syncQueue.requestHeaders(MAX_IN_REQUEST, 128, getMaxHeadersInQueue());
if (hReq == null) {
logger.info("{}: Headers download complete.", name);
headersDownloadComplete = true;
if (!blockBodiesDownload) {
finishDownload();
downloadComplete = true;
}
return;
}
String l = "########## " + name + ": New header requests (" + hReq.size() + "):\n";
for (SyncQueueIfc.HeadersRequest request : hReq) {
l += " " + request + "\n";
}
logger.debug(l);
}
}
int reqHeadersCounter = 0;
for (Iterator<SyncQueueIfc.HeadersRequest> it = hReq.iterator(); it.hasNext();) {
SyncQueueIfc.HeadersRequest headersRequest = it.next();
final Channel any = getAnyPeer();
if (any == null) {
logger.debug("{} headerRetrieveLoop: No IDLE peers found", name);
break;
} else {
logger.debug("{} headerRetrieveLoop: request headers (" + headersRequest.toString() + ") from " + any.getNode(), name);
ListenableFuture<List<BlockHeader>> futureHeaders = headersRequest.getHash() == null ?
any.getEthHandler().sendGetBlockHeaders(headersRequest.getStart(), headersRequest.getCount(), headersRequest.isReverse()) :
any.getEthHandler().sendGetBlockHeaders(headersRequest.getHash(), headersRequest.getCount(), headersRequest.getStep(), headersRequest.isReverse());
if (futureHeaders != null) {
Futures.addCallback(futureHeaders, new FutureCallback<List<BlockHeader>>() {
@Override
public void onSuccess(List<BlockHeader> result) {
if (!validateAndAddHeaders(result, any.getNodeId())) {
onFailure(new RuntimeException("Received headers validation failed"));
}
}
@Override
public void onFailure(Throwable t) {
logger.debug("{}: Error receiving headers. Dropping the peer.", name, t);
any.getEthHandler().dropConnection();
}
}, MoreExecutors.directExecutor());
it.remove();
reqHeadersCounter++;
}
}
}
receivedHeadersLatch = new CountDownLatch(max(reqHeadersCounter / 2, 1));
receivedHeadersLatch.await(isSyncDone() ? 10000 : 500, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
break;
} catch (Exception e) {
logger.error("Unexpected: ", e);
}
}
}
private void blockRetrieveLoop() {
class BlocksCallback implements FutureCallback<List<Block>> {
private Channel peer;
public BlocksCallback(Channel peer) {
this.peer = peer;
}
@Override
public void onSuccess(List<Block> result) {
addBlocks(result, peer.getNodeId());
}
@Override
public void onFailure(Throwable t) {
logger.debug("{}: Error receiving Blocks. Dropping the peer.", name, t);
peer.getEthHandler().dropConnection();
}
}
List<SyncQueueIfc.BlocksRequest> bReqs = emptyList();
while(!Thread.currentThread().isInterrupted()) {
try {
if (bReqs.isEmpty()) {
bReqs = syncQueue.requestBlocks(16 * 1024).split(MAX_IN_REQUEST);
}
if (bReqs.isEmpty() && headersDownloadComplete) {
logger.info("{}: Block download complete.", name);
finishDownload();
downloadComplete = true;
return;
}
int blocksToAsk = getBlockQueueFreeSize();
if (blocksToAsk >= MAX_IN_REQUEST) {
// SyncQueueIfc.BlocksRequest bReq = syncQueue.requestBlocks(maxBlocks);
boolean fewHeadersReqMode = false;
if (bReqs.size() == 1 && bReqs.get(0).getBlockHeaders().size() <= 3) {
                        // new blocks are better requested from the header senders first
                        // to get a better chance of receiving the block bodies promptly
for (BlockHeaderWrapper blockHeaderWrapper : bReqs.get(0).getBlockHeaders()) {
Channel channel = pool.getByNodeId(blockHeaderWrapper.getNodeId());
if (channel != null) {
ListenableFuture<List<Block>> futureBlocks =
channel.getEthHandler().sendGetBlockBodies(singletonList(blockHeaderWrapper));
if (futureBlocks != null) {
Futures.addCallback(futureBlocks, new BlocksCallback(channel),
MoreExecutors.directExecutor());
fewHeadersReqMode = true;
}
}
}
}
int maxRequests = blocksToAsk / MAX_IN_REQUEST;
int maxBlocks = MAX_IN_REQUEST * Math.min(maxRequests, REQUESTS);
int reqBlocksCounter = 0;
int blocksRequested = 0;
Iterator<SyncQueueIfc.BlocksRequest> it = bReqs.iterator();
while (it.hasNext() && blocksRequested < maxBlocks) {
// for (SyncQueueIfc.BlocksRequest blocksRequest : bReq.split(MAX_IN_REQUEST)) {
SyncQueueIfc.BlocksRequest blocksRequest = it.next();
Channel any = getAnyPeer();
if (any == null) {
logger.debug("{} blockRetrieveLoop: No IDLE peers found", name);
break;
} else {
logger.debug("{} blockRetrieveLoop: Requesting " + blocksRequest.getBlockHeaders().size() + " blocks from " + any.getNode(), name);
ListenableFuture<List<Block>> futureBlocks =
any.getEthHandler().sendGetBlockBodies(blocksRequest.getBlockHeaders());
blocksRequested += blocksRequest.getBlockHeaders().size();
if (futureBlocks != null) {
Futures.addCallback(futureBlocks, new BlocksCallback(any),
MoreExecutors.directExecutor());
reqBlocksCounter++;
it.remove();
}
}
}
                    // Case when we have requested a few headers but were not able
                    // to remove the request from the list in the cycle above because
                    // there were no idle peers
if (fewHeadersReqMode && !bReqs.isEmpty()) {
bReqs.clear();
}
receivedBlocksLatch = new CountDownLatch(max(reqBlocksCounter - 2, 1));
receivedBlocksLatch.await(1000, TimeUnit.MILLISECONDS);
} else {
logger.debug("{} blockRetrieveLoop: BlockQueue is full", name);
Thread.sleep(200);
}
} catch (InterruptedException e) {
break;
} catch (Exception e) {
logger.error("Unexpected: ", e);
}
}
}
/**
* Adds a list of blocks to the queue
*
     * @param blocks block list received from a remote peer, to be added to the queue
     * @param nodeId nodeId of the remote peer which these blocks were received from
*/
private void addBlocks(List<Block> blocks, byte[] nodeId) {
if (blocks.isEmpty()) {
return;
}
synchronized (this) {
logger.debug("{}: Adding new " + blocks.size() + " blocks to sync queue: " +
blocks.get(0).getShortDescr() + " ... " + blocks.get(blocks.size() - 1).getShortDescr(), name);
List<Block> newBlocks = syncQueue.addBlocks(blocks);
List<BlockWrapper> wrappers = new ArrayList<>();
for (Block b : newBlocks) {
wrappers.add(new BlockWrapper(b, nodeId));
}
logger.debug("{}: Pushing " + wrappers.size() + " blocks to import queue: " + (wrappers.isEmpty() ? "" :
wrappers.get(0).getBlock().getShortDescr() + " ... " + wrappers.get(wrappers.size() - 1).getBlock().getShortDescr()), name);
pushBlocks(wrappers);
}
receivedBlocksLatch.countDown();
if (logger.isDebugEnabled()) logger.debug(
"{}: Blocks waiting to be proceed: lastBlock.number: [{}]",
name,
blocks.get(blocks.size() - 1).getNumber()
);
}
/**
     * Adds a list of headers received from a remote host <br>
     * Runs header validation before addition <br>
     * It also won't add headers of blocks which are already present in the queue
     *
     * @param headers list of headers received from the remote host
     * @param nodeId remote host nodeId
     *
     * @return true if the headers passed validation and were added to the queue,
* otherwise it returns false
*/
private boolean validateAndAddHeaders(List<BlockHeader> headers, byte[] nodeId) {
if (headers.isEmpty()) return true;
List<BlockHeaderWrapper> wrappers = new ArrayList<>(headers.size());
for (BlockHeader header : headers) {
if (!isValid(header)) {
if (logger.isDebugEnabled()) {
logger.debug("{}: Invalid header RLP: {}", toHexString(header.getEncoded()), name);
}
return false;
}
wrappers.add(new BlockHeaderWrapper(header, nodeId));
}
SyncQueueIfc.ValidatedHeaders res;
synchronized (this) {
res = syncQueue.addHeadersAndValidate(wrappers);
if (res.isValid() && !res.getHeaders().isEmpty()) {
pushHeaders(res.getHeaders());
}
}
dropIfValidationFailed(res);
receivedHeadersLatch.countDown();
logger.debug("{}: {} headers added", name, headers.size());
return true;
}
/**
     * Checks whether validation passed correctly
     * and drops the misleading peer if it didn't
*/
protected void dropIfValidationFailed(SyncQueueIfc.ValidatedHeaders res) {
if (!res.isValid() && res.getNodeId() != null) {
if (logger.isWarnEnabled()) logger.warn("Invalid header received: {}, reason: {}, peer: {}",
res.getHeader() == null ? "" : res.getHeader().getShortDescr(),
res.getReason(),
Hex.toHexString(res.getNodeId()).substring(0, 8));
Channel peer = pool.getByNodeId(res.getNodeId());
if (peer != null) {
peer.dropConnection();
}
}
}
/**
     * Runs checks against the block's header. <br>
     * All these checks make sense before the block is added to the queue,
     * ahead of the checks run by {@link BlockchainImpl#isValid(BlockHeader)}
*
* @param header block header
* @return true if block is valid, false otherwise
*/
protected boolean isValid(BlockHeader header) {
return headerValidator.validateAndLog(header, logger);
}
Channel getAnyPeer() {
return pool.getAnyIdle();
}
public boolean isSyncDone() {
return false;
}
public void close() {
try {
if (pool != null) pool.close();
stop();
} catch (Exception e) {
logger.warn("Problems closing SyncManager", e);
}
}
/**
* Estimates block size in bytes.
* Block memory size can depend on the underlying logic,
     * hence subclasses should call this method on their own,
     * preferably after actions that impact block memory size (like RLP parsing or signature recovery) are done
*/
protected void estimateBlockSize(BlockWrapper blockWrapper) {
synchronized (lastBlockSizes) {
lastBlockSizes.add(blockWrapper.estimateMemSize());
estimatedBlockSize = lastBlockSizes.stream().mapToLong(Long::longValue).sum() / lastBlockSizes.size();
}
logger.debug("{}: estimated block size: {}", name, estimatedBlockSize);
}
protected void estimateBlockSize(Collection<BlockWrapper> blockWrappers) {
if (blockWrappers.isEmpty())
return;
synchronized (lastBlockSizes) {
blockWrappers.forEach(b -> lastBlockSizes.add(b.estimateMemSize()));
estimatedBlockSize = lastBlockSizes.stream().mapToLong(Long::longValue).sum() / lastBlockSizes.size();
}
logger.debug("{}: estimated block size: {}", name, estimatedBlockSize);
}
public long getEstimatedBlockSize() {
return estimatedBlockSize;
}
}
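/*
 * Minimal subclass sketch, not part of the original source; the class name "NoopDownloader"
 * and the unbounded queue sizes are assumptions. It shows which abstract members a concrete
 * downloader must provide before init(...) is called.
 *
 *   class NoopDownloader extends BlockDownloader {
 *       NoopDownloader(BlockHeaderValidator validator) { super(validator); }
 *       protected void pushBlocks(List<BlockWrapper> blockWrappers) { }
 *       protected void pushHeaders(List<BlockHeaderWrapper> headers) { }
 *       protected int getBlockQueueFreeSize() { return Integer.MAX_VALUE; }
 *       protected int getMaxHeadersInQueue() { return getHeaderQueueLimit(); }
 *   }
 */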
| 19,430
| 38.981481
| 183
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/sync/SyncQueueReverseImpl.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.sync;
import org.ethereum.core.Block;
import org.ethereum.core.BlockHeaderWrapper;
import org.ethereum.util.ByteArrayMap;
import org.ethereum.util.FastByteComparisons;
import org.ethereum.util.MinMaxMap;
import java.util.*;
/**
* Created by Anton Nashatyrev on 27.10.2016.
*/
public class SyncQueueReverseImpl implements SyncQueueIfc {
byte[] curHeaderHash;
MinMaxMap<BlockHeaderWrapper> headers = new MinMaxMap<>();
long minValidated = -1;
long finishValidated = 0;
ByteArrayMap<Block> blocks = new ByteArrayMap<>();
boolean headersOnly;
public SyncQueueReverseImpl(byte[] startHash) {
this.curHeaderHash = startHash;
}
public SyncQueueReverseImpl(byte[] startHash, long finishValidated) {
this.curHeaderHash = startHash;
this.finishValidated = finishValidated;
}
public SyncQueueReverseImpl(byte[] startHash, boolean headersOnly) {
this.curHeaderHash = startHash;
this.headersOnly = headersOnly;
}
@Override
public synchronized List<HeadersRequest> requestHeaders(int maxSize, int maxRequests, int maxTotalHeaders) {
List<HeadersRequest> ret = new ArrayList<>();
if (maxTotalHeaders == 0) return ret;
int totalHeaders = 0;
if (minValidated < 0) {
ret.add(new SyncQueueImpl.HeadersRequestImpl(curHeaderHash, maxSize, true, maxSize - 1));
totalHeaders += maxSize;
if (totalHeaders >= maxTotalHeaders) return ret;
} else if (minValidated == finishValidated) {
// genesis reached
return null;
} else {
if (minValidated - headers.getMin() < maxSize * maxSize && minValidated > maxSize) {
ret.add(new SyncQueueImpl.HeadersRequestImpl(
headers.get(headers.getMin()).getHash(), maxSize, true, maxSize - 1));
maxRequests--;
totalHeaders += maxSize;
}
Set<Map.Entry<Long, BlockHeaderWrapper>> entries =
headers.descendingMap().subMap(minValidated, true, headers.getMin(), true).entrySet();
Iterator<Map.Entry<Long, BlockHeaderWrapper>> it = entries.iterator();
BlockHeaderWrapper prevEntry = it.next().getValue();
while(maxRequests > 0 && totalHeaders < maxTotalHeaders && it.hasNext()) {
BlockHeaderWrapper entry = it.next().getValue();
if (prevEntry.getNumber() - entry.getNumber() > 1) {
ret.add(new SyncQueueImpl.HeadersRequestImpl(prevEntry.getHash(), maxSize, true));
totalHeaders += maxSize;
maxRequests--;
}
prevEntry = entry;
}
if (maxRequests > 0 && totalHeaders < maxTotalHeaders) {
ret.add(new SyncQueueImpl.HeadersRequestImpl(prevEntry.getHash(), maxSize, true));
}
}
return ret;
}
@Override
public synchronized List<BlockHeaderWrapper> addHeaders(Collection<BlockHeaderWrapper> newHeaders) {
if (minValidated < 0) {
// need to fetch initial header
for (BlockHeaderWrapper header : newHeaders) {
if (FastByteComparisons.equal(curHeaderHash, header.getHash())) {
minValidated = header.getNumber();
headers.put(header.getNumber(), header);
}
}
}
// start header not found or we are already done
if (minValidated <= finishValidated) return Collections.emptyList();
for (BlockHeaderWrapper header : newHeaders) {
if (header.getNumber() < minValidated) {
headers.put(header.getNumber(), header);
}
}
for (; minValidated >= headers.getMin() && minValidated >= finishValidated; minValidated--) {
BlockHeaderWrapper header = headers.get(minValidated);
BlockHeaderWrapper parent = headers.get(minValidated - 1);
if (parent == null) {
                // Some peers don't return the block #0 header
if (minValidated == 1 && finishValidated == 0) minValidated = 0;
break;
}
if (!FastByteComparisons.equal(header.getHeader().getParentHash(), parent.getHash())) {
// chain is broken here (unlikely) - refetch the rest
headers.clearAllBefore(header.getNumber());
break;
}
}
if (headersOnly) {
List<BlockHeaderWrapper> ret = new ArrayList<>();
for (long i = headers.getMax(); i > minValidated; i--) {
ret.add(headers.remove(i));
}
return ret;
} else {
return null;
}
}
@Override
public synchronized ValidatedHeaders addHeadersAndValidate(Collection<BlockHeaderWrapper> headers) {
List<BlockHeaderWrapper> added = addHeaders(headers);
return new ValidatedHeaders(added, true);
}
@Override
public synchronized BlocksRequest requestBlocks(int maxSize) {
List<BlockHeaderWrapper> reqHeaders = new ArrayList<>();
for (BlockHeaderWrapper header : headers.descendingMap().values()) {
if (maxSize == 0) break;
if (blocks.get(header.getHash()) == null) {
reqHeaders.add(header);
maxSize--;
}
}
return new SyncQueueImpl.BlocksRequestImpl(reqHeaders);
}
@Override
public synchronized List<Block> addBlocks(Collection<Block> newBlocks) {
for (Block block : newBlocks) {
blocks.put(block.getHash(), block);
}
List<Block> ret = new ArrayList<>();
for (long i = headers.getMax(); i > minValidated; i--) {
Block block = blocks.get(headers.get(i).getHash());
if (block == null) break;
ret.add(block);
blocks.remove(headers.get(i).getHash());
headers.remove(i);
}
return ret;
}
@Override
public synchronized int getHeadersCount() {
return headers.size();
}
public synchronized int getValidatedHeadersCount() {
return headers.getMax() == null ? 0 : (int) (headers.getMax() - minValidated + 1);
}
}
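// --- Editor's note: hedged usage sketch, not part of the original source. ---
// A minimal driver loop for the reverse queue above. Only the SyncQueueReverseImpl
// calls are taken from this file; fetchHeaders(), fetchBlocks() and store() are
// hypothetical helpers standing in for real peer/network and persistence code.
//
//   SyncQueueReverseImpl queue = new SyncQueueReverseImpl(pivotBlockHash, false);
//   while (true) {
//       List<SyncQueueIfc.HeadersRequest> reqs = queue.requestHeaders(192, 10, 10_000);
//       if (reqs == null) break;                                     // finish block reached
//       for (SyncQueueIfc.HeadersRequest req : reqs) {
//           queue.addHeaders(fetchHeaders(req));                     // hypothetical network call
//       }
//       SyncQueueIfc.BlocksRequest blocksReq = queue.requestBlocks(128);
//       List<Block> ready = queue.addBlocks(fetchBlocks(blocksReq)); // hypothetical network call
//       store(ready);                                                // hypothetical persistence
//   }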
| 7,149 | 36.830688 | 112 | java |
| ethereumj | ethereumj-master/ethereumj-core/src/main/java/org/ethereum/sync/SyncPool.java |
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.sync;
import org.ethereum.config.SystemProperties;
import org.ethereum.core.Blockchain;
import org.ethereum.listener.EthereumListener;
import org.ethereum.net.message.ReasonCode;
import org.ethereum.net.rlpx.Node;
import org.ethereum.net.rlpx.discover.NodeHandler;
import org.ethereum.net.rlpx.discover.NodeManager;
import org.ethereum.net.server.Channel;
import org.ethereum.net.server.ChannelManager;
import org.ethereum.util.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.spongycastle.util.encoders.Hex;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import javax.annotation.Nullable;
import java.math.BigInteger;
import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static org.ethereum.util.BIUtil.isIn20PercentRange;
import static org.ethereum.util.ByteUtil.toHexString;
/**
 * <p>Encapsulates the logic which manages peers involved in blockchain sync</p>
 *
 * Holds connections, handles bans and disconnects, and other peer-related logic<br>
 * The pool is completely thread-safe<br>
 * Used by {@link SyncManager}
 *
 * @author Mikhail Kalinin
 * @since 10.08.2015
 */
@Component
public class SyncPool {
public static final Logger logger = LoggerFactory.getLogger("sync");
private static final long WORKER_TIMEOUT = 3; // 3 seconds
private final List<Channel> activePeers = Collections.synchronizedList(new ArrayList<Channel>());
private BigInteger lowerUsefulDifficulty = BigInteger.ZERO;
@Autowired
private EthereumListener ethereumListener;
@Autowired
private NodeManager nodeManager;
private ChannelManager channelManager;
private Blockchain blockchain;
private SystemProperties config;
private ScheduledExecutorService poolLoopExecutor = Executors.newSingleThreadScheduledExecutor();
private Predicate<NodeHandler> nodesSelector;
private ScheduledExecutorService logExecutor = Executors.newSingleThreadScheduledExecutor();
@Autowired
public SyncPool(final SystemProperties config) {
this.config = config;
}
public void init(final ChannelManager channelManager, final Blockchain blockchain) {
if (this.channelManager != null) return; // inited already
this.channelManager = channelManager;
this.blockchain = blockchain;
updateLowerUsefulDifficulty();
poolLoopExecutor.scheduleWithFixedDelay(() -> {
try {
heartBeat();
updateLowerUsefulDifficulty();
prepareActive();
fillUp();
cleanupActive();
} catch (Throwable t) {
logger.error("Unhandled exception", t);
}
}, WORKER_TIMEOUT, WORKER_TIMEOUT, TimeUnit.SECONDS);
logExecutor.scheduleWithFixedDelay(() -> {
try {
logActivePeers();
logger.info("\n");
} catch (Throwable t) {
t.printStackTrace();
logger.error("Exception in log worker", t);
}
}, 30, 30, TimeUnit.SECONDS);
}
public void setNodesSelector(Predicate<NodeHandler> nodesSelector) {
this.nodesSelector = nodesSelector;
}
public void close() {
try {
poolLoopExecutor.shutdownNow();
logExecutor.shutdownNow();
} catch (Exception e) {
logger.warn("Problems shutting down executor", e);
}
}
@Nullable
public synchronized Channel getAnyIdle() {
ArrayList<Channel> channels = new ArrayList<>(activePeers);
Collections.shuffle(channels);
for (Channel peer : channels) {
if (peer.isIdle())
return peer;
}
return null;
}
@Nullable
public synchronized Channel getBestIdle() {
for (Channel peer : activePeers) {
if (peer.isIdle())
return peer;
}
return null;
}
@Nullable
public synchronized Channel getNotLastIdle() {
ArrayList<Channel> channels = new ArrayList<>(activePeers);
Collections.shuffle(channels);
Channel candidate = null;
for (Channel peer : channels) {
if (peer.isIdle()) {
if (candidate == null) {
candidate = peer;
} else {
return candidate;
}
}
}
return null;
}
public synchronized List<Channel> getAllIdle() {
List<Channel> ret = new ArrayList<>();
for (Channel peer : activePeers) {
if (peer.isIdle())
ret.add(peer);
}
return ret;
}
public synchronized List<Channel> getActivePeers() {
return new ArrayList<>(activePeers);
}
public synchronized int getActivePeersCount() {
return activePeers.size();
}
@Nullable
public synchronized Channel getByNodeId(byte[] nodeId) {
return channelManager.getActivePeer(nodeId);
}
public synchronized void onDisconnect(Channel peer) {
if (activePeers.remove(peer)) {
logger.info("Peer {}: disconnected", peer.getPeerIdShort());
}
}
public synchronized Set<String> nodesInUse() {
Set<String> ids = new HashSet<>();
for (Channel peer : channelManager.getActivePeers()) {
ids.add(peer.getPeerId());
}
return ids;
}
synchronized void logActivePeers() {
if (logger.isInfoEnabled()) {
StringBuilder sb = new StringBuilder("Peer stats:\n");
sb.append("Active peers\n");
sb.append("============\n");
Set<Node> activeSet = new HashSet<>();
for (Channel peer : new ArrayList<>(activePeers)) {
sb.append(peer.logSyncStats()).append('\n');
activeSet.add(peer.getNode());
}
sb.append("Other connected peers\n");
sb.append("============\n");
for (Channel peer : new ArrayList<>(channelManager.getActivePeers())) {
if (!activeSet.contains(peer.getNode())) {
sb.append(peer.logSyncStats()).append('\n');
}
}
logger.info(sb.toString());
}
}
class NodeSelector implements Predicate<NodeHandler> {
BigInteger lowerDifficulty;
Set<String> nodesInUse;
public NodeSelector(BigInteger lowerDifficulty) {
this.lowerDifficulty = lowerDifficulty;
}
public NodeSelector(BigInteger lowerDifficulty, Set<String> nodesInUse) {
this.lowerDifficulty = lowerDifficulty;
this.nodesInUse = nodesInUse;
}
@Override
public boolean test(NodeHandler handler) {
if (nodesInUse != null && nodesInUse.contains(handler.getNode().getHexId())) {
return false;
}
if (handler.getNodeStatistics().isPredefined()) return true;
if (nodesSelector != null && !nodesSelector.test(handler)) return false;
if (lowerDifficulty.compareTo(BigInteger.ZERO) > 0 &&
handler.getNodeStatistics().getEthTotalDifficulty() == null) {
return false;
}
if (handler.getNodeStatistics().getReputation() < 100) return false;
return handler.getNodeStatistics().getEthTotalDifficulty().compareTo(lowerDifficulty) >= 0;
}
}
private void fillUp() {
int lackSize = config.maxActivePeers() - channelManager.getActivePeers().size();
if(lackSize <= 0) return;
final Set<String> nodesInUse = nodesInUse();
nodesInUse.add(Hex.toHexString(config.nodeId())); // exclude home node
List<NodeHandler> newNodes;
newNodes = nodeManager.getNodes(new NodeSelector(lowerUsefulDifficulty, nodesInUse), lackSize);
if (lackSize > 0 && newNodes.isEmpty()) {
newNodes = nodeManager.getNodes(new NodeSelector(BigInteger.ZERO, nodesInUse), lackSize);
}
if (logger.isTraceEnabled()) {
logDiscoveredNodes(newNodes);
}
for(NodeHandler n : newNodes) {
channelManager.connect(n.getNode());
}
}
private synchronized void prepareActive() {
List<Channel> managerActive = new ArrayList<>(channelManager.getActivePeers());
if (logger.isTraceEnabled())
logger.trace("Preparing active peers from {} channelManager peers", managerActive.size());
// Filtering out with nodeSelector because server-connected nodes were not tested
NodeSelector nodeSelector = new NodeSelector(BigInteger.ZERO);
List<Channel> active = new ArrayList<>();
for (Channel channel : managerActive) {
if (nodeSelector.test(nodeManager.getNodeHandler(channel.getNode()))) {
active.add(channel);
}
}
if (logger.isTraceEnabled())
logger.trace("After filtering out with node selector, {} peers remaining", active.size());
if (active.isEmpty()) return;
// filtering by 20% from top difficulty
active.sort((c1, c2) -> c2.getTotalDifficulty().compareTo(c1.getTotalDifficulty()));
BigInteger highestDifficulty = active.get(0).getTotalDifficulty();
int thresholdIdx = min(config.syncPeerCount(), active.size()) - 1;
for (int i = thresholdIdx; i >= 0; i--) {
if (isIn20PercentRange(active.get(i).getTotalDifficulty(), highestDifficulty)) {
thresholdIdx = i;
break;
}
}
List<Channel> filtered = active.subList(0, thresholdIdx + 1);
// Dropping other peers to free up slots for active
        // Act more aggressively until sync is done
int cap = channelManager.getSyncManager().isSyncDone() ?
// 10 peers are enough for variance in data on short sync
Math.max(config.maxActivePeers() / 2, config.maxActivePeers() - 10) : config.maxActivePeers() / 6;
int otherCount = managerActive.size() - filtered.size();
int killCount = max(0, otherCount - cap);
if (killCount > 0) {
AtomicInteger dropped = new AtomicInteger(0);
for (Channel channel : managerActive) {
if (!filtered.contains(channel)) {
if (channel.isIdle()) {
channelManager.disconnect(channel, ReasonCode.TOO_MANY_PEERS);
if (dropped.incrementAndGet() >= killCount) break;
}
}
}
logger.debug("Dropped {} other peers to free up sync slots", dropped.get());
}
for (Channel channel : filtered) {
if (!activePeers.contains(channel)) {
ethereumListener.onPeerAddedToSyncPool(channel);
}
}
if (logger.isTraceEnabled())
logger.trace("{} peers set to be active in SyncPool", filtered.size());
activePeers.clear();
activePeers.addAll(filtered);
}
private synchronized void cleanupActive() {
Iterator<Channel> iterator = activePeers.iterator();
while (iterator.hasNext()) {
Channel next = iterator.next();
if (next.isDisconnected()) {
logger.info("Removing peer " + next + " from active due to disconnect.");
iterator.remove();
}
}
}
private void logDiscoveredNodes(List<NodeHandler> nodes) {
StringBuilder sb = new StringBuilder();
for(NodeHandler n : nodes) {
sb.append(Utils.getNodeIdShort(toHexString(n.getNode().getId())));
sb.append(", ");
}
if(sb.length() > 0) {
sb.delete(sb.length() - 2, sb.length());
}
logger.trace(
"Node list obtained from discovery: {}",
nodes.size() > 0 ? sb.toString() : "empty"
);
}
private void updateLowerUsefulDifficulty() {
BigInteger td = blockchain.getTotalDifficulty();
if (td.compareTo(lowerUsefulDifficulty) > 0) {
lowerUsefulDifficulty = td;
}
}
public ChannelManager getChannelManager() {
return channelManager;
}
private void heartBeat() {
// for (Channel peer : channelManager.getActivePeers()) {
// if (!peer.isIdle() && peer.getSyncStats().secondsSinceLastUpdate() > config.peerChannelReadTimeout()) {
// logger.info("Peer {}: no response after {} seconds", peer.getPeerIdShort(), config.peerChannelReadTimeout());
// peer.dropConnection();
// }
// }
}
}
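// --- Editor's note: hedged usage sketch, not part of the original source. ---
// How a downloader might borrow an idle peer from the pool. channelManager and
// blockchain are assumed to be the beans wired by the surrounding Spring context;
// sendSomeRequest() is a hypothetical placeholder.
//
//   syncPool.init(channelManager, blockchain);
//   Channel idle = syncPool.getAnyIdle();        // null when no idle peer is available
//   if (idle != null) {
//       sendSomeRequest(idle);                   // hypothetical request via the channel
//   }
//   syncPool.close();                            // on shutdown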
| 13,926 | 33.904762 | 127 | java |
| ethereumj | ethereumj-master/ethereumj-core/src/main/java/org/ethereum/sync/SyncStatistics.java |
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.sync;
/**
* Manages sync measurements
*
* @author Mikhail Kalinin
* @since 20.08.2015
*/
public class SyncStatistics {
private long updatedAt;
private long blocksCount;
private long headersCount;
private int headerBunchesCount;
public SyncStatistics() {
reset();
}
public void reset() {
updatedAt = System.currentTimeMillis();
blocksCount = 0;
headersCount = 0;
headerBunchesCount = 0;
}
public void addBlocks(long cnt) {
blocksCount += cnt;
fixCommon(cnt);
}
public void addHeaders(long cnt) {
headerBunchesCount++;
headersCount += cnt;
fixCommon(cnt);
}
private void fixCommon(long cnt) {
updatedAt = System.currentTimeMillis();
}
public long getBlocksCount() {
return blocksCount;
}
public long getHeadersCount() {
return headersCount;
}
public long secondsSinceLastUpdate() {
return (System.currentTimeMillis() - updatedAt) / 1000;
}
public int getHeaderBunchesCount() {
return headerBunchesCount;
}
}
| 1,940 | 25.22973 | 80 | java |
| ethereumj | ethereumj-master/ethereumj-core/src/main/java/org/ethereum/sync/HeadersDownloader.java |
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.sync;
import org.ethereum.config.SystemProperties;
import org.ethereum.core.BlockHeader;
import org.ethereum.core.BlockHeaderWrapper;
import org.ethereum.core.BlockWrapper;
import org.ethereum.core.Blockchain;
import org.ethereum.db.DbFlushManager;
import org.ethereum.db.HeaderStore;
import org.ethereum.db.IndexedBlockStore;
import org.ethereum.net.server.Channel;
import org.ethereum.net.server.ChannelManager;
import org.ethereum.validator.BlockHeaderValidator;
import org.ethereum.validator.EthashRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Lazy;
import org.springframework.stereotype.Component;
import java.util.List;
import static org.ethereum.util.ByteUtil.toHexString;
/**
* Created by Anton Nashatyrev on 27.10.2016.
*/
@Component
@Lazy
public class HeadersDownloader extends BlockDownloader {
private final static Logger logger = LoggerFactory.getLogger("sync");
@Autowired
SyncPool syncPool;
@Autowired
ChannelManager channelManager;
@Autowired
IndexedBlockStore blockStore;
@Autowired
HeaderStore headerStore;
@Autowired
DbFlushManager dbFlushManager;
@Autowired
Blockchain blockchain;
byte[] genesisHash;
int headersLoaded = 0;
private EthashRule reverseEthashRule;
@Autowired
public HeadersDownloader(BlockHeaderValidator headerValidator, SystemProperties systemProperties) {
super(headerValidator);
reverseEthashRule = EthashRule.createReverse(systemProperties);
setHeaderQueueLimit(200000);
setBlockBodiesDownload(false);
logger.info("HeaderDownloader created.");
}
public void init(byte[] startFromBlockHash) {
logger.info("HeaderDownloader init: startHash = " + toHexString(startFromBlockHash));
SyncQueueReverseImpl syncQueue = new SyncQueueReverseImpl(startFromBlockHash, true);
super.init(syncQueue, syncPool, "HeadersDownloader");
syncPool.init(channelManager, blockchain);
}
@Override
protected synchronized void pushBlocks(List<BlockWrapper> blockWrappers) {}
@Override
protected void pushHeaders(List<BlockHeaderWrapper> headers) {
if (headers.get(headers.size() - 1).getNumber() == 0) {
genesisHash = headers.get(headers.size() - 1).getHash();
}
if (headers.get(headers.size() - 1).getNumber() == 1) {
genesisHash = headers.get(headers.size() - 1).getHeader().getParentHash();
}
logger.info(name + ": " + headers.size() + " headers loaded: " + headers.get(0).getNumber() + " - " + headers.get(headers.size() - 1).getNumber());
for (BlockHeaderWrapper header : headers) {
headerStore.saveHeader(header.getHeader());
headersLoaded++;
}
dbFlushManager.commit();
}
    /**
     * Header download could block chain synchronization by occupying all peers.
     * This is prevented by leaving one peer without work.
     * Falls back to any idle peer when only a few active peers are available.
     */
@Override
Channel getAnyPeer() {
return syncPool.getActivePeersCount() > 2 ? syncPool.getNotLastIdle() : syncPool.getAnyIdle();
}
@Override
protected int getBlockQueueFreeSize() {
return Integer.MAX_VALUE;
}
@Override
protected int getMaxHeadersInQueue() {
return getHeaderQueueLimit();
}
public int getHeadersLoaded() {
return headersLoaded;
}
@Override
protected void finishDownload() {
stop();
}
public byte[] getGenesisHash() {
return genesisHash;
}
@Override
protected boolean isValid(BlockHeader header) {
return super.isValid(header) && reverseEthashRule.validateAndLog(header, logger);
}
}
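// --- Editor's note: hedged usage sketch, not part of the original source. ---
// Reverse header download starting from a known block hash (e.g. a fast-sync pivot).
// The downloader bean is assumed to come from the Spring context; it stops itself
// via finishDownload() once the queue reports the finish block.
//
//   headersDownloader.init(pivotBlockHash);
//   // ... later, after it has stopped ...
//   int loaded = headersDownloader.getHeadersLoaded();
//   byte[] genesis = headersDownloader.getGenesisHash();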
| 4,691 | 30.918367 | 102 | java |
| ethereumj | ethereumj-master/ethereumj-core/src/main/java/org/ethereum/solidity/Abi.java |
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.solidity;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.collections4.Predicate;
import org.ethereum.util.ByteUtil;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static com.fasterxml.jackson.annotation.JsonInclude.Include;
import static java.lang.String.format;
import static org.apache.commons.collections4.ListUtils.select;
import static org.apache.commons.lang3.ArrayUtils.subarray;
import static org.apache.commons.lang3.StringUtils.join;
import static org.apache.commons.lang3.StringUtils.stripEnd;
import static org.ethereum.crypto.HashUtil.sha3;
import static org.ethereum.solidity.SolidityType.IntType.decodeInt;
import static org.ethereum.solidity.SolidityType.IntType.encodeInt;
public class Abi extends ArrayList<Abi.Entry> {
private final static ObjectMapper DEFAULT_MAPPER = new ObjectMapper()
.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES)
.enable(DeserializationFeature.READ_UNKNOWN_ENUM_VALUES_AS_NULL);
public static Abi fromJson(String json) {
try {
return DEFAULT_MAPPER.readValue(json, Abi.class);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public String toJson() {
try {
return new ObjectMapper().writeValueAsString(this);
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
}
}
private <T extends Abi.Entry> T find(Class<T> resultClass, final Abi.Entry.Type type, final Predicate<T> searchPredicate) {
return (T) CollectionUtils.find(this, entry -> entry.type == type && searchPredicate.evaluate((T) entry));
}
public Function findFunction(Predicate<Function> searchPredicate) {
return find(Function.class, Abi.Entry.Type.function, searchPredicate);
}
public Event findEvent(Predicate<Event> searchPredicate) {
return find(Event.class, Abi.Entry.Type.event, searchPredicate);
}
public Abi.Constructor findConstructor() {
return find(Constructor.class, Entry.Type.constructor, object -> true);
}
@Override
public String toString() {
return toJson();
}
@JsonInclude(Include.NON_NULL)
public static abstract class Entry {
public enum Type {
constructor,
function,
event,
fallback
}
@JsonInclude(Include.NON_NULL)
public static class Param {
public Boolean indexed;
public String name;
public SolidityType type;
public static List<?> decodeList(List<Param> params, byte[] encoded) {
List<Object> result = new ArrayList<>(params.size());
int offset = 0;
for (Param param : params) {
Object decoded = param.type.isDynamicType()
? param.type.decode(encoded, decodeInt(encoded, offset).intValue())
: param.type.decode(encoded, offset);
result.add(decoded);
offset += param.type.getFixedSize();
}
return result;
}
@Override
public String toString() {
return format("%s%s%s", type.getCanonicalName(), (indexed != null && indexed) ? " indexed " : " ", name);
}
}
public final Boolean anonymous;
public final Boolean constant;
public final String name;
public final List<Param> inputs;
public final List<Param> outputs;
public final Type type;
public final Boolean payable;
public Entry(Boolean anonymous, Boolean constant, String name, List<Param> inputs, List<Param> outputs, Type type, Boolean payable) {
this.anonymous = anonymous;
this.constant = constant;
this.name = name;
this.inputs = inputs;
this.outputs = outputs;
this.type = type;
this.payable = payable;
}
public String formatSignature() {
StringBuilder paramsTypes = new StringBuilder();
for (Entry.Param param : inputs) {
paramsTypes.append(param.type.getCanonicalName()).append(",");
}
return format("%s(%s)", name, stripEnd(paramsTypes.toString(), ","));
}
public byte[] fingerprintSignature() {
return sha3(formatSignature().getBytes());
}
public byte[] encodeSignature() {
return fingerprintSignature();
}
@JsonCreator
public static Entry create(@JsonProperty("anonymous") boolean anonymous,
@JsonProperty("constant") boolean constant,
@JsonProperty("name") String name,
@JsonProperty("inputs") List<Param> inputs,
@JsonProperty("outputs") List<Param> outputs,
@JsonProperty("type") Type type,
@JsonProperty(value = "payable", required = false, defaultValue = "false") Boolean payable) {
Entry result = null;
switch (type) {
case constructor:
result = new Constructor(inputs, outputs);
break;
case function:
case fallback:
result = new Function(constant, name, inputs, outputs, payable);
break;
case event:
result = new Event(anonymous, name, inputs, outputs);
break;
}
return result;
}
}
public static class Constructor extends Entry {
public Constructor(List<Param> inputs, List<Param> outputs) {
super(null, null, "", inputs, outputs, Type.constructor, false);
}
public List<?> decode(byte[] encoded) {
return Param.decodeList(inputs, encoded);
}
public String formatSignature(String contractName) {
return format("function %s(%s)", contractName, join(inputs, ", "));
}
}
public static class Function extends Entry {
private static final int ENCODED_SIGN_LENGTH = 4;
public Function(boolean constant, String name, List<Param> inputs, List<Param> outputs, Boolean payable) {
super(null, constant, name, inputs, outputs, Type.function, payable);
}
public byte[] encode(Object... args) {
return ByteUtil.merge(encodeSignature(), encodeArguments(args));
}
private byte[] encodeArguments(Object... args) {
if (args.length > inputs.size())
throw new RuntimeException("Too many arguments: " + args.length + " > " + inputs.size());
int staticSize = 0;
int dynamicCnt = 0;
// calculating static size and number of dynamic params
for (int i = 0; i < args.length; i++) {
SolidityType type = inputs.get(i).type;
if (type.isDynamicType()) {
dynamicCnt++;
}
staticSize += type.getFixedSize();
}
byte[][] bb = new byte[args.length + dynamicCnt][];
for (int curDynamicPtr = staticSize, curDynamicCnt = 0, i = 0; i < args.length; i++) {
SolidityType type = inputs.get(i).type;
if (type.isDynamicType()) {
byte[] dynBB = type.encode(args[i]);
bb[i] = encodeInt(curDynamicPtr);
bb[args.length + curDynamicCnt] = dynBB;
curDynamicCnt++;
curDynamicPtr += dynBB.length;
} else {
bb[i] = type.encode(args[i]);
}
}
return ByteUtil.merge(bb);
}
public List<?> decode(byte[] encoded) {
return Param.decodeList(inputs, subarray(encoded, ENCODED_SIGN_LENGTH, encoded.length));
}
public List<?> decodeResult(byte[] encoded) {
return Param.decodeList(outputs, encoded);
}
@Override
public byte[] encodeSignature() {
return extractSignature(super.encodeSignature());
}
public static byte[] extractSignature(byte[] data) {
return subarray(data, 0, ENCODED_SIGN_LENGTH);
}
@Override
public String toString() {
String returnTail = "";
if (constant) {
returnTail += " constant";
}
if (!outputs.isEmpty()) {
List<String> types = new ArrayList<>();
for (Param output : outputs) {
types.add(output.type.getCanonicalName());
}
returnTail += format(" returns(%s)", join(types, ", "));
}
return format("function %s(%s)%s;", name, join(inputs, ", "), returnTail);
}
}
public static class Event extends Entry {
public Event(boolean anonymous, String name, List<Param> inputs, List<Param> outputs) {
super(anonymous, null, name, inputs, outputs, Type.event, false);
}
public List<?> decode(byte[] data, byte[][] topics) {
List<Object> result = new ArrayList<>(inputs.size());
byte[][] argTopics = anonymous ? topics : subarray(topics, 1, topics.length);
List<Param> indexedParams = filteredInputs(true);
List<Object> indexed = new ArrayList<>();
for (int i = 0; i < indexedParams.size(); i++) {
Object decodedTopic;
if (indexedParams.get(i).type.isDynamicType()) {
                    // If arrays (including string and bytes) are used as indexed arguments,
                    // the Keccak-256 hash of the value is stored as the topic instead.
decodedTopic = SolidityType.Bytes32Type.decodeBytes32(argTopics[i], 0);
} else {
decodedTopic = indexedParams.get(i).type.decode(argTopics[i]);
}
indexed.add(decodedTopic);
}
List<?> notIndexed = Param.decodeList(filteredInputs(false), data);
for (Param input : inputs) {
result.add(input.indexed ? indexed.remove(0) : notIndexed.remove(0));
}
return result;
}
private List<Param> filteredInputs(final boolean indexed) {
return select(inputs, param -> param.indexed == indexed);
}
@Override
public String toString() {
return format("event %s(%s);", name, join(inputs, ", "));
}
}
}
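// --- Editor's note: hedged usage sketch, not part of the original source. ---
// Encoding a call and decoding its result with the Abi class above. The JSON string,
// the "balanceOf" function and the byte arrays are illustrative only.
//
//   Abi abi = Abi.fromJson(contractAbiJson);
//   Abi.Function balanceOf = abi.findFunction(f -> "balanceOf".equals(f.name));
//   byte[] callData = balanceOf.encode(ownerAddressBytes);     // 4-byte selector + encoded args
//   List<?> decoded = balanceOf.decodeResult(returnDataBytes); // e.g. [BigInteger]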
| 12,040 | 36.278638 | 141 | java |
| ethereumj | ethereumj-master/ethereumj-core/src/main/java/org/ethereum/solidity/SolidityType.java |
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.solidity;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonValue;
import org.ethereum.util.ByteUtil;
import org.ethereum.vm.DataWord;
import java.lang.reflect.Array;
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static org.ethereum.util.ByteUtil.toHexString;
public abstract class SolidityType {
private final static int Int32Size = 32;
protected String name;
public SolidityType(String name) {
this.name = name;
}
/**
* The type name as it was specified in the interface description
*/
public String getName() {
return name;
}
/**
* The canonical type name (used for the method signature creation)
* E.g. 'int' - canonical 'int256'
*/
@JsonValue
public String getCanonicalName() {
return getName();
}
@JsonCreator
public static SolidityType getType(String typeName) {
if (typeName.endsWith("]")) return ArrayType.getType(typeName);
if ("bool".equals(typeName)) return new BoolType();
if (typeName.startsWith("int")) return new IntType(typeName);
if (typeName.startsWith("uint")) return new UnsignedIntType(typeName);
if ("address".equals(typeName)) return new AddressType();
if ("string".equals(typeName)) return new StringType();
if ("bytes".equals(typeName)) return new BytesType();
if ("function".equals(typeName)) return new FunctionType();
if (typeName.startsWith("bytes")) return new Bytes32Type(typeName);
throw new RuntimeException("Unknown type: " + typeName);
}
/**
* Encodes the value according to specific type rules
*
* @param value
*/
public abstract byte[] encode(Object value);
public abstract Object decode(byte[] encoded, int offset);
public Object decode(byte[] encoded) {
return decode(encoded, 0);
}
/**
* @return fixed size in bytes. For the dynamic types returns IntType.getFixedSize()
* which is effectively the int offset to dynamic data
*/
public int getFixedSize() {
return Int32Size;
}
public boolean isDynamicType() {
return false;
}
@Override
public String toString() {
return getName();
}
public static abstract class ArrayType extends SolidityType {
public static ArrayType getType(String typeName) {
int idx1 = typeName.lastIndexOf("[");
int idx2 = typeName.lastIndexOf("]");
if (idx1 + 1 == idx2) {
return new DynamicArrayType(typeName);
} else {
return new StaticArrayType(typeName);
}
}
SolidityType elementType;
public ArrayType(String name) {
super(name);
elementType = SolidityType.getType(name.substring(0, name.lastIndexOf("[")));
}
@Override
public byte[] encode(Object value) {
if (value.getClass().isArray()) {
List<Object> elems = new ArrayList<>();
for (int i = 0; i < Array.getLength(value); i++) {
elems.add(Array.get(value, i));
}
return encodeList(elems);
} else if (value instanceof List) {
return encodeList((List) value);
} else {
throw new RuntimeException("List value expected for type " + getName());
}
}
protected byte[] encodeTuple(List l) {
byte[][] elems;
if (elementType.isDynamicType()) {
elems = new byte[l.size() * 2][];
int offset = l.size() * Int32Size;
for (int i = 0; i < l.size(); i++) {
elems[i] = IntType.encodeInt(offset);
byte[] encoded = elementType.encode(l.get(i));
elems[l.size() + i] = encoded;
offset += Int32Size * ((encoded.length - 1) / Int32Size + 1);
}
} else {
elems = new byte[l.size()][];
for (int i = 0; i < l.size(); i++) {
elems[i] = elementType.encode(l.get(i));
}
}
return ByteUtil.merge(elems);
}
public Object[] decodeTuple(byte[] encoded, int origOffset, int len) {
int offset = origOffset;
Object[] ret = new Object[len];
for (int i = 0; i < len; i++) {
if (elementType.isDynamicType()) {
ret[i] = elementType.decode(encoded, origOffset + IntType.decodeInt(encoded, offset).intValue());
} else {
ret[i] = elementType.decode(encoded, offset);
}
offset += elementType.getFixedSize();
}
return ret;
}
public SolidityType getElementType() {
return elementType;
}
public abstract byte[] encodeList(List l);
}
public static class StaticArrayType extends ArrayType {
int size;
public StaticArrayType(String name) {
super(name);
int idx1 = name.lastIndexOf("[");
int idx2 = name.lastIndexOf("]");
String dim = name.substring(idx1 + 1, idx2);
size = Integer.parseInt(dim);
}
@Override
public String getCanonicalName() {
return getElementType().getCanonicalName() + "[" + size + "]";
}
@Override
public byte[] encodeList(List l) {
if (l.size() != size) throw new RuntimeException("List size (" + l.size() + ") != " + size + " for type " + getName());
return encodeTuple(l);
}
@Override
public Object[] decode(byte[] encoded, int offset) {
return decodeTuple(encoded, offset, size);
}
@Override
public int getFixedSize() {
if (isDynamicType()) {
return Int32Size;
} else {
return elementType.getFixedSize() * size;
}
}
@Override
public boolean isDynamicType() {
return getElementType().isDynamicType() && size > 0;
}
}
public static class DynamicArrayType extends ArrayType {
public DynamicArrayType(String name) {
super(name);
}
@Override
public String getCanonicalName() {
return elementType.getCanonicalName() + "[]";
}
@Override
public byte[] encodeList(List l) {
return ByteUtil.merge(IntType.encodeInt(l.size()), encodeTuple(l));
}
@Override
public Object decode(byte[] encoded, int origOffset) {
int len = IntType.decodeInt(encoded, origOffset).intValue();
return decodeTuple(encoded, origOffset + Int32Size, len);
}
@Override
public boolean isDynamicType() {
return true;
}
}
public static class BytesType extends SolidityType {
protected BytesType(String name) {
super(name);
}
public BytesType() {
super("bytes");
}
@Override
public byte[] encode(Object value) {
byte[] bb;
if (value instanceof byte[]) {
bb = (byte[]) value;
} else if (value instanceof String) {
bb = ((String) value).getBytes();
} else {
throw new RuntimeException("byte[] or String value is expected for type 'bytes'");
}
            byte[] ret = new byte[((bb.length - 1) / Int32Size + 1) * Int32Size]; // pad to a multiple of 32 bytes
System.arraycopy(bb, 0, ret, 0, bb.length);
return ByteUtil.merge(IntType.encodeInt(bb.length), ret);
}
@Override
public Object decode(byte[] encoded, int offset) {
int len = IntType.decodeInt(encoded, offset).intValue();
if (len == 0) return new byte[0];
offset += Int32Size;
return Arrays.copyOfRange(encoded, offset, offset + len);
}
@Override
public boolean isDynamicType() {
return true;
}
}
public static class StringType extends BytesType {
public StringType() {
super("string");
}
@Override
public byte[] encode(Object value) {
if (!(value instanceof String)) throw new RuntimeException("String value expected for type 'string'");
return super.encode(((String)value).getBytes(StandardCharsets.UTF_8));
}
@Override
public Object decode(byte[] encoded, int offset) {
return new String((byte[]) super.decode(encoded, offset), StandardCharsets.UTF_8);
}
}
public static class Bytes32Type extends SolidityType {
public Bytes32Type(String s) {
super(s);
}
@Override
public byte[] encode(Object value) {
if (value instanceof Number) {
BigInteger bigInt = new BigInteger(value.toString());
return IntType.encodeInt(bigInt);
} else if (value instanceof String) {
byte[] ret = new byte[Int32Size];
byte[] bytes = ((String) value).getBytes(StandardCharsets.UTF_8);
System.arraycopy(bytes, 0, ret, 0, bytes.length);
return ret;
} else if (value instanceof byte[]) {
byte[] bytes = (byte[]) value;
byte[] ret = new byte[Int32Size];
System.arraycopy(bytes, 0, ret, Int32Size - bytes.length, bytes.length);
return ret;
}
throw new RuntimeException("Can't encode java type " + value.getClass() + " to bytes32");
}
@Override
public Object decode(byte[] encoded, int offset) {
return decodeBytes32(encoded, offset);
}
public static byte[] decodeBytes32(byte[] encoded, int offset) {
return Arrays.copyOfRange(encoded, offset, offset + Int32Size);
}
}
public static class AddressType extends IntType {
public AddressType() {
super("address");
}
@Override
public byte[] encode(Object value) {
if (value instanceof String && !((String)value).startsWith("0x")) {
// address is supposed to be always in hex
value = "0x" + value;
}
byte[] addr = super.encode(value);
for (int i = 0; i < 12; i++) {
if (addr[i] != 0) {
throw new RuntimeException("Invalid address (should be 20 bytes length): " + toHexString(addr));
}
}
return addr;
}
@Override
public Object decode(byte[] encoded, int offset) {
BigInteger bi = (BigInteger) super.decode(encoded, offset);
return ByteUtil.bigIntegerToBytes(bi, 20);
}
}
public static abstract class NumericType extends SolidityType {
public NumericType(String name) {
super(name);
}
BigInteger encodeInternal(Object value) {
BigInteger bigInt;
if (value instanceof String) {
String s = ((String)value).toLowerCase().trim();
int radix = 10;
if (s.startsWith("0x")) {
s = s.substring(2);
radix = 16;
} else if (s.contains("a") || s.contains("b") || s.contains("c") ||
s.contains("d") || s.contains("e") || s.contains("f")) {
radix = 16;
}
bigInt = new BigInteger(s, radix);
} else if (value instanceof BigInteger) {
bigInt = (BigInteger) value;
} else if (value instanceof Number) {
bigInt = new BigInteger(value.toString());
} else if (value instanceof byte[]) {
bigInt = ByteUtil.bytesToBigInteger((byte[]) value);
} else {
throw new RuntimeException("Invalid value for type '" + this + "': " + value + " (" + value.getClass() + ")");
}
return bigInt;
}
}
public static class IntType extends NumericType {
public IntType(String name) {
super(name);
}
@Override
public String getCanonicalName() {
if (getName().equals("int")) return "int256";
return super.getCanonicalName();
}
public static BigInteger decodeInt(byte[] encoded, int offset) {
return new BigInteger(Arrays.copyOfRange(encoded, offset, offset + Int32Size));
}
public static byte[] encodeInt(int i) {
return encodeInt(new BigInteger("" + i));
}
public static byte[] encodeInt(BigInteger bigInt) {
return ByteUtil.bigIntegerToBytesSigned(bigInt, Int32Size);
}
@Override
public Object decode(byte[] encoded, int offset) {
return decodeInt(encoded, offset);
}
@Override
public byte[] encode(Object value) {
BigInteger bigInt = encodeInternal(value);
return encodeInt(bigInt);
}
}
public static class UnsignedIntType extends NumericType {
public UnsignedIntType(String name) {
super(name);
}
@Override
public String getCanonicalName() {
if (getName().equals("uint")) return "uint256";
return super.getCanonicalName();
}
public static BigInteger decodeInt(byte[] encoded, int offset) {
return new BigInteger(1, Arrays.copyOfRange(encoded, offset, offset + Int32Size));
}
public static byte[] encodeInt(int i) {
return encodeInt(new BigInteger("" + i));
}
public static byte[] encodeInt(BigInteger bigInt) {
if (bigInt.signum() == -1) {
throw new RuntimeException("Wrong value for uint type: " + bigInt);
}
return ByteUtil.bigIntegerToBytes(bigInt, Int32Size);
}
@Override
public byte[] encode(Object value) {
BigInteger bigInt = encodeInternal(value);
return encodeInt(bigInt);
}
@Override
public Object decode(byte[] encoded, int offset) {
return decodeInt(encoded, offset);
}
}
public static class BoolType extends IntType {
public BoolType() {
super("bool");
}
@Override
public byte[] encode(Object value) {
if (!(value instanceof Boolean)) throw new RuntimeException("Wrong value for bool type: " + value);
return super.encode(value == Boolean.TRUE ? 1 : 0);
}
@Override
public Object decode(byte[] encoded, int offset) {
return Boolean.valueOf(((Number) super.decode(encoded, offset)).intValue() != 0);
}
}
public static class FunctionType extends Bytes32Type {
public FunctionType() {
super("function");
}
@Override
public byte[] encode(Object value) {
if (!(value instanceof byte[])) throw new RuntimeException("Expected byte[] value for FunctionType");
if (((byte[]) value).length != 24) throw new RuntimeException("Expected byte[24] for FunctionType");
return super.encode(ByteUtil.merge((byte[]) value, new byte[8]));
}
}
}
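// --- Editor's note: hedged usage sketch, not part of the original source. ---
// Stand-alone use of the type codecs above; the values are illustrative.
//
//   SolidityType uint256 = SolidityType.getType("uint256");
//   byte[] word = uint256.encode(BigInteger.valueOf(42));      // 32-byte big-endian word
//   BigInteger back = (BigInteger) uint256.decode(word);       // 42
//
//   SolidityType uintArray = SolidityType.getType("uint256[]");
//   byte[] enc = uintArray.encode(Arrays.asList(1, 2, 3));     // length word + 3 elements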
| 16,649 | 32.979592 | 131 | java |
| ethereumj | ethereumj-master/ethereumj-core/src/main/java/org/ethereum/solidity/compiler/Solc.java |
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.solidity.compiler;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;
import java.util.Scanner;
import org.ethereum.config.SystemProperties;
/**
* Created by Anton Nashatyrev on 03.03.2016.
*/
public class Solc {
private File solc = null;
Solc(SystemProperties config) {
try {
init(config);
} catch (IOException e) {
throw new RuntimeException("Can't init solc compiler: ", e);
}
}
private void init(SystemProperties config) throws IOException {
if (config != null && config.customSolcPath() != null) {
solc = new File(config.customSolcPath());
if (!solc.canExecute()) {
throw new RuntimeException(String.format(
"Solidity compiler from config solc.path: %s is not a valid executable",
config.customSolcPath()
));
}
} else {
initBundled();
}
}
private void initBundled() throws IOException {
File tmpDir = new File(System.getProperty("java.io.tmpdir"), "solc");
tmpDir.mkdirs();
InputStream is = getClass().getResourceAsStream("/native/" + getOS() + "/solc/file.list");
try (Scanner scanner = new Scanner(is)) {
while (scanner.hasNext()) {
String s = scanner.next();
File targetFile = new File(tmpDir, s);
InputStream fis = getClass().getResourceAsStream("/native/" + getOS() + "/solc/" + s);
Files.copy(fis, targetFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
if (solc == null) {
// first file in the list denotes executable
solc = targetFile;
solc.setExecutable(true);
}
targetFile.deleteOnExit();
}
}
}
private String getOS() {
String osName = System.getProperty("os.name").toLowerCase();
if (osName.contains("win")) {
return "win";
} else if (osName.contains("linux")) {
return "linux";
} else if (osName.contains("mac")) {
return "mac";
} else {
throw new RuntimeException("Can't find solc compiler: unrecognized OS: " + osName);
}
}
public File getExecutable() {
return solc;
}
}
| 3,295 | 33.333333 | 102 | java |
| ethereumj | ethereumj-master/ethereumj-core/src/main/java/org/ethereum/solidity/compiler/CompilationResult.java |
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.solidity.compiler;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
@JsonIgnoreProperties(ignoreUnknown = true)
public class CompilationResult {
@JsonProperty("contracts") private Map<String, ContractMetadata> contracts;
@JsonProperty("version") public String version;
@JsonIgnore public static CompilationResult parse(String rawJson) throws IOException {
if(rawJson == null || rawJson.isEmpty()){
CompilationResult empty = new CompilationResult();
empty.contracts = Collections.emptyMap();
empty.version = "";
return empty;
} else {
return new ObjectMapper().readValue(rawJson, CompilationResult.class);
}
}
/**
* @return the contract's path given this compilation result contains exactly one contract
*/
@JsonIgnore public Path getContractPath() {
if (contracts.size() > 1) {
            throw new UnsupportedOperationException("Source contains more than 1 contract. Please specify the contract name. Available keys (" + getContractKeys() + ").");
} else {
String key = contracts.keySet().iterator().next();
return Paths.get(key.substring(0, key.lastIndexOf(':')));
}
}
/**
* @return the contract's name given this compilation result contains exactly one contract
*/
@JsonIgnore public String getContractName() {
if (contracts.size() > 1) {
            throw new UnsupportedOperationException("Source contains more than 1 contract. Please specify the contract name. Available keys (" + getContractKeys() + ").");
} else {
String key = contracts.keySet().iterator().next();
return key.substring(key.lastIndexOf(':') + 1);
}
}
/**
* @param contractName The contract name
* @return the first contract found for a given contract name; use {@link #getContract(Path, String)} if this compilation result contains more than one contract with the same name
*/
@JsonIgnore public ContractMetadata getContract(String contractName) {
if (contractName == null && contracts.size() == 1) {
return contracts.values().iterator().next();
} else if (contractName == null || contractName.isEmpty()) {
            throw new UnsupportedOperationException("Source contains more than 1 contract. Please specify the contract name. Available keys (" + getContractKeys() + ").");
}
for (Map.Entry<String, ContractMetadata> entry : contracts.entrySet()) {
String key = entry.getKey();
String name = key.substring(key.lastIndexOf(':') + 1);
if (contractName.equals(name)) {
return entry.getValue();
}
}
throw new UnsupportedOperationException("No contract found with name '" + contractName + "'. Please specify a valid contract name. Available keys (" + getContractKeys() + ").");
}
/**
* @param contractPath The contract path
* @param contractName The contract name
* @return the contract with key {@code contractPath:contractName} if it exists; {@code null} otherwise
*/
@JsonIgnore public ContractMetadata getContract(Path contractPath, String contractName) {
return contracts.get(contractPath.toAbsolutePath().toString() + ':' + contractName);
}
/**
* @return all contracts from this compilation result
*/
@JsonIgnore public List<ContractMetadata> getContracts() {
return new ArrayList<>(contracts.values());
}
/**
* @return all keys from this compilation result
*/
@JsonIgnore public List<String> getContractKeys() {
return new ArrayList<>(contracts.keySet());
}
@JsonIgnoreProperties(ignoreUnknown = true)
public static class ContractMetadata {
public String abi;
public String bin;
public String solInterface;
public String metadata;
public String getInterface() {
return solInterface;
}
public void setInterface(String solInterface) {
this.solInterface = solInterface;
}
}
}
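// --- Editor's note: hedged usage sketch, not part of the original source. ---
// Typical flow after a combined-json compilation; rawCombinedJson and the
// "MyToken" contract name are illustrative only.
//
//   CompilationResult res = CompilationResult.parse(rawCombinedJson);
//   CompilationResult.ContractMetadata meta = res.getContract("MyToken");
//   String abiJson = meta.abi;       // feed into Abi.fromJson(...)
//   String binHex = meta.bin;        // deployable bytecode as a hex string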
| 5,327 | 38.761194 | 185 | java |
| ethereumj | ethereumj-master/ethereumj-core/src/main/java/org/ethereum/solidity/compiler/ContractException.java |
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.solidity.compiler;
public class ContractException extends RuntimeException {
public ContractException(String message) {
super(message);
}
public static ContractException permissionError(String msg, Object... args) {
return error("contract permission error", msg, args);
}
public static ContractException compilationError(String msg, Object... args) {
return error("contract compilation error", msg, args);
}
public static ContractException validationError(String msg, Object... args) {
return error("contract validation error", msg, args);
}
public static ContractException assembleError(String msg, Object... args) {
return error("contract assemble error", msg, args);
}
private static ContractException error(String title, String message, Object... args) {
return new ContractException(title + ": " + String.format(message, args));
}
}
| 1,752 | 37.108696 | 90 | java |
| ethereumj | ethereumj-master/ethereumj-core/src/main/java/org/ethereum/solidity/compiler/SourceArtifact.java |
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.solidity.compiler;
import java.io.File;
import java.io.IOException;
import java.util.*;
import static java.lang.String.format;
import static java.util.Arrays.asList;
import static java.util.Collections.emptyList;
import static org.apache.commons.collections4.CollectionUtils.disjunction;
import static org.apache.commons.collections4.CollectionUtils.isNotEmpty;
import static org.apache.commons.lang3.StringUtils.substringsBetween;
import static org.ethereum.solidity.compiler.ContractException.assembleError;
public class SourceArtifact {
private String name;
private List<String> dependencies;
private String source;
private final Set<SourceArtifact> injectedDependencies = new HashSet<>();
private final Set<SourceArtifact> dependentArtifacts = new HashSet<>();
public SourceArtifact(String name, String source) {
this.name = name;
this.dependencies = extractDependencies(source);
this.source = source.replaceAll("import\\s\"\\.*?\\.sol\";", "");
}
public SourceArtifact(File f) {
}
private static List<String> extractDependencies(String source) {
String[] deps = substringsBetween(source, "import \"", "\";");
return deps == null ? Collections.<String>emptyList() : asList(deps);
}
// public SourceArtifact(MultipartFile srcFile) throws IOException {
// this(srcFile.getOriginalFilename(), new String(srcFile.getBytes(), "UTF-8"));
// }
public void injectDependency(SourceArtifact srcArtifact) {
injectedDependencies.add(srcArtifact);
srcArtifact.addDependentArtifact(this);
}
private void addDependentArtifact(SourceArtifact srcArtifact) {
dependentArtifacts.add(srcArtifact);
}
public boolean hasDependentArtifacts() {
return !dependentArtifacts.isEmpty();
}
private Collection<String> getUnresolvedDependencies() {
Set<String> ret = new HashSet<>();
for (SourceArtifact injectedDependency : injectedDependencies) {
ret.add(injectedDependency.getName());
}
return disjunction(dependencies, ret);
}
public String plainSource() {
Collection<String> unresolvedDeps = getUnresolvedDependencies();
if (isNotEmpty(unresolvedDeps)) {
throw assembleError("Followed dependencies aren't resolved: %s", unresolvedDeps);
}
String result = this.source;
for (SourceArtifact dependencyArtifact : injectedDependencies) {
String importDefinition = format("import \"%s\";", dependencyArtifact.getName());
String dependencySrc = format("// %s\n%s", importDefinition, dependencyArtifact.plainSource());
result = result.replace(importDefinition, dependencySrc);
}
return result;
}
public String getName() {
return name;
}
public List<String> getDependencies() {
return dependencies;
}
}
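// --- Editor's note: hedged usage sketch, not part of the original source. ---
// Manually stitching two sources; the file names and Solidity snippets are illustrative.
//
//   SourceArtifact lib = new SourceArtifact("Lib.sol", libSolSource);
//   SourceArtifact main = new SourceArtifact("Main.sol", mainSolSource); // contains: import "Lib.sol";
//   main.injectDependency(lib);
//   String merged = main.plainSource();   // the import line is replaced by Lib.sol's body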
| 3,754 | 33.768519 | 107 | java |
| ethereumj | ethereumj-master/ethereumj-core/src/main/java/org/ethereum/solidity/compiler/Sources.java |
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.solidity.compiler;
import java.io.File;
import java.util.HashMap;
import java.util.Map;
import static org.ethereum.solidity.compiler.ContractException.assembleError;
public class Sources {
private final Map<String, SourceArtifact> artifacts = new HashMap<>();
private String targetArtifact;
public Sources(File[] files) {
for (File file : files) {
artifacts.put(file.getName(), new SourceArtifact(file));
}
}
public void resolveDependencies() {
for (String srcName : artifacts.keySet()) {
SourceArtifact src = artifacts.get(srcName);
for (String dep : src.getDependencies()) {
SourceArtifact depArtifact = artifacts.get(dep);
if (depArtifact == null) {
throw assembleError("can't resolve dependency: dependency '%s' not found.", dep);
}
src.injectDependency(depArtifact);
            }
}
for (SourceArtifact artifact : artifacts.values()) {
if (!artifact.hasDependentArtifacts()) {
targetArtifact = artifact.getName();
}
}
}
public String plainSource() {
return artifacts.get(targetArtifact).plainSource();
}
}
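// --- Editor's note: hedged usage sketch, not part of the original source. ---
// Intended API shape for flattening a directory of sources. Note that the
// SourceArtifact(File) constructor above is currently empty, so this is an
// illustration of the interface rather than a working end-to-end run.
//
//   Sources sources = new Sources(new File("contracts").listFiles()); // path is illustrative
//   sources.resolveDependencies();
//   String flattened = sources.plainSource();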
| 2,083 | 33.733333 | 101 | java |
| ethereumj | ethereumj-master/ethereumj-core/src/main/java/org/ethereum/solidity/compiler/SolidityCompiler.java |
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.solidity.compiler;
import org.ethereum.config.SystemProperties;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Serializable;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static java.util.stream.Collectors.toList;
@Component
public class SolidityCompiler {
private Solc solc;
private static SolidityCompiler INSTANCE;
@Autowired
public SolidityCompiler(SystemProperties config) {
solc = new Solc(config);
}
public static Result compile(File sourceDirectory, boolean combinedJson, Option... options) throws IOException {
return getInstance().compileSrc(sourceDirectory, false, combinedJson, options);
}
/**
* This class is mainly here for backwards compatibility; however, we are now reusing it, making it the sole public
* interface listing all the supported options. (A brief usage sketch follows this class.)
*/
public static final class Options {
public static final OutputOption AST = OutputOption.AST;
public static final OutputOption BIN = OutputOption.BIN;
public static final OutputOption INTERFACE = OutputOption.INTERFACE;
public static final OutputOption ABI = OutputOption.ABI;
public static final OutputOption METADATA = OutputOption.METADATA;
public static final OutputOption ASTJSON = OutputOption.ASTJSON;
private static final NameOnlyOption OPTIMIZE = NameOnlyOption.OPTIMIZE;
private static final NameOnlyOption VERSION = NameOnlyOption.VERSION;
private static class CombinedJson extends ListOption {
private CombinedJson(List values) {
super("combined-json", values);
}
}
public static class AllowPaths extends ListOption {
public AllowPaths(List values) {
super("allow-paths", values);
}
}
}
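// Usage sketch (not part of the library API): a minimal, hypothetical example of invoking the compiler
// through the static helpers in this class. It only uses members defined here (compile(byte[], boolean,
// Option...), Options.* and Result); the Solidity source string itself is made up, and the call would
// need to run in a context that handles IOException.
//
//   String src = "pragma solidity ^0.5.0; contract A { function f() public pure returns (uint) { return 1; } }";
//   SolidityCompiler.Result res = SolidityCompiler.compile(
//           src.getBytes("UTF-8"), true, Options.ABI, Options.BIN, Options.METADATA);
//   if (res.isFailed()) {
//       throw new RuntimeException("Compilation failed: " + res.errors);
//   }
//   String combinedJson = res.output; // combined-json output with abi, bin and metadata sections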
public interface Option extends Serializable {
String getValue();
String getName();
}
private static class ListOption implements Option {
private String name;
private List values;
private ListOption(String name, List values) {
this.name = name;
this.values = values;
}
@Override public String getValue() {
StringBuilder result = new StringBuilder();
for (Object value : values) {
if (OutputOption.class.isAssignableFrom(value.getClass())) {
result.append((result.length() == 0) ? ((OutputOption) value).getName() : ',' + ((OutputOption) value).getName());
} else if (Path.class.isAssignableFrom(value.getClass())) {
result.append((result.length() == 0) ? ((Path) value).toAbsolutePath().toString() : ',' + ((Path) value).toAbsolutePath().toString());
} else if (File.class.isAssignableFrom(value.getClass())) {
result.append((result.length() == 0) ? ((File) value).getAbsolutePath() : ',' + ((File) value).getAbsolutePath());
} else if (String.class.isAssignableFrom(value.getClass())) {
result.append((result.length() == 0) ? value : "," + value);
} else {
throw new UnsupportedOperationException("Unexpected type, value '" + value + "' cannot be retrieved.");
}
}
return result.toString();
}
@Override public String getName() { return name; }
@Override public String toString() { return name; }
}
private enum NameOnlyOption implements Option {
OPTIMIZE("optimize"),
VERSION("version");
private String name;
NameOnlyOption(String name) {
this.name = name;
}
@Override public String getValue() { return ""; }
@Override public String getName() { return name; }
@Override public String toString() {
return name;
}
}
private enum OutputOption implements Option {
AST("ast"),
BIN("bin"),
INTERFACE("interface"),
ABI("abi"),
METADATA("metadata"),
ASTJSON("ast-json");
private String name;
OutputOption(String name) {
this.name = name;
}
@Override public String getValue() { return ""; }
@Override public String getName() { return name; }
@Override public String toString() {
return name;
}
}
public static class CustomOption implements Option {
private String name;
private String value;
public CustomOption(String name) {
if (name.startsWith("--")) {
this.name = name.substring(2);
} else {
this.name = name;
}
}
public CustomOption(String name, String value) {
this(name);
this.value = value;
}
@Override
public String getValue() {
return value;
}
@Override
public String getName() {
return name;
}
}
public static class Result {
public String errors;
public String output;
private boolean success;
public Result(String errors, String output, boolean success) {
this.errors = errors;
this.output = output;
this.success = success;
}
public boolean isFailed() {
return !success;
}
}
private static class ParallelReader extends Thread {
private InputStream stream;
private StringBuilder content = new StringBuilder();
ParallelReader(InputStream stream) {
this.stream = stream;
}
public String getContent() {
return getContent(true);
}
public synchronized String getContent(boolean waitForComplete) {
if (waitForComplete) {
while(stream != null) {
try {
wait();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
return content.toString();
}
public void run() {
try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream))) {
String line;
while ((line = reader.readLine()) != null) {
content.append(line).append("\n");
}
} catch (IOException ioe) {
ioe.printStackTrace();
} finally {
synchronized (this) {
stream = null;
notifyAll();
}
}
}
}
public static Result compile(byte[] source, boolean combinedJson, Option... options) throws IOException {
return getInstance().compileSrc(source, false, combinedJson, options);
}
public Result compileSrc(File source, boolean optimize, boolean combinedJson, Option... options) throws IOException {
List<String> commandParts = prepareCommandOptions(optimize, combinedJson, options);
commandParts.add(source.getAbsolutePath());
ProcessBuilder processBuilder = new ProcessBuilder(commandParts)
.directory(solc.getExecutable().getParentFile());
processBuilder.environment().put("LD_LIBRARY_PATH",
solc.getExecutable().getParentFile().getCanonicalPath());
Process process = processBuilder.start();
ParallelReader error = new ParallelReader(process.getErrorStream());
ParallelReader output = new ParallelReader(process.getInputStream());
error.start();
output.start();
try {
process.waitFor();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
boolean success = process.exitValue() == 0;
return new Result(error.getContent(), output.getContent(), success);
}
private List<String> prepareCommandOptions(boolean optimize, boolean combinedJson, Option... options) throws IOException {
List<String> commandParts = new ArrayList<>();
commandParts.add(solc.getExecutable().getCanonicalPath());
if (optimize) {
commandParts.add("--" + Options.OPTIMIZE.getName());
}
if (combinedJson) {
Option combinedJsonOption = new Options.CombinedJson(getElementsOf(OutputOption.class, options));
commandParts.add("--" + combinedJsonOption.getName());
commandParts.add(combinedJsonOption.getValue());
} else {
for (Option option : getElementsOf(OutputOption.class, options)) {
commandParts.add("--" + option.getName());
}
}
for (Option option : getElementsOf(ListOption.class, options)) {
commandParts.add("--" + option.getName());
commandParts.add(option.getValue());
}
for (Option option : getElementsOf(CustomOption.class, options)) {
commandParts.add("--" + option.getName());
if (option.getValue() != null) {
commandParts.add(option.getValue());
}
}
return commandParts;
}
private static <T> List<T> getElementsOf(Class<T> clazz, Option... options) {
return Arrays.stream(options).filter(clazz::isInstance).map(clazz::cast).collect(toList());
}
public Result compileSrc(byte[] source, boolean optimize, boolean combinedJson, Option... options) throws IOException {
List<String> commandParts = prepareCommandOptions(optimize, combinedJson, options);
// New in solidity 0.5.0: using stdin requires an explicit "-". The following is the output
// of 'solc' if no file is provided, e.g.: solc --combined-json abi,bin,interface,metadata
//
// No input files given. If you wish to use the standard input please specify "-" explicitly.
//
// For older solc versions "-" is not an issue, as it is accepted as well.
commandParts.add("-");
ProcessBuilder processBuilder = new ProcessBuilder(commandParts)
.directory(solc.getExecutable().getParentFile());
processBuilder.environment().put("LD_LIBRARY_PATH",
solc.getExecutable().getParentFile().getCanonicalPath());
Process process = processBuilder.start();
try (BufferedOutputStream stream = new BufferedOutputStream(process.getOutputStream())) {
stream.write(source);
}
ParallelReader error = new ParallelReader(process.getErrorStream());
ParallelReader output = new ParallelReader(process.getInputStream());
error.start();
output.start();
try {
process.waitFor();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
boolean success = process.exitValue() == 0;
return new Result(error.getContent(), output.getContent(), success);
}
public static String runGetVersionOutput() throws IOException {
List<String> commandParts = new ArrayList<>();
commandParts.add(getInstance().solc.getExecutable().getCanonicalPath());
commandParts.add("--" + Options.VERSION.getName());
ProcessBuilder processBuilder = new ProcessBuilder(commandParts)
.directory(getInstance().solc.getExecutable().getParentFile());
processBuilder.environment().put("LD_LIBRARY_PATH",
getInstance().solc.getExecutable().getParentFile().getCanonicalPath());
Process process = processBuilder.start();
ParallelReader error = new ParallelReader(process.getErrorStream());
ParallelReader output = new ParallelReader(process.getInputStream());
error.start();
output.start();
try {
process.waitFor();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
if (process.exitValue() == 0) {
return output.getContent();
}
throw new RuntimeException("Problem getting solc version: " + error.getContent());
}
public static SolidityCompiler getInstance() {
if (INSTANCE == null) {
INSTANCE = new SolidityCompiler(SystemProperties.getDefault());
}
return INSTANCE;
}
}
| 13,563
| 34.883598
| 154
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/trie/SecureTrie.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.trie;
import org.ethereum.datasource.Source;
import org.ethereum.util.Value;
import static org.ethereum.crypto.HashUtil.sha3;
import static org.ethereum.util.ByteUtil.EMPTY_BYTE_ARRAY;
public class SecureTrie extends TrieImpl {
public SecureTrie(byte[] root) {
super(root);
}
public SecureTrie(Source<byte[], byte[]> cache) {
super(cache, null);
}
public SecureTrie(Source<byte[], byte[]> cache, byte[] root) {
super(cache, root);
}
@Override
public byte[] get(byte[] key) {
return super.get(sha3(key));
}
@Override
public void put(byte[] key, byte[] value) {
super.put(sha3(key), value);
}
@Override
public void delete(byte[] key) {
put(key, EMPTY_BYTE_ARRAY);
}
}
| 1,594
| 28
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/trie/Node.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.trie;
import org.ethereum.util.Value;
/**
* A Node in a Merkle Patricia Tree is one of the following:
*
* - NULL (represented as the empty string)
* - A two-item array [ key, value ] (1 key for 2-item array)
* - A 17-item array [ v0 ... v15, vt ] (16 keys for 17-item array)
*
* The idea is that in the event that there is a long path of nodes
* each with only one element, we shortcut the descent by setting up
* a [ key, value ] node, where the key gives the hexadecimal path
* to descend, in the compact encoding described above, and the value
* is just the hash of the node like in the standard radix tree.
*
* R
* / \
* / \
* N N
* / \ / \
* L L L L
*
* Also, we add another conceptual change: internal nodes can no longer
* have values, only leaves with no children of their own can; however,
* since to be fully generic we want the key/value store to be able to
* store keys like 'dog' and 'doge' at the same time, we simply add
* a terminator symbol (16) to the alphabet so there is never a value
* "en-route" to another value.
*
* Where a node is referenced inside a node, what is included is:
*
* H(rlp.encode(x)) where H(x) = keccak(x) if len(x) >= 32 else x
*
* Note that when updating a trie, you will need to store the key/value pair (keccak(x), x)
* in a persistent lookup table when you create a node with length >= 32,
* but if the node is shorter than that then you do not need to store anything
* when length < 32 for the obvious reason that the function f(x) = x is reversible.
*
* @author Nick Savers
* @since 20.05.2014
*/
public class Node {
/* RLP encoded value of the Trie-node */
private final Value value;
private boolean dirty;
public Node(Value val) {
this(val, false);
}
public Node(Value val, boolean dirty) {
this.value = val;
this.dirty = dirty;
}
public Node copy() {
return new Node(this.value, this.dirty);
}
public boolean isDirty() {
return dirty;
}
public void setDirty(boolean dirty) {
this.dirty = dirty;
}
public Value getValue() {
return value;
}
@Override
public String toString() {
return "[" + dirty + ", " + value + "]";
}
}
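// Illustrative sketch of the node-reference rule described in the class comment above
// (H(rlp.encode(x)) with H(x) = keccak(x) if len(x) >= 32 else x). The rlpEncode(...) helper below is
// hypothetical and stands for RLP-encoding a node; HashUtil.sha3 is the keccak used elsewhere in this
// package. This is a clarifying sketch only, not part of the Node class:
//
//   byte[] encoded = rlpEncode(node);
//   byte[] reference = (encoded.length >= 32) ? HashUtil.sha3(encoded) : encoded;
//   // only in the >= 32 case does the pair (keccak(x), x) need to go into the persistent lookup table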
| 3,256
| 32.234694
| 91
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/trie/TrieImpl.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.trie;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.commons.lang3.text.StrBuilder;
import org.ethereum.crypto.HashUtil;
import org.ethereum.datasource.Source;
import org.ethereum.datasource.inmem.HashMapDB;
import org.ethereum.datasource.inmem.HashMapDBSimple;
import org.ethereum.net.swarm.Key;
import org.ethereum.util.FastByteComparisons;
import org.ethereum.util.RLP;
import org.ethereum.util.Value;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.spongycastle.util.encoders.Hex;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;
import static org.apache.commons.lang3.concurrent.ConcurrentUtils.constantFuture;
import static org.ethereum.crypto.HashUtil.EMPTY_TRIE_HASH;
import static org.ethereum.util.ByteUtil.EMPTY_BYTE_ARRAY;
import static org.ethereum.util.RLP.EMPTY_ELEMENT_RLP;
import static org.ethereum.util.RLP.encodeElement;
import static org.ethereum.util.RLP.encodeList;
import static org.ethereum.util.ByteUtil.toHexString;
/**
* Created by Anton Nashatyrev on 07.02.2017.
*/
public class TrieImpl implements Trie<byte[]> {
private final static Object NULL_NODE = new Object();
private final static int MIN_BRANCHES_CONCURRENTLY = 3;
private static ExecutorService executor;
private static final Logger logger = LoggerFactory.getLogger("state");
public static ExecutorService getExecutor() {
if (executor == null) {
executor = Executors.newFixedThreadPool(4,
new ThreadFactoryBuilder().setNameFormat("trie-calc-thread-%d").build());
}
return executor;
}
public enum NodeType {
BranchNode,
KVNodeValue,
KVNodeNode
}
public final class Node {
private byte[] hash = null;
private byte[] rlp = null;
private RLP.LList parsedRlp = null;
private boolean dirty = false;
private Object[] children = null;
// new empty BranchNode
public Node() {
children = new Object[17];
dirty = true;
}
// new KVNode with key and (value or node)
public Node(TrieKey key, Object valueOrNode) {
this(new Object[]{key, valueOrNode});
dirty = true;
}
// new Node with hash or RLP
public Node(byte[] hashOrRlp) {
if (hashOrRlp.length == 32) {
this.hash = hashOrRlp;
} else {
this.rlp = hashOrRlp;
}
}
private Node(RLP.LList parsedRlp) {
this.parsedRlp = parsedRlp;
this.rlp = parsedRlp.getEncoded();
}
private Node(Object[] children) {
this.children = children;
}
public boolean resolveCheck() {
if (rlp != null || parsedRlp != null || hash == null) return true;
rlp = getHash(hash);
return rlp != null;
}
private void resolve() {
if (!resolveCheck()) {
logger.error("Invalid Trie state, can't resolve hash " + toHexString(hash));
throw new RuntimeException("Invalid Trie state, can't resolve hash " + toHexString(hash));
}
}
public byte[] encode() {
return encode(1, true);
}
private byte[] encode(final int depth, boolean forceHash) {
if (!dirty) {
return hash != null ? encodeElement(hash) : rlp;
} else {
NodeType type = getType();
byte[] ret;
if (type == NodeType.BranchNode) {
if (depth == 1 && async) {
// parallelize encode() on the first trie level only, and only if at least
// MIN_BRANCHES_CONCURRENTLY branches were modified
final Object[] encoded = new Object[17];
int encodeCnt = 0;
for (int i = 0; i < 16; i++) {
final Node child = branchNodeGetChild(i);
if (child == null) {
encoded[i] = EMPTY_ELEMENT_RLP;
} else if (!child.dirty) {
encoded[i] = child.encode(depth + 1, false);
} else {
encodeCnt++;
}
}
for (int i = 0; i < 16; i++) {
if (encoded[i] == null) {
final Node child = branchNodeGetChild(i);
if (encodeCnt >= MIN_BRANCHES_CONCURRENTLY) {
encoded[i] = getExecutor().submit(() -> child.encode(depth + 1, false));
} else {
encoded[i] = child.encode(depth + 1, false);
}
}
}
byte[] value = branchNodeGetValue();
encoded[16] = constantFuture(encodeElement(value));
try {
ret = encodeRlpListFutures(encoded);
} catch (Exception e) {
throw new RuntimeException(e);
}
} else {
byte[][] encoded = new byte[17][];
for (int i = 0; i < 16; i++) {
Node child = branchNodeGetChild(i);
encoded[i] = child == null ? EMPTY_ELEMENT_RLP : child.encode(depth + 1, false);
}
byte[] value = branchNodeGetValue();
encoded[16] = encodeElement(value);
ret = encodeList(encoded);
}
} else if (type == NodeType.KVNodeNode) {
ret = encodeList(encodeElement(kvNodeGetKey().toPacked()), kvNodeGetChildNode().encode(depth + 1, false));
} else {
byte[] value = kvNodeGetValue();
ret = encodeList(encodeElement(kvNodeGetKey().toPacked()),
encodeElement(value == null ? EMPTY_BYTE_ARRAY : value));
}
if (hash != null) {
deleteHash(hash);
}
dirty = false;
if (ret.length < 32 && !forceHash) {
rlp = ret;
return ret;
} else {
hash = HashUtil.sha3(ret);
addHash(hash, ret);
return encodeElement(hash);
}
}
}
@SafeVarargs
private final byte[] encodeRlpListFutures(Object... list) throws ExecutionException, InterruptedException {
byte[][] vals = new byte[list.length][];
for (int i = 0; i < list.length; i++) {
if (list[i] instanceof Future) {
vals[i] = ((Future<byte[]>) list[i]).get();
} else {
vals[i] = (byte[]) list[i];
}
}
return encodeList(vals);
}
private void parse() {
if (children != null) return;
resolve();
RLP.LList list = parsedRlp == null ? RLP.decodeLazyList(rlp) : parsedRlp;
if (list.size() == 2) {
children = new Object[2];
TrieKey key = TrieKey.fromPacked(list.getBytes(0));
children[0] = key;
if (key.isTerminal()) {
children[1] = list.getBytes(1);
} else {
children[1] = list.isList(1) ? new Node(list.getList(1)) : new Node(list.getBytes(1));
}
} else {
children = new Object[17];
parsedRlp = list;
}
}
public Node branchNodeGetChild(int hex) {
parse();
assert getType() == NodeType.BranchNode;
Object n = children[hex];
if (n == null && parsedRlp != null) {
if (parsedRlp.isList(hex)) {
n = new Node(parsedRlp.getList(hex));
} else {
byte[] bytes = parsedRlp.getBytes(hex);
if (bytes.length == 0) {
n = NULL_NODE;
} else {
n = new Node(bytes);
}
}
children[hex] = n;
}
return n == NULL_NODE ? null : (Node) n;
}
public Node branchNodeSetChild(int hex, Node node) {
parse();
assert getType() == NodeType.BranchNode;
children[hex] = node == null ? NULL_NODE : node;
dirty = true;
return this;
}
public byte[] branchNodeGetValue() {
parse();
assert getType() == NodeType.BranchNode;
Object n = children[16];
if (n == null && parsedRlp != null) {
byte[] bytes = parsedRlp.getBytes(16);
if (bytes.length == 0) {
n = NULL_NODE;
} else {
n = bytes;
}
children[16] = n;
}
return n == NULL_NODE ? null : (byte[]) n;
}
public Node branchNodeSetValue(byte[] val) {
parse();
assert getType() == NodeType.BranchNode;
children[16] = val == null ? NULL_NODE : val;
dirty = true;
return this;
}
public int branchNodeCompactIdx() {
parse();
assert getType() == NodeType.BranchNode;
int cnt = 0;
int idx = -1;
for (int i = 0; i < 16; i++) {
if (branchNodeGetChild(i) != null) {
cnt++;
idx = i;
if (cnt > 1) return -1;
}
}
return cnt > 0 ? idx : (branchNodeGetValue() == null ? -1 : 16);
}
public boolean branchNodeCanCompact() {
parse();
assert getType() == NodeType.BranchNode;
int cnt = 0;
for (int i = 0; i < 16; i++) {
cnt += branchNodeGetChild(i) == null ? 0 : 1;
if (cnt > 1) return false;
}
return cnt == 0 || branchNodeGetValue() == null;
}
public TrieKey kvNodeGetKey() {
parse();
assert getType() != NodeType.BranchNode;
return (TrieKey) children[0];
}
public Node kvNodeGetChildNode() {
parse();
assert getType() == NodeType.KVNodeNode;
return (Node) children[1];
}
public byte[] kvNodeGetValue() {
parse();
assert getType() == NodeType.KVNodeValue;
return (byte[]) children[1];
}
public Node kvNodeSetValue(byte[] value) {
parse();
assert getType() == NodeType.KVNodeValue;
children[1] = value;
dirty = true;
return this;
}
public Object kvNodeGetValueOrNode() {
parse();
assert getType() != NodeType.BranchNode;
return children[1];
}
public Node kvNodeSetValueOrNode(Object valueOrNode) {
parse();
assert getType() != NodeType.BranchNode;
children[1] = valueOrNode;
dirty = true;
return this;
}
public NodeType getType() {
parse();
return children.length == 17 ? NodeType.BranchNode :
(children[1] instanceof Node ? NodeType.KVNodeNode : NodeType.KVNodeValue);
}
public void dispose() {
if (hash != null) {
deleteHash(hash);
}
}
public Node invalidate() {
dirty = true;
return this;
}
/*********** Dump methods ************/
public String dumpStruct(String indent, String prefix) {
String ret = indent + prefix + getType() + (dirty ? " *" : "") +
(hash == null ? "" : "(hash: " + Hex.toHexString(hash).substring(0, 6) + ")");
if (getType() == NodeType.BranchNode) {
byte[] value = branchNodeGetValue();
ret += (value == null ? "" : " [T] = " + Hex.toHexString(value)) + "\n";
for (int i = 0; i < 16; i++) {
Node child = branchNodeGetChild(i);
if (child != null) {
ret += child.dumpStruct(indent + " ", "[" + i + "] ");
}
}
} else if (getType() == NodeType.KVNodeNode) {
ret += " [" + kvNodeGetKey() + "]\n";
ret += kvNodeGetChildNode().dumpStruct(indent + " ", "");
} else {
ret += " [" + kvNodeGetKey() + "] = " + Hex.toHexString(kvNodeGetValue()) + "\n";
}
return ret;
}
public List<String> dumpTrieNode(boolean compact) {
List<String> ret = new ArrayList<>();
if (hash != null) {
ret.add(hash2str(hash, compact) + " ==> " + dumpContent(false, compact));
}
if (getType() == NodeType.BranchNode) {
for (int i = 0; i < 16; i++) {
Node child = branchNodeGetChild(i);
if (child != null) ret.addAll(child.dumpTrieNode(compact));
}
} else if (getType() == NodeType.KVNodeNode) {
ret.addAll(kvNodeGetChildNode().dumpTrieNode(compact));
}
return ret;
}
private String dumpContent(boolean recursion, boolean compact) {
if (recursion && hash != null) return hash2str(hash, compact);
String ret;
if (getType() == NodeType.BranchNode) {
ret = "[";
for (int i = 0; i < 16; i++) {
Node child = branchNodeGetChild(i);
ret += i == 0 ? "" : ",";
ret += child == null ? "" : child.dumpContent(true, compact);
}
byte[] value = branchNodeGetValue();
ret += value == null ? "" : ", " + val2str(value, compact);
ret += "]";
} else if (getType() == NodeType.KVNodeNode) {
ret = "[<" + kvNodeGetKey() + ">, " + kvNodeGetChildNode().dumpContent(true, compact) + "]";
} else {
ret = "[<" + kvNodeGetKey() + ">, " + val2str(kvNodeGetValue(), compact) + "]";
}
return ret;
}
@Override
public String toString() {
return getType() + (dirty ? " *" : "") + (hash == null ? "" : "(hash: " + toHexString(hash) + " )");
}
}
public interface ScanAction {
void doOnNode(byte[] hash, Node node);
void doOnValue(byte[] nodeHash, Node node, byte[] key, byte[] value);
}
private Source<byte[], byte[]> cache;
private Node root;
private boolean async = true;
public TrieImpl() {
this((byte[]) null);
}
public TrieImpl(byte[] root) {
this(new HashMapDB<byte[]>(), root);
}
public TrieImpl(Source<byte[], byte[]> cache) {
this(cache, null);
}
public TrieImpl(Source<byte[], byte[]> cache, byte[] root) {
this.cache = cache;
setRoot(root);
}
public void setAsync(boolean async) {
this.async = async;
}
private void encode() {
if (root != null) {
root.encode();
}
}
public void setRoot(byte[] root) {
if (root != null && !FastByteComparisons.equal(root, EMPTY_TRIE_HASH)) {
this.root = new Node(root);
} else {
this.root = null;
}
}
private boolean hasRoot() {
return root != null && root.resolveCheck();
}
public Source<byte[], byte[]> getCache() {
return cache;
}
private byte[] getHash(byte[] hash) {
return cache.get(hash);
}
private void addHash(byte[] hash, byte[] ret) {
cache.put(hash, ret);
}
private void deleteHash(byte[] hash) {
cache.delete(hash);
}
public byte[] get(byte[] key) {
if (!hasRoot()) return null; // treating unknown root hash as empty trie
TrieKey k = TrieKey.fromNormal(key);
return get(root, k);
}
private byte[] get(Node n, TrieKey k) {
if (n == null) return null;
NodeType type = n.getType();
if (type == NodeType.BranchNode) {
if (k.isEmpty()) return n.branchNodeGetValue();
Node childNode = n.branchNodeGetChild(k.getHex(0));
return get(childNode, k.shift(1));
} else {
TrieKey k1 = k.matchAndShift(n.kvNodeGetKey());
if (k1 == null) return null;
if (type == NodeType.KVNodeValue) {
return k1.isEmpty() ? n.kvNodeGetValue() : null;
} else {
return get(n.kvNodeGetChildNode(), k1);
}
}
}
public void put(byte[] key, byte[] value) {
TrieKey k = TrieKey.fromNormal(key);
if (root == null) {
if (value != null && value.length > 0) {
root = new Node(k, value);
}
} else {
if (value == null || value.length == 0) {
root = delete(root, k);
} else {
root = insert(root, k, value);
}
}
}
private Node insert(Node n, TrieKey k, Object nodeOrValue) {
NodeType type = n.getType();
if (type == NodeType.BranchNode) {
if (k.isEmpty()) return n.branchNodeSetValue((byte[]) nodeOrValue);
Node childNode = n.branchNodeGetChild(k.getHex(0));
if (childNode != null) {
return n.branchNodeSetChild(k.getHex(0), insert(childNode, k.shift(1), nodeOrValue));
} else {
TrieKey childKey = k.shift(1);
Node newChildNode;
if (!childKey.isEmpty()) {
newChildNode = new Node(childKey, nodeOrValue);
} else {
newChildNode = nodeOrValue instanceof Node ?
(Node) nodeOrValue : new Node(childKey, nodeOrValue);
}
return n.branchNodeSetChild(k.getHex(0), newChildNode);
}
} else {
TrieKey currentNodeKey = n.kvNodeGetKey();
TrieKey commonPrefix = k.getCommonPrefix(currentNodeKey);
if (commonPrefix.isEmpty()) {
Node newBranchNode = new Node();
insert(newBranchNode, currentNodeKey, n.kvNodeGetValueOrNode());
insert(newBranchNode, k, nodeOrValue);
n.dispose();
return newBranchNode;
} else if (commonPrefix.equals(k)) {
return n.kvNodeSetValueOrNode(nodeOrValue);
} else if (commonPrefix.equals(currentNodeKey)) {
insert(n.kvNodeGetChildNode(), k.shift(commonPrefix.getLength()), nodeOrValue);
return n.invalidate();
} else {
Node newBranchNode = new Node();
Node newKvNode = new Node(commonPrefix, newBranchNode);
// TODO can be optimized
insert(newKvNode, currentNodeKey, n.kvNodeGetValueOrNode());
insert(newKvNode, k, nodeOrValue);
n.dispose();
return newKvNode;
}
}
}
@Override
public void delete(byte[] key) {
TrieKey k = TrieKey.fromNormal(key);
if (root != null) {
root = delete(root, k);
}
}
private Node delete(Node n, TrieKey k) {
NodeType type = n.getType();
Node newKvNode;
if (type == NodeType.BranchNode) {
if (k.isEmpty()) {
n.branchNodeSetValue(null);
} else {
int idx = k.getHex(0);
Node child = n.branchNodeGetChild(idx);
if (child == null) return n; // no key found
Node newNode = delete(child, k.shift(1));
n.branchNodeSetChild(idx, newNode);
if (newNode != null) return n; // newNode != null thus number of children didn't decrease
}
// child node or value was deleted and the branch node may need to be compacted
int compactIdx = n.branchNodeCompactIdx();
if (compactIdx < 0) return n; // no compaction is required
// only value or a single child left - compact branch node to kvNode
n.dispose();
if (compactIdx == 16) { // only value left
return new Node(TrieKey.empty(true), n.branchNodeGetValue());
} else { // only single child left
newKvNode = new Node(TrieKey.singleHex(compactIdx), n.branchNodeGetChild(compactIdx));
}
} else { // n - kvNode
TrieKey k1 = k.matchAndShift(n.kvNodeGetKey());
if (k1 == null) {
// no key found
return n;
} else if (type == NodeType.KVNodeValue) {
if (k1.isEmpty()) {
// delete this kvNode
n.dispose();
return null;
} else {
// else no key found
return n;
}
} else {
Node newChild = delete(n.kvNodeGetChildNode(), k1);
if (newChild == null) throw new RuntimeException("Shouldn't happen");
newKvNode = n.kvNodeSetValueOrNode(newChild);
}
}
// if we get here a new kvNode was created, now need to check
// if it should be compacted with child kvNode
Node newChild = newKvNode.kvNodeGetChildNode();
if (newChild.getType() != NodeType.BranchNode) {
// two kvNodes should be compacted into a single one
TrieKey newKey = newKvNode.kvNodeGetKey().concat(newChild.kvNodeGetKey());
Node newNode = new Node(newKey, newChild.kvNodeGetValueOrNode());
newChild.dispose();
newKvNode.dispose();
return newNode;
} else {
// no compaction needed
return newKvNode;
}
}
@Override
public byte[] getRootHash() {
encode();
return root != null ? root.hash : EMPTY_TRIE_HASH;
}
@Override
public void clear() {
throw new RuntimeException("Not implemented yet");
}
@Override
public boolean flush() {
if (root != null && root.dirty) {
// persist all dirty nodes to underlying Source
encode();
// release all Trie Node instances for GC
root = new Node(root.hash);
return true;
} else {
return false;
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TrieImpl trieImpl1 = (TrieImpl) o;
return FastByteComparisons.equal(getRootHash(), trieImpl1.getRootHash());
}
public String dumpStructure() {
return root == null ? "<empty>" : root.dumpStruct("", "");
}
public String dumpTrie() {
return dumpTrie(true);
}
public String dumpTrie(boolean compact) {
if (root == null) return "<empty>";
encode();
StrBuilder ret = new StrBuilder();
List<String> strings = root.dumpTrieNode(compact);
ret.append("Root: " + hash2str(getRootHash(), compact) + "\n");
for (String s : strings) {
ret.append(s).append('\n');
}
return ret.toString();
}
public void scanTree(ScanAction scanAction) {
scanTree(root, TrieKey.empty(false), scanAction);
}
public void scanTree(Node node, TrieKey k, ScanAction scanAction) {
if (node == null) return;
if (node.hash != null) {
scanAction.doOnNode(node.hash, node);
}
if (node.getType() == NodeType.BranchNode) {
if (node.branchNodeGetValue() != null)
scanAction.doOnValue(node.hash, node, k.toNormal(), node.branchNodeGetValue());
for (int i = 0; i < 16; i++) {
scanTree(node.branchNodeGetChild(i), k.concat(TrieKey.singleHex(i)), scanAction);
}
} else if (node.getType() == NodeType.KVNodeNode) {
scanTree(node.kvNodeGetChildNode(), k.concat(node.kvNodeGetKey()), scanAction);
} else {
scanAction.doOnValue(node.hash, node, k.concat(node.kvNodeGetKey()).toNormal(), node.kvNodeGetValue());
}
}
private static String hash2str(byte[] hash, boolean shortHash) {
String ret = Hex.toHexString(hash);
return "0x" + (shortHash ? ret.substring(0,8) : ret);
}
private static String val2str(byte[] val, boolean shortHash) {
String ret = Hex.toHexString(val);
if (val.length > 16) {
ret = ret.substring(0,10) + "... len " + val.length;
}
return "\"" + ret + "\"";
}
}
| 26,580
| 34.871795
| 126
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/trie/CountAllNodes.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.trie;
import org.ethereum.util.Value;
/**
* @author Roman Mandeleil
* @since 29.08.2014
*/
public class CountAllNodes implements TrieImpl.ScanAction {
int counted = 0;
@Override
public void doOnNode(byte[] hash, TrieImpl.Node node) {
++counted;
}
@Override
public void doOnValue(byte[] nodeHash, TrieImpl.Node node, byte[] key, byte[] value) {}
public int getCounted() {
return counted;
}
}
| 1,259
| 29
| 91
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/trie/TrieKey.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.trie;
import static org.ethereum.util.ByteUtil.EMPTY_BYTE_ARRAY;
import static org.ethereum.util.ByteUtil.toHexString;
/**
* Created by Anton Nashatyrev on 13.02.2017.
*/
public final class TrieKey {
public static final int ODD_OFFSET_FLAG = 0x1;
public static final int TERMINATOR_FLAG = 0x2;
private final byte[] key;
private final int off;
private final boolean terminal;
public static TrieKey fromNormal(byte[] key) {
return new TrieKey(key);
}
public static TrieKey fromPacked(byte[] key) {
return new TrieKey(key, ((key[0] >> 4) & ODD_OFFSET_FLAG) != 0 ? 1 : 2, ((key[0] >> 4) & TERMINATOR_FLAG) != 0);
}
public static TrieKey empty(boolean terminal) {
return new TrieKey(EMPTY_BYTE_ARRAY, 0, terminal);
}
public static TrieKey singleHex(int hex) {
TrieKey ret = new TrieKey(new byte[1], 1, false);
ret.setHex(0, hex);
return ret;
}
public TrieKey(byte[] key, int off, boolean terminal) {
this.terminal = terminal;
this.off = off;
this.key = key;
}
private TrieKey(byte[] key) {
this(key, 0, true);
}
public byte[] toPacked() {
int flags = ((off & 1) != 0 ? ODD_OFFSET_FLAG : 0) | (terminal ? TERMINATOR_FLAG : 0);
byte[] ret = new byte[getLength() / 2 + 1];
int toCopy = (flags & ODD_OFFSET_FLAG) != 0 ? ret.length : ret.length - 1;
System.arraycopy(key, key.length - toCopy, ret, ret.length - toCopy, toCopy);
ret[0] &= 0x0F;
ret[0] |= flags << 4;
return ret;
}
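// Worked examples for the hex-prefix packing above (derived by tracing this method; shown for clarity):
//   - fromNormal(new byte[]{0x12, 0x34}) is the terminal nibble path [1,2,3,4] (even length);
//     flags = TERMINATOR_FLAG, so toPacked() yields {0x20, 0x12, 0x34}.
//   - singleHex(0xA) is a non-terminal single nibble (odd length);
//     flags = ODD_OFFSET_FLAG, so toPacked() yields {0x1A}.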
public byte[] toNormal() {
if ((off & 1) != 0) throw new RuntimeException("Can't convert a key with odd number of hexes to normal: " + this);
int arrLen = key.length - off / 2;
byte[] ret = new byte[arrLen];
System.arraycopy(key, key.length - arrLen, ret, 0, arrLen);
return ret;
}
public boolean isTerminal() {
return terminal;
}
public boolean isEmpty() {
return getLength() == 0;
}
public TrieKey shift(int hexCnt) {
return new TrieKey(this.key, off + hexCnt, terminal);
}
public TrieKey getCommonPrefix(TrieKey k) {
// TODO can be optimized
int prefixLen = 0;
int thisLength = getLength();
int kLength = k.getLength();
while (prefixLen < thisLength && prefixLen < kLength && getHex(prefixLen) == k.getHex(prefixLen))
prefixLen++;
byte[] prefixKey = new byte[(prefixLen + 1) >> 1];
TrieKey ret = new TrieKey(prefixKey, (prefixLen & 1) == 0 ? 0 : 1,
prefixLen == getLength() && prefixLen == k.getLength() && terminal && k.isTerminal());
for (int i = 0; i < prefixLen; i++) {
ret.setHex(i, k.getHex(i));
}
return ret;
}
public TrieKey matchAndShift(TrieKey k) {
int len = getLength();
int kLen = k.getLength();
if (len < kLen) return null;
if ((off & 1) == (k.off & 1)) {
// optimization to compare whole key bytes
if ((off & 1) == 1) {
if (getHex(0) != k.getHex(0)) return null;
}
int idx1 = (off + 1) >> 1;
int idx2 = (k.off + 1) >> 1;
int l = kLen >> 1;
for (int i = 0; i < l; i++, idx1++, idx2++) {
if (key[idx1] != k.key[idx2]) return null;
}
} else {
for (int i = 0; i < kLen; i++) {
if (getHex(i) != k.getHex(i)) return null;
}
}
return shift(kLen);
}
public int getLength() {
return (key.length << 1) - off;
}
private void setHex(int idx, int hex) {
int byteIdx = (off + idx) >> 1;
if (((off + idx) & 1) == 0) {
key[byteIdx] &= 0x0F;
key[byteIdx] |= hex << 4;
} else {
key[byteIdx] &= 0xF0;
key[byteIdx] |= hex;
}
}
public int getHex(int idx) {
byte b = key[(off + idx) >> 1];
return (((off + idx) & 1) == 0 ? (b >> 4) : b) & 0xF;
}
public TrieKey concat(TrieKey k) {
if (isTerminal()) throw new RuntimeException("Can't append to a terminal key: " + this + " + " + k);
int len = getLength();
int kLen = k.getLength();
int newLen = len + kLen;
byte[] newKeyBytes = new byte[(newLen + 1) >> 1];
TrieKey ret = new TrieKey(newKeyBytes, newLen & 1, k.isTerminal());
for (int i = 0; i < len; i++) {
ret.setHex(i, getHex(i));
}
for (int i = 0; i < kLen; i++) {
ret.setHex(len + i, k.getHex(i));
}
return ret;
}
@Override
public boolean equals(Object obj) {
TrieKey k = (TrieKey) obj;
int len = getLength();
if (len != k.getLength()) return false;
// TODO can be optimized
for (int i = 0; i < len; i++) {
if (getHex(i) != k.getHex(i)) return false;
}
return isTerminal() == k.isTerminal();
}
@Override
public String toString() {
return toHexString(key).substring(off) + (isTerminal() ? "T" : "");
}
}
| 6,022
| 31.556757
| 122
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/trie/Trie.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.trie;
import org.ethereum.datasource.Source;
/**
* Created by Anton Nashatyrev on 05.10.2016.
*/
public interface Trie<V> extends Source<byte[], V> {
byte[] getRootHash();
void setRoot(byte[] root);
/**
* Recursively delete all nodes from root
*/
void clear();
}
| 1,107
| 29.777778
| 80
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/trie/CollectFullSetOfNodes.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.trie;
import org.ethereum.db.ByteArrayWrapper;
import org.ethereum.util.Value;
import java.util.HashSet;
import java.util.Set;
/**
* @author Roman Mandeleil
* @since 29.08.2014
*/
public class CollectFullSetOfNodes implements TrieImpl.ScanAction {
Set<ByteArrayWrapper> nodes = new HashSet<>();
@Override
public void doOnNode(byte[] hash, TrieImpl.Node node) {
nodes.add(new ByteArrayWrapper(hash));
}
@Override
public void doOnValue(byte[] nodeHash, TrieImpl.Node node, byte[] key, byte[] value) {}
public Set<ByteArrayWrapper> getCollectedHashes() {
return nodes;
}
}
| 1,438
| 30.977778
| 91
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/trie/TraceAllNodes.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.trie;
import org.ethereum.util.Value;
import org.spongycastle.util.encoders.Hex;
/**
* @author Roman Mandeleil
* @since 29.08.2014
*/
public class TraceAllNodes implements TrieImpl.ScanAction {
StringBuilder output = new StringBuilder();
@Override
public void doOnNode(byte[] hash, TrieImpl.Node node) {
output.append(Hex.toHexString(hash)).append(" ==> ").append(node.toString()).append("\n");
}
@Override
public void doOnValue(byte[] nodeHash, TrieImpl.Node node, byte[] key, byte[] value) {}
public String getOutput() {
return output.toString();
}
}
| 1,423
| 30.644444
| 98
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/vm/MessageCall.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.vm;
/**
* A wrapper for a message call from a contract to another account.
* This can either be a normal CALL, CALLCODE, DELEGATECALL or POST call.
*/
public class MessageCall {
/**
* Type of internal call. Either CALL, CALLCODE or POST
*/
private final OpCode type;
/**
* gas to pay for the call; remaining gas will be refunded to the caller
*/
private final DataWord gas;
/**
* address of the account whose code is to be called
*/
private final DataWord codeAddress;
/**
* the value that can be transferred along with the code execution
*/
private final DataWord endowment;
/**
* start of memory to be input data to the call
*/
private final DataWord inDataOffs;
/**
* size of memory to be input data to the call
*/
private final DataWord inDataSize;
/**
* start of memory to be output of the call
*/
private DataWord outDataOffs;
/**
* size of memory to be output data to the call
*/
private DataWord outDataSize;
public MessageCall(OpCode type, DataWord gas, DataWord codeAddress,
DataWord endowment, DataWord inDataOffs, DataWord inDataSize) {
this.type = type;
this.gas = gas;
this.codeAddress = codeAddress;
this.endowment = endowment;
this.inDataOffs = inDataOffs;
this.inDataSize = inDataSize;
}
public MessageCall(OpCode type, DataWord gas, DataWord codeAddress,
DataWord endowment, DataWord inDataOffs, DataWord inDataSize,
DataWord outDataOffs, DataWord outDataSize) {
this(type, gas, codeAddress, endowment, inDataOffs, inDataSize);
this.outDataOffs = outDataOffs;
this.outDataSize = outDataSize;
}
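// Construction sketch for the CALL case described in the class comment (illustrative only; the
// DataWord arguments are placeholders and OpCode.CALL is assumed to be the corresponding opcode
// constant used elsewhere in this package):
//
//   MessageCall call = new MessageCall(OpCode.CALL, gas, codeAddress, endowment,
//           inDataOffs, inDataSize, outDataOffs, outDataSize);
//   // call.getGas(), call.getCodeAddress(), etc. then expose the captured parameters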
public OpCode getType() {
return type;
}
public DataWord getGas() {
return gas;
}
public DataWord getCodeAddress() {
return codeAddress;
}
public DataWord getEndowment() {
return endowment;
}
public DataWord getInDataOffs() {
return inDataOffs;
}
public DataWord getInDataSize() {
return inDataSize;
}
public DataWord getOutDataOffs() {
return outDataOffs;
}
public DataWord getOutDataSize() {
return outDataSize;
}
}
| 3,171
| 27.836364
| 86
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/vm/GasCost.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.vm;
/**
* The fundamental network cost unit. Paid for exclusively by Ether, which is converted
* freely to and from Gas as required. Gas does not exist outside of the internal Ethereum
* computation engine; its price is set by the Transaction and miners are free to
* ignore Transactions whose Gas price is too low.
*/
public class GasCost {
/* backwards compatibility, remove eventually */
private final int STEP = 1;
private final int SSTORE = 300;
/* backwards compatibility, remove eventually */
private final int ZEROSTEP = 0;
private final int QUICKSTEP = 2;
private final int FASTESTSTEP = 3;
private final int FASTSTEP = 5;
private final int MIDSTEP = 8;
private final int SLOWSTEP = 10;
private final int EXTSTEP = 20;
private final int GENESISGASLIMIT = 1000000;
private final int MINGASLIMIT = 125000;
private final int BALANCE = 20;
private final int SHA3 = 30;
private final int SHA3_WORD = 6;
private final int SLOAD = 50;
private final int STOP = 0;
private final int SUICIDE = 0;
private final int CLEAR_SSTORE = 5000;
private final int SET_SSTORE = 20000;
private final int RESET_SSTORE = 5000;
private final int REFUND_SSTORE = 15000;
private final int REUSE_SSTORE = 200;
private final int CREATE = 32000;
private final int JUMPDEST = 1;
private final int CREATE_DATA_BYTE = 5;
private final int CALL = 40;
private final int STIPEND_CALL = 2300;
private final int VT_CALL = 9000; //value transfer call
private final int NEW_ACCT_CALL = 25000; //new account call
private final int MEMORY = 3;
private final int SUICIDE_REFUND = 24000;
private final int QUAD_COEFF_DIV = 512;
private final int CREATE_DATA = 200;
private final int TX_NO_ZERO_DATA = 68;
private final int TX_ZERO_DATA = 4;
private final int TRANSACTION = 21000;
private final int TRANSACTION_CREATE_CONTRACT = 53000;
private final int LOG_GAS = 375;
private final int LOG_DATA_GAS = 8;
private final int LOG_TOPIC_GAS = 375;
private final int COPY_GAS = 3;
private final int EXP_GAS = 10;
private final int EXP_BYTE_GAS = 10;
private final int IDENTITY = 15;
private final int IDENTITY_WORD = 3;
private final int RIPEMD160 = 600;
private final int RIPEMD160_WORD = 120;
private final int SHA256 = 60;
private final int SHA256_WORD = 12;
private final int EC_RECOVER = 3000;
private final int EXT_CODE_SIZE = 20;
private final int EXT_CODE_COPY = 20;
private final int EXT_CODE_HASH = 400;
private final int NEW_ACCT_SUICIDE = 0;
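// Rough worked example of how the transaction data constants above combine (a clarifying sketch,
// assuming the usual intrinsic-gas rule of charging per data byte; not a method of this class):
// a plain transaction carrying 10 zero bytes and 5 non-zero bytes of data would cost
//   TRANSACTION + 10 * TX_ZERO_DATA + 5 * TX_NO_ZERO_DATA = 21000 + 10 * 4 + 5 * 68 = 21380 gas.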
public int getSTEP() {
return STEP;
}
public int getSSTORE() {
return SSTORE;
}
public int getZEROSTEP() {
return ZEROSTEP;
}
public int getQUICKSTEP() {
return QUICKSTEP;
}
public int getFASTESTSTEP() {
return FASTESTSTEP;
}
public int getFASTSTEP() {
return FASTSTEP;
}
public int getMIDSTEP() {
return MIDSTEP;
}
public int getSLOWSTEP() {
return SLOWSTEP;
}
public int getEXTSTEP() {
return EXTSTEP;
}
public int getGENESISGASLIMIT() {
return GENESISGASLIMIT;
}
public int getMINGASLIMIT() {
return MINGASLIMIT;
}
public int getBALANCE() {
return BALANCE;
}
public int getSHA3() {
return SHA3;
}
public int getSHA3_WORD() {
return SHA3_WORD;
}
public int getSLOAD() {
return SLOAD;
}
public int getSTOP() {
return STOP;
}
public int getSUICIDE() {
return SUICIDE;
}
public int getCLEAR_SSTORE() {
return CLEAR_SSTORE;
}
public int getSET_SSTORE() {
return SET_SSTORE;
}
public int getRESET_SSTORE() {
return RESET_SSTORE;
}
public int getREFUND_SSTORE() {
return REFUND_SSTORE;
}
public int getREUSE_SSTORE() {
return REUSE_SSTORE;
}
public int getCREATE() {
return CREATE;
}
public int getJUMPDEST() {
return JUMPDEST;
}
public int getCREATE_DATA_BYTE() {
return CREATE_DATA_BYTE;
}
public int getCALL() {
return CALL;
}
public int getSTIPEND_CALL() {
return STIPEND_CALL;
}
public int getVT_CALL() {
return VT_CALL;
}
public int getNEW_ACCT_CALL() {
return NEW_ACCT_CALL;
}
public int getNEW_ACCT_SUICIDE() {
return NEW_ACCT_SUICIDE;
}
public int getMEMORY() {
return MEMORY;
}
public int getSUICIDE_REFUND() {
return SUICIDE_REFUND;
}
public int getQUAD_COEFF_DIV() {
return QUAD_COEFF_DIV;
}
public int getCREATE_DATA() {
return CREATE_DATA;
}
public int getTX_NO_ZERO_DATA() {
return TX_NO_ZERO_DATA;
}
public int getTX_ZERO_DATA() {
return TX_ZERO_DATA;
}
public int getTRANSACTION() {
return TRANSACTION;
}
public int getTRANSACTION_CREATE_CONTRACT() {
return TRANSACTION_CREATE_CONTRACT;
}
public int getLOG_GAS() {
return LOG_GAS;
}
public int getLOG_DATA_GAS() {
return LOG_DATA_GAS;
}
public int getLOG_TOPIC_GAS() {
return LOG_TOPIC_GAS;
}
public int getCOPY_GAS() {
return COPY_GAS;
}
public int getEXP_GAS() {
return EXP_GAS;
}
public int getEXP_BYTE_GAS() {
return EXP_BYTE_GAS;
}
public int getIDENTITY() {
return IDENTITY;
}
public int getIDENTITY_WORD() {
return IDENTITY_WORD;
}
public int getRIPEMD160() {
return RIPEMD160;
}
public int getRIPEMD160_WORD() {
return RIPEMD160_WORD;
}
public int getSHA256() {
return SHA256;
}
public int getSHA256_WORD() {
return SHA256_WORD;
}
public int getEC_RECOVER() {
return EC_RECOVER;
}
public int getEXT_CODE_SIZE() {
return EXT_CODE_SIZE;
}
public int getEXT_CODE_COPY() {
return EXT_CODE_COPY;
}
public int getEXT_CODE_HASH() {
return EXT_CODE_HASH;
}
}
| 7,101
| 22.285246
| 90
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/vm/VMUtils.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.vm;
import org.ethereum.config.SystemProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.Inflater;
import java.util.zip.InflaterOutputStream;
import static java.lang.String.format;
import static org.apache.commons.codec.binary.Base64.decodeBase64;
import static org.apache.commons.codec.binary.Base64.encodeBase64String;
import static org.springframework.util.StringUtils.isEmpty;
public final class VMUtils {
private static final Logger LOGGER = LoggerFactory.getLogger("VM");
private VMUtils() {
}
public static void closeQuietly(Closeable closeable) {
try {
if (closeable != null) {
closeable.close();
}
} catch (IOException ioe) {
// ignore
}
}
private static File createProgramTraceFile(SystemProperties config, String txHash) {
File result = null;
if (config.vmTrace() && !isEmpty(config.vmTraceDir())) {
File file = new File(new File(config.databaseDir(), config.vmTraceDir()), txHash + ".json");
if (file.exists()) {
if (file.isFile() && file.canWrite()) {
result = file;
}
} else {
try {
file.getParentFile().mkdirs();
file.createNewFile();
result = file;
} catch (IOException e) {
// ignored
}
}
}
return result;
}
private static void writeStringToFile(File file, String data) {
OutputStream out = null;
try {
out = new FileOutputStream(file);
if (data != null) {
out.write(data.getBytes("UTF-8"));
}
} catch (Exception e){
LOGGER.error(format("Cannot write to file '%s': ", file.getAbsolutePath()), e);
} finally {
closeQuietly(out);
}
}
public static void saveProgramTraceFile(SystemProperties config, String txHash, String content) {
File file = createProgramTraceFile(config, txHash);
if (file != null) {
writeStringToFile(file, content);
}
}
private static final int BUF_SIZE = 4096;
private static void write(InputStream in, OutputStream out, int bufSize) throws IOException {
try {
byte[] buf = new byte[bufSize];
for (int count = in.read(buf); count != -1; count = in.read(buf)) {
out.write(buf, 0, count);
}
} finally {
closeQuietly(in);
closeQuietly(out);
}
}
public static byte[] compress(byte[] bytes) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
ByteArrayInputStream in = new ByteArrayInputStream(bytes);
DeflaterOutputStream out = new DeflaterOutputStream(baos, new Deflater(), BUF_SIZE);
write(in, out, BUF_SIZE);
return baos.toByteArray();
}
public static byte[] compress(String content) throws IOException {
return compress(content.getBytes("UTF-8"));
}
public static byte[] decompress(byte[] data) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream(data.length);
ByteArrayInputStream in = new ByteArrayInputStream(data);
InflaterOutputStream out = new InflaterOutputStream(baos, new Inflater(), BUF_SIZE);
write(in, out, BUF_SIZE);
return baos.toByteArray();
}
public static String zipAndEncode(String content) {
try {
return encodeBase64String(compress(content));
} catch (Exception e) {
LOGGER.error("Cannot zip or encode: ", e);
return content;
}
}
public static String unzipAndDecode(String content) {
try {
byte[] decoded = decodeBase64(content);
return new String(decompress(decoded), "UTF-8");
} catch (Exception e) {
LOGGER.error("Cannot unzip or decode: ", e);
return content;
}
}
/**
* Returns number of VM words required to hold data of size {@code size}
*/
public static long getSizeInWords(long size) {
return size == 0 ? 0 : (size - 1) / 32 + 1;
}
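    // Illustration of the word rounding above (illustrative values, not part of the original API):
    // getSizeInWords(0) == 0, getSizeInWords(1) == 1, getSizeInWords(32) == 1, getSizeInWords(33) == 2.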
}
| 5,254
| 31.042683
| 104
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/vm/PrecompiledContracts.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.vm;
import org.apache.commons.lang3.tuple.Pair;
import org.ethereum.config.BlockchainConfig;
import org.ethereum.crypto.ECKey;
import org.ethereum.crypto.HashUtil;
import org.ethereum.crypto.zksnark.*;
import org.ethereum.util.BIUtil;
import java.math.BigInteger;
import static org.ethereum.util.BIUtil.addSafely;
import static org.ethereum.util.BIUtil.isLessThan;
import static org.ethereum.util.BIUtil.isZero;
import static org.ethereum.util.ByteUtil.*;
import static org.ethereum.vm.VMUtils.getSizeInWords;
/**
* @author Roman Mandeleil
* @since 09.01.2015
*/
public class PrecompiledContracts {
private static final ECRecover ecRecover = new ECRecover();
private static final Sha256 sha256 = new Sha256();
private static final Ripempd160 ripempd160 = new Ripempd160();
private static final Identity identity = new Identity();
private static final ModExp modExp = new ModExp();
private static final BN128Addition altBN128Add = new BN128Addition();
private static final BN128Multiplication altBN128Mul = new BN128Multiplication();
private static final BN128Pairing altBN128Pairing = new BN128Pairing();
private static final DataWord ecRecoverAddr = DataWord.of("0000000000000000000000000000000000000000000000000000000000000001");
private static final DataWord sha256Addr = DataWord.of("0000000000000000000000000000000000000000000000000000000000000002");
private static final DataWord ripempd160Addr = DataWord.of("0000000000000000000000000000000000000000000000000000000000000003");
private static final DataWord identityAddr = DataWord.of("0000000000000000000000000000000000000000000000000000000000000004");
private static final DataWord modExpAddr = DataWord.of("0000000000000000000000000000000000000000000000000000000000000005");
private static final DataWord altBN128AddAddr = DataWord.of("0000000000000000000000000000000000000000000000000000000000000006");
private static final DataWord altBN128MulAddr = DataWord.of("0000000000000000000000000000000000000000000000000000000000000007");
private static final DataWord altBN128PairingAddr = DataWord.of("0000000000000000000000000000000000000000000000000000000000000008");
public static PrecompiledContract getContractForAddress(DataWord address, BlockchainConfig config) {
if (address == null) return identity;
if (address.equals(ecRecoverAddr)) return ecRecover;
if (address.equals(sha256Addr)) return sha256;
if (address.equals(ripempd160Addr)) return ripempd160;
if (address.equals(identityAddr)) return identity;
// Byzantium precompiles
if (address.equals(modExpAddr) && config.eip198()) return modExp;
if (address.equals(altBN128AddAddr) && config.eip213()) return altBN128Add;
if (address.equals(altBN128MulAddr) && config.eip213()) return altBN128Mul;
if (address.equals(altBN128PairingAddr) && config.eip212()) return altBN128Pairing;
return null;
}
private static byte[] encodeRes(byte[] w1, byte[] w2) {
byte[] res = new byte[64];
w1 = stripLeadingZeroes(w1);
w2 = stripLeadingZeroes(w2);
System.arraycopy(w1, 0, res, 32 - w1.length, w1.length);
System.arraycopy(w2, 0, res, 64 - w2.length, w2.length);
return res;
}
public static abstract class PrecompiledContract {
public abstract long getGasForData(byte[] data);
public abstract Pair<Boolean, byte[]> execute(byte[] data);
}
public static class Identity extends PrecompiledContract {
public Identity() {
}
@Override
public long getGasForData(byte[] data) {
            // gas charge for the execution:
            // base fee of 15 plus 3 for each 32-byte word of input (rounded up)
if (data == null) return 15;
return 15 + getSizeInWords(data.length) * 3;
}
@Override
public Pair<Boolean, byte[]> execute(byte[] data) {
return Pair.of(true, data);
}
}
public static class Sha256 extends PrecompiledContract {
@Override
public long getGasForData(byte[] data) {
            // gas charge for the execution:
            // base fee of 60 plus 12 for each 32-byte word of input (rounded up)
if (data == null) return 60;
return 60 + getSizeInWords(data.length) * 12;
}
@Override
public Pair<Boolean, byte[]> execute(byte[] data) {
if (data == null) return Pair.of(true, HashUtil.sha256(EMPTY_BYTE_ARRAY));
return Pair.of(true, HashUtil.sha256(data));
}
}
public static class Ripempd160 extends PrecompiledContract {
@Override
public long getGasForData(byte[] data) {
// TODO #POC9 Replace magic numbers with constants
            // gas charge for the execution:
            // base fee of 600 plus 120 for each 32-byte word of input (rounded up)
if (data == null) return 600;
return 600 + getSizeInWords(data.length) * 120;
}
@Override
public Pair<Boolean, byte[]> execute(byte[] data) {
byte[] result = null;
if (data == null) result = HashUtil.ripemd160(EMPTY_BYTE_ARRAY);
else result = HashUtil.ripemd160(data);
return Pair.of(true, DataWord.of(result).getData());
}
}
public static class ECRecover extends PrecompiledContract {
@Override
public long getGasForData(byte[] data) {
return 3000;
}
@Override
public Pair<Boolean, byte[]> execute(byte[] data) {
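            // Expected input layout (128 bytes):
            //   [0..31] message hash, [32..63] v (27 or 28, left-padded to a full word),
            //   [64..95] r, [96..127] s
            // Malformed input falls through the catch block below and yields an empty result.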
byte[] h = new byte[32];
byte[] v = new byte[32];
byte[] r = new byte[32];
byte[] s = new byte[32];
DataWord out = null;
try {
System.arraycopy(data, 0, h, 0, 32);
System.arraycopy(data, 32, v, 0, 32);
System.arraycopy(data, 64, r, 0, 32);
int sLength = data.length < 128 ? data.length - 96 : 32;
System.arraycopy(data, 96, s, 0, sLength);
ECKey.ECDSASignature signature = ECKey.ECDSASignature.fromComponents(r, s, v[31]);
if (validateV(v) && signature.validateComponents()) {
out = DataWord.of(ECKey.signatureToAddress(h, signature));
}
} catch (Throwable any) {
}
if (out == null) {
return Pair.of(true, EMPTY_BYTE_ARRAY);
} else {
return Pair.of(true, out.getData());
}
}
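        // v is supplied as a full 32-byte word; it is accepted only if the first 31 bytes are zero
        // (the 27/28 range itself is checked by signature.validateComponents()).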
private static boolean validateV(byte[] v) {
for (int i = 0; i < v.length - 1; i++) {
if (v[i] != 0) return false;
}
return true;
}
}
/**
* Computes modular exponentiation on big numbers
*
* format of data[] array:
* [length_of_BASE] [length_of_EXPONENT] [length_of_MODULUS] [BASE] [EXPONENT] [MODULUS]
* where every length is a 32-byte left-padded integer representing the number of bytes.
* Call data is assumed to be infinitely right-padded with zero bytes.
*
* Returns an output as a byte array with the same length as the modulus
*/
public static class ModExp extends PrecompiledContract {
private static final BigInteger GQUAD_DIVISOR = BigInteger.valueOf(20);
private static final int ARGS_OFFSET = 32 * 3; // addresses length part
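        // Worked example (illustrative values, not from the original source):
        // to compute 3^5 mod 7 the call data is three 32-byte lengths (1, 1, 1)
        // followed by the bytes 0x03, 0x05, 0x07, i.e. 96 + 3 bytes in total;
        // the result is 0x05, left-padded to the modulus length (1 byte here).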
@Override
public long getGasForData(byte[] data) {
if (data == null) data = EMPTY_BYTE_ARRAY;
int baseLen = parseLen(data, 0);
int expLen = parseLen(data, 1);
int modLen = parseLen(data, 2);
byte[] expHighBytes = parseBytes(data, addSafely(ARGS_OFFSET, baseLen), Math.min(expLen, 32));
long multComplexity = getMultComplexity(Math.max(baseLen, modLen));
long adjExpLen = getAdjustedExponentLength(expHighBytes, expLen);
// use big numbers to stay safe in case of overflow
BigInteger gas = BigInteger.valueOf(multComplexity)
.multiply(BigInteger.valueOf(Math.max(adjExpLen, 1)))
.divide(GQUAD_DIVISOR);
return isLessThan(gas, BigInteger.valueOf(Long.MAX_VALUE)) ? gas.longValue() : Long.MAX_VALUE;
}
@Override
public Pair<Boolean, byte[]> execute(byte[] data) {
if (data == null)
return Pair.of(true, EMPTY_BYTE_ARRAY);
int baseLen = parseLen(data, 0);
int expLen = parseLen(data, 1);
int modLen = parseLen(data, 2);
BigInteger base = parseArg(data, ARGS_OFFSET, baseLen);
BigInteger exp = parseArg(data, addSafely(ARGS_OFFSET, baseLen), expLen);
BigInteger mod = parseArg(data, addSafely(addSafely(ARGS_OFFSET, baseLen), expLen), modLen);
// check if modulus is zero
if (isZero(mod))
return Pair.of(true, new byte[modLen]); // should keep length of the result
byte[] res = stripLeadingZeroes(base.modPow(exp, mod).toByteArray());
// adjust result to the same length as the modulus has
if (res.length < modLen) {
byte[] adjRes = new byte[modLen];
System.arraycopy(res, 0, adjRes, modLen - res.length, res.length);
return Pair.of(true, adjRes);
} else {
return Pair.of(true, res);
}
}
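        // Multiplication complexity function as defined by EIP-198:
        // x^2 for x <= 64, x^2/4 + 96x - 3072 for x <= 1024, x^2/16 + 480x - 199680 otherwise.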
private long getMultComplexity(long x) {
long x2 = x * x;
if (x <= 64) return x2;
if (x <= 1024) return x2 / 4 + 96 * x - 3072;
return x2 / 16 + 480 * x - 199680;
}
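        // Adjusted exponent length per EIP-198: the index of the highest set bit in the first
        // 32 bytes of the exponent, plus 8 bits for every exponent byte beyond the first 32.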
private long getAdjustedExponentLength(byte[] expHighBytes, long expLen) {
int leadingZeros = numberOfLeadingZeros(expHighBytes);
int highestBit = 8 * expHighBytes.length - leadingZeros;
// set index basement to zero
if (highestBit > 0) highestBit--;
if (expLen <= 32) {
return highestBit;
} else {
return 8 * (expLen - 32) + highestBit;
}
}
private int parseLen(byte[] data, int idx) {
byte[] bytes = parseBytes(data, 32 * idx, 32);
return DataWord.of(bytes).intValueSafe();
}
private BigInteger parseArg(byte[] data, int offset, int len) {
byte[] bytes = parseBytes(data, offset, len);
return bytesToBigInteger(bytes);
}
}
/**
* Computes point addition on Barreto–Naehrig curve.
* See {@link BN128Fp} for details<br/>
* <br/>
*
* input data[]:<br/>
* two points encoded as (x, y), where x and y are 32-byte left-padded integers,<br/>
* if input is shorter than expected, it's assumed to be right-padded with zero bytes<br/>
* <br/>
*
* output:<br/>
* resulting point (x', y'), where x and y encoded as 32-byte left-padded integers<br/>
*
*/
public static class BN128Addition extends PrecompiledContract {
@Override
public long getGasForData(byte[] data) {
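            // flat Byzantium-era (EIP-196) price for the ECADD precompile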
return 500;
}
@Override
public Pair<Boolean, byte[]> execute(byte[] data) {
if (data == null)
data = EMPTY_BYTE_ARRAY;
byte[] x1 = parseWord(data, 0);
byte[] y1 = parseWord(data, 1);
byte[] x2 = parseWord(data, 2);
byte[] y2 = parseWord(data, 3);
BN128<Fp> p1 = BN128Fp.create(x1, y1);
if (p1 == null)
return Pair.of(false, EMPTY_BYTE_ARRAY);
BN128<Fp> p2 = BN128Fp.create(x2, y2);
if (p2 == null)
return Pair.of(false, EMPTY_BYTE_ARRAY);
BN128<Fp> res = p1.add(p2).toEthNotation();
return Pair.of(true, encodeRes(res.x().bytes(), res.y().bytes()));
}
}
/**
* Computes multiplication of scalar value on a point belonging to Barreto–Naehrig curve.
* See {@link BN128Fp} for details<br/>
* <br/>
*
* input data[]:<br/>
* point encoded as (x, y) is followed by scalar s, where x, y and s are 32-byte left-padded integers,<br/>
* if input is shorter than expected, it's assumed to be right-padded with zero bytes<br/>
* <br/>
*
* output:<br/>
* resulting point (x', y'), where x and y encoded as 32-byte left-padded integers<br/>
*
*/
public static class BN128Multiplication extends PrecompiledContract {
@Override
public long getGasForData(byte[] data) {
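            // flat Byzantium-era (EIP-196) price for the ECMUL precompile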
return 40000;
}
@Override
public Pair<Boolean, byte[]> execute(byte[] data) {
if (data == null)
data = EMPTY_BYTE_ARRAY;
byte[] x = parseWord(data, 0);
byte[] y = parseWord(data, 1);
byte[] s = parseWord(data, 2);
BN128<Fp> p = BN128Fp.create(x, y);
if (p == null)
return Pair.of(false, EMPTY_BYTE_ARRAY);
BN128<Fp> res = p.mul(BIUtil.toBI(s)).toEthNotation();
return Pair.of(true, encodeRes(res.x().bytes(), res.y().bytes()));
}
}
/**
* Computes pairing check. <br/>
* See {@link PairingCheck} for details.<br/>
* <br/>
*
* Input data[]: <br/>
* an array of points (a1, b1, ... , ak, bk), <br/>
* where "ai" is a point of {@link BN128Fp} curve and encoded as two 32-byte left-padded integers (x; y) <br/>
* "bi" is a point of {@link BN128G2} curve and encoded as four 32-byte left-padded integers {@code (ai + b; ci + d)},
* each coordinate of the point is a big-endian {@link Fp2} number, so {@code b} precedes {@code a} in the encoding:
* {@code (b, a; d, c)} <br/>
 * thus each pair (ai, bi) occupies 192 bytes; if {@code data.length} is not a multiple of 192 then execution fails <br/>
* the number of pairs is derived from input length by dividing it by 192 (the length of a pair) <br/>
* <br/>
*
* output: <br/>
* pairing product which is either 0 or 1, encoded as 32-byte left-padded integer <br/>
*
*/
public static class BN128Pairing extends PrecompiledContract {
private static final int PAIR_SIZE = 192;
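        // Each 192-byte pair is laid out as six 32-byte words: the G1 point (x, y)
        // followed by the G2 point encoded as (b, a, d, c), matching decodePair() below.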
@Override
public long getGasForData(byte[] data) {
if (data == null) return 100000;
return 80000 * (data.length / PAIR_SIZE) + 100000;
}
@Override
public Pair<Boolean, byte[]> execute(byte[] data) {
if (data == null)
data = EMPTY_BYTE_ARRAY;
// fail if input len is not a multiple of PAIR_SIZE
if (data.length % PAIR_SIZE > 0)
return Pair.of(false, EMPTY_BYTE_ARRAY);
PairingCheck check = PairingCheck.create();
// iterating over all pairs
for (int offset = 0; offset < data.length; offset += PAIR_SIZE) {
Pair<BN128G1, BN128G2> pair = decodePair(data, offset);
// fail if decoding has failed
if (pair == null)
return Pair.of(false, EMPTY_BYTE_ARRAY);
check.addPair(pair.getLeft(), pair.getRight());
}
check.run();
int result = check.result();
return Pair.of(true, DataWord.of(result).getData());
}
private Pair<BN128G1, BN128G2> decodePair(byte[] in, int offset) {
byte[] x = parseWord(in, offset, 0);
byte[] y = parseWord(in, offset, 1);
BN128G1 p1 = BN128G1.create(x, y);
// fail if point is invalid
if (p1 == null) return null;
// (b, a)
byte[] b = parseWord(in, offset, 2);
byte[] a = parseWord(in, offset, 3);
// (d, c)
byte[] d = parseWord(in, offset, 4);
byte[] c = parseWord(in, offset, 5);
BN128G2 p2 = BN128G2.create(a, b, c, d);
// fail if point is invalid
if (p2 == null) return null;
return Pair.of(p1, p2);
}
}
}
| 17,225
| 34.012195
| 136
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/vm/VM.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.vm;
import org.ethereum.config.BlockchainConfig;
import org.ethereum.config.SystemProperties;
import org.ethereum.db.ContractDetails;
import org.ethereum.vm.hook.VMHook;
import org.ethereum.vm.program.Program;
import org.ethereum.vm.program.Stack;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Stream;
import static org.ethereum.crypto.HashUtil.sha3;
import static org.ethereum.util.ByteUtil.EMPTY_BYTE_ARRAY;
import static org.ethereum.util.ByteUtil.toHexString;
import static org.ethereum.vm.OpCode.*;
import static org.ethereum.vm.VMUtils.getSizeInWords;
/**
* The Ethereum Virtual Machine (EVM) is responsible for initialization
* and executing a transaction on a contract.
*
* It is a quasi-Turing-complete machine; the quasi qualification
* comes from the fact that the computation is intrinsically bounded
* through a parameter, gas, which limits the total amount of computation done.
*
* The EVM is a simple stack-based architecture. The word size of the machine
* (and thus size of stack item) is 256-bit. This was chosen to facilitate
* the SHA3-256 hash scheme and elliptic-curve computations. The memory model
* is a simple word-addressed byte array. The stack has an unlimited size.
* The machine also has an independent storage model; this is similar in concept
* to the memory but rather than a byte array, it is a word-addressable word array.
*
 * Unlike memory, which is volatile, storage is non-volatile and is
* maintained as part of the system state. All locations in both storage
* and memory are well-defined initially as zero.
*
* The machine does not follow the standard von Neumann architecture.
* Rather than storing program code in generally-accessible memory or storage,
 * it is stored separately in a virtual ROM, interactable only through
* a specialised instruction.
*
* The machine can have exceptional execution for several reasons,
* including stack underflows and invalid instructions. These unambiguously
* and validly result in immediate halting of the machine with all state changes
* left intact. The one piece of exceptional execution that does not leave
* state changes intact is the out-of-gas (OOG) exception.
*
* Here, the machine halts immediately and reports the issue to
* the execution agent (either the transaction processor or, recursively,
 * the spawning execution environment), which will deal with it separately.
*
* @author Roman Mandeleil
* @since 01.06.2014
*/
public class VM {
private static final Logger logger = LoggerFactory.getLogger("VM");
private static final Logger dumpLogger = LoggerFactory.getLogger("dump");
private static BigInteger _32_ = BigInteger.valueOf(32);
private static final String logString = "{} Op: [{}] Gas: [{}] Deep: [{}] Hint: [{}]";
    // maximum memory size that could never be paid for;
    // used to avoid expensive BigInteger arithmetic
private static BigInteger MAX_MEM_SIZE = BigInteger.valueOf(Integer.MAX_VALUE);
/* Keeps track of the number of steps performed in this VM */
private int vmCounter = 0;
private boolean vmTrace;
private long dumpBlock;
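    // Opcodes introduced by later hard forks; each entry maps the opcode to the config
    // predicate that tells whether it is enabled for the block being processed.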
private static final Map<OpCode, Function<BlockchainConfig, Boolean>> opValidators = new HashMap<OpCode, Function<BlockchainConfig, Boolean>>()
{{
put(DELEGATECALL, (config) -> config.getConstants().hasDelegateCallOpcode());
put(REVERT, BlockchainConfig::eip206);
put(RETURNDATACOPY, BlockchainConfig::eip211);
put(RETURNDATASIZE, BlockchainConfig::eip211);
put(STATICCALL, BlockchainConfig::eip214);
put(EXTCODEHASH, BlockchainConfig::eip1052);
put(SHL, BlockchainConfig::eip145);
put(SHR, BlockchainConfig::eip145);
put(SAR, BlockchainConfig::eip145);
put(CREATE2, BlockchainConfig::eip1014);
}};
private final SystemProperties config;
// deprecated field that holds VM hook. Will be removed in the future releases.
private static VMHook deprecatedHook = VMHook.EMPTY;
private final boolean hasHooks;
private final VMHook[] hooks;
public VM() {
this(SystemProperties.getDefault(), VMHook.EMPTY);
}
@Autowired
public VM(SystemProperties config, VMHook hook) {
this.config = config;
this.vmTrace = config.vmTrace();
this.dumpBlock = config.dumpBlock();
this.hooks = Stream.of(deprecatedHook, hook)
.filter(h -> !h.isEmpty())
.toArray(VMHook[]::new);
this.hasHooks = this.hooks.length > 0;
}
private void onHookEvent(Consumer<VMHook> consumer) {
for (VMHook hook : this.hooks) {
consumer.accept(hook);
}
}
private long calcMemGas(GasCost gasCosts, long oldMemSize, BigInteger newMemSize, long copySize) {
long gasCost = 0;
// Avoid overflows
if (newMemSize.compareTo(MAX_MEM_SIZE) > 0) {
throw Program.Exception.gasOverflow(newMemSize, MAX_MEM_SIZE);
}
// memory gas calc
long memoryUsage = (newMemSize.longValue() + 31) / 32 * 32;
if (memoryUsage > oldMemSize) {
long memWords = (memoryUsage / 32);
long memWordsOld = (oldMemSize / 32);
//TODO #POC9 c_quadCoeffDiv = 512, this should be a constant, not magic number
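                // total memory fee = MEMORY * words + words^2 / 512 (quadratic term, c_quadCoeffDiv = 512);
                // only the difference between the new size and the already paid-for size is charged here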
long memGas = (gasCosts.getMEMORY() * memWords + memWords * memWords / 512)
- (gasCosts.getMEMORY() * memWordsOld + memWordsOld * memWordsOld / 512);
gasCost += memGas;
}
if (copySize > 0) {
long copyGas = gasCosts.getCOPY_GAS() * ((copySize + 31) / 32);
gasCost += copyGas;
}
return gasCost;
}
private boolean isDeadAccount(Program program, byte[] addr) {
return !program.getStorage().isExist(addr) || program.getStorage().getAccountState(addr).isEmpty();
}
/**
* Validates whether operation is allowed
* with current blockchain config
* @param op VM operation
* @param program Current program
*/
private void validateOp(OpCode op, Program program) {
if (!(opValidators.containsKey(op))) return;
BlockchainConfig blockchainConfig = program.getBlockchainConfig();
if (!opValidators.get(op).apply(blockchainConfig)) {
throw Program.Exception.invalidOpCode(program.getCurrentOp());
}
}
public void step(Program program) {
if (vmTrace) {
program.saveOpTrace();
}
try {
BlockchainConfig blockchainConfig = program.getBlockchainConfig();
OpCode op = OpCode.code(program.getCurrentOp());
if (op == null) {
throw Program.Exception.invalidOpCode(program.getCurrentOp());
}
validateOp(op, program);
program.setLastOp(op.val());
program.verifyStackSize(op.require());
program.verifyStackOverflow(op.require(), op.ret()); //Check not exceeding stack limits
long oldMemSize = program.getMemSize();
Stack stack = program.getStack();
String hint = "";
long callGas = 0, memWords = 0; // parameters for logging
long gasCost = op.getTier().asInt();
long gasBefore = program.getGasLong();
int stepBefore = program.getPC();
GasCost gasCosts = blockchainConfig.getGasCost();
DataWord adjustedCallGas = null;
/*DEBUG #POC9 if( op.asInt() == 96 || op.asInt() == -128 || op.asInt() == 57 || op.asInt() == 115) {
//byte alphaone = 0x63;
//op = OpCode.code(alphaone);
gasCost = 3;
}
if( op.asInt() == -13 ) {
//byte alphaone = 0x63;
//op = OpCode.code(alphaone);
gasCost = 0;
}*/
// Calculate fees and spend gas
switch (op) {
case STOP:
gasCost = gasCosts.getSTOP();
break;
case SUICIDE:
gasCost = gasCosts.getSUICIDE();
DataWord suicideAddressWord = stack.get(stack.size() - 1);
if (blockchainConfig.eip161()) {
if (isDeadAccount(program, suicideAddressWord.getLast20Bytes()) &&
!program.getBalance(program.getOwnerAddress()).isZero()) {
gasCost += gasCosts.getNEW_ACCT_SUICIDE();
}
} else {
if (!program.getStorage().isExist(suicideAddressWord.getLast20Bytes())) {
gasCost += gasCosts.getNEW_ACCT_SUICIDE();
}
}
break;
case SSTORE:
DataWord currentValue = program.getCurrentValue(stack.peek());
if (currentValue == null) currentValue = DataWord.ZERO;
DataWord newValue = stack.get(stack.size() - 2);
if (blockchainConfig.eip1283()) { // Net gas metering for SSTORE
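                        // EIP-1283 case summary: no-op writes cost REUSE_SSTORE; a "fresh" write
                        // (current == original) costs SET_SSTORE when the original was zero and
                        // CLEAR_SSTORE otherwise; "dirty" writes cost REUSE_SSTORE, with refunds
                        // granted or revoked so the net cost matches the final state transition.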
if (newValue.equals(currentValue)) {
gasCost = gasCosts.getREUSE_SSTORE();
} else {
DataWord origValue = program.getOriginalValue(stack.peek());
if (origValue == null) origValue = DataWord.ZERO;
if (currentValue.equals(origValue)) {
if (origValue.isZero()) {
gasCost = gasCosts.getSET_SSTORE();
} else {
gasCost = gasCosts.getCLEAR_SSTORE();
if (newValue.isZero()) {
program.futureRefundGas(gasCosts.getREFUND_SSTORE());
}
}
} else {
gasCost = gasCosts.getREUSE_SSTORE();
if (!origValue.isZero()) {
if (currentValue.isZero()) {
program.futureRefundGas(-gasCosts.getREFUND_SSTORE());
} else if (newValue.isZero()) {
program.futureRefundGas(gasCosts.getREFUND_SSTORE());
}
}
if (origValue.equals(newValue)) {
if (origValue.isZero()) {
program.futureRefundGas(gasCosts.getSET_SSTORE() - gasCosts.getREUSE_SSTORE());
} else {
program.futureRefundGas(gasCosts.getCLEAR_SSTORE() - gasCosts.getREUSE_SSTORE());
}
}
}
}
} else { // Before EIP-1283 cost calculation
if (currentValue.isZero() && !newValue.isZero())
gasCost = gasCosts.getSET_SSTORE();
else if (!currentValue.isZero() && newValue.isZero()) {
// refund step cost policy.
program.futureRefundGas(gasCosts.getREFUND_SSTORE());
gasCost = gasCosts.getCLEAR_SSTORE();
} else {
gasCost = gasCosts.getRESET_SSTORE();
}
}
break;
case SLOAD:
gasCost = gasCosts.getSLOAD();
break;
case BALANCE:
gasCost = gasCosts.getBALANCE();
break;
// These all operate on memory and therefore potentially expand it:
case MSTORE:
gasCost += calcMemGas(gasCosts, oldMemSize, memNeeded(stack.peek(), DataWord.of(32)), 0);
break;
case MSTORE8:
gasCost += calcMemGas(gasCosts, oldMemSize, memNeeded(stack.peek(), DataWord.ONE), 0);
break;
case MLOAD:
gasCost += calcMemGas(gasCosts, oldMemSize, memNeeded(stack.peek(), DataWord.of(32)), 0);
break;
case RETURN:
case REVERT:
gasCost = gasCosts.getSTOP() + calcMemGas(gasCosts, oldMemSize,
memNeeded(stack.peek(), stack.get(stack.size() - 2)), 0);
break;
case SHA3:
gasCost = gasCosts.getSHA3() + calcMemGas(gasCosts, oldMemSize, memNeeded(stack.peek(), stack.get(stack.size() - 2)), 0);
DataWord size = stack.get(stack.size() - 2);
long chunkUsed = getSizeInWords(size.longValueSafe());
gasCost += chunkUsed * gasCosts.getSHA3_WORD();
break;
case CALLDATACOPY:
case RETURNDATACOPY:
gasCost += calcMemGas(gasCosts, oldMemSize,
memNeeded(stack.peek(), stack.get(stack.size() - 3)),
stack.get(stack.size() - 3).longValueSafe());
break;
case CODECOPY:
gasCost += calcMemGas(gasCosts, oldMemSize,
memNeeded(stack.peek(), stack.get(stack.size() - 3)),
stack.get(stack.size() - 3).longValueSafe());
break;
case EXTCODESIZE:
gasCost = gasCosts.getEXT_CODE_SIZE();
break;
case EXTCODECOPY:
gasCost = gasCosts.getEXT_CODE_COPY() + calcMemGas(gasCosts, oldMemSize,
memNeeded(stack.get(stack.size() - 2), stack.get(stack.size() - 4)),
stack.get(stack.size() - 4).longValueSafe());
break;
case EXTCODEHASH:
gasCost = gasCosts.getEXT_CODE_HASH();
break;
case CALL:
case CALLCODE:
case DELEGATECALL:
case STATICCALL:
gasCost = gasCosts.getCALL();
DataWord callGasWord = stack.get(stack.size() - 1);
DataWord callAddressWord = stack.get(stack.size() - 2);
DataWord value = op.callHasValue() ?
stack.get(stack.size() - 3) : DataWord.ZERO;
//check to see if account does not exist and is not a precompiled contract
if (op == CALL) {
if (blockchainConfig.eip161()) {
if (isDeadAccount(program, callAddressWord.getLast20Bytes()) && !value.isZero()) {
gasCost += gasCosts.getNEW_ACCT_CALL();
}
} else {
if (!program.getStorage().isExist(callAddressWord.getLast20Bytes())) {
gasCost += gasCosts.getNEW_ACCT_CALL();
}
}
}
//TODO #POC9 Make sure this is converted to BigInteger (256num support)
if (!value.isZero() )
gasCost += gasCosts.getVT_CALL();
int opOff = op.callHasValue() ? 4 : 3;
BigInteger in = memNeeded(stack.get(stack.size() - opOff), stack.get(stack.size() - opOff - 1)); // in offset+size
BigInteger out = memNeeded(stack.get(stack.size() - opOff - 2), stack.get(stack.size() - opOff - 3)); // out offset+size
gasCost += calcMemGas(gasCosts, oldMemSize, in.max(out), 0);
if (gasCost > program.getGas().longValueSafe()) {
throw Program.Exception.notEnoughOpGas(op, callGasWord, program.getGas());
}
DataWord gasLeft = program.getGas();
DataWord subResult = gasLeft.sub(DataWord.of(gasCost));
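                    // the gas actually forwarded to the callee is derived from the requested amount
                    // by the active config (since EIP-150 it is capped at all but 1/64 of the remaining gas)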
adjustedCallGas = blockchainConfig.getCallGas(op, callGasWord, subResult);
gasCost += adjustedCallGas.longValueSafe();
break;
case CREATE:
gasCost = gasCosts.getCREATE() + calcMemGas(gasCosts, oldMemSize,
memNeeded(stack.get(stack.size() - 2), stack.get(stack.size() - 3)), 0);
break;
case CREATE2:
DataWord codeSize = stack.get(stack.size() - 3);
gasCost = gasCosts.getCREATE() +
calcMemGas(gasCosts, oldMemSize, memNeeded(stack.get(stack.size() - 2), codeSize), 0) +
getSizeInWords(codeSize.longValueSafe()) * gasCosts.getSHA3_WORD();
break;
case LOG0:
case LOG1:
case LOG2:
case LOG3:
case LOG4:
int nTopics = op.val() - OpCode.LOG0.val();
BigInteger dataSize = stack.get(stack.size() - 2).value();
BigInteger dataCost = dataSize.multiply(BigInteger.valueOf(gasCosts.getLOG_DATA_GAS()));
if (program.getGas().value().compareTo(dataCost) < 0) {
throw Program.Exception.notEnoughOpGas(op, dataCost, program.getGas().value());
}
gasCost = gasCosts.getLOG_GAS() +
gasCosts.getLOG_TOPIC_GAS() * nTopics +
gasCosts.getLOG_DATA_GAS() * stack.get(stack.size() - 2).longValue() +
calcMemGas(gasCosts, oldMemSize, memNeeded(stack.peek(), stack.get(stack.size() - 2)), 0);
break;
case EXP:
DataWord exp = stack.get(stack.size() - 2);
int bytesOccupied = exp.bytesOccupied();
gasCost = gasCosts.getEXP_GAS() + gasCosts.getEXP_BYTE_GAS() * bytesOccupied;
break;
default:
break;
}
//DEBUG System.out.println(" OP IS " + op.name() + " GASCOST IS " + gasCost + " NUM IS " + op.asInt());
program.spendGas(gasCost, op.name());
// Log debugging line for VM
if (program.getNumber().intValue() == dumpBlock) {
this.dumpLine(op, gasBefore, gasCost + callGas, memWords, program);
}
if (hasHooks) {
onHookEvent(hook -> hook.step(program, op));
}
// Execute operation
switch (op) {
/**
* Stop and Arithmetic Operations
*/
case STOP: {
program.setHReturn(EMPTY_BYTE_ARRAY);
program.stop();
}
break;
case ADD: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.value() + " + " + word2.value();
DataWord addResult = word1.add(word2);
program.stackPush(addResult);
program.step();
}
break;
case MUL: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.value() + " * " + word2.value();
DataWord mulResult = word1.mul(word2);
program.stackPush(mulResult);
program.step();
}
break;
case SUB: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.value() + " - " + word2.value();
DataWord subResult = word1.sub(word2);
program.stackPush(subResult);
program.step();
}
break;
case DIV: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.value() + " / " + word2.value();
DataWord divResult = word1.div(word2);
program.stackPush(divResult);
program.step();
}
break;
case SDIV: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.sValue() + " / " + word2.sValue();
DataWord sDivResult = word1.sDiv(word2);
program.stackPush(sDivResult);
program.step();
}
break;
case MOD: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.value() + " % " + word2.value();
DataWord modResult = word1.mod(word2);
program.stackPush(modResult);
program.step();
}
break;
case SMOD: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.sValue() + " #% " + word2.sValue();
DataWord sModResult = word1.sMod(word2);
program.stackPush(sModResult);
program.step();
}
break;
case EXP: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.value() + " ** " + word2.value();
DataWord expResult = word1.exp(word2);
program.stackPush(expResult);
program.step();
}
break;
case SIGNEXTEND: {
DataWord word1 = program.stackPop();
BigInteger k = word1.value();
if (k.compareTo(_32_) < 0) {
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1 + " " + word2.value();
DataWord extendResult = word2.signExtend(k.byteValue());
program.stackPush(extendResult);
}
program.step();
}
break;
case NOT: {
DataWord word1 = program.stackPop();
DataWord bnotWord = word1.bnot();
if (logger.isInfoEnabled())
hint = "" + bnotWord.value();
program.stackPush(bnotWord);
program.step();
}
break;
case LT: {
// TODO: can be improved by not using BigInteger
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.value() + " < " + word2.value();
if (word1.value().compareTo(word2.value()) == -1) {
program.stackPush(DataWord.ONE);
} else {
program.stackPush(DataWord.ZERO);
}
program.step();
}
break;
case SLT: {
// TODO: can be improved by not using BigInteger
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.sValue() + " < " + word2.sValue();
if (word1.sValue().compareTo(word2.sValue()) == -1) {
program.stackPush(DataWord.ONE);
} else {
program.stackPush(DataWord.ZERO);
}
program.step();
}
break;
case SGT: {
// TODO: can be improved by not using BigInteger
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.sValue() + " > " + word2.sValue();
if (word1.sValue().compareTo(word2.sValue()) == 1) {
program.stackPush(DataWord.ONE);
} else {
program.stackPush(DataWord.ZERO);
}
program.step();
}
break;
case GT: {
// TODO: can be improved by not using BigInteger
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.value() + " > " + word2.value();
if (word1.value().compareTo(word2.value()) == 1) {
program.stackPush(DataWord.ONE);
} else {
program.stackPush(DataWord.ZERO);
}
program.step();
}
break;
case EQ: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.value() + " == " + word2.value();
DataWord xorResult = word1.xor(word2);
if (xorResult.isZero()) {
program.stackPush(DataWord.ONE);
} else {
program.stackPush(DataWord.ZERO);
}
program.step();
}
break;
case ISZERO: {
DataWord word1 = program.stackPop();
if (word1.isZero()) {
program.stackPush(DataWord.ONE);
} else {
program.stackPush(DataWord.ZERO);
}
if (logger.isInfoEnabled())
hint = "" + word1.value();
program.step();
}
break;
/**
* Bitwise Logic Operations
*/
case AND: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.value() + " && " + word2.value();
DataWord andResult = word1.and(word2);
program.stackPush(andResult);
program.step();
}
break;
case OR: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.value() + " || " + word2.value();
DataWord orResult = word1.or(word2);
program.stackPush(orResult);
program.step();
}
break;
case XOR: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
if (logger.isInfoEnabled())
hint = word1.value() + " ^ " + word2.value();
DataWord xorResult = word1.xor(word2);
program.stackPush(xorResult);
program.step();
}
break;
case BYTE: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
final DataWord result;
if (word1.value().compareTo(_32_) == -1) {
byte tmp = word2.getData()[word1.intValue()];
result = DataWord.of(tmp);
} else {
result = DataWord.ZERO;
}
if (logger.isInfoEnabled())
hint = "" + result.value();
program.stackPush(result);
program.step();
}
break;
case SHL: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
final DataWord result = word2.shiftLeft(word1);
if (logger.isInfoEnabled())
hint = "" + result.value();
program.stackPush(result);
program.step();
}
break;
case SHR: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
final DataWord result = word2.shiftRight(word1);
if (logger.isInfoEnabled())
hint = "" + result.value();
program.stackPush(result);
program.step();
}
break;
case SAR: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
final DataWord result = word2.shiftRightSigned(word1);
if (logger.isInfoEnabled())
hint = "" + result.value();
program.stackPush(result);
program.step();
}
break;
case ADDMOD: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
DataWord word3 = program.stackPop();
DataWord addmodResult = word1.addmod(word2, word3);
program.stackPush(addmodResult);
program.step();
}
break;
case MULMOD: {
DataWord word1 = program.stackPop();
DataWord word2 = program.stackPop();
DataWord word3 = program.stackPop();
DataWord mulmodResult = word1.mulmod(word2, word3);
program.stackPush(mulmodResult);
program.step();
}
break;
/**
* SHA3
*/
case SHA3: {
DataWord memOffsetData = program.stackPop();
DataWord lengthData = program.stackPop();
byte[] buffer = program.memoryChunk(memOffsetData.intValueSafe(), lengthData.intValueSafe());
byte[] encoded = sha3(buffer);
DataWord word = DataWord.of(encoded);
if (logger.isInfoEnabled())
hint = word.toString();
program.stackPush(word);
program.step();
}
break;
/**
* Environmental Information
*/
case ADDRESS: {
DataWord address = program.getOwnerAddress();
if (logger.isInfoEnabled())
hint = "address: " + toHexString(address.getLast20Bytes());
program.stackPush(address);
program.step();
}
break;
case BALANCE: {
DataWord address = program.stackPop();
DataWord balance = program.getBalance(address);
if (logger.isInfoEnabled())
hint = "address: "
+ toHexString(address.getLast20Bytes())
+ " balance: " + balance.toString();
program.stackPush(balance);
program.step();
}
break;
case ORIGIN: {
DataWord originAddress = program.getOriginAddress();
if (logger.isInfoEnabled())
hint = "address: " + toHexString(originAddress.getLast20Bytes());
program.stackPush(originAddress);
program.step();
}
break;
case CALLER: {
DataWord callerAddress = program.getCallerAddress();
if (logger.isInfoEnabled())
hint = "address: " + toHexString(callerAddress.getLast20Bytes());
program.stackPush(callerAddress);
program.step();
}
break;
case CALLVALUE: {
DataWord callValue = program.getCallValue();
if (logger.isInfoEnabled())
hint = "value: " + callValue;
program.stackPush(callValue);
program.step();
}
break;
case CALLDATALOAD: {
DataWord dataOffs = program.stackPop();
DataWord value = program.getDataValue(dataOffs);
if (logger.isInfoEnabled())
hint = "data: " + value;
program.stackPush(value);
program.step();
}
break;
case CALLDATASIZE: {
DataWord dataSize = program.getDataSize();
if (logger.isInfoEnabled())
hint = "size: " + dataSize.value();
program.stackPush(dataSize);
program.step();
}
break;
case CALLDATACOPY: {
DataWord memOffsetData = program.stackPop();
DataWord dataOffsetData = program.stackPop();
DataWord lengthData = program.stackPop();
byte[] msgData = program.getDataCopy(dataOffsetData, lengthData);
if (logger.isInfoEnabled())
hint = "data: " + toHexString(msgData);
program.memorySave(memOffsetData.intValueSafe(), lengthData.intValueSafe(), msgData);
program.step();
}
break;
case RETURNDATASIZE: {
DataWord dataSize = program.getReturnDataBufferSize();
if (logger.isInfoEnabled())
hint = "size: " + dataSize.value();
program.stackPush(dataSize);
program.step();
}
break;
case RETURNDATACOPY: {
DataWord memOffsetData = program.stackPop();
DataWord dataOffsetData = program.stackPop();
DataWord lengthData = program.stackPop();
byte[] msgData = program.getReturnDataBufferData(dataOffsetData, lengthData);
if (msgData == null) {
throw new Program.ReturnDataCopyIllegalBoundsException(dataOffsetData, lengthData, program.getReturnDataBufferSize().longValueSafe());
}
if (logger.isInfoEnabled())
hint = "data: " + toHexString(msgData);
program.memorySave(memOffsetData.intValueSafe(), lengthData.intValueSafe(), msgData);
program.step();
}
break;
case CODESIZE:
case EXTCODESIZE: {
int length;
if (op == OpCode.CODESIZE)
length = program.getCode().length;
else {
DataWord address = program.stackPop();
length = program.getCodeAt(address).length;
}
DataWord codeLength = DataWord.of(length);
if (logger.isInfoEnabled())
hint = "size: " + length;
program.stackPush(codeLength);
program.step();
}
break;
case CODECOPY:
case EXTCODECOPY: {
byte[] fullCode = EMPTY_BYTE_ARRAY;
if (op == OpCode.CODECOPY)
fullCode = program.getCode();
if (op == OpCode.EXTCODECOPY) {
DataWord address = program.stackPop();
fullCode = program.getCodeAt(address);
}
int memOffset = program.stackPop().intValueSafe();
int codeOffset = program.stackPop().intValueSafe();
int lengthData = program.stackPop().intValueSafe();
int sizeToBeCopied =
(long) codeOffset + lengthData > fullCode.length ?
(fullCode.length < codeOffset ? 0 : fullCode.length - codeOffset)
: lengthData;
byte[] codeCopy = new byte[lengthData];
if (codeOffset < fullCode.length)
System.arraycopy(fullCode, codeOffset, codeCopy, 0, sizeToBeCopied);
if (logger.isInfoEnabled())
hint = "code: " + toHexString(codeCopy);
program.memorySave(memOffset, lengthData, codeCopy);
program.step();
}
break;
case EXTCODEHASH: {
DataWord address = program.stackPop();
byte[] codeHash = program.getCodeHashAt(address);
program.stackPush(codeHash);
program.step();
}
break;
case GASPRICE: {
DataWord gasPrice = program.getGasPrice();
if (logger.isInfoEnabled())
hint = "price: " + gasPrice.toString();
program.stackPush(gasPrice);
program.step();
}
break;
/**
* Block Information
*/
case BLOCKHASH: {
int blockIndex = program.stackPop().intValueSafe();
DataWord blockHash = program.getBlockHash(blockIndex);
if (logger.isInfoEnabled())
hint = "blockHash: " + blockHash;
program.stackPush(blockHash);
program.step();
}
break;
case COINBASE: {
DataWord coinbase = program.getCoinbase();
if (logger.isInfoEnabled())
hint = "coinbase: " + toHexString(coinbase.getLast20Bytes());
program.stackPush(coinbase);
program.step();
}
break;
case TIMESTAMP: {
DataWord timestamp = program.getTimestamp();
if (logger.isInfoEnabled())
hint = "timestamp: " + timestamp.value();
program.stackPush(timestamp);
program.step();
}
break;
case NUMBER: {
DataWord number = program.getNumber();
if (logger.isInfoEnabled())
hint = "number: " + number.value();
program.stackPush(number);
program.step();
}
break;
case DIFFICULTY: {
DataWord difficulty = program.getDifficulty();
if (logger.isInfoEnabled())
hint = "difficulty: " + difficulty;
program.stackPush(difficulty);
program.step();
}
break;
case GASLIMIT: {
DataWord gaslimit = program.getGasLimit();
if (logger.isInfoEnabled())
hint = "gaslimit: " + gaslimit;
program.stackPush(gaslimit);
program.step();
}
break;
case POP: {
program.stackPop();
program.step();
} break;
case DUP1: case DUP2: case DUP3: case DUP4:
case DUP5: case DUP6: case DUP7: case DUP8:
case DUP9: case DUP10: case DUP11: case DUP12:
case DUP13: case DUP14: case DUP15: case DUP16:{
int n = op.val() - OpCode.DUP1.val() + 1;
DataWord word_1 = stack.get(stack.size() - n);
program.stackPush(word_1);
program.step();
} break;
case SWAP1: case SWAP2: case SWAP3: case SWAP4:
case SWAP5: case SWAP6: case SWAP7: case SWAP8:
case SWAP9: case SWAP10: case SWAP11: case SWAP12:
case SWAP13: case SWAP14: case SWAP15: case SWAP16:{
int n = op.val() - OpCode.SWAP1.val() + 2;
stack.swap(stack.size() - 1, stack.size() - n);
program.step();
}
break;
case LOG0:
case LOG1:
case LOG2:
case LOG3:
case LOG4: {
if (program.isStaticCall()) throw new Program.StaticCallModificationException();
DataWord address = program.getOwnerAddress();
DataWord memStart = stack.pop();
DataWord memOffset = stack.pop();
int nTopics = op.val() - OpCode.LOG0.val();
List<DataWord> topics = new ArrayList<>();
for (int i = 0; i < nTopics; ++i) {
DataWord topic = stack.pop();
topics.add(topic);
}
byte[] data = program.memoryChunk(memStart.intValueSafe(), memOffset.intValueSafe());
LogInfo logInfo =
new LogInfo(address.getLast20Bytes(), topics, data);
if (logger.isInfoEnabled())
hint = logInfo.toString();
program.getResult().addLogInfo(logInfo);
program.step();
}
break;
case MLOAD: {
DataWord addr = program.stackPop();
DataWord data = program.memoryLoad(addr);
if (logger.isInfoEnabled())
hint = "data: " + data;
program.stackPush(data);
program.step();
}
break;
case MSTORE: {
DataWord addr = program.stackPop();
DataWord value = program.stackPop();
if (logger.isInfoEnabled())
hint = "addr: " + addr + " value: " + value;
program.memorySave(addr, value);
program.step();
}
break;
case MSTORE8: {
DataWord addr = program.stackPop();
DataWord value = program.stackPop();
byte[] byteVal = {value.getData()[31]};
program.memorySave(addr.intValueSafe(), byteVal);
program.step();
}
break;
case SLOAD: {
DataWord key = program.stackPop();
DataWord val = program.storageLoad(key);
if (logger.isInfoEnabled())
hint = "key: " + key + " value: " + val;
if (val == null)
val = key.and(DataWord.ZERO);
program.stackPush(val);
program.step();
}
break;
case SSTORE: {
if (program.isStaticCall()) throw new Program.StaticCallModificationException();
DataWord addr = program.stackPop();
DataWord value = program.stackPop();
if (logger.isInfoEnabled())
hint = "[" + program.getOwnerAddress().toPrefixString() + "] key: " + addr + " value: " + value;
program.storageSave(addr, value);
program.step();
}
break;
case JUMP: {
DataWord pos = program.stackPop();
int nextPC = program.verifyJumpDest(pos);
if (logger.isInfoEnabled())
hint = "~> " + nextPC;
program.setPC(nextPC);
}
break;
case JUMPI: {
DataWord pos = program.stackPop();
DataWord cond = program.stackPop();
if (!cond.isZero()) {
int nextPC = program.verifyJumpDest(pos);
if (logger.isInfoEnabled())
hint = "~> " + nextPC;
program.setPC(nextPC);
} else {
program.step();
}
}
break;
case PC: {
int pc = program.getPC();
DataWord pcWord = DataWord.of(pc);
if (logger.isInfoEnabled())
hint = pcWord.toString();
program.stackPush(pcWord);
program.step();
}
break;
case MSIZE: {
int memSize = program.getMemSize();
DataWord wordMemSize = DataWord.of(memSize);
if (logger.isInfoEnabled())
hint = "" + memSize;
program.stackPush(wordMemSize);
program.step();
}
break;
case GAS: {
DataWord gas = program.getGas();
if (logger.isInfoEnabled())
hint = "" + gas;
program.stackPush(gas);
program.step();
}
break;
case PUSH1:
case PUSH2:
case PUSH3:
case PUSH4:
case PUSH5:
case PUSH6:
case PUSH7:
case PUSH8:
case PUSH9:
case PUSH10:
case PUSH11:
case PUSH12:
case PUSH13:
case PUSH14:
case PUSH15:
case PUSH16:
case PUSH17:
case PUSH18:
case PUSH19:
case PUSH20:
case PUSH21:
case PUSH22:
case PUSH23:
case PUSH24:
case PUSH25:
case PUSH26:
case PUSH27:
case PUSH28:
case PUSH29:
case PUSH30:
case PUSH31:
case PUSH32: {
program.step();
int nPush = op.val() - PUSH1.val() + 1;
byte[] data = program.sweep(nPush);
if (logger.isInfoEnabled())
hint = "" + toHexString(data);
program.stackPush(data);
}
break;
case JUMPDEST: {
program.step();
}
break;
case CREATE: {
if (program.isStaticCall()) throw new Program.StaticCallModificationException();
DataWord value = program.stackPop();
DataWord inOffset = program.stackPop();
DataWord inSize = program.stackPop();
if (logger.isInfoEnabled())
logger.info(logString, String.format("%5s", "[" + program.getPC() + "]"),
String.format("%-12s", op.name()),
program.getGas().value(),
program.getCallDeep(), hint);
program.createContract(value, inOffset, inSize);
program.step();
}
break;
case CREATE2: {
if (program.isStaticCall()) throw new Program.StaticCallModificationException();
DataWord value = program.stackPop();
DataWord inOffset = program.stackPop();
DataWord inSize = program.stackPop();
DataWord salt = program.stackPop();
if (logger.isInfoEnabled())
logger.info(logString, String.format("%5s", "[" + program.getPC() + "]"),
String.format("%-12s", op.name()),
program.getGas().value(),
program.getCallDeep(), hint);
program.createContract2(value, inOffset, inSize, salt);
program.step();
}
break;
case CALL:
case CALLCODE:
case DELEGATECALL:
case STATICCALL: {
program.stackPop(); // use adjustedCallGas instead of requested
DataWord codeAddress = program.stackPop();
DataWord value = op.callHasValue() ?
program.stackPop() : DataWord.ZERO;
if (program.isStaticCall() && op == CALL && !value.isZero())
throw new Program.StaticCallModificationException();
if (!value.isZero()) {
adjustedCallGas = adjustedCallGas.add(DataWord.of(gasCosts.getSTIPEND_CALL()));
}
DataWord inDataOffs = program.stackPop();
DataWord inDataSize = program.stackPop();
DataWord outDataOffs = program.stackPop();
DataWord outDataSize = program.stackPop();
if (logger.isInfoEnabled()) {
hint = "addr: " + toHexString(codeAddress.getLast20Bytes())
+ " gas: " + adjustedCallGas.shortHex()
+ " inOff: " + inDataOffs.shortHex()
+ " inSize: " + inDataSize.shortHex();
logger.info(logString, String.format("%5s", "[" + program.getPC() + "]"),
String.format("%-12s", op.name()),
program.getGas().value(),
program.getCallDeep(), hint);
}
program.memoryExpand(outDataOffs, outDataSize);
MessageCall msg = new MessageCall(
op, adjustedCallGas, codeAddress, value, inDataOffs, inDataSize,
outDataOffs, outDataSize);
PrecompiledContracts.PrecompiledContract contract =
PrecompiledContracts.getContractForAddress(codeAddress, blockchainConfig);
if (!op.callIsStateless()) {
program.getResult().addTouchAccount(codeAddress.getLast20Bytes());
}
if (contract != null) {
program.callToPrecompiledAddress(msg, contract);
} else {
program.callToAddress(msg);
}
program.step();
}
break;
case RETURN:
case REVERT: {
DataWord offset = program.stackPop();
DataWord size = program.stackPop();
byte[] hReturn = program.memoryChunk(offset.intValueSafe(), size.intValueSafe());
program.setHReturn(hReturn);
if (logger.isInfoEnabled())
hint = "data: " + toHexString(hReturn)
+ " offset: " + offset.value()
+ " size: " + size.value();
program.step();
program.stop();
if (op == REVERT) {
program.getResult().setRevert();
}
}
break;
case SUICIDE: {
if (program.isStaticCall()) throw new Program.StaticCallModificationException();
DataWord address = program.stackPop();
program.suicide(address);
program.getResult().addTouchAccount(address.getLast20Bytes());
if (logger.isInfoEnabled())
hint = "address: " + toHexString(program.getOwnerAddress().getLast20Bytes());
program.stop();
}
break;
default:
break;
}
program.setPreviouslyExecutedOp(op.val());
if (logger.isInfoEnabled() && !op.isCall())
logger.info(logString, String.format("%5s", "[" + program.getPC() + "]"),
String.format("%-12s",
op.name()), program.getGas().value(),
program.getCallDeep(), hint);
vmCounter++;
} catch (RuntimeException e) {
logger.warn("VM halted: [{}]", e);
program.spendAllGas();
program.resetFutureRefund();
program.stop();
throw e;
} finally {
program.fullTrace();
}
}
public void play(Program program) {
if (program.byTestingSuite()) return;
try {
if (hasHooks) {
onHookEvent(hook -> hook.startPlay(program));
}
while (!program.isStopped()) {
this.step(program);
}
} catch (RuntimeException e) {
program.setRuntimeFailure(e);
} catch (StackOverflowError soe) {
logger.error("\n !!! StackOverflowError: update your java run command with -Xss2M (-Xss8M for tests) !!!\n", soe);
System.exit(-1);
} finally {
if (hasHooks) {
onHookEvent(hook -> hook.stopPlay(program));
}
}
}
/**
* @deprecated Define your hook component as a Spring bean, instead of this method using.
* TODO: Remove after a few versions
*/
@Deprecated
public static void setVmHook(VMHook vmHook) {
logger.warn("VM.setVmHook(VMHook vmHook) is deprecated method. Define your hook component as a Spring bean.");
VM.deprecatedHook = vmHook;
}
/**
* Utility to calculate new total memory size needed for an operation.
* <br/> Basically just offset + size, unless size is 0, in which case the result is also 0.
*
* @param offset starting position of the memory
* @param size number of bytes needed
* @return offset + size, unless size is 0. In that case memNeeded is also 0.
*/
private static BigInteger memNeeded(DataWord offset, DataWord size) {
return size.isZero() ? BigInteger.ZERO : offset.value().add(size.value());
}
    /*
     * Dumping the VM state at the current operation in various styles:
     *  - standard    Not Yet Implemented
     *  - standard+   (owner address, program counter, operation, gas left)
     *  - pretty      (stack, memory, storage, level, contract,
     *                 vmCounter, internalSteps, operation,
     *                 gasBefore, gasCost, memWords)
     */
private void dumpLine(OpCode op, long gasBefore, long gasCost, long memWords, Program program) {
if (config.dumpStyle().equals("standard+")) {
switch (op) {
case STOP:
case RETURN:
case SUICIDE:
ContractDetails details = program.getStorage()
.getContractDetails(program.getOwnerAddress().getLast20Bytes());
List<DataWord> storageKeys = new ArrayList<>(details.getStorage().keySet());
Collections.sort(storageKeys);
for (DataWord key : storageKeys) {
dumpLogger.trace("{} {}",
toHexString(key.getNoLeadZeroesData()),
toHexString(details.getStorage().get(key).getNoLeadZeroesData()));
}
break;
default:
break;
}
String addressString = toHexString(program.getOwnerAddress().getLast20Bytes());
String pcString = toHexString(DataWord.of(program.getPC()).getNoLeadZeroesData());
String opString = toHexString(new byte[]{op.val()});
String gasString = toHexString(program.getGas().getNoLeadZeroesData());
dumpLogger.trace("{} {} {} {}", addressString, pcString, opString, gasString);
} else if (config.dumpStyle().equals("pretty")) {
dumpLogger.trace(" STACK");
for (DataWord item : program.getStack()) {
dumpLogger.trace("{}", item);
}
dumpLogger.trace(" MEMORY");
String memoryString = program.memoryToString();
if (!"".equals(memoryString))
dumpLogger.trace("{}", memoryString);
dumpLogger.trace(" STORAGE");
ContractDetails details = program.getStorage()
.getContractDetails(program.getOwnerAddress().getLast20Bytes());
List<DataWord> storageKeys = new ArrayList<>(details.getStorage().keySet());
Collections.sort(storageKeys);
for (DataWord key : storageKeys) {
dumpLogger.trace("{}: {}",
key.shortHex(),
details.getStorage().get(key).shortHex());
}
int level = program.getCallDeep();
String contract = toHexString(program.getOwnerAddress().getLast20Bytes());
String internalSteps = String.format("%4s", Integer.toHexString(program.getPC())).replace(' ', '0').toUpperCase();
dumpLogger.trace("{} | {} | #{} | {} : {} | {} | -{} | {}x32",
level, contract, vmCounter, internalSteps, op,
gasBefore, gasCost, memWords);
}
}
}
| 61,927
| 39.186892
| 158
|
java
|
ethereumj
|
ethereumj-master/ethereumj-core/src/main/java/org/ethereum/vm/CallCreate.java
|
/*
* Copyright (c) [2016] [ <ether.camp> ]
* This file is part of the ethereumJ library.
*
* The ethereumJ library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The ethereumJ library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the ethereumJ library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.ethereum.vm;
/**
* @author Roman Mandeleil
* @since 03.07.2014
*/
public class CallCreate {
final byte[] data;
final byte[] destination;
final byte[] gasLimit;
final byte[] value;
public CallCreate(byte[] data, byte[] destination, byte[] gasLimit, byte[] value) {
this.data = data;
this.destination = destination;
this.gasLimit = gasLimit;
this.value = value;
}
public byte[] getData() {
return data;
}
public byte[] getDestination() {
return destination;
}
public byte[] getGasLimit() {
return gasLimit;
}
public byte[] getValue() {
return value;
}
}
| 1,485
| 26.018182
| 87
|
java
|