Merged
61 changes: 61 additions & 0 deletions .env.example
@@ -0,0 +1,61 @@
# Environment Variables Configuration Example

# ============= RPC Configuration =============
# Multi-RPC Configuration - Use comma-separated RPC nodes for load balancing and fault tolerance
#
# Benefits of Multi-RPC Setup:
# - Load Distribution: Requests distributed across multiple endpoints
# - High Availability: Continues working if one RPC fails
# - Rate Limit Mitigation: Avoids hitting individual provider limits
# - Performance: Potential response time improvements
# - Cost Optimization: Distribute load across free/paid tiers
#
# Configuration Format: Use comma-separated URLs (no spaces)
# Example: "url1,url2,url3"

# L1 RPC node list (Ethereum mainnet or testnet)
L1_RPC_URLS="https://eth-sepolia.g.alchemy.com/v2/your_key1,https://rpc.ankr.com/eth_sepolia,https://sepolia.infura.io/v3/your_key2"

# L2 RPC node list (Optimism network)
L2_RPC_URLS="https://opt-sepolia.g.alchemy.com/v2/your_key1,https://optimism-sepolia.gateway.pokt.network/v1/lb/your_key2"

# OP Node RPC list (op-node endpoints for rollup operations)
NODE_RPC_URLS="https://your-endpoint.optimism-sepolia.quiknode.pro/your_key1/,https://node2.optimism-sepolia.quiknode.pro/your_key2/"

# Single RPC Configuration (Legacy support):
# If you prefer single endpoints, just provide one URL:
# L1_RPC_URLS="https://eth-sepolia.g.alchemy.com/v2/your_key"

# ============= RPC Rate Limiting =============
# RPC request rate limit (requests per second)
RPC_RATE_LIMIT=5
# RPC request burst limit
RPC_RATE_BURST=2

# ============= Logging Configuration =============
# Log level: debug, info, warn, error, panic, fatal
LOG_LEVEL=info
# Log format: console, json
LOG_FORMAT=console

# ============= Database Configuration =============
MYSQL_DATA_SOURCE=root:123456@tcp(127.0.0.1:3306)/dispute_explorer?charset=utf8mb4&parseTime=True&loc=Local&multiStatements=true
MYSQL_MAX_IDLE_CONNS=10
MYSQL_MAX_OPEN_CONNS=20
MYSQL_CONN_MAX_LIFETIME=3600

# ============= Blockchain Configuration =============
# Blockchain network name
BLOCKCHAIN=sepolia

# Starting block number (ensure no games are missed)
FROM_BLOCK_NUMBER=5515562
# Starting block hash
FROM_BLOCK_HASH=0x5205c17557759edaef9120f56af802aeaa2827a60d674a0413e77e9c515bdfba

# Dispute Game Factory Proxy contract address
DISPUTE_GAME_PROXY_CONTRACT=0x05F9613aDB30026FFd634f38e5C4dFd30a197Fa1

# ============= API Configuration =============
# API server port
API_PORT=8088
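A minimal sketch of how the comma-separated *_RPC_URLS values above could be loaded at startup. The helper name parseRPCURLs and the surrounding package are illustrative assumptions, not the repository's actual loader.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// parseRPCURLs splits a comma-separated env var (e.g. L1_RPC_URLS) into a
// cleaned list of endpoints, dropping empty entries and stray whitespace.
func parseRPCURLs(envName string) []string {
	var urls []string
	for _, u := range strings.Split(os.Getenv(envName), ",") {
		if u = strings.TrimSpace(u); u != "" {
			urls = append(urls, u)
		}
	}
	return urls
}

func main() {
	l1 := parseRPCURLs("L1_RPC_URLS")
	l2 := parseRPCURLs("L2_RPC_URLS")
	node := parseRPCURLs("NODE_RPC_URLS")
	fmt.Printf("configured %d L1, %d L2, %d node endpoints\n", len(l1), len(l2), len(node))
}
```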
6 changes: 3 additions & 3 deletions README.md
@@ -37,11 +37,11 @@ MYSQL_DATA_SOURCE=<data-source>
BLOCKCHAIN=<block-chain-name>

# l1 rpc url example: eth json rpc url
L1_RPC_URL=<l1-rpc>
L1_RPC_URLS=<l1-rpc>
# l2 rpc url example: op json rpc url
L2_RPC_URL=<l2-rpc>
L2_RPC_URLS=<l2-rpc>

NODE_RPCURL=<op-node-rpc>
NODE_RPCURLS=<op-node-rpc>

RPC_RATE_LIMIT=15
RPC_RATE_BURST=5
Binary file renamed build/dispute-explorer → dispute-explorer
Binary file not shown.
40 changes: 25 additions & 15 deletions internal/api/dispute_game_handler.go
@@ -8,12 +8,14 @@ import (
"net/http"

"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum-optimism/optimism/op-service/sources" // 新增导入
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
gethlog "github.com/ethereum/go-ethereum/log"
config "github.com/optimism-java/dispute-explorer/internal/types"
"github.com/optimism-java/dispute-explorer/pkg/contract"
"github.com/optimism-java/dispute-explorer/pkg/rpc"

"github.com/spf13/cast"

@@ -24,20 +26,16 @@
)

type DisputeGameHandler struct {
Config *config.Config
DB *gorm.DB
L1RPC *ethclient.Client
L2RPC *ethclient.Client
RollupClient *sources.RollupClient // newly added field
Config *config.Config
DB *gorm.DB
RPCManager *rpc.Manager
}

func NewDisputeGameHandler(db *gorm.DB, l1rpc *ethclient.Client, l2rpc *ethclient.Client, config *config.Config, rollupClient *sources.RollupClient) *DisputeGameHandler {
func NewDisputeGameHandler(db *gorm.DB, rpcManager *rpc.Manager, config *config.Config) *DisputeGameHandler {
return &DisputeGameHandler{
DB: db,
L1RPC: l1rpc,
L2RPC: l2rpc,
Config: config,
RollupClient: rollupClient, // newly added assignment
DB: db,
RPCManager: rpcManager,
Config: config,
}
}

@@ -299,7 +297,17 @@ func (h DisputeGameHandler) getClaimRoot(blockNumber int64) (string, error) {
return "", fmt.Errorf("block number cannot be negative: %d", blockNumber)
}

output, err := h.RollupClient.OutputAtBlock(context.Background(), uint64(blockNumber))
// Use RPCManager to pick the next Node RPC URL (round-robin)
nodeRPCURL := h.RPCManager.GetNodeRPCURL()

// Create a RollupClient (a different Node RPC endpoint may be used each time)
rpcClient, err := client.NewRPC(context.Background(), gethlog.New(), nodeRPCURL)
if err != nil {
return "", fmt.Errorf("failed to connect to node RPC %s: %w", nodeRPCURL, err)
}
rollupClient := sources.NewRollupClient(rpcClient)

output, err := rollupClient.OutputAtBlock(context.Background(), uint64(blockNumber))
if err != nil {
return "", fmt.Errorf("failed to get output at block %d: %w", blockNumber, err)
}
@@ -340,7 +348,9 @@ func (h DisputeGameHandler) GetGamesClaimByPosition(c *gin.Context) {
}

func (h DisputeGameHandler) gamesClaimByPosition(req *CalculateClaim) (string, error) {
newDisputeGame, err := contract.NewDisputeGame(common.HexToAddress(req.DisputeGame), h.L1RPC)
// Get the L1 client from RPCManager
l1Client := h.RPCManager.GetRawClient(true)
newDisputeGame, err := contract.NewDisputeGame(common.HexToAddress(req.DisputeGame), l1Client)
if err != nil {
return "", err
}
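The handler changes above lean on rpc.Manager's GetNodeRPCURL and GetRawClient for endpoint rotation. A minimal round-robin selector in that spirit is sketched below; the struct layout and package name are assumptions, only the method names mirror the calls in the diff, and the sketch assumes the URL and client lists are non-empty.

```go
package rpcsketch

import (
	"sync/atomic"

	"github.com/ethereum/go-ethereum/ethclient"
)

// Manager rotates over pre-dialed clients and raw op-node URLs,
// assumed to have been built from the parsed *_RPC_URLS lists at startup.
type Manager struct {
	nodeURLs  []string
	l1Clients []*ethclient.Client
	l2Clients []*ethclient.Client

	nodeIdx atomic.Uint64
	l1Idx   atomic.Uint64
	l2Idx   atomic.Uint64
}

// GetNodeRPCURL returns the next op-node endpoint in round-robin order.
func (m *Manager) GetNodeRPCURL() string {
	i := m.nodeIdx.Add(1)
	return m.nodeURLs[i%uint64(len(m.nodeURLs))]
}

// GetRawClient returns the next L1 client when isL1 is true, otherwise the next L2 client.
func (m *Manager) GetRawClient(isL1 bool) *ethclient.Client {
	if isL1 {
		i := m.l1Idx.Add(1)
		return m.l1Clients[i%uint64(len(m.l1Clients))]
	}
	i := m.l2Idx.Add(1)
	return m.l2Clients[i%uint64(len(m.l2Clients))]
}
```

Under this assumption, getClaimRoot dials a fresh RollupClient against whichever node URL the rotation returns, so consecutive requests spread across the configured op-node endpoints.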
6 changes: 4 additions & 2 deletions internal/handler/frontend_move.go
@@ -82,6 +82,7 @@ func (h *FrontendMoveHandler) RecordFrontendMove(req *FrontendMoveRequest) error
DisputedClaim: req.DisputedClaim,
Status: schema.FrontendMoveStatusPending,
SubmittedAt: time.Now().Unix(),
IsSynced: false, // new records default to not synced
}

// Save to database
@@ -106,8 +107,9 @@ func (h *FrontendMoveHandler) monitorTransactionStatus(recordID int64, txHash st
for i := 0; i < maxRetries; i++ {
time.Sleep(retryInterval)

// Query transaction status
receipt, err := h.svc.L1RPC.TransactionReceipt(context.Background(), common.HexToHash(txHash))
// Query transaction status using RPCManager
l1Client := h.svc.RPCManager.GetRawClient(true)
receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(txHash))
if err != nil {
log.Debugf("[FrontendMoveHandler] Transaction %s not yet mined or error: %v", txHash, err)
continue
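For reference, the retry loop in monitorTransactionStatus reduces to the polling pattern below. The helper name, package name, and error handling are a sketch under the assumption that any receipt-lookup error simply means the transaction is not yet mined, not the file's exact code.

```go
package handlersketch

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
)

// waitForReceipt polls TransactionReceipt until the transaction is mined
// or the retry budget is exhausted.
func waitForReceipt(ctx context.Context, c *ethclient.Client, txHash common.Hash,
	maxRetries int, interval time.Duration) (*types.Receipt, error) {
	for i := 0; i < maxRetries; i++ {
		time.Sleep(interval)
		receipt, err := c.TransactionReceipt(ctx, txHash)
		if err != nil {
			continue // not mined yet, or a transient RPC error; keep polling
		}
		return receipt, nil
	}
	return nil, fmt.Errorf("transaction %s not mined after %d attempts", txHash.Hex(), maxRetries)
}
```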
2 changes: 2 additions & 0 deletions internal/handler/handler.go
@@ -25,6 +25,8 @@ func Run(ctx *svc.ServiceContext) {
go CalculateLostBond(ctx)
// sync claim len
go SyncClaimDataLen(ctx)
// sync frontend move transactions
go SyncFrontendMoveTransactions(ctx)
}

// startRPCMonitoring starts RPC monitoring (internal function)
2 changes: 1 addition & 1 deletion internal/handler/latestBlockNumber.go
@@ -21,7 +21,7 @@ func LatestBlackNumber(ctx *svc.ServiceContext) {
}

ctx.LatestBlockNumber = cast.ToInt64(latest)
log.Infof("[Handler.LatestBlackNumber] Latest block number: %d (via RPC Manager)\n", latest)
log.Debugf("[Handler.LatestBlackNumber] Latest block number: %d (via RPC Manager)\n", latest)
time.Sleep(12 * time.Second)
}
}
6 changes: 3 additions & 3 deletions internal/handler/logFilter.go
@@ -29,7 +29,7 @@ func LogFilter(ctx *svc.ServiceContext, block schema.SyncBlock, addresses []comm
if err != nil {
return nil, errors.WithStack(err)
}
log.Infof("[LogFilter] Event logs length is %d, block number is %d (via RPC Manager)\n", len(logs), block.BlockNumber)
log.Debugf("[LogFilter] Event logs length is %d, block number is %d (via RPC Manager)\n", len(logs), block.BlockNumber)
return LogsToEvents(ctx, logs, block.ID)
}

@@ -41,14 +41,14 @@ func LogsToEvents(ctx *svc.ServiceContext, logs []types.Log, syncBlockID int64)
contractAddress := vlog.Address
Event := blockchain.GetEvent(eventHash)
if Event == nil {
log.Infof("[LogsToEvents] logs[txHash: %s, contractAddress:%s, eventHash: %s]\n", vlog.TxHash, strings.ToLower(contractAddress.Hex()), eventHash)
log.Debugf("[LogsToEvents] logs[txHash: %s, contractAddress:%s, eventHash: %s]\n", vlog.TxHash, strings.ToLower(contractAddress.Hex()), eventHash)
continue
}

blockTime := blockTimes[cast.ToInt64(vlog.BlockNumber)]
if blockTime == 0 {
blockNumber := cast.ToInt64(vlog.BlockNumber)
log.Infof("[LogsToEvents] Fetching block info for block number: %d, txHash: %s", blockNumber, vlog.TxHash.Hex())
log.Debugf("[LogsToEvents] Fetching block info for block number: %d, txHash: %s", blockNumber, vlog.TxHash.Hex())

// Use unified RPC manager to get block (automatically applies rate limiting)
block, err := ctx.RPCManager.GetBlockByNumber(context.Background(), big.NewInt(blockNumber), true) // true indicates L1
2 changes: 1 addition & 1 deletion internal/handler/rpc_manager_migration.go
@@ -24,7 +24,7 @@ func LatestBlockNumberWithRateLimit(ctx *svc.ServiceContext) {
}

ctx.LatestBlockNumber = cast.ToInt64(latest)
log.Infof("[Handler.LatestBlockNumberWithRateLimit] Latest block number: %d (using RPC Manager)\n", latest)
log.Debugf("[Handler.LatestBlockNumberWithRateLimit] Latest block number: %d (using RPC Manager)\n", latest)
time.Sleep(12 * time.Second)
}
}
10 changes: 5 additions & 5 deletions internal/handler/syncBlock.go
@@ -31,8 +31,8 @@ func SyncBlock(ctx *svc.ServiceContext) {
ctx.SyncedBlockHash = common.HexToHash(syncedBlock.BlockHash)
}

log.Infof("[Handler.SyncBlock]SyncedBlockNumber: %d", ctx.SyncedBlockNumber)
log.Infof("[Handler.SyncBlock]SyncedBlockHash:%s", ctx.SyncedBlockHash.String())
log.Debugf("[Handler.SyncBlock]SyncedBlockNumber: %d", ctx.SyncedBlockNumber)
log.Debugf("[Handler.SyncBlock]SyncedBlockHash:%s", ctx.SyncedBlockHash.String())

for {
// Check pending blocks count before syncing new blocks
@@ -52,7 +52,7 @@
}

syncingBlockNumber := ctx.SyncedBlockNumber + 1
log.Infof("[Handler.SyncBlock] Try to sync block number: %d\n", syncingBlockNumber)
log.Debugf("[Handler.SyncBlock] Try to sync block number: %d\n", syncingBlockNumber)

if syncingBlockNumber > ctx.LatestBlockNumber {
time.Sleep(3 * time.Second)
@@ -68,7 +68,7 @@
continue
}
block := rpc.ParseJSONBlock(string(blockJSON))
log.Infof("[Handler.SyncBlock] Syncing block number: %d, hash: %v, parent hash: %v (via RPC Manager)\n", block.Number(), block.Hash(), block.ParentHash())
log.Debugf("[Handler.SyncBlock] Syncing block number: %d, hash: %v, parent hash: %v (via RPC Manager)\n", block.Number(), block.Hash(), block.ParentHash())

if common.HexToHash(block.ParentHash()) != ctx.SyncedBlockHash {
log.Errorf("[Handler.SyncBlock] ParentHash of the block being synchronized is inconsistent: %s \n", ctx.SyncedBlockHash)
@@ -104,7 +104,7 @@ func rollbackBlock(ctx *svc.ServiceContext) {
for {
rollbackBlockNumber := ctx.SyncedBlockNumber

log.Infof("[Handler.SyncBlock.RollBackBlock] Try to rollback block number: %d\n", rollbackBlockNumber)
log.Debugf("[Handler.SyncBlock.RollBackBlock] Try to rollback block number: %d\n", rollbackBlockNumber)

// use unified RPC manager for rollback operation (automatically applies rate limiting)
requestBody := "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getBlockByNumber\",\"params\":[\"" + fmt.Sprintf("0x%X", rollbackBlockNumber) + "\", true],\"id\":1}"
3 changes: 2 additions & 1 deletion internal/handler/syncClaimDataLen.go
@@ -20,7 +20,7 @@ func SyncClaimDataLen(ctx *svc.ServiceContext) {
continue
}
if len(disputeGames) == 0 {
log.Infof("[Handler.SyncClaimDataLen] Pending games count is 0\n")
log.Debugf("[Handler.SyncClaimDataLen] Pending games count is 0\n")
time.Sleep(2 * time.Second)
continue
}
@@ -38,5 +38,6 @@
log.Errorf("[Handler.SyncClaimDataLen] update claim len err", errors.WithStack(err))
}
}
time.Sleep(3 * time.Second)
}
}
5 changes: 4 additions & 1 deletion internal/handler/syncCredit.go
@@ -23,8 +23,10 @@ func SyncCredit(ctx *svc.ServiceContext) {
}
for _, disputeGame := range disputeGames {
game := common.HexToAddress(disputeGame.GameContract)
// Get the L1 client from RPCManager
l1Client := ctx.RPCManager.GetRawClient(true)
disputeClient, err := NewRetryDisputeGameClient(ctx.DB, game,
ctx.L1RPC, rate.Limit(ctx.Config.RPCRateLimit), ctx.Config.RPCRateBurst)
l1Client, rate.Limit(ctx.Config.RPCRateLimit), ctx.Config.RPCRateBurst)
if err != nil {
log.Errorf("[Handler.SyncCredit] NewRetryDisputeGameClient err: %s", err)
time.Sleep(5 * time.Second)
@@ -34,6 +36,7 @@
if err != nil {
log.Errorf("[Handler.SyncCredit] ProcessDisputeGameCredit err: %s", err)
}
time.Sleep(1 * time.Second)
}
time.Sleep(3 * time.Second)
}
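The RetryDisputeGameClient built here takes rate.Limit(RPCRateLimit) and RPCRateBurst, which points to a token-bucket gate around contract reads. A minimal sketch of that gating follows; the wrapper type and its names are illustrative, not the package's actual implementation.

```go
package ratelimitsketch

import (
	"context"

	"golang.org/x/time/rate"
)

// limitedCaller gates RPC-backed calls with a token bucket built from the
// RPC_RATE_LIMIT (requests/second) and RPC_RATE_BURST settings.
type limitedCaller struct {
	limiter *rate.Limiter
}

func newLimitedCaller(rps float64, burst int) *limitedCaller {
	return &limitedCaller{limiter: rate.NewLimiter(rate.Limit(rps), burst)}
}

// do blocks until a token is available (or ctx is cancelled), then runs fn.
func (l *limitedCaller) do(ctx context.Context, fn func() error) error {
	if err := l.limiter.Wait(ctx); err != nil {
		return err
	}
	return fn()
}
```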
11 changes: 6 additions & 5 deletions internal/handler/syncDispute.go
@@ -20,13 +20,13 @@ import (
func SyncDispute(ctx *svc.ServiceContext) {
for {
var events []schema.SyncEvent
err := ctx.DB.Where("status=? OR status=?", schema.EventPending, schema.EventRollback).Order("block_number").Limit(50).Find(&events).Error
err := ctx.DB.Where("status=? OR status=?", schema.EventPending, schema.EventRollback).Order("block_number").Limit(20).Find(&events).Error
if err != nil {
time.Sleep(3 * time.Second)
continue
}
if len(events) == 0 {
log.Infof("[Handler.SyncDispute] Pending events count is 0\n")
log.Debugf("[Handler.SyncDispute] Pending events count is 0\n")
time.Sleep(2 * time.Second)
continue
}
@@ -45,6 +45,7 @@ func SyncDispute(ctx *svc.ServiceContext) {
log.Errorf("[Handler.SyncEvent] HandleRollbackBlock err: %s\n", errors.WithStack(err))
}
}
time.Sleep(1 * time.Second)
}
time.Sleep(3 * time.Second)
}
@@ -136,7 +137,7 @@ func HandlePendingEvent(ctx *svc.ServiceContext, event schema.SyncEvent) error {
return errors.WithStack(err)
}
disputeClient, err := NewRetryDisputeGameClient(ctx.DB, common.HexToAddress(disputeCreated.DisputeProxy),
ctx.L1RPC, rate.Limit(ctx.Config.RPCRateLimit), ctx.Config.RPCRateBurst)
ctx.RPCManager.GetRawClient(true), rate.Limit(ctx.Config.RPCRateLimit), ctx.Config.RPCRateBurst)
if err != nil {
log.Errorf("[handle.SyncDispute.HandlePendingEvent] init client for created err: %s", err)
return errors.WithStack(err)
@@ -148,7 +149,7 @@
}
case event.EventName == disputeMove.Name() && event.EventHash == disputeMove.EventHash().String():
disputeClient, err := NewRetryDisputeGameClient(ctx.DB, common.HexToAddress(event.ContractAddress),
ctx.L1RPC, rate.Limit(ctx.Config.RPCRateLimit), ctx.Config.RPCRateBurst)
ctx.RPCManager.GetRawClient(true), rate.Limit(ctx.Config.RPCRateLimit), ctx.Config.RPCRateBurst)
if err != nil {
log.Errorf("[handle.SyncDispute.HandlePendingEvent] init client for move err: %s", err)
return errors.WithStack(err)
@@ -160,7 +161,7 @@
}
case event.EventName == disputeResolved.Name() && event.EventHash == disputeResolved.EventHash().String():
disputeClient, err := NewRetryDisputeGameClient(ctx.DB, common.HexToAddress(event.ContractAddress),
ctx.L1RPC, rate.Limit(ctx.Config.RPCRateLimit), ctx.Config.RPCRateBurst)
ctx.RPCManager.GetRawClient(true), rate.Limit(ctx.Config.RPCRateLimit), ctx.Config.RPCRateBurst)
if err != nil {
log.Errorf("[handle.SyncDispute.HandlePendingEvent] init client for resolved err: %s", err)
return errors.WithStack(err)