Feat: aria2 download and transfer in slave node (#1040)
* Feat: retrieve nodes from data table
* Feat: master node pings slave nodes via REST API
* Feat: master sends scheduled ping requests
* Feat: recovery loop for inactive nodes
* Modify: remove database operations from aria2 RPC caller implementation
* Feat: init aria2 client in master node
* Feat: Round Robin load balancer
* Feat: create and monitor aria2 tasks in master node
* Feat: slave receives and handles heartbeats
* Fix: node ID is 0 in download records generated by older versions
* Feat: sign all `X-` prefixed request headers
* Feat: API calls to slave nodes carry metadata in headers
* Feat: call slave aria2 RPC methods from master
* Feat: get slave aria2 task status
* Feat: encode slave response data using gob (see the sketch after this list)
* Feat: aria2 callback to master node / cancel or select task on slave node
* Fix: use dummy aria2 client when caller initialization fails in master node
* Feat: slave aria2 status event callback / slave RPC auth
* Feat: prototype for slave-driven filesystem
* Feat: retry for aria2 client initialization in master node
* Feat: init request client with global options
* Feat: slave receives async tasks from master
* Fix: data race when writing request headers
* Refactor: dependency initialization order
* Feat: generic message queue implementation
* Feat: message queue implementation
* Feat: master waits for slave transfer result
* Feat: slave transfers files in stateless policy
* Feat: slave transfers files in slave policy
* Feat: slave transfers files in local policy
* Feat: slave transfers files in OneDrive policy
* Fix: failed to initialize update checker HTTP client
* Feat: list slave nodes for dashboard
* Feat: test aria2 RPC connection in slave
* Feat: add and save node
* Feat: add and delete nodes in node pool
* Fix: temp file cannot be removed when aria2 task fails
* Fix: delete node in admin panel
* Feat: edit node and get node info
* Modify: delete unused settings
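The gob encoding of slave responses could look roughly like the following; a minimal sketch in which `SlaveResponse` and its fields are illustrative stand-ins, not the actual Cloudreve types:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// SlaveResponse is a hypothetical stand-in for the payload a slave node
// returns to the master; the actual Cloudreve types differ.
type SlaveResponse struct {
	Code int
	Data []byte
}

func main() {
	var buf bytes.Buffer

	// Slave side: gob-encode the response before writing it to the HTTP body.
	if err := gob.NewEncoder(&buf).Encode(SlaveResponse{Code: 0, Data: []byte("ok")}); err != nil {
		panic(err)
	}

	// Master side: decode the response read from the slave.
	var res SlaveResponse
	if err := gob.NewDecoder(&buf).Decode(&res); err != nil {
		panic(err)
	}
	fmt.Println(res.Code, string(res.Data)) // 0 ok
}
```

gob suits this channel because both endpoints are Go programs, so struct types can be shared without a separate schema.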
pkg/cluster/pool.go (new file, 176 lines)
@@ -0,0 +1,176 @@
package cluster

import (
	model "github.com/cloudreve/Cloudreve/v3/models"
	"github.com/cloudreve/Cloudreve/v3/pkg/balancer"
	"github.com/cloudreve/Cloudreve/v3/pkg/util"
	"sync"
)
var Default *NodePool

// Node feature groups that need to be classified
var featureGroup = []string{"aria2"}
// Pool is a pool of slave nodes
type Pool interface {
	// BalanceNodeByFeature returns an active node selected by the given feature and load balancer
	BalanceNodeByFeature(feature string, lb balancer.Balancer) (error, Node)

	// GetNodeByID returns the node with the given ID
	GetNodeByID(id uint) Node

	// Add adds the given node into the pool; if the node already exists, it is refreshed
	Add(node *model.Node)

	// Delete kills the node with the given ID and removes it from the pool
	Delete(id uint)
}
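// A hypothetical use of the Pool API from the master side (lb stands for any
// balancer.Balancer implementation; its construction is illustrative, not the
// actual Cloudreve API):
//
//	err, node := cluster.Default.BalanceNodeByFeature("aria2", lb)
//	if err != nil {
//		// no active node offers aria2; fall back or surface the error
//	}
//	_ = node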
// NodePool is a generic node pool
type NodePool struct {
	active   map[uint]Node
	inactive map[uint]Node

	featureMap map[string][]Node

	lock sync.RWMutex
}
// Init initializes the slave node pool
func Init() {
	Default = &NodePool{
		featureMap: make(map[string][]Node),
	}
	if err := Default.initFromDB(); err != nil {
		util.Log().Warning("Failed to initialize node pool: %s", err)
	}
}
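// Init is intended to run once during master bootstrap, after the database
// layer is ready (the "dependency initialization order" refactor in this
// commit concerns exactly this ordering). A hypothetical call site, where
// model.Init is assumed to bring up the database first:
//
//	model.Init()   // database first, so initFromDB can load nodes
//	cluster.Init() // then the node pool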
// buildIndexMap rebuilds the feature index from the current set of active nodes
func (pool *NodePool) buildIndexMap() {
	pool.lock.Lock()
	for _, feature := range featureGroup {
		pool.featureMap[feature] = make([]Node, 0)
	}

	for _, v := range pool.active {
		for _, feature := range featureGroup {
			if v.IsFeatureEnabled(feature) {
				pool.featureMap[feature] = append(pool.featureMap[feature], v)
			}
		}
	}
	pool.lock.Unlock()
}
// GetNodeByID returns the node with the given ID, whether active or inactive
func (pool *NodePool) GetNodeByID(id uint) Node {
	pool.lock.RLock()
	defer pool.lock.RUnlock()

	if node, ok := pool.active[id]; ok {
		return node
	}

	return pool.inactive[id]
}
// nodeStatusChange moves a node between the active and inactive maps
// when its status changes, then rebuilds the feature index
func (pool *NodePool) nodeStatusChange(isActive bool, id uint) {
	util.Log().Debug("Slave node [ID=%d] status changed [Active=%t]", id, isActive)
	pool.lock.Lock()
	if isActive {
		// The ok checks guard against inserting a nil entry when the
		// node is already in the target map.
		if node, ok := pool.inactive[id]; ok {
			delete(pool.inactive, id)
			pool.active[id] = node
		}
	} else {
		if node, ok := pool.active[id]; ok {
			delete(pool.active, id)
			pool.inactive[id] = node
		}
	}
	pool.lock.Unlock()

	pool.buildIndexMap()
}
// initFromDB loads the nodes marked active in the database and rebuilds the pool
func (pool *NodePool) initFromDB() error {
	nodes, err := model.GetNodesByStatus(model.NodeActive)
	if err != nil {
		return err
	}

	pool.lock.Lock()
	pool.active = make(map[uint]Node)
	pool.inactive = make(map[uint]Node)
	for i := 0; i < len(nodes); i++ {
		pool.add(&nodes[i])
	}
	pool.lock.Unlock()

	pool.buildIndexMap()
	return nil
}
// add registers a node without locking; callers must hold pool.lock
func (pool *NodePool) add(node *model.Node) {
	newNode := NewNodeFromDBModel(node)
	if newNode.IsActive() {
		pool.active[node.ID] = newNode
	} else {
		pool.inactive[node.ID] = newNode
	}

	// Subscribe to node status changes
	newNode.SubscribeStatusChange(func(isActive bool, id uint) {
		pool.nodeStatusChange(isActive, id)
	})
}
// Add adds the given node into the pool; nodes already present are left untouched
func (pool *NodePool) Add(node *model.Node) {
	pool.lock.Lock()
	// Defers run LIFO: the lock is released before buildIndexMap,
	// which takes the lock itself.
	defer pool.buildIndexMap()
	defer pool.lock.Unlock()

	if _, ok := pool.active[node.ID]; ok {
		// TODO: refresh node
		return
	}

	if _, ok := pool.inactive[node.ID]; ok {
		return
	}

	pool.add(node)
}
// Delete kills the node with the given ID and removes it from the pool
func (pool *NodePool) Delete(id uint) {
	pool.lock.Lock()
	defer pool.buildIndexMap()
	defer pool.lock.Unlock()

	if node, ok := pool.active[id]; ok {
		node.Kill()
		delete(pool.active, id)
		return
	}

	if node, ok := pool.inactive[id]; ok {
		node.Kill()
		delete(pool.inactive, id)
		return
	}
}
// BalanceNodeByFeature selects a node offering the given feature using the load balancer
func (pool *NodePool) BalanceNodeByFeature(feature string, lb balancer.Balancer) (error, Node) {
	pool.lock.RLock()
	defer pool.lock.RUnlock()
	if nodes, ok := pool.featureMap[feature]; ok {
		err, res := lb.NextPeer(nodes)
		if err == nil {
			return nil, res.(Node)
		}

		return err, nil
	}

	return ErrFeatureNotExist, nil
}
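The `lb.NextPeer` call above is where the commit's Round Robin load balancer plugs in. Below is a minimal sketch of such a balancer, assuming only a rotation over a non-empty slice; the `RoundRobin` type, the `[]string` peer list, and the error value are illustrative, not the actual `pkg/balancer` implementation:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// RoundRobin hands out peers in rotation; safe for concurrent use.
type RoundRobin struct {
	current uint64
}

var errNoPeer = errors.New("no peer available")

// NextPeer mirrors the (error, result) return style used by the pool above.
func (r *RoundRobin) NextPeer(peers []string) (error, string) {
	if len(peers) == 0 {
		return errNoPeer, ""
	}
	next := atomic.AddUint64(&r.current, 1)
	return nil, peers[(next-1)%uint64(len(peers))]
}

func main() {
	lb := &RoundRobin{}
	peers := []string{"node-1", "node-2", "node-3"}
	for i := 0; i < 4; i++ {
		_, peer := lb.NextPeer(peers)
		fmt.Print(peer, " ") // node-1 node-2 node-3 node-1
	}
	fmt.Println()
}
```

The atomic counter keeps the rotation lock-free; a real implementation would operate on node values rather than strings.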