forked from mykola/lux
implement rpc cli for new-host and new-node
parent f14a95d032
commit 2541a561b1
3 changed files with 130 additions and 23 deletions
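For orientation, here is a condensed sketch of the client-side flow this commit wires up; the full version is in the main.go hunk below. The handleNewHost helper, the rpcExecutor interface, and the uint32 counter type are assumptions made for the example; Execute, LuxRpcRequest, LuxRpcError, and crypto.LuxKeyStoreFromRpc are the names the diff itself uses.

// rpcExecutor is a stand-in for the client type used as cl in main.go;
// only the Execute signature inferred from the diff is assumed here.
type rpcExecutor interface {
	Execute(req rpc.LuxRpcRequest) (rpc.LuxRpcResponse, rpc.LuxRpcError, error)
}

// handleNewHost is a hypothetical helper mirroring the new-host branch in
// rpcMain: issue the RPC, check transport and RPC-level errors, then
// persist the returned keystore to savePath.
func handleNewHost(cl rpcExecutor, counter uint32, savePath string) error {
	res, rpcErr, err := cl.Execute(rpc.LuxRpcRequest{
		RequestID: counter,
		Controller: "node",
		Command: "new-host", // "new-node" follows the same shape
	})
	if err != nil {
		return fmt.Errorf("failed to send request: %w", err)
	}
	if rpcErr.ErrorCode != 0 {
		return fmt.Errorf("RPC error %d: %s", rpcErr.ErrorCode, rpcErr.Message)
	}
	// Decode the base64 key/IV blobs and write the keystore to savePath.
	_, err = crypto.LuxKeyStoreFromRpc(res.Keystore, savePath)
	return err
}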
@@ -194,6 +194,63 @@ func LuxKeyStoreIntoRpc(ks *LuxKeyStore) rpc.LuxRpcKeyStore {
 	return keyStore
 }
 
+func LuxKeyStoreFromRpc(rpcKs rpc.LuxRpcKeyStore, savePath string) (LuxKeyStore, error) {
+	ks := NewLuxKeyStore(savePath)
+
+	for _, rpcKey := range rpcKs.Nodes {
+		id, err := proto.ParseLuxID(rpcKey.ID)
+		if err != nil {
+			return ks, err
+		}
+
+		key, err := base64.StdEncoding.DecodeString(rpcKey.KeyBlob)
+		if err != nil {
+			return ks, err
+		}
+
+		iv, err := base64.StdEncoding.DecodeString(rpcKey.IVBlob)
+		if err != nil {
+			return ks, err
+		}
+
+		ks.Put(LuxKey{
+			Type: proto.LuxTypeNode,
+			Id:   id,
+			Key:  key,
+			IV:   iv,
+		})
+	}
+
+	for _, rpcKey := range rpcKs.Hosts {
+		id, err := proto.ParseLuxID(rpcKey.ID)
+		if err != nil {
+			return ks, err
+		}
+
+		key, err := base64.StdEncoding.DecodeString(rpcKey.KeyBlob)
+		if err != nil {
+			return ks, err
+		}
+
+		iv, err := base64.StdEncoding.DecodeString(rpcKey.IVBlob)
+		if err != nil {
+			return ks, err
+		}
+
+		ks.Put(LuxKey{
+			Type: proto.LuxTypeHost,
+			Id:   id,
+			Key:  key,
+			IV:   iv,
+		})
+	}
+
+	if err := ks.Save(); err != nil {
+		return ks, err
+	}
+	return ks, nil
+}
+
 func (ks *LuxKeyStore) Handle(request rpc.LuxRpcRequest, rpcType rpc.LuxRpcType) (rpc.LuxRpcResponse, rpc.LuxRpcError, bool) {
 	var rpcRes rpc.LuxRpcResponse
 
main.go (+50, −0)
@@ -251,6 +251,56 @@ func rpcMain() {
 			fmt.Println(route)
 		}
 	}
+
+	if rpcNewHost != "" {
+		rpcRes, rpcErr, err := cl.Execute(rpc.LuxRpcRequest{
+			RequestID: counter,
+			Controller: "node",
+			Command: "new-host",
+		})
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "failed to send request: %v\n", err)
+			os.Exit(1)
+		}
+		counter++
+
+		if rpcErr.ErrorCode != 0 {
+			// the server returned an RPC-level error
+			fmt.Fprintf(os.Stderr, "RPC error %d: %s\n", rpcErr.ErrorCode, rpcErr.Message)
+			os.Exit(1)
+		}
+
+		// deserialize the keystore and persist it to the requested path
+		_, err = crypto.LuxKeyStoreFromRpc(rpcRes.Keystore, rpcNewHost)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "failed to save host keystore: %v\n", err)
+		}
+	}
+
+	if rpcNewNode != "" {
+		rpcRes, rpcErr, err := cl.Execute(rpc.LuxRpcRequest{
+			RequestID: counter,
+			Controller: "node",
+			Command: "new-node",
+		})
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "failed to send request: %v\n", err)
+			os.Exit(1)
+		}
+		counter++
+
+		if rpcErr.ErrorCode != 0 {
+			// the server returned an RPC-level error
+			fmt.Fprintf(os.Stderr, "RPC error %d: %s\n", rpcErr.ErrorCode, rpcErr.Message)
+			os.Exit(1)
+		}
+
+		// deserialize the keystore and persist it to the requested path
+		_, err = crypto.LuxKeyStoreFromRpc(rpcRes.Keystore, rpcNewNode)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "failed to save node keystore: %v\n", err)
+		}
+	}
 }
 
 func main() {
@@ -13,17 +13,17 @@ const LUX_XML_ERROR_END = "</error>"
 type LuxRpcDefragState int
 
 const (
-	LuxRpcDefragStateOff = 0
+	defragStateOff = 0
 
-	LuxRpcDefragStateReading = 1
-	LuxRpcDefragStateReadingRequest = 1
-	LuxRpcDefragStateReadingResponse = 2
-	LuxRpcDefragStateReadingError = 3
+	defragStateReading = 1
+	defragStateReadingRequest = 1
+	defragStateReadingResponse = 2
+	defragStateReadingError = 3
 
-	LuxRpcDefragStateHasData = 4
-	LuxRpcDefragStateHasRequest = 4
-	LuxRpcDefragStateHasResponse = 5
-	LuxRpcDefragStateHasError = 6
+	defragStateHasData = 4
+	defragStateHasRequest = 4
+	defragStateHasResponse = 5
+	defragStateHasError = 6
 )
 
 type LuxRpcDefrag struct {
@@ -36,22 +36,22 @@ type LuxRpcDefrag struct {
 func NewLuxRpcDefrag() LuxRpcDefrag {
 	return LuxRpcDefrag{
 		buffer: proto.NewLuxBuffer(),
-		state: LuxRpcDefragStateOff,
+		state: defragStateOff,
 		beginOff: 0,
 		endOff: 0,
 	}
 }
 
 func (def *LuxRpcDefrag) HasRequest() bool {
-	return def.state == LuxRpcDefragStateHasRequest
+	return def.state == defragStateHasRequest
 }
 
 func (def *LuxRpcDefrag) HasResponse() bool {
-	return def.state == LuxRpcDefragStateHasResponse
+	return def.state == defragStateHasResponse
 }
 
 func (def *LuxRpcDefrag) HasError() bool {
-	return def.state == LuxRpcDefragStateHasError
+	return def.state == defragStateHasError
 }
 
 func matchTag(slice []byte, tag string) bool {
@@ -66,20 +66,20 @@ func matchTag(slice []byte, tag string) bool {
 // will return true if data is complete
 func (def *LuxRpcDefrag) Feed(data []byte) bool {
 	// try find tag opening at beginning
-	if def.state == LuxRpcDefragStateOff {
+	if def.state == defragStateOff {
 		for i := 0; i < len(data); i++ {
 			slice := data[i:]
 
 			if matchTag(slice, LUX_XML_REQUEST_BEGIN) {
-				def.state = LuxRpcDefragStateReadingRequest
+				def.state = defragStateReadingRequest
 				def.beginOff = i + len(LUX_XML_REQUEST_BEGIN)
 				break
 			} else if matchTag(slice, LUX_XML_RESPONSE_BEGIN) {
-				def.state = LuxRpcDefragStateReadingResponse
+				def.state = defragStateReadingResponse
 				def.beginOff = i + len(LUX_XML_RESPONSE_BEGIN)
 				break
 			} else if matchTag(slice, LUX_XML_ERROR_BEGIN) {
-				def.state = LuxRpcDefragStateReadingError
+				def.state = defragStateReadingError
 				def.beginOff = i + len(LUX_XML_ERROR_BEGIN)
 				break
 			}
@@ -87,27 +87,27 @@ func (def *LuxRpcDefrag) Feed(data []byte) bool {
 	}
 
 	// now check if slice has ending tag, if so - complete data
-	if def.state >= LuxRpcDefragStateReading {
+	if def.state >= defragStateReading {
 		for i := def.beginOff; i < len(data); i++ {
 			slice := data[i:]
 
 			if matchTag(slice, LUX_XML_REQUEST_END) {
-				def.state = LuxRpcDefragStateHasRequest
+				def.state = defragStateHasRequest
 				def.endOff = i + len(LUX_XML_REQUEST_END)
 				break
 			} else if matchTag(slice, LUX_XML_RESPONSE_END) {
-				def.state = LuxRpcDefragStateHasResponse
+				def.state = defragStateHasResponse
 				def.endOff = i + len(LUX_XML_RESPONSE_END)
 				break
 			} else if matchTag(slice, LUX_XML_ERROR_END) {
-				def.state = LuxRpcDefragStateHasError
+				def.state = defragStateHasError
 				def.endOff = i + len(LUX_XML_ERROR_END)
 				break
 			}
 		}
 
 		// got end tag? push exact 0:endOff slice
-		if def.state >= LuxRpcDefragStateHasData {
+		if def.state >= defragStateHasData {
 			def.buffer.WriteBytes(data[:def.endOff])
 
 			return true // we got complete data!
|
|||
|
||||
// reset state
|
||||
def.buffer = proto.NewLuxBuffer()
|
||||
def.state = LuxRpcDefragStateOff
|
||||
def.state = defragStateOff
|
||||
def.beginOff = 0
|
||||
def.endOff = 0
|
||||
|
||||
|
|
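The rename above does not change how the defragmenter is driven. Below is a minimal sketch of the intended read cycle, assuming a net.Conn, a hypothetical readFrame helper, and that Feed accumulates partial chunks internally between calls (the elided middle of Feed is not shown in these hunks); NewLuxRpcDefrag, Feed, HasError, and GetAndForget are the names from the hunks above.

// readFrame is a hypothetical helper: keep feeding socket reads into the
// defragmenter until Feed reports that a complete <request>, <response>,
// or <error> frame has been assembled, then hand back the raw bytes.
func readFrame(conn net.Conn) (frame []byte, isRpcError bool, err error) {
	def := rpc.NewLuxRpcDefrag()
	buf := make([]byte, 4096)
	for {
		n, err := conn.Read(buf)
		if err != nil {
			return nil, false, err
		}
		if def.Feed(buf[:n]) {
			// A matching end tag was found; the frame is complete.
			break
		}
	}
	isRpcError = def.HasError()
	// GetAndForget returns the buffered frame and resets the defrag state.
	return def.GetAndForget(), isRpcError, nil
}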