In the previous posts we completed a CQRS-style data collection (entry) platform. The data itself will originate from all kinds of terminal systems in the field: web, desktop, and mobile clients. To make the system complete, these front-end devices must be joined to the data collection platform over some form of network connection. Two options can provide the connection we need: a RESTful API or gRPC. Because gRPC runs on HTTP/2, it supports long-lived connections and bidirectional streaming; for front ends such as POS terminals that need real-time operational control, gRPC should therefore be the right choice, since persistent connections and bidirectional streams are considerably more efficient. gRPC is Google's standard, built on protobuf messages: a binary serialization mechanism for data exchange. I won't repeat gRPC's advantages here; readers can refer to the earlier posts discussing gRPC.
Below is a sketch of the system structure:
This post focuses on the concrete implementation of both ends of the gRPC connection: the server and the client. As just mentioned, gRPC is an open-source Google project with official client libraries for many languages: Java, C++, Python, Go ... but no Scala, so we have to look for third-party Scala support. Two gRPC Scala libraries are currently available: ScalaPB and akka-grpc. akka-grpc is built on akka-stream and akka-http and should in principle be the better fit, but it is still a preview release, so we will revisit it later; for now ScalaPB is the only practical choice. ScalaPB is a fairly mature gRPC/protobuf library for Scala, and it has been introduced and demonstrated in earlier posts. Below we use ScalaPB to implement the client-platform integration for our example.
First, gRPC transfers its data serialized with protobuf. Here is the .proto definition file for this example:
syntax = "proto3";
import "google/protobuf/wrappers.proto";
import "google/protobuf/any.proto";
import "scalapb/scalapb.proto";
option (scalapb.options) = {
// use a custom Scala package name
// package_name: "io.ontherocks.introgrpc.demo"
// don't append file name to package
flat_package: true
// generate one Scala file for all messages (services still get their own file)
single_file: true
// add imports to generated file
// useful when extending traits or using custom types
// import: "io.ontherocks.hellogrpc.RockingMessage"
// code to put at the top of generated file
// works only with `single_file: true`
//preamble: "sealed trait SomeSealedTrait"
};
package com.datatech.pos.messages;
message PBVchState { //voucher state
    string opr = 1;   //cashier id
    int64 jseq = 2;   //begin journal sequence for read-side replay
    int32 num = 3;    //current voucher number
    int32 seq = 4;    //current sequence number
    bool void = 5;    //void mode
    bool refd = 6;    //refund mode
    bool susp = 7;    //voucher suspended
    bool canc = 8;    //voucher cancelled
    bool due = 9;     //balance due
    string su = 10;   //supervisor id
    string mbr = 11;  //member id
    int32 mode = 12;  //current operation mode: 0=logOff, 1=LogOn, 2=Payment
}
message PBTxnItem { //transaction record
    string txndate = 1;  //transaction date
    string txntime = 2;  //entry time
    string opr = 3;      //operator id
    int32 num = 4;       //sales voucher number
    int32 seq = 5;       //transaction sequence number
    int32 txntype = 6;   //transaction type
    int32 salestype = 7; //sales type
    int32 qty = 8;       //quantity
    int32 price = 9;     //unit price (in cents)
    int32 amount = 10;   //gross amount (in cents)
    int32 disc = 11;     //discount rate (%)
    int32 dscamt = 12;   //discount amount: negative; net amount = amount + dscamt
    string member = 13;  //member card number
    string code = 14;    //code (item, card number, ...)
    string acct = 15;    //account
    string dpt = 16;     //department
}
message PBPOSResponse {
int32 sts = 1;
string msg = 2;
PBVchState voucher = 3;
repeated PBTxnItem txnitems = 4;
}
message PBPOSCommand {
int64 shopid = 1;
string commandname = 2;
string delimitedparams = 3; //for multiple parameters, use ; to delimit
}
service SendCommand {
rpc SingleResponse(PBPOSCommand) returns (PBPOSResponse) {};
rpc MultiResponse(PBPOSCommand) returns (stream PBPOSResponse) {};
}
The front end sends a PBPOSCommand to the platform in one of two modes: SingleResponse represents the traditional request/response interaction, while MultiResponse, i.e. server-streaming, has the front end send one command and the server return a stream of responses (a response-stream). Conversion functions are required in both directions: between PBPOSCommand and the internal POSMessage, and between the internal POSResponse and PBPOSResponse:
package com.datatech.pos.cloud
import Messages._
import com.datatech.pos.messages._
object PBConverter {
implicit class PBConverter(pbmsg: PBPOSCommand) {
def toPOSCommand: POSMessage = pbmsg.commandname.toUpperCase match {
case "LOGON" => POSMessage(pbmsg.shopid,LogOn(pbmsg.delimitedparams))
case "LOGOFF" => POSMessage(pbmsg.shopid,LogOff)
...
}
}
implicit class POSResponseConvert(resp: POSResponse) {
def toPBPOSResponse: PBPOSResponse = new PBPOSResponse(
sts = resp.sts,
msg = resp.msg,
voucher = Some(resp.voucher.toPBVchState),
txnitems = resp.txnItems.map(_.toPBTxnItem)
)
}
implicit class VchStateConvert(state: VchStates) {
  def toPBVchState: PBVchState = new PBVchState(
    opr = state.opr,    //cashier id
    jseq = state.jseq,  //begin journal sequence for read-side replay
    num = state.num,    //current voucher number
    seq = state.seq,    //current sequence number
    void = state.void,  //void mode
    refd = state.refd,  //refund mode
    susp = state.susp,  //voucher suspended
    canc = state.canc,  //voucher cancelled
    due = state.due,    //balance due
    su = state.su,      //supervisor id
    mbr = state.mbr,    //member id
    mode = state.mode   //current operation mode: 0=logOff, 1=LogOn, 2=Payment
  )
}
implicit class TxnItemConvert(item: TxnItem) {
  def toPBTxnItem: PBTxnItem = new PBTxnItem(
    txndate = item.txndate,     //transaction date
    txntime = item.txntime,     //entry time
    opr = item.opr,             //operator id
    num = item.num,             //sales voucher number
    seq = item.seq,             //transaction sequence number
    txntype = item.txntype,     //transaction type
    salestype = item.salestype, //sales type
    qty = item.qty,             //quantity
    price = item.price,         //unit price (in cents)
    amount = item.amount,       //gross amount (in cents)
    disc = item.disc,           //discount rate (%)
    dscamt = item.dscamt,       //discount amount: negative; net amount = amount + dscamt
    member = item.member,       //member card number
    code = item.code,           //code (item, card number, ...)
    acct = item.acct,           //account
    dpt = item.dpt              //department
  )
}
}
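For readers without the project source: the domain types referenced here (POSMessage, POSResponse, VchStates, TxnItem) come from Messages._ elsewhere in the project. Inferred from the converters above, their shape is roughly as follows; this is a sketch only, and any parameter names not visible in the converters are guesses:

//inferred shape of the write-side domain types (sketch only; the real
//definitions live in Messages.scala, which is not shown in this post)
case class POSMessage(shopid: Long, cmd: Any)  //shop id plus a concrete command such as LogOn/LogOff
case class POSResponse(sts: Int, msg: String, voucher: VchStates, txnItems: List[TxnItem])
case class VchStates(opr: String, jseq: Long, num: Int, seq: Int,
                     void: Boolean, refd: Boolean, susp: Boolean, canc: Boolean,
                     due: Boolean, su: String, mbr: String, mode: Int)
case class TxnItem(txndate: String, txntime: String, opr: String, num: Int, seq: Int,
                   txntype: Int, salestype: Int, qty: Int, price: Int, amount: Int,
                   disc: Int, dscamt: Int, member: String, code: String, acct: String, dpt: String)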
With the conversions in place, we can implement the platform-side POS interface service:
package com.datatech.pos.cloud
import com.datatech.pos.messages._
import io.grpc.stub.StreamObserver
import PBConverter._
import akka.actor.ActorRef
import akka.pattern.ask
import scala.concurrent.duration._
import akka.util.Timeout
import Messages._
import scala.concurrent.{Await, Future}
import com.typesafe.config.ConfigFactory
import com.datatech.sdp
import sdp.logging._
class gRPCServices(writerRouter: ActorRef) extends SendCommandGrpc.SendCommand with LogSupport {
import gRPCServices._
import PBConverter._
var posConfig: com.typesafe.config.Config = _
var exetimeout: Int = 5
try {
posConfig = ConfigFactory.load("pos.conf").getConfig("pos.server")
exetimeout = posConfig.getInt("executimeout")
}
catch {
case excp : Throwable =>
log.warn(s"gRPCServices: ${excp.getMessage}")
exetimeout = 5
}
override def singleResponse(request: PBPOSCommand): Future[PBPOSResponse] = {
  getPBResponse(writerRouter, request.toPOSCommand, exetimeout)
}
//server-streaming mode is not implemented yet
override def multiResponse(request: PBPOSCommand, responseObserver: StreamObserver[PBPOSResponse]): Unit = ???
}
object gRPCServices {
import scala.concurrent.ExecutionContext.Implicits.global
def getPBResponse(ref: ActorRef, cmd: POSMessage, executimeout: Int = 5): Future[PBPOSResponse] = {
implicit val timeout = Timeout(executimeout.seconds)
val futRes: Future[POSResponse] = ask(ref, cmd).mapTo[POSResponse]
futRes.map(_.toPBPOSResponse)
}
}
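multiResponse is left unimplemented (???) for now. Just to illustrate the server-streaming wiring, a minimal sketch could push results through the StreamObserver as below; the one-response-per-command behaviour is illustrative only, a real implementation would call onNext once per response in the stream:

//sketch only: wiring a server-streaming response through the StreamObserver
override def multiResponse(request: PBPOSCommand, responseObserver: StreamObserver[PBPOSResponse]): Unit = {
  import scala.concurrent.ExecutionContext.Implicits.global
  import scala.util.{Failure, Success}
  getPBResponse(writerRouter, request.toPOSCommand, exetimeout).onComplete {
    case Success(resp) =>
      responseObserver.onNext(resp)   //for a real stream, repeat for each response
      responseObserver.onCompleted()
    case Failure(ex) =>
      responseObserver.onError(ex)
  }
}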
Now we need to integrate gRPCServices with the POS system: a PBPOSCommand arriving from the front end is converted to a POSMessage and relayed by the POSAgent to the WriterRouter, which forwards it to the cluster-sharded writer for the actual processing; the resulting POSResponse is then converted to a PBPOSResponse and returned to the front end through the service:
def getPBResponse(ref: ActorRef, cmd: POSMessage, executimeout: Int = 5): Future[PBPOSResponse] = {
implicit val timeout = Timeout(executimeout.seconds)
val futRes: Future[POSResponse] = ask(ref, cmd).mapTo[POSResponse]
futRes.map(_.toPBPOSResponse)
}
Notice that the ask() pattern is used above for two-way communication. The ref here is an intermediary message-relay actor (POSAgent):
var config = ConfigFactory.parseString("akka.remote.netty.tcp.port=\"" + port + "\"")
.withFallback(ConfigFactory.parseString("akka.remote.netty.tcp.hostname=\"" + host + "\""))
.withFallback(ConfigFactory.parseString("cassandra-journal.contact-points=[\"" + host + "\"]"))
.withFallback(ConfigFactory.parseString("cassandra-snapshot-store.contact-points=[\"" + host + "\"]"))
if (!seednodes.isEmpty)
config = config.withFallback(ConfigFactory.parseString("akka.cluster.seed-nodes=[" + seednodes + "]"))
//roles can be deployed on this node
config = config.withFallback(ConfigFactory.parseString("akka.cluster.roles = [poswriter]"))
.withFallback(ConfigFactory.load())
val posSystem = ActorSystem(systemName, config)
posSystem.actorOf(ClusterMonitor.props, "cps-cluster-monitor")
posSystem.actorOf(ActionReader.readerProps(showSteps),"reader")
val readerRouter = posSystem.actorOf(ReaderRouter.props(showSteps),"reader-router")
WriterShard.deployShard(posSystem)(ReaderInfo(readerRouter,writeOnly),showSteps)
val posHandler = ClusterSharding(posSystem).shardRegion(WriterShard.shardName)
val posref = posSystem.actorOf(WriterRouter.props(posHandler), "writer-router")
val passer = posSystem.actorOf(POSAgent.props(posref), "pos-agent")
val svc = SendCommandGrpc.bindService(new gRPCServices(passer), posSystem.dispatcher)
runServer(svc)
...
package com.datatech.pos.cloud
import akka.actor._
import com.datatech.sdp
import sdp.logging._
import Messages._
object POSAgent {
  def props(pos: ActorRef) = Props(new POSAgent(pos))
}
class POSAgent(posHandler: ActorRef) extends Actor with LogSupport {
  var _sender: ActorRef = _
  override def receive: Receive = {
    case msg @ POSMessage(_, _) =>
      _sender = sender()   //remember the asking actor
      posHandler ! msg     //forward the command to the writer router
    case resp: POSResponse => _sender ! resp   //relay the response back to the asker
  }
}
...
package com.datatech.pos.cloud
import akka.actor._
import com.datatech.sdp
import sdp.logging._
import Messages._
object WriterRouter {
def props(pos: ActorRef) = Props(new WriterRouter(pos))
}
class WriterRouter(posHandler: ActorRef) extends Actor with LogSupport {
var _sender: ActorRef = _
override def receive: Receive = {
case msg @ POSMessage(_,_) =>
_sender = sender()
posHandler ! msg
case resp: POSResponse => _sender ! resp
// log.info(s"*********response from server: $resp *********")
}
}
The front end is the gRPC client. Let's build one to test the back-end control logic:
package poc.client
import scala.concurrent.Future
import com.datatech.pos.messages._
import com.datatech.pos.messages.SendCommandGrpc
import io.grpc.netty.{NegotiationType, NettyChannelBuilder}
object POCClient {
def main(args: Array[String]): Unit = {
val channel = NettyChannelBuilder
.forAddress("192.168.11.189",50051)
.negotiationType(NegotiationType.PLAINTEXT)
.build()
/*
//build the connection channel with the experimental builder
val channel = io.grpc.ManagedChannelBuilder
  .forAddress("192.168.11.189",50051)
  .usePlaintext(true)
  .build()
val pbCommand = PBPOSCommand(1022,"LogOn","888")
//async call: the non-blocking stub returns a Future
val asyncStub = SendCommandGrpc.stub(channel)
val futResponse: Future[PBPOSResponse] = asyncStub.singleResponse(pbCommand)
import scala.concurrent.ExecutionContext.Implicits.global
futResponse.foreach(result => println(result)) */
val pbCommand = PBPOSCommand(1022,"LogOn","888")
val syncStub1: SendCommandGrpc.SendCommandBlockingClient = SendCommandGrpc.blockingStub(channel)
val response1: PBPOSResponse = syncStub1.singleResponse(pbCommand)
println(s"${response1.msg}")
val pbCommand2 = PBPOSCommand(1022,"LogOff","")
//sync call
val syncStub: SendCommandGrpc.SendCommandBlockingClient = SendCommandGrpc.blockingStub(channel)
val response: PBPOSResponse = syncStub.singleResponse(pbCommand2)
println(s"${response.msg}")
scala.io.StdIn.readLine()
channel.shutdown()
}
}
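For completeness: once multiResponse is implemented on the server side, the blocking stub should consume the response stream as an iterator. A sketch, reusing the channel above (the command itself is purely illustrative):

//sketch: consuming the server-streaming endpoint with the blocking stub
val streamingStub = SendCommandGrpc.blockingStub(channel)
val responses = streamingStub.multiResponse(PBPOSCommand(1022,"LogOn","888"))
responses.foreach(r => println(r.msg))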
A few points deserve attention here:
1. The strong (fully-qualified) names of the protobuf messages must be identical on both ends. The client uses the exact same posmessages.proto definition file shown at the top of this post.
syntax = "proto3";
import "google/protobuf/wrappers.proto";
import "google/protobuf/any.proto";
import "scalapb/scalapb.proto";
option (scalapb.options) = {
// use a custom Scala package name
// package_name: "io.ontherocks.introgrpc.demo"
// don't append file name to package
flat_package: true
// generate one Scala file for all messages (services still get their own file)
single_file: true
// add imports to generated file
// useful when extending traits or using custom types
// import: "io.ontherocks.hellogrpc.RockingMessage"
// code to put at the top of generated file
// works only with `single_file: true`
//preamble: "sealed trait SomeSealedTrait"
};
package com.datatech.pos.messages;
message PBVchState { //單據狀態
string opr = 1; //收款員
int64 jseq = 2; //begin journal sequence for read-side replay
int32 num = 3; //當前單號
int32 seq = 4; //當前序號
bool void = 5; //取消模式
bool refd = 6; //退款模式
bool susp = 7; //掛單
bool canc = 8; //廢單
bool due = 9; //當前餘額
string su = 10; //主管編號
string mbr = 11; //會員號
int32 mode = 12; //當前操作流程:0=logOff, 1=LogOn, 2=Payment
}
message PBTxnItem { //交易記錄
string txndate = 1; //交易日期
string txntime = 2; //錄入時間
string opr = 3; //操作員
int32 num = 4; //銷售單號
int32 seq = 5; //交易序號
int32 txntype = 6; //交易型別
int32 salestype = 7; //銷售型別
int32 qty = 8; //交易數量
int32 price = 9; //單價(分)
int32 amount = 10; //碼洋(分)
int32 disc = 11; //折扣率 (%)
int32 dscamt = 12; //折扣額:負值 net實洋 = amount + dscamt
string member = 13; //會員卡號
string code = 14; //編號(商品、卡號...)
string acct = 15; //賬號
string dpt = 16; //部類
}
message PBPOSResponse {
int32 sts = 1;
string msg = 2;
PBVchState voucher = 3;
repeated PBTxnItem txnitems = 4;
}
message PBPOSCommand {
int64 shopid = 1;
string commandname = 2;
string delimitedparams = 3;
}
service SendCommand {
rpc SingleResponse(PBPOSCommand) returns (PBPOSResponse) {};
rpc MultiResponse(PBPOSCommand) returns (stream PBPOSResponse) {};
}
In particular, note the package com.datatech.pos.messages declaration: it must match between server and client.
2. Watch out for the client's channel builder: the ScalaPB examples use ManagedChannelBuilder, which is still flagged as experimental:
//build connection channel
val channel = io.grpc.ManagedChannelBuilder
.forAddress("132.232.229.60",50051)
.usePlaintext(true)
.build()
Use the official channel builder from grpc-netty instead:
val channel = NettyChannelBuilder
.forAddress("192.168.11.189",50051)
.negotiationType(NegotiationType.PLAINTEXT)
.build()
Configured as above, NettyChannelBuilder is functionally equivalent to the io.grpc.ManagedChannelBuilder version, but NettyChannelBuilder exposes many more settings, such as SSL/TLS.
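As an illustration, a TLS-secured channel could be configured along these lines. This is a sketch only, and the certificate file ca.crt is a hypothetical placeholder:

import java.io.File
import io.grpc.netty.{GrpcSslContexts, NegotiationType, NettyChannelBuilder}

//a sketch of a TLS channel; "ca.crt" is a placeholder for the CA or server
//certificate the client is supposed to trust
val sslContext = GrpcSslContexts.forClient()
  .trustManager(new File("ca.crt"))
  .build()
val secureChannel = NettyChannelBuilder
  .forAddress("192.168.11.189", 50051)
  .negotiationType(NegotiationType.TLS)
  .sslContext(sslContext)
  .build()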
3. One more thing: the client sends operator commands strictly in sequence; each command must receive its result before the next one is sent. So the client must use the synchronous blockingStub for these calls.
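That said, if the asynchronous stub were ever needed, the same ordering could still be enforced by chaining the Futures. A sketch, reusing the channel above:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

//sequencing with the async stub: LogOff is sent only after the LogOn
//response has arrived
val asyncStub = SendCommandGrpc.stub(channel)
val done: Future[PBPOSResponse] =
  asyncStub.singleResponse(PBPOSCommand(1022,"LogOn","888")).flatMap { logOnResp =>
    println(logOnResp.msg)
    asyncStub.singleResponse(PBPOSCommand(1022,"LogOff",""))
  }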
Below are the configuration files used in this demo:
project/plugins.sbt
addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.9") addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.9.2") addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.3.15") addSbtPlugin("com.thesamet" % "sbt-protoc" % "0.99.21") libraryDependencies += "com.thesamet.scalapb" %% "compilerplugin" % "0.9.0-M6"
build.sbt
name := "pos-on-cloud" version := "0.1" scalaVersion := "2.12.8" scalacOptions += "-Ypartial-unification" val akkaVersion = "2.5.23" libraryDependencies := Seq( "com.typesafe.akka" %% "akka-cluster-metrics" % akkaVersion, "com.typesafe.akka" %% "akka-cluster-sharding" % akkaVersion, "com.typesafe.akka" %% "akka-persistence" % akkaVersion, "com.lightbend.akka" %% "akka-stream-alpakka-cassandra" % "1.0.1", "org.mongodb.scala" %% "mongo-scala-driver" % "2.6.0", "com.lightbend.akka" %% "akka-stream-alpakka-mongodb" % "1.0.1", "com.typesafe.akka" %% "akka-persistence-query" % akkaVersion, "com.typesafe.akka" %% "akka-persistence-cassandra" % "0.97", "com.datastax.cassandra" % "cassandra-driver-core" % "3.6.0", "com.datastax.cassandra" % "cassandra-driver-extras" % "3.6.0", "ch.qos.logback" % "logback-classic" % "1.2.3", "io.monix" %% "monix" % "3.0.0-RC2", "org.typelevel" %% "cats-core" % "2.0.0-M1", "io.grpc" % "grpc-netty" % scalapb.compiler.Version.grpcJavaVersion, "com.thesamet.scalapb" %% "scalapb-runtime" % scalapb.compiler.Version.scalapbVersion % "protobuf", "com.thesamet.scalapb" %% "scalapb-runtime-grpc" % scalapb.compiler.Version.scalapbVersion ) PB.targets in Compile := Seq( scalapb.gen() -> (sourceManaged in Compile).value ) enablePlugins(JavaAppPackaging)
resources/application.conf
akka.actor.warn-about-java-serializer-usage = off
akka.log-dead-letters-during-shutdown = off
akka.log-dead-letters = off
akka.remote.use-passive-connections = off

akka {
  loglevel = INFO
  actor {
    provider = "cluster"
  }
  remote {
    log-remote-lifecycle-events = on
    netty.tcp {
      hostname = "127.0.0.1"
      # port set to 0 for netty to randomly choose from
      port = 0
    }
  }
  cluster {
    seed-nodes = [
      "akka.tcp://cloud-pos-server@172.27.0.8:2551"
      ,"akka.tcp://cloud-pos-server@172.27.0.7:2551"
    ]
    log-info = off
    sharding {
      role = "poswriter"
      passivate-idle-entity-after = 30 m
    }
  }
  persistence {
    journal.plugin = "cassandra-journal"
    snapshot-store.plugin = "cassandra-snapshot-store"
  }
}

cassandra-journal {
  contact-points = ["172.27.0.8", "172.27.0.7", "172.27.0.15"]
}

cassandra-snapshot-store {
  contact-points = ["172.27.0.8", "172.27.0.7", "172.27.0.15"]
}

# Enable metrics extension in akka-cluster-metrics.
akka.extensions = ["akka.cluster.metrics.ClusterMetricsExtension"]

akka.actor.deployment {
  /reader-router/readerRouter = {
    # Router type provided by metrics extension.
    router = cluster-metrics-adaptive-group
    # Router parameter specific for metrics extension.
    # metrics-selector = heap
    # metrics-selector = load
    # metrics-selector = cpu
    metrics-selector = mix
    # routees.paths = ["/user/reader"]
    cluster {
      max-nr-of-instances-per-node = 10
      max-total-nr-of-instances = 1000
      enabled = on
      #set to on when there is a instance of routee created
      #on the same node as the router
      #very important to set this off, could cause missing msg in local cluster
      allow-local-routees = on
    }
  }
}

dbwork-dispatcher {
  # Dispatcher is the name of the event-based dispatcher
  type = Dispatcher
  # What kind of ExecutionService to use
  executor = "fork-join-executor"
  # Configuration for the fork join pool
  fork-join-executor {
    # Min number of threads to cap factor-based parallelism number to
    parallelism-min = 2
    # Parallelism (threads) ... ceil(available processors * factor)
    parallelism-factor = 2.0
    # Max number of threads to cap factor-based parallelism number to
    parallelism-max = 10
  }
  # Throughput defines the maximum number of messages to be
  # processed per actor before the thread jumps to the next actor.
  # Set to 1 for as fair as possible.
  throughput = 100
}
resources/logback.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <Pattern>
                %d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n
            </Pattern>
        </encoder>
    </appender>
    <logger name="com.datatech" level="info" additivity="false">
        <appender-ref ref="STDOUT" />
    </logger>
    <logger name="com.datatech.sdp" level="info" additivity="false">
        <appender-ref ref="STDOUT" />
    </logger>
    <root level="warn">
        <appender-ref ref="STDOUT" />
    </root>
</configuration>
resources/pos.conf
pos {
  server {
    debug = false
    cqlport = 9042
    readinterval = 1000
    executimeout = 5
  }
}