memcached是一种常见的缓存服务,之前开了一个专门的博客专栏用来讲解memcached。
在memcached官网中提供了client,C++的版本是libmemcached。这里我们想用brpc去替换掉官方的libmemcached完成客户端client的访问,服务端的memcached当然还是一样的。
1、brpc优势
这里会给出相对于libmemcached,利用brpc写的client有以下的优势:
- 线程安全。用户不需要为每个线程建立独立的client。
- 支持同步、异步、半同步等访问方式,能使用ParallelChannel等组合访问方式。
- 支持多种连接方式。支持超时、backup request、取消、tracing、内置服务等一系列brpc提供的福利。
- 有明确的request和response。而libmemcached是没有的,收到的消息不能直接和发出的消息对应上,用户得做额外开发,而且并没有那么容易做对。
2、访问单台的memcached
创建访问memcached的channel
#include <brpc/memcache.h>
#include <brpc/channel.h>
// Create the channel. Note that the protocol must be configured as
// PROTOCOL_MEMCACHE so the channel talks to memcached.
brpc::Channel channel;
brpc::ChannelOptions options;
options.protocol = brpc::PROTOCOL_MEMCACHE;
if (channel.Init("0.0.0.0:11211", &options) != 0) { // 11211 is memcached's default port
LOG(FATAL) << "Fail to init channel to memcached";
return -1;
}
...
往memcached中设置一份数据
// Write key="hello" value="world" flags=0xdeadbeef, expiring in 10 seconds,
// ignoring CAS (cas_value=0).
brpc::MemcacheRequest request;
brpc::MemcacheResponse response;
brpc::Controller cntl;
if (!request.Set("hello", "world", 0xdeadbeef/*flags*/, 10/*expiring seconds*/, 0/*ignore cas*/)) {
LOG(FATAL) << "Fail to SET request";
return -1;
}
// method is NULL for the memcache protocol; no stub is needed.
channel.CallMethod(NULL, &cntl, &request, &response, NULL/*done*/);
if (cntl.Failed()) {
LOG(FATAL) << "Fail to access memcached, " << cntl.ErrorText();
return -1;
}
// Pop result is independent of the RPC result: the RPC may succeed while
// the SET itself failed, so both must be checked.
if (!response.PopSet(NULL)) {
LOG(FATAL) << "Fail to SET memcached, " << response.LastError();
return -1;
}
...
上述代码的说明:
-
请求类型必须为MemcacheRequest,回复类型必须为MemcacheResponse,否则CallMethod会失败。不需要stub,直接调用channel.CallMethod,method填NULL。
-
调用request.XXX()增加操作,本例XXX=Set,一个request多次调用不同的操作,这些操作会被同时送到memcached(常被称为pipeline模式)。
-
依次调用response.PopXXX()弹出操作结果,本例XXX=Set,成功返回true,失败返回false,调用response.LastError()可获得错误信息。XXX必须和request的依次对应,否则失败。本例中若用PopGet就会失败,错误信息为“not a GET response”。
-
Pop结果独立于RPC结果。即使“不能把某个值设入memcached”,RPC可能还是成功的。RPC失败指连接断开,超时之类的。如果业务上认为要成功操作才算成功,那么你不仅要判RPC成功,还要判PopXXX是成功的。
3、brpc支持memcached对应的API
目前支持的请求操作有:
// Each call appends one operation to a MemcacheRequest; multiple calls on
// the same request are pipelined and sent to memcached together.
bool Set(const Slice& key, const Slice& value, uint32_t flags, uint32_t exptime, uint64_t cas_value);
bool Add(const Slice& key, const Slice& value, uint32_t flags, uint32_t exptime, uint64_t cas_value);
bool Replace(const Slice& key, const Slice& value, uint32_t flags, uint32_t exptime, uint64_t cas_value);
bool Append(const Slice& key, const Slice& value, uint32_t flags, uint32_t exptime, uint64_t cas_value);
bool Prepend(const Slice& key, const Slice& value, uint32_t flags, uint32_t exptime, uint64_t cas_value);
bool Delete(const Slice& key);
bool Flush(uint32_t timeout);
bool Increment(const Slice& key, uint64_t delta, uint64_t initial_value, uint32_t exptime);
bool Decrement(const Slice& key, uint64_t delta, uint64_t initial_value, uint32_t exptime);
bool Touch(const Slice& key, uint32_t exptime);
bool Version();
对应的回复操作:
// Call LastError() of the response to check the error text when any following operation fails.
// Results must be popped in the same order as the operations were added to
// the request; popping a mismatched type fails.
bool PopGet(IOBuf* value, uint32_t* flags, uint64_t* cas_value);
bool PopGet(std::string* value, uint32_t* flags, uint64_t* cas_value);
bool PopSet(uint64_t* cas_value);
bool PopAdd(uint64_t* cas_value);
bool PopReplace(uint64_t* cas_value);
bool PopAppend(uint64_t* cas_value);
bool PopPrepend(uint64_t* cas_value);
bool PopDelete();
bool PopFlush();
bool PopIncrement(uint64_t* new_value, uint64_t* cas_value);
bool PopDecrement(uint64_t* new_value, uint64_t* cas_value);
bool PopTouch();
bool PopVersion(std::string* version);
4、实例
这里给出brpc开源的一个简单访问memcached的实例,是一个对memcached操作最简单的set get操作,具体代码如下:
#include <gflags/gflags.h>
#include <bthread/bthread.h>
#include <butil/logging.h>
#include <butil/string_printf.h>
#include <brpc/channel.h>
#include <brpc/memcache.h>
#include <brpc/policy/couchbase_authenticator.h>
// Command-line flags controlling the benchmark client.
DEFINE_int32(thread_num, 10, "Number of threads to send requests");
DEFINE_bool(use_bthread, false, "Use bthread to send requests");
DEFINE_bool(use_couchbase, false, "Use couchbase.");
DEFINE_string(connection_type, "", "Connection type. Available values: single, pooled, short");
DEFINE_string(server, "0.0.0.0:11211", "IP Address of server");
DEFINE_string(bucket_name, "", "Couchbase bucket name");
DEFINE_string(bucket_password, "", "Couchbase bucket password");
DEFINE_string(load_balancer, "", "The algorithm for load balancing");
DEFINE_int32(timeout_ms, 100, "RPC timeout in milliseconds");
DEFINE_int32(max_retry, 3, "Max retries(not including the first RPC)");
DEFINE_bool(dont_fail, false, "Print fatal when some call failed");
DEFINE_int32(exptime, 0, "The to-be-got data will be expired after so many seconds");
DEFINE_string(key, "hello", "The key to be fetched");
DEFINE_string(value, "world", "The value associated with the key");
DEFINE_int32(batch, 1, "Pipelined Operations");
// Global metrics exposed via bvar: latency distribution and error counter.
bvar::LatencyRecorder g_latency_recorder("client");
bvar::Adder<int> g_error_count("client_error_count");
// Monotonically increasing counter handing each sender thread a distinct
// base index for the keys it reads.
butil::static_atomic<int> g_sender_count = BUTIL_STATIC_ATOMIC_INIT(0);
// Worker loop run by each sender thread (pthread or bthread): repeatedly
// issues the same pipelined batch of GET requests over the shared channel
// and verifies the returned values against what main() SET beforehand.
// `arg' is the shared brpc::Channel (thread-safe, reused by all senders).
static void* sender(void* arg) {
google::protobuf::RpcChannel* channel =
static_cast<google::protobuf::RpcChannel*>(arg);
// Distinct base index per thread, taken from the shared atomic counter.
const int base_index = g_sender_count.fetch_add(1, butil::memory_order_relaxed);
std::string value;
std::vector<std::pair<std::string, std::string> > kvs;
kvs.resize(FLAGS_batch);
// Build the expected key/value pairs; they must match the keys/values
// main() wrote into memcached before starting the senders.
for (int i = 0; i < FLAGS_batch; ++i) {
kvs[i].first = butil::string_printf("%s%d", FLAGS_key.c_str(), base_index + i);
kvs[i].second = butil::string_printf("%s%d", FLAGS_value.c_str(), base_index + i);
}
brpc::MemcacheRequest request;
// Pipeline FLAGS_batch GET operations into a single request.
for (int i = 0; i < FLAGS_batch; ++i) {
CHECK(request.Get(kvs[i].first));
}
while (!brpc::IsAskedToQuit()) {
// We will receive response synchronously, safe to put variables
// on stack.
brpc::MemcacheResponse response;
brpc::Controller cntl;
// Because `done'(last parameter) is NULL, this function waits until
// the response comes back or error occurs(including timedout).
channel->CallMethod(NULL, &cntl, &request, &response, NULL);
// Record latency now; `elp' is reported if the RPC failed.
const int64_t elp = cntl.latency_us();
if (!cntl.Failed()) {
g_latency_recorder << cntl.latency_us();
// Pop the pipelined GET results in the same order they were added.
for (int i = 0; i < FLAGS_batch; ++i) {
uint32_t flags;
if (!response.PopGet(&value, &flags, NULL)) {
LOG(INFO) << "Fail to GET the key, " << response.LastError();
brpc::AskToQuit();
return NULL;
}
// main() set flags to 0xdeadbeef + key-index for each key.
CHECK(flags == 0xdeadbeef + base_index + i)
<< "flags=" << flags;
CHECK(kvs[i].second == value)
<< "base=" << base_index << " i=" << i << " value=" << value;
}
} else {
g_error_count << 1;
CHECK(brpc::IsAskedToQuit() || !FLAGS_dont_fail)
<< "error=" << cntl.ErrorText() << " latency=" << elp;
// We can't connect to the server, sleep a while. Notice that this
// is a specific sleeping to prevent this thread from spinning too
// fast. You should continue the business logic in a production
// server rather than sleeping.
bthread_usleep(50000);
}
}
return NULL;
}
// Entry point: initializes a memcache channel, pipelines one big SET request
// to seed #batch * #thread_num keys, then spawns sender threads that GET
// those keys in a loop while the main thread prints qps/latency stats.
int main(int argc, char* argv[]) {
// Parse gflags. We recommend you to use gflags as well.
GFLAGS_NS::ParseCommandLineFlags(&argc, &argv, true);
if (FLAGS_exptime < 0) {
FLAGS_exptime = 0;
}
// A Channel represents a communication line to a Server. Notice that
// Channel is thread-safe and can be shared by all threads in your program.
brpc::Channel channel;
// Create a channel for accessing memcached.
// Initialize the channel, NULL means using default options.
brpc::ChannelOptions options;
options.protocol = brpc::PROTOCOL_MEMCACHE;
options.connection_type = FLAGS_connection_type;
options.timeout_ms = FLAGS_timeout_ms/*milliseconds*/;
options.max_retry = FLAGS_max_retry;
// Optional Couchbase bucket authentication; `auth' is freed before exit.
if (FLAGS_use_couchbase && !FLAGS_bucket_name.empty()) {
brpc::policy::CouchbaseAuthenticator* auth =
new brpc::policy::CouchbaseAuthenticator(FLAGS_bucket_name,
FLAGS_bucket_password);
options.auth = auth;
}
if (channel.Init(FLAGS_server.c_str(), FLAGS_load_balancer.c_str(), &options) != 0) {
LOG(ERROR) << "Fail to initialize channel";
return -1;
}
// Write the key/value pairs into memcached.
// Pipeline #batch * #thread_num SET requests into memcache so that we
// have keys to get.
brpc::MemcacheRequest request;
brpc::MemcacheResponse response;
brpc::Controller cntl;
// One key per (thread, batch-slot) that the sender threads will read.
for (int i = 0; i < FLAGS_batch * FLAGS_thread_num; ++i) {
if (!request.Set(butil::string_printf("%s%d", FLAGS_key.c_str(), i),
butil::string_printf("%s%d", FLAGS_value.c_str(), i),
0xdeadbeef + i, FLAGS_exptime, 0)) {
LOG(ERROR) << "Fail to SET " << i << "th request";
return -1;
}
}
// Send all pipelined SETs synchronously (done=NULL) to the server.
channel.CallMethod(NULL, &cntl, &request, &response, NULL);
if (cntl.Failed()) {
LOG(ERROR) << "Fail to access memcache, " << cntl.ErrorText();
return -1;
}
// Pop each SET result. A Pop failure means the operation itself failed
// even though the RPC succeeded, so both must be checked.
for (int i = 0; i < FLAGS_batch * FLAGS_thread_num; ++i) {
if (!response.PopSet(NULL)) {
LOG(ERROR) << "Fail to SET memcache, i=" << i
<< ", " << response.LastError();
return -1;
}
}
if (FLAGS_exptime > 0) {
LOG(INFO) << "Set " << FLAGS_batch * FLAGS_thread_num
<< " values, expired after " << FLAGS_exptime << " seconds";
} else {
LOG(INFO) << "Set " << FLAGS_batch * FLAGS_thread_num
<< " values, never expired";
}
// Spawn the reader threads; every thread runs sender() on the shared channel.
std::vector<bthread_t> bids;
std::vector<pthread_t> pids;
if (!FLAGS_use_bthread) {
pids.resize(FLAGS_thread_num);
for (int i = 0; i < FLAGS_thread_num; ++i) {
if (pthread_create(&pids[i], NULL, sender, &channel) != 0) {
LOG(ERROR) << "Fail to create pthread";
return -1;
}
}
} else {
bids.resize(FLAGS_thread_num);
for (int i = 0; i < FLAGS_thread_num; ++i) {
if (bthread_start_background(
&bids[i], NULL, sender, &channel) != 0) {
LOG(ERROR) << "Fail to create bthread";
return -1;
}
}
}
// Report throughput/latency once per second until asked to quit.
while (!brpc::IsAskedToQuit()) {
sleep(1);
LOG(INFO) << "Accessing memcache server at qps=" << g_latency_recorder.qps(1)
<< " latency=" << g_latency_recorder.latency(1);
}
LOG(INFO) << "memcache_client is going to quit";
for (int i = 0; i < FLAGS_thread_num; ++i) {
if (!FLAGS_use_bthread) {
pthread_join(pids[i], NULL);
} else {
bthread_join(bids[i], NULL);
}
}
// Release the authenticator allocated above, if any.
if (options.auth) {
delete options.auth;
}
return 0;
}
参考
https://github.com/brpc/brpc/blob/master/docs/cn/memcache_client.md