-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtestAllCachePolicy.cpp
More file actions
324 lines (283 loc) · 11.5 KB
/
testAllCachePolicy.cpp
File metadata and controls
324 lines (283 loc) · 11.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
#include <algorithm>
#include <array>
#include <chrono>
#include <cstddef>
#include <iomanip>
#include <iostream>
#include <random>
#include <string>
#include <vector>

#include "XArcCache/XArcCache.h"
#include "XCachePolicy.h"
#include "XLFUCache.h"
#include "XLRUCache.h"
#include "XWTinyLFUCache.h"
// Simple stopwatch: starts timing on construction, elapsed() reports
// milliseconds since construction.
class Timer {
public:
  Timer() : start_(std::chrono::steady_clock::now()) {}

  // Returns the elapsed time in milliseconds since construction.
  // steady_clock is used instead of high_resolution_clock because it is
  // guaranteed monotonic, which interval measurement requires;
  // duration<double, milli> keeps sub-millisecond precision instead of
  // truncating to whole milliseconds as duration_cast<milliseconds> did.
  double elapsed() {
    auto now = std::chrono::steady_clock::now();
    return std::chrono::duration<double, std::milli>(now - start_).count();
  }

private:
  std::chrono::time_point<std::chrono::steady_clock> start_;
};
// Helper: print a per-algorithm hit-rate summary for one test scenario.
// testName/capacity are echoed verbatim; get_operations[i] and hits[i] hold
// the total number of get() calls and the hit count recorded for algorithm i.
void printResults(const std::string &testName, int capacity,
                  const std::vector<int> &get_operations,
                  const std::vector<int> &hits) {
  std::cout << "=== " << testName << " 结果汇总 ===" << std::endl;
  std::cout << "缓存大小: " << capacity << std::endl;

  // Algorithm display names; chosen to match the cache ordering used by the
  // test drivers in this file.
  std::vector<std::string> names;
  if (hits.size() == 3) {
    names = {"LRU", "LFU", "ARC"};
  } else if (hits.size() == 4) {
    names = {"LRU", "LFU", "ARC", "LRU-K"};
  } else if (hits.size() == 5) {
    names = {"LRU", "LFU", "ARC", "LRU-K", "LFU-Aging"};
  } else if (hits.size() == 6) {
    names = {"LRU", "LFU", "ARC", "LRU-K", "LFU-Aging", "W-TinyLFU"};
  }

  for (size_t i = 0; i < hits.size(); ++i) {
    // Guard against division by zero when an algorithm recorded no get
    // operations (the original printed inf/nan in that case).
    double hitRate =
        get_operations[i] > 0 ? 100.0 * hits[i] / get_operations[i] : 0.0;
    std::cout << (i < names.size() ? names[i]
                                   : "Algorithm " + std::to_string(i + 1))
              << " - 命中率: " << std::fixed << std::setprecision(2) << hitRate
              << "% ";
    // Also show the raw hit count over total get operations.
    std::cout << "(" << hits[i] << "/" << get_operations[i] << ")" << std::endl;
  }

  std::cout << std::endl;  // blank line separates scenarios
}
// Scenario 1: hot-spot access. A small set of hot keys receives 70% of the
// traffic while a much larger cold range gets the rest; 30% of operations are
// writes. Measures how well each policy retains the hot working set.
void testHotDataAccess() {
  std::cout << "\n=== 测试场景1:热点数据访问测试 ===" << std::endl;

  const int CAPACITY = 20;        // cache capacity
  const int OPERATIONS = 500000;  // total operations per cache
  const int HOT_KEYS = 20;        // number of hot keys
  const int COLD_KEYS = 5000;     // number of cold keys

  XCache::XLRUCache<int, std::string> lru(CAPACITY);
  XCache::XLFUCache<int, std::string> lfu(CAPACITY);
  XCache::XArcCache<int, std::string> arc(CAPACITY);
  // LRU-K with k=2: a key must be seen twice before it is promoted into the
  // main cache, which separates hot keys from one-shot cold traffic.
  XCache::XLRUKCache<int, std::string> lruk(CAPACITY, 2);
  // LFU with aging: aggressive decay parameters for this scenario.
  XCache::XLFUCache<int, std::string> lfuAging(CAPACITY, 50000, 5000, 0.7);
  // W-TinyLFU: window cache plus frequency-based admission to the main cache.
  XCache::XWTinyLFUCache<int, std::string> wTinyLFU(CAPACITY);

  std::random_device rd;
  std::mt19937 gen(rd());
  // uniform_int_distribution avoids the modulo bias of gen() % N.
  std::uniform_int_distribution<int> pct(0, 99);
  std::uniform_int_distribution<int> hotDist(0, HOT_KEYS - 1);
  std::uniform_int_distribution<int> coldDist(0, COLD_KEYS - 1);

  // Base-class pointers so every policy runs the same driver loop.
  std::array<XCache::XCachePolicy<int, std::string> *, 6> caches = {
      &lru, &lfu, &arc, &lruk, &lfuAging, &wTinyLFU};
  std::vector<int> hits(6, 0);
  std::vector<int> get_operations(6, 0);

  // Run the identical operation sequence against every cache.
  for (std::size_t i = 0; i < caches.size(); ++i) {
    // Warm the cache with the hot keys.
    for (int key = 0; key < HOT_KEYS; ++key) {
      std::string value = "value" + std::to_string(key);
      caches[i]->put(key, value);
    }

    // Interleave puts and gets to mimic a realistic workload.
    for (int op = 0; op < OPERATIONS; ++op) {
      // Reads dominate writes in most cache workloads: 30% writes.
      bool isPut = pct(gen) < 30;
      // 70% of accesses target the hot set, 30% the cold range.
      int key = pct(gen) < 70 ? hotDist(gen) : HOT_KEYS + coldDist(gen);

      if (isPut) {
        std::string value =
            "value" + std::to_string(key) + "_v" + std::to_string(op % 100);
        caches[i]->put(key, value);
      } else {
        // Count the get and record whether it hit.
        std::string result;
        get_operations[i]++;
        if (caches[i]->get(key, result)) {
          hits[i]++;
        }
      }
    }
  }

  printResults("热点数据访问测试", CAPACITY, get_operations, hits);
}
// Scenario 2: loop/scan access. Mostly sequential scanning over a key range
// much larger than the cache, mixed with random jumps inside the range and
// occasional out-of-range accesses; 20% of operations are writes.
void testLoopPattern() {
  std::cout << "\n=== 测试场景2:循环扫描测试 ===" << std::endl;

  const int CAPACITY = 50;        // cache capacity
  const int LOOP_SIZE = 500;      // size of the scanned key range
  const int OPERATIONS = 200000;  // total operations per cache

  XCache::XLRUCache<int, std::string> lru(CAPACITY);
  XCache::XLFUCache<int, std::string> lfu(CAPACITY);
  XCache::XArcCache<int, std::string> arc(CAPACITY);
  // LRU-K with k=2: a reasonable promotion threshold for cyclic access.
  XCache::XLRUKCache<int, std::string> lruk(CAPACITY, 2);
  // LFU with aging: faster decay to adapt to the looping pattern.
  XCache::XLFUCache<int, std::string> lfuAging(CAPACITY, 10000, 2000, 0.6);
  // W-TinyLFU: window cache plus frequency-based admission to the main cache.
  XCache::XWTinyLFUCache<int, std::string> wTinyLFU(CAPACITY);

  std::array<XCache::XCachePolicy<int, std::string> *, 6> caches = {
      &lru, &lfu, &arc, &lruk, &lfuAging, &wTinyLFU};
  std::vector<int> hits(6, 0);
  std::vector<int> get_operations(6, 0);

  std::random_device rd;
  std::mt19937 gen(rd());
  // uniform_int_distribution avoids the modulo bias of gen() % N.
  std::uniform_int_distribution<int> pct(0, 99);
  std::uniform_int_distribution<int> loopDist(0, LOOP_SIZE - 1);

  // Run the identical test against every cache policy.
  for (std::size_t i = 0; i < caches.size(); ++i) {
    // Warm the cache with the first 20% of the loop range.
    for (int key = 0; key < LOOP_SIZE / 5; ++key) {
      std::string value = "loop" + std::to_string(key);
      caches[i]->put(key, value);
    }

    int current_pos = 0;  // cursor for the sequential scan

    // Interleave reads and writes to mimic a realistic workload.
    for (int op = 0; op < OPERATIONS; ++op) {
      // 20% writes, 80% reads.
      bool isPut = pct(gen) < 20;

      int key;
      if (op % 100 < 60) {  // 60%: sequential scan
        key = current_pos;
        current_pos = (current_pos + 1) % LOOP_SIZE;
      } else if (op % 100 < 90) {  // 30%: random jump within the range
        key = loopDist(gen);
      } else {  // 10%: access outside the scanned range
        key = LOOP_SIZE + loopDist(gen);
      }

      if (isPut) {
        std::string value =
            "loop" + std::to_string(key) + "_v" + std::to_string(op % 100);
        caches[i]->put(key, value);
      } else {
        // Count the get and record whether it hit.
        std::string result;
        get_operations[i]++;
        if (caches[i]->get(key, result)) {
          hits[i]++;
        }
      }
    }
  }

  printResults("循环扫描测试", CAPACITY, get_operations, hits);
}
// Returns the write (put) probability, in percent, for the given workload
// phase of testWorkloadShift below.
static int phasePutProbability(int phase) {
  switch (phase) {
    case 0:
      return 15;  // phase 1: hot-spot access
    case 1:
      return 30;  // phase 2: wide random access
    case 2:
      return 10;  // phase 3: sequential scan
    case 3:
      return 25;  // phase 4: localized random access
    case 4:
      return 20;  // phase 5: mixed access
    default:
      return 20;
  }
}

// Scenario 3: abrupt workload shifts. Five equal-length phases, each with a
// different access pattern and read/write mix, measure how quickly each
// policy adapts when the working set changes.
void testWorkloadShift() {
  std::cout << "\n=== 测试场景3:工作负载剧烈变化测试 ===" << std::endl;

  const int CAPACITY = 30;                  // cache capacity
  const int OPERATIONS = 80000;             // total operations per cache
  const int PHASE_LENGTH = OPERATIONS / 5;  // operations per phase

  XCache::XLRUCache<int, std::string> lru(CAPACITY);
  XCache::XLFUCache<int, std::string> lfu(CAPACITY);
  XCache::XArcCache<int, std::string> arc(CAPACITY);
  // LRU-K with k=3: moderate promotion threshold for shifting workloads.
  XCache::XLRUKCache<int, std::string> lruk(CAPACITY, 3);
  // LFU with aging: very aggressive decay so stale phases fade quickly.
  XCache::XLFUCache<int, std::string> lfuAging(CAPACITY, 8000, 1000, 0.5);
  // W-TinyLFU: window cache plus frequency-based admission to the main cache.
  XCache::XWTinyLFUCache<int, std::string> wTinyLFU(CAPACITY);

  std::random_device rd;
  std::mt19937 gen(rd());
  // uniform_int_distribution avoids the modulo bias of gen() % N.
  std::uniform_int_distribution<int> pct(0, 99);
  std::uniform_int_distribution<int> hot5(0, 4);
  std::uniform_int_distribution<int> wide400(0, 399);
  std::uniform_int_distribution<int> local15(0, 14);
  std::uniform_int_distribution<int> mid45(0, 44);
  std::uniform_int_distribution<int> big350(0, 349);

  std::array<XCache::XCachePolicy<int, std::string> *, 6> caches = {
      &lru, &lfu, &arc, &lruk, &lfuAging, &wTinyLFU};
  std::vector<int> hits(6, 0);
  std::vector<int> get_operations(6, 0);

  // Run the identical test against every cache policy.
  for (std::size_t i = 0; i < caches.size(); ++i) {
    // Warm the cache with a small amount of initial data.
    for (int key = 0; key < 30; ++key) {
      std::string value = "init" + std::to_string(key);
      caches[i]->put(key, value);
    }

    // Multi-phase test; each phase has its own access pattern and write mix.
    for (int op = 0; op < OPERATIONS; ++op) {
      int phase = op / PHASE_LENGTH;
      bool isPut = pct(gen) < phasePutProbability(phase);

      // Key generation differs per phase.
      int key;
      if (op < PHASE_LENGTH) {
        // Phase 1: hot-spot access concentrated on 5 keys.
        key = hot5(gen);
      } else if (op < PHASE_LENGTH * 2) {
        // Phase 2: wide random access over 400 keys (cache holds 30).
        key = wide400(gen);
      } else if (op < PHASE_LENGTH * 3) {
        // Phase 3: sequential scan over 100 keys.
        key = (op - PHASE_LENGTH * 2) % 100;
      } else if (op < PHASE_LENGTH * 4) {
        // Phase 4: localized random access — 5 regions of 15 keys each
        // (slightly smaller than the cache), rotating every 800 operations.
        int locality = (op / 800) % 5;
        key = locality * 15 + local15(gen);
      } else {
        // Phase 5: mixed access — 40% hot keys, 30% mid range, 30% wide.
        int r = pct(gen);
        if (r < 40) {
          key = hot5(gen);
        } else if (r < 70) {
          key = 5 + mid45(gen);
        } else {
          key = 50 + big350(gen);
        }
      }

      if (isPut) {
        std::string value =
            "value" + std::to_string(key) + "_p" + std::to_string(phase);
        caches[i]->put(key, value);
      } else {
        // Count the get and record whether it hit.
        std::string result;
        get_operations[i]++;
        if (caches[i]->get(key, result)) {
          hits[i]++;
        }
      }
    }
  }

  printResults("工作负载剧烈变化测试", CAPACITY, get_operations, hits);
}
// Entry point: runs the three cache-policy benchmark scenarios in sequence;
// each prints its own hit-rate summary before the next one starts.
int main() {
  testHotDataAccess();  // scenario 1: hot-spot access
  testLoopPattern();    // scenario 2: loop/scan access
  testWorkloadShift();  // scenario 3: shifting workloads
  return 0;
}