Pau Cabre (Gerrit)
2018-11-10 22:37:17 UTC
Pau Cabre has uploaded this change for review. (
https://gem5-review.googlesource.com/c/public/gem5/+/14215
Change subject: cpu: Fixed useful counter handling in LTAGE
......................................................................
cpu: Fixed useful counter handling in LTAGE
Increased to 2 bits of useful counter per TAGE entry as described in the
LTAGE paper (and made the size configurable)
Changed how the useful counters are incremented/decremented as described
in the LTAGE paper
Change-Id: I8c692cc7c180d29897cb77781681ff498a1d16c8
Signed-off-by: Pau Cabre <***@metempsy.com>
---
M src/cpu/pred/BranchPredictor.py
M src/cpu/pred/ltage.cc
M src/cpu/pred/ltage.hh
3 files changed, 36 insertions(+), 9 deletions(-)
diff --git a/src/cpu/pred/BranchPredictor.py
b/src/cpu/pred/BranchPredictor.py
index 1b400c2..3a6d5f1 100644
--- a/src/cpu/pred/BranchPredictor.py
+++ b/src/cpu/pred/BranchPredictor.py
@@ -96,6 +96,7 @@
logSizeLoopPred = Param.Unsigned(8, "Log size of the loop predictor")
nHistoryTables = Param.Unsigned(12, "Number of history tables")
tagTableCounterBits = Param.Unsigned(3, "Number of tag table counter
bits")
+ tagTableUBits = Param.Unsigned(2, "Number of tag table u bits")
histBufferSize = Param.Unsigned(2097152,
"A large number to track all branch histories(2MEntries
default)")
minHist = Param.Unsigned(4, "Minimum history size of LTAGE")
diff --git a/src/cpu/pred/ltage.cc b/src/cpu/pred/ltage.cc
index 251fb2e..ad34542 100644
--- a/src/cpu/pred/ltage.cc
+++ b/src/cpu/pred/ltage.cc
@@ -54,12 +54,18 @@
logSizeLoopPred(params->logSizeLoopPred),
nHistoryTables(params->nHistoryTables),
tagTableCounterBits(params->tagTableCounterBits),
+ tagTableUBits(params->tagTableUBits),
histBufferSize(params->histBufferSize),
minHist(params->minHist),
maxHist(params->maxHist),
minTagWidth(params->minTagWidth),
threadHistory(params->numThreads)
{
+ // Current method for periodically resetting the u counter bits only
+ // works for 1 or 2 bits
+ // Also make sure that it is not 0
+ assert(tagTableUBits <= 2 && (tagTableUBits > 0));
+
assert(params->histBufferSize > params->maxHist * 2);
useAltPredForNewlyAllocated = 0;
logTick = 19;
@@ -204,6 +210,20 @@
}
}
+// Up-down unsigned saturating counter
+void
+LTAGE::unsignedCtrUpdate(uint8_t & ctr, bool up, unsigned nbits)
+{
+ assert(nbits <= sizeof(uint8_t) << 3);
+ if (up) {
+ if (ctr < ((1 << nbits) - 1))
+ ctr++;
+ } else {
+ if (ctr)
+ ctr--;
+ }
+}
+
// Bimodal prediction
bool
LTAGE::getBimodePred(Addr pc, BranchInfo* bi) const
@@ -540,7 +560,7 @@
if (alloc) {
// is there some "unuseful" entry to allocate
- int8_t min = 1;
+ uint8_t min = 1;
for (int i = nHistoryTables; i > bi->hitBank; i--) {
if (gtable[i][bi->tableIndices[i]].u < min) {
min = gtable[i][bi->tableIndices[i]].u;
@@ -569,7 +589,6 @@
if ((gtable[i][bi->tableIndices[i]].u == 0)) {
gtable[i][bi->tableIndices[i]].tag = bi->tableTags[i];
gtable[i][bi->tableIndices[i]].ctr = (taken) ? 0 : -1;
- gtable[i][bi->tableIndices[i]].u = 0; //?
}
}
}
@@ -606,12 +625,9 @@
}
// update the u counter
- if (longest_match_pred != bi->altTaken) {
- if (longest_match_pred == taken) {
- if (gtable[bi->hitBank][bi->hitBankIndex].u < 1) {
- gtable[bi->hitBank][bi->hitBankIndex].u++;
- }
- }
+ if (bi->tagePred != bi->altTaken) {
+ unsignedCtrUpdate(gtable[bi->hitBank][bi->hitBankIndex].u,
+ bi->tagePred == taken, tagTableUBits);
}
} else {
baseUpdate(pc, taken, bi);
diff --git a/src/cpu/pred/ltage.hh b/src/cpu/pred/ltage.hh
index 60c3467..abe38fd 100644
--- a/src/cpu/pred/ltage.hh
+++ b/src/cpu/pred/ltage.hh
@@ -103,7 +103,7 @@
{
int8_t ctr;
uint16_t tag;
- int8_t u;
+ uint8_t u;
TageEntry() : ctr(0), tag(0), u(0) { }
};
@@ -247,6 +247,15 @@
void ctrUpdate(int8_t & ctr, bool taken, int nbits);
/**
+ * Updates an unsigned counter based on up/down parameter
+ * @param ctr Reference to counter to update.
+ * @param up Boolean indicating if the counter is
incremented/decremented
+ * If true it is incremented, if false it is decremented
+ * @param nbits Counter width.
+ */
+ void unsignedCtrUpdate(uint8_t & ctr, bool taken, unsigned nbits);
+
+ /**
* Get a branch prediction from the bimodal
* predictor.
* @param pc The unshifted branch PC.
@@ -359,6 +368,7 @@
const unsigned logSizeLoopPred;
const unsigned nHistoryTables;
const unsigned tagTableCounterBits;
+ const unsigned tagTableUBits;
const unsigned histBufferSize;
const unsigned minHist;
const unsigned maxHist;
--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/14215
To unsubscribe, or for help writing mail filters, visit
https://gem5-review.googlesource.com/settings
Gerrit-Project: public/gem5
Gerrit-Branch: master
Gerrit-Change-Id: I8c692cc7c180d29897cb77781681ff498a1d16c8
Gerrit-Change-Number: 14215
Gerrit-PatchSet: 1
Gerrit-Owner: Pau Cabre <***@metempsy.com>
Gerrit-MessageType: newchange
https://gem5-review.googlesource.com/c/public/gem5/+/14215
Change subject: cpu: Fixed useful counter handling in LTAGE
......................................................................
cpu: Fixed useful counter handling in LTAGE
Increased to 2 bits of useful counter per TAGE entry as described in the
LTAGE paper (and made the size configurable)
Changed how the useful counters are incremented/decremented as described
in the LTAGE paper
Change-Id: I8c692cc7c180d29897cb77781681ff498a1d16c8
Signed-off-by: Pau Cabre <***@metempsy.com>
---
M src/cpu/pred/BranchPredictor.py
M src/cpu/pred/ltage.cc
M src/cpu/pred/ltage.hh
3 files changed, 36 insertions(+), 9 deletions(-)
diff --git a/src/cpu/pred/BranchPredictor.py
b/src/cpu/pred/BranchPredictor.py
index 1b400c2..3a6d5f1 100644
--- a/src/cpu/pred/BranchPredictor.py
+++ b/src/cpu/pred/BranchPredictor.py
@@ -96,6 +96,7 @@
logSizeLoopPred = Param.Unsigned(8, "Log size of the loop predictor")
nHistoryTables = Param.Unsigned(12, "Number of history tables")
tagTableCounterBits = Param.Unsigned(3, "Number of tag table counter
bits")
+ tagTableUBits = Param.Unsigned(2, "Number of tag table u bits")
histBufferSize = Param.Unsigned(2097152,
"A large number to track all branch histories(2MEntries
default)")
minHist = Param.Unsigned(4, "Minimum history size of LTAGE")
diff --git a/src/cpu/pred/ltage.cc b/src/cpu/pred/ltage.cc
index 251fb2e..ad34542 100644
--- a/src/cpu/pred/ltage.cc
+++ b/src/cpu/pred/ltage.cc
@@ -54,12 +54,18 @@
logSizeLoopPred(params->logSizeLoopPred),
nHistoryTables(params->nHistoryTables),
tagTableCounterBits(params->tagTableCounterBits),
+ tagTableUBits(params->tagTableUBits),
histBufferSize(params->histBufferSize),
minHist(params->minHist),
maxHist(params->maxHist),
minTagWidth(params->minTagWidth),
threadHistory(params->numThreads)
{
+ // Current method for periodically resetting the u counter bits only
+ // works for 1 or 2 bits
+ // Also make sure that it is not 0
+ assert(tagTableUBits <= 2 && (tagTableUBits > 0));
+
assert(params->histBufferSize > params->maxHist * 2);
useAltPredForNewlyAllocated = 0;
logTick = 19;
@@ -204,6 +210,20 @@
}
}
+// Up-down unsigned saturating counter
+void
+LTAGE::unsignedCtrUpdate(uint8_t & ctr, bool up, unsigned nbits)
+{
+ assert(nbits <= sizeof(uint8_t) << 3);
+ if (up) {
+ if (ctr < ((1 << nbits) - 1))
+ ctr++;
+ } else {
+ if (ctr)
+ ctr--;
+ }
+}
+
// Bimodal prediction
bool
LTAGE::getBimodePred(Addr pc, BranchInfo* bi) const
@@ -540,7 +560,7 @@
if (alloc) {
// is there some "unuseful" entry to allocate
- int8_t min = 1;
+ uint8_t min = 1;
for (int i = nHistoryTables; i > bi->hitBank; i--) {
if (gtable[i][bi->tableIndices[i]].u < min) {
min = gtable[i][bi->tableIndices[i]].u;
@@ -569,7 +589,6 @@
if ((gtable[i][bi->tableIndices[i]].u == 0)) {
gtable[i][bi->tableIndices[i]].tag = bi->tableTags[i];
gtable[i][bi->tableIndices[i]].ctr = (taken) ? 0 : -1;
- gtable[i][bi->tableIndices[i]].u = 0; //?
}
}
}
@@ -606,12 +625,9 @@
}
// update the u counter
- if (longest_match_pred != bi->altTaken) {
- if (longest_match_pred == taken) {
- if (gtable[bi->hitBank][bi->hitBankIndex].u < 1) {
- gtable[bi->hitBank][bi->hitBankIndex].u++;
- }
- }
+ if (bi->tagePred != bi->altTaken) {
+ unsignedCtrUpdate(gtable[bi->hitBank][bi->hitBankIndex].u,
+ bi->tagePred == taken, tagTableUBits);
}
} else {
baseUpdate(pc, taken, bi);
diff --git a/src/cpu/pred/ltage.hh b/src/cpu/pred/ltage.hh
index 60c3467..abe38fd 100644
--- a/src/cpu/pred/ltage.hh
+++ b/src/cpu/pred/ltage.hh
@@ -103,7 +103,7 @@
{
int8_t ctr;
uint16_t tag;
- int8_t u;
+ uint8_t u;
TageEntry() : ctr(0), tag(0), u(0) { }
};
@@ -247,6 +247,15 @@
void ctrUpdate(int8_t & ctr, bool taken, int nbits);
/**
+ * Updates an unsigned counter based on up/down parameter
+ * @param ctr Reference to counter to update.
+ * @param up Boolean indicating if the counter is
incremented/decremented
+ * If true it is incremented, if false it is decremented
+ * @param nbits Counter width.
+ */
+ void unsignedCtrUpdate(uint8_t & ctr, bool taken, unsigned nbits);
+
+ /**
* Get a branch prediction from the bimodal
* predictor.
* @param pc The unshifted branch PC.
@@ -359,6 +368,7 @@
const unsigned logSizeLoopPred;
const unsigned nHistoryTables;
const unsigned tagTableCounterBits;
+ const unsigned tagTableUBits;
const unsigned histBufferSize;
const unsigned minHist;
const unsigned maxHist;
--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/14215
To unsubscribe, or for help writing mail filters, visit
https://gem5-review.googlesource.com/settings
Gerrit-Project: public/gem5
Gerrit-Branch: master
Gerrit-Change-Id: I8c692cc7c180d29897cb77781681ff498a1d16c8
Gerrit-Change-Number: 14215
Gerrit-PatchSet: 1
Gerrit-Owner: Pau Cabre <***@metempsy.com>
Gerrit-MessageType: newchange