Mirror of https://github.com/matrix-org/dendrite, synced 2024-12-13 14:52:47 +00:00
Fix transaction IDs in transaction cache having global scope (#772)

This commit is contained in:
parent 83f8e05032
commit 66bf615360

4 changed files with 48 additions and 13 deletions
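Per the Matrix client-server spec, transaction IDs are scoped to a single access token, so two clients may legitimately reuse the same ID. Before this change the cache was keyed by transaction ID alone, so one client's retry could be answered with a response cached for a different client. The sketch below (hypothetical names, not part of the commit) isolates the keying change:

package main

import "fmt"

// scopedKey mirrors the CacheKey type introduced by this commit: entries are
// keyed by the (access token, transaction ID) pair instead of the ID alone.
type scopedKey struct {
	AccessToken string
	TxnID       string
}

func main() {
	cache := map[scopedKey]string{}

	// Two clients reuse the same transaction ID, which the spec allows.
	cache[scopedKey{"tokenA", "txn1"}] = "response for client A"
	cache[scopedKey{"tokenB", "txn1"}] = "response for client B"

	// With a map keyed by TxnID alone, the second write would have
	// overwritten the first; scoped keys keep the entries distinct.
	fmt.Println(cache[scopedKey{"tokenA", "txn1"}])
	fmt.Println(cache[scopedKey{"tokenB", "txn1"}])
}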
@@ -50,7 +50,7 @@ func SendEvent(
 ) util.JSONResponse {
 	if txnID != nil {
 		// Try to fetch response from transactionsCache
-		if res, ok := txnCache.FetchTransaction(*txnID); ok {
+		if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID); ok {
 			return *res
 		}
 	}
@@ -83,7 +83,7 @@ func SendEvent(
 	}
 	// Add response to transactionsCache
 	if txnID != nil {
-		txnCache.AddTransaction(*txnID, &res)
+		txnCache.AddTransaction(device.AccessToken, *txnID, &res)
 	}

 	return res
@@ -22,7 +22,14 @@ import (
 // DefaultCleanupPeriod represents the default time duration after which cacheCleanService runs.
 const DefaultCleanupPeriod time.Duration = 30 * time.Minute

-type txnsMap map[string]*util.JSONResponse
+type txnsMap map[CacheKey]*util.JSONResponse
+
+// CacheKey is the type for the key in a transactions cache.
+// This is needed because the spec requires transaction IDs to have a per-access token scope.
+type CacheKey struct {
+	AccessToken string
+	TxnID       string
+}

 // Cache represents a temporary store for response entries.
 // Entries are evicted after a certain period, defined by cleanupPeriod.
@@ -50,14 +57,14 @@ func NewWithCleanupPeriod(cleanupPeriod time.Duration) *Cache {
 	return &t
 }

-// FetchTransaction looks up an entry for txnID in Cache.
+// FetchTransaction looks up an entry for the (accessToken, txnID) tuple in Cache.
 // Looks in both the txnMaps.
 // Returns (JSON response, true) if txnID is found, else the returned bool is false.
-func (t *Cache) FetchTransaction(txnID string) (*util.JSONResponse, bool) {
+func (t *Cache) FetchTransaction(accessToken, txnID string) (*util.JSONResponse, bool) {
 	t.RLock()
 	defer t.RUnlock()
 	for _, txns := range t.txnsMaps {
-		res, ok := txns[txnID]
+		res, ok := txns[CacheKey{accessToken, txnID}]
 		if ok {
 			return res, true
 		}
@@ -65,13 +72,13 @@ func (t *Cache) FetchTransaction(txnID string) (*util.JSONResponse, bool) {
 	return nil, false
 }

-// AddTransaction adds an entry for txnID in Cache for later access.
+// AddTransaction adds an entry for the (accessToken, txnID) tuple in Cache.
 // Adds to the front txnMap.
-func (t *Cache) AddTransaction(txnID string, res *util.JSONResponse) {
+func (t *Cache) AddTransaction(accessToken, txnID string, res *util.JSONResponse) {
 	t.Lock()
 	defer t.Unlock()

-	t.txnsMaps[0][txnID] = res
+	t.txnsMaps[0][CacheKey{accessToken, txnID}] = res
 }

 // cacheCleanService is responsible for cleaning up entries after cleanupPeriod.
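Taken together, the cache is now consulted and populated per access token. A minimal sketch of the new call pattern (sendCached is a hypothetical helper, not part of this commit; it would compile inside this package, which already imports util):

// sendCached replays a cached response for an idempotent retry, computing
// and caching the response on a miss. accessToken comes from the
// authenticated device, as in SendEvent above.
func sendCached(txnCache *Cache, accessToken, txnID string, compute func() util.JSONResponse) util.JSONResponse {
	if res, ok := txnCache.FetchTransaction(accessToken, txnID); ok {
		return *res
	}
	res := compute()
	txnCache.AddTransaction(accessToken, txnID, &res)
	return res
}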
@@ -24,27 +24,54 @@ type fakeType struct {
 }

 var (
-	fakeTxnID    = "aRandomTxnID"
-	fakeResponse = &util.JSONResponse{Code: http.StatusOK, JSON: fakeType{ID: "0"}}
+	fakeAccessToken  = "aRandomAccessToken"
+	fakeAccessToken2 = "anotherRandomAccessToken"
+	fakeTxnID        = "aRandomTxnID"
+	fakeResponse     = &util.JSONResponse{
+		Code: http.StatusOK, JSON: fakeType{ID: "0"},
+	}
+	fakeResponse2 = &util.JSONResponse{
+		Code: http.StatusOK, JSON: fakeType{ID: "1"},
+	}
 )

 // TestCache creates a New Cache and tests AddTransaction & FetchTransaction
 func TestCache(t *testing.T) {
 	fakeTxnCache := New()
-	fakeTxnCache.AddTransaction(fakeTxnID, fakeResponse)
+	fakeTxnCache.AddTransaction(fakeAccessToken, fakeTxnID, fakeResponse)

 	// Add entries for noise.
 	for i := 1; i <= 100; i++ {
 		fakeTxnCache.AddTransaction(
+			fakeAccessToken,
 			fakeTxnID+string(i),
 			&util.JSONResponse{Code: http.StatusOK, JSON: fakeType{ID: string(i)}},
 		)
 	}

-	testResponse, ok := fakeTxnCache.FetchTransaction(fakeTxnID)
+	testResponse, ok := fakeTxnCache.FetchTransaction(fakeAccessToken, fakeTxnID)
 	if !ok {
 		t.Error("Failed to retrieve entry for txnID: ", fakeTxnID)
 	} else if testResponse.JSON != fakeResponse.JSON {
 		t.Error("Fetched response incorrect. Expected: ", fakeResponse.JSON, " got: ", testResponse.JSON)
 	}
 }
+
+// TestCacheScope ensures transactions with the same transaction ID are not shared
+// across multiple access tokens.
+func TestCacheScope(t *testing.T) {
+	cache := New()
+	cache.AddTransaction(fakeAccessToken, fakeTxnID, fakeResponse)
+	cache.AddTransaction(fakeAccessToken2, fakeTxnID, fakeResponse2)
+
+	if res, ok := cache.FetchTransaction(fakeAccessToken, fakeTxnID); !ok {
+		t.Errorf("failed to retrieve entry for (%s, %s)", fakeAccessToken, fakeTxnID)
+	} else if res.JSON != fakeResponse.JSON {
+		t.Errorf("Wrong cache entry for (%s, %s). Expected: %v; got: %v", fakeAccessToken, fakeTxnID, fakeResponse.JSON, res.JSON)
+	}
+	if res, ok := cache.FetchTransaction(fakeAccessToken2, fakeTxnID); !ok {
+		t.Errorf("failed to retrieve entry for (%s, %s)", fakeAccessToken2, fakeTxnID)
+	} else if res.JSON != fakeResponse2.JSON {
+		t.Errorf("Wrong cache entry for (%s, %s). Expected: %v; got: %v", fakeAccessToken2, fakeTxnID, fakeResponse2.JSON, res.JSON)
+	}
+}
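TestCacheScope exercises exactly the collision this commit fixes: the same transaction ID stored under two access tokens must resolve to two distinct responses. Assuming a standard Go toolchain, both tests can be run from the package directory with:

go test -v -run TestCache

(the -run pattern matches both TestCache and TestCacheScope by prefix).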
testfile (1 addition)
@ -159,6 +159,7 @@ Inbound federation rejects remote attempts to kick local users to rooms
|
||||||
An event which redacts itself should be ignored
|
An event which redacts itself should be ignored
|
||||||
A pair of events which redact each other should be ignored
|
A pair of events which redact each other should be ignored
|
||||||
Full state sync includes joined rooms
|
Full state sync includes joined rooms
|
||||||
|
A message sent after an initial sync appears in the timeline of an incremental sync.
|
||||||
Can add tag
|
Can add tag
|
||||||
Can remove tag
|
Can remove tag
|
||||||
Can list tags for a room
|
Can list tags for a room
|
||||||
|
|