Atlas Runtime
log_mgr.hpp
Annotated source listing of log_mgr.hpp (generated documentation page).
1 /*
2  * (c) Copyright 2016 Hewlett Packard Enterprise Development LP
3  *
4  * This program is free software: you can redistribute it and/or modify
5  * it under the terms of the GNU Lesser General Public License as
6  * published by the Free Software Foundation, either version 3 of the
7  * License, or (at your option) any later version. This program is
8  * distributed in the hope that it will be useful, but WITHOUT ANY
9  * WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11  * for more details. You should have received a copy of the GNU Lesser
12  * General Public License along with this program. If not, see
13  * <http://www.gnu.org/licenses/>.
14  */
15 
16 
17 #ifndef LOG_MGR_HPP
18 #define LOG_MGR_HPP
19 
20 #include <cassert>
21 #include <atomic>
22 #include <vector>
23 
24 #include <stdint.h>
25 #include <pthread.h>
26 
27 #include "atlas_api.h"
28 
29 #include "pregion_configs.hpp"
30 #include "pregion_mgr.hpp"
31 #include "log_configs.hpp"
32 #include "log_structure.hpp"
33 #include "happens_before.hpp"
34 #include "cache_flush_configs.hpp"
35 #include "circular_buffer.hpp"
36 #include "log_elision.hpp"
37 #include "stats.hpp"
38 
39 #include "util.hpp"
40 
41 namespace Atlas {
42 
43 void * helper(void*);
44 
45 typedef std::vector<LogEntry*> LogEntryVec;
46 
47 class LogMgr {
48  static LogMgr *Instance_;
49 public:
50 
51  // serial mode only
52  static LogMgr& createInstance() {
53  assert(!Instance_);
54  Instance_ = new LogMgr();
55  Instance_->init();
56  return *Instance_;
57  }
58 
59  // serial mode only
61  assert(!Instance_);
62  Instance_ = new LogMgr();
63  return *Instance_;
64  }
65 
66  // serial mode only
67  static void deleteInstance() {
68  if (Instance_) {
69  Instance_->finalize();
70  delete Instance_;
71  Instance_ = nullptr;
72  }
73  }
74 
75  static LogMgr& getInstance() {
76  assert(Instance_);
77  return *Instance_;
78  }
79 
80  static bool hasInstance() {
81  if (!Instance_) return false;
82  return Instance_->IsInitialized_;
83  }
84 
85  void setRegionId(region_id_t id) { RegionId_ = id; }
86  region_id_t getRegionId() const { return RegionId_; }
87 
89  { int status = pthread_mutex_lock(&HelperLock_); assert(!status); }
91  { int status = pthread_mutex_unlock(&HelperLock_); assert(!status); }
92  void waitLogReady() {
93  int status = pthread_cond_wait(&HelperCondition_, &HelperLock_);
94  assert(!status);
95  }
97  { int status = pthread_cond_signal(&HelperCondition_); assert(!status); }
98 
100  LogStructure *desired,
101  std::memory_order success,
102  std::memory_order failure) {
103  return LogStructureHeaderPtr_->compare_exchange_weak(
104  expected, desired, success, failure);
105  }
107  LogStructure *desired,
108  std::memory_order success,
109  std::memory_order failure) {
110  return RecoveryTimeLsp_.compare_exchange_weak(
111  expected, desired, success, failure);
112  }
113 
114  // Log creation
115  void logNonTemporal(
116  LogEntry *le, void *addr, size_t sz, LogType le_type);
117  void logAcquire(void*);
118  void logRelease(void*);
119  void logRdLock(void*);
120  void logWrLock(void*);
121  void logRWUnlock(void*);
122  void logBeginDurable();
123  void logEndDurable();
124  void logStore(void *addr, size_t sz);
125  void logMemset(void *addr, size_t sz);
126  void logMemcpy(void *dst, size_t sz);
127  void logMemmove(void *dst, size_t sz);
128  void logStrcpy(void *dst, size_t sz);
129  void logStrcat(void *dst, size_t sz);
130  void logAlloc(void *addr);
131  void logFree(void *addr);
132 
134 
135  // Cache flush handling
136  // TODO separate flush object
137  void psync(void *start_addr, size_t sz);
138  void psyncWithAcquireBarrier(void *start_addr, size_t sz);
139  void asyncLogFlush(void *p);
140  void syncLogFlush();
141 
142  void asyncDataFlush(void *p);
143  void asyncMemOpDataFlush(void *dst, size_t sz);
144  void syncDataFlush();
145 
146  void flushAtEndOfFase();
147  void collectCacheLines(SetOfInts *cl_set, void *addr, size_t sz);
148  void flushCacheLines(const SetOfInts & cl_set);
149  void flushCacheLinesUnconstrained(const SetOfInts & cl_set);
150  void flushLogUncond(void*);
151  void flushLogPointer() { NVM_FLUSH(LogStructureHeaderPtr_); }
152  void flushRecoveryLogPointer() { NVM_FLUSH(&RecoveryTimeLsp_); }
153 
154  bool areUserThreadsDone() const
155  { return AllDone_.load(std::memory_order_acquire) == 1; }
156 
157  LogStructure *getRecoveryLogPointer(std::memory_order mem_order) const
158  { return RecoveryTimeLsp_.load(mem_order); }
160  std::memory_order mem_order)
161  { RecoveryTimeLsp_.store(log_ptr, mem_order); }
162  LogStructure *getLogPointer(std::memory_order mem_order) const
163  { return (*LogStructureHeaderPtr_).load(mem_order); }
164 
165  void deleteOwnerInfo(LogEntry *le);
166  void deleteEntry(LogEntry *addr)
167  { deleteEntry<LogEntry>(CbLogList_, addr); }
168 
170  { assert(Stats_); Stats_->acquireLock(); }
172  { assert(Stats_); Stats_->releaseLock(); }
173  void printStats() const
174  { assert(Stats_); Stats_->print(); }
175 
176 private:
177 
178  // All member of LogMgr are transient. The logs themselves are persistent
179  // and maintained in a persistent region
180 
184 
185  region_id_t RegionId_; // Persistent region holding the logs
186 
187  // This is the helper thread that is created at initialization time
188  // and joined at finalization time. It is manipulated by the main thread
189  // alone avoiding a race.
190  pthread_t HelperThread_;
191 
192  // pointer to the list of circular buffers containing the log entries
193  std::atomic<CbListNode<LogEntry>*> CbLogList_;
194 
195  // This is the topmost pointer to the entire global log structure
196  std::atomic<LogStructure*> *LogStructureHeaderPtr_;
197 
198  // Same as above but during recovery
199  std::atomic<LogStructure*> RecoveryTimeLsp_;
200 
201  // indicator whether the user threads are done
202  std::atomic<int> AllDone_;
203 
204  // Condition variable thru which user threads signal the helper thread
205  pthread_cond_t HelperCondition_;
206 
207  // Mutex for the above condition variable
208  pthread_mutex_t HelperLock_;
209 
210  // Used to map a lock address to a pointer to LastReleaseInfo, the
211  // structure used to maintain information about the last release
212  // of the lock
213  std::atomic<LastReleaseInfo*> ReleaseInfoTab_[kHashTableSize];
214 
215  // Used to map a lock address to a pointer to LockReleaseCount
216  // that maintains the total number of releases of that lock. Used
217  // in log elision analysis.
218  std::atomic<LockReleaseCount*> LockReleaseHistory_[kHashTableSize];
219 
220  Stats *Stats_;
221 
222  bool IsInitialized_;
223 
224  //
225  // Start of thread local members
226  //
227 
228  // Used to signal helper thread once a certain number of log entries
229  // has been created by this thread
230  thread_local static uint32_t TL_LogCount_;
231 
232  // TODO this is passed around in the code unnecessarily
233 
234  // pointer to the current log circular buffer that is used to
235  // satisfy new allocation requests for log entries
236  thread_local static CbLog<LogEntry> *TL_CbLog_;
237 
238  // Every time the information in a log entry is over-written (either
239  // because it is newly created or because it is repurposed), a
240  // monotonically increasing generation number is assigned to
241  // it. Since these circular buffers are never de-allocated, there
242  // is no way a log entry address can be used by multiple
243  // threads. So a thread local generation number suffices.
244  thread_local static uint64_t TL_GenNum_;
245 
246  // Log tracker pointing to the last log entry of this thread
247  thread_local static LogEntry *TL_LastLogEntry_;
248 
249  // Count of locks held. A non-zero value indicates that execution is
250  // within a Failure Atomic SEction (FASE). POSIX says that if an
251  // unlock is attempted on an already-released lock, undefined
252  // behavior results. So this simple detection of FASE is sufficient.
253  thread_local static intptr_t TL_NumHeldLocks_;
254 
255  // A set of locks on which the current thread is conditioned. In
256  // other words, the current thread's execution may have to be
257  // undone if there is a failure and at least one of those locks
258  // has not been released one more time. Used in log elision analysis.
259  thread_local static MapOfLockInfo *TL_UndoLocks_;
260 
261  // A tracker indicating whether a user thread just executed the
262  // first statement that is outside a critical section
263  thread_local static bool TL_IsFirstNonCSStmt_;
264 
265  // A cache with the current intention
266  thread_local static bool TL_ShouldLogNonCSStmt_;
267 
268  // Total number of logs created by this thread
269  thread_local static uint64_t TL_LogCounter_;
270 
271  // Set of cache lines that need to be flushed at end of FASE
272  thread_local static SetOfInts *TL_FaseFlushPtr_;
273 
274  // Used to track unique address/size pair within a consistent section
275  thread_local static SetOfPairs *TL_UniqueLoc_;
276 
277 #if 0 // unused
278  thread_local static intptr_t TL_LogFlushTab_[kFlushTableSize];
279 #endif
280 
281  thread_local static intptr_t TL_DataFlushTab_[kFlushTableSize];
282 
283  //
284  // End of thread local members
285  //
286 
287  // TODO ensure all are initialized
288  LogMgr() :
289  RegionId_{kMaxNumPRegions_},
290  CbLogList_{nullptr},
291  LogStructureHeaderPtr_{nullptr},
292  RecoveryTimeLsp_{nullptr},
293  AllDone_{0},
294  Stats_{nullptr},
295  IsInitialized_{false}
296  {
297  pthread_cond_init(&HelperCondition_, nullptr);
298  pthread_mutex_init(&HelperLock_, nullptr);
299  }
300 
301  ~LogMgr()
302  {
303 #if defined(_FLUSH_LOCAL_COMMIT) && !defined(DISABLE_FLUSHES)
304  delete TL_FaseFlushPtr_;
305  TL_FaseFlushPtr_ = nullptr;
306 #endif
307  }
308 
309  void init();
310  void finalize();
311 
312  // Given a lock address, get a pointer to the bucket for the last
313  // release
314  std::atomic<LastReleaseInfo*> *getLastReleaseRoot(void *addr) {
315  return ReleaseInfoTab_ + (
316  ((reinterpret_cast<uint64_t>(addr)) >> kShift) &
317  kHashTableMask); }
318 
319  std::atomic<LockReleaseCount*> *getLockReleaseCountRoot(void *addr) {
320  return LockReleaseHistory_ + (
321  ((reinterpret_cast<uint64_t>(addr)) >> kShift) &
322  kHashTableMask); }
323 
324  // Log entry creation functions
325  LogEntry *allocLogEntry();
326  LogEntry *createSectionLogEntry(
327  void *lock_address, LogType le_type);
328  LogEntry *createAllocationLogEntry(
329  void *addr, LogType le_type);
330  LogEntry *createStrLogEntry(
331  void * addr, size_t size_in_bits);
332  LogEntry *createMemStrLogEntry(
333  void *addr, size_t sz, LogType le_type);
334  LogEntry *createDummyLogEntry();
335 
336  void publishLogEntry(
337  LogEntry *le);
338  void signalHelper();
339  void finishAcquire(
340  void *lock_address, LogEntry *le);
341  void finishRelease(
342  LogEntry *le, const MapOfLockInfo& undo_locks);
343  void markEndFase(
344  LogEntry *le);
345  void finishWrite(
346  LogEntry * le, void * addr);
347  void assertOneCacheLine(LogEntry *le) {
348 #if !defined(_LOG_WITH_NVM_ALLOC) && !defined(_LOG_WITH_MALLOC)
349  // The entire log entry must be on the same cache line
351 #endif
352  }
353 
354  // Happens before tracker
355  LastReleaseInfo *getLastReleaseHeader(
356  void *lock_address);
357  LastReleaseInfo *findLastReleaseOfLock(
358  void *hash_address);
359  LastReleaseInfo *findLastReleaseOfLogEntry(
360  LogEntry *candidate_le);
361  void addLogToLastReleaseInfo(
362  LogEntry *le, const MapOfLockInfo& undo_locks);
363  ImmutableInfo *createNewImmutableInfo(
364  LogEntry *le, const MapOfLockInfo& undo_locks, bool is_deleted);
365  LastReleaseInfo *createNewLastReleaseInfo(
366  LogEntry *le, const MapOfLockInfo& undo_locks);
367  void setHappensBeforeForAllocFree(
368  LogEntry *le);
369 
370  // Log elision
371  bool tryLogElision(
372  void *addr, size_t sz);
373  bool doesNeedLogging(
374  void *addr, size_t sz);
375  bool canElideLogging();
376  void addLockReleaseCount(
377  void *lock_address, uint64_t count);
378  LockReleaseCount *getLockReleaseCountHeader(
379  void *lock_address);
380  LockReleaseCount *findLockReleaseCount(
381  void *lock_address);
382  uint64_t removeLockFromUndoInfo(
383  void *lock_address);
384 
385  bool isAddrSizePairAlreadySeen(
386  void *addr, size_t sz);
387 
388  // Circular buffer management
389  template<class T> CbLog<T> *getNewCb(
390  uint32_t size, uint32_t rid, CbLog<T> **log_p,
391  std::atomic<CbListNode<T>*> *cb_list_p);
392  template<class T> T *getNewSlot(
393  uint32_t rid, CbLog<T> **log_p,
394  std::atomic<CbListNode<T>*> *cb_list_p);
395  template<class T> void deleteEntry(
396  const std::atomic<CbListNode<T>*>& cb_list, T *addr);
397  template<class T> void deleteSlot(
398  CbLog<T> *cb, T *addr);
399 
400 };
401 
402 inline void LogMgr::logAcquire(void *lock_address)
403 {
404  LogEntry *le = createSectionLogEntry(lock_address, LE_acquire);
405  assert(le);
406 
407  finishAcquire(lock_address, le);
408 }
409 
410 inline void LogMgr::logRdLock(void *lock_address)
411 {
412  LogEntry *le = createSectionLogEntry(lock_address, LE_rwlock_rdlock);
413  assert(le);
414 
415  finishAcquire(lock_address, le);
416 }
417 
418 inline void LogMgr::logWrLock(void *lock_address)
419 {
420  LogEntry *le = createSectionLogEntry(lock_address, LE_rwlock_wrlock);
421  assert(le);
422 
423  finishAcquire(lock_address, le);
424 }
425 
427 {
428  LogEntry *le = createSectionLogEntry(NULL, LE_begin_durable);
429  assert(le);
430 
431  finishAcquire(NULL, le);
432 }
433 
434 inline void LogMgr::logStore(void *addr, size_t sz)
435 {
436  if (!NVM_IsInOpenPR(addr, sz/8)) return;
437  if (tryLogElision(addr, sz/8)) return;
438  LogEntry *le = createStrLogEntry(addr, sz);
439  finishWrite(le, addr);
440 }
441 
442 inline void LogMgr::logMemset(void *addr, size_t sz)
443 {
444  if (!NVM_IsInOpenPR(addr, sz)) return;
445  if (tryLogElision(addr, sz)) return;
446  LogEntry *le = createMemStrLogEntry(addr, sz, LE_memset);
447  finishWrite(le, addr);
448 }
449 
450 inline void LogMgr::logMemcpy(void *dst, size_t sz)
451 {
452  if (!NVM_IsInOpenPR(dst, sz)) return;
453  if (tryLogElision(dst, sz)) return;
454  LogEntry *le = createMemStrLogEntry(dst, sz, LE_memcpy);
455  finishWrite(le, dst);
456 }
457 
458 inline void LogMgr::logMemmove(void *dst, size_t sz)
459 {
460  if (!NVM_IsInOpenPR(dst, sz)) return;
461  if (tryLogElision(dst, sz)) return;
462  LogEntry *le = createMemStrLogEntry(dst, sz, LE_memmove);
463  finishWrite(le, dst);
464 }
465 
466 inline void LogMgr::logStrcpy(void *dst, size_t sz)
467 {
468  if (!NVM_IsInOpenPR((void *)dst, sz)) return;
469  if (tryLogElision((void *)dst, sz)) return;
470  LogEntry *le = createMemStrLogEntry((void *)dst, sz, LE_strcpy);
471  finishWrite(le, dst);
472 }
473 
474 inline void LogMgr::logStrcat(void *dst, size_t sz)
475 {
476  if (!NVM_IsInOpenPR(dst, sz)) return;
477  if (tryLogElision(dst, sz)) return;
478  LogEntry *le = createMemStrLogEntry(dst, sz, LE_strcat);
479  finishWrite(le, dst);
480 }
481 
482 inline LastReleaseInfo *LogMgr::getLastReleaseHeader(void *lock_address)
483 {
484  std::atomic<LastReleaseInfo*> *table_ptr =
485  getLastReleaseRoot(lock_address);
486  return (*table_ptr).load(std::memory_order_acquire);
487 }
488 
489 inline LockReleaseCount *LogMgr::getLockReleaseCountHeader(void *lock_address)
490 {
491  std::atomic<LockReleaseCount*> *entry =
492  getLockReleaseCountRoot(lock_address);
493  return (*entry).load(std::memory_order_acquire);
494 }
495 
496 inline void LogMgr::flushLogUncond(void *p)
497 {
498 #if (!defined(DISABLE_FLUSHES) && !defined(_DISABLE_LOG_FLUSH))
499 #if defined(_LOG_FLUSH_OPT)
500  // TODO: this needs more work. It is incomplete.
501  AsyncLogFlush(p);
502 #else
503  NVM_FLUSH(p);
504 #endif
505 #endif
506 }
507 
508 } // namespace Atlas
509 
510 #endif
Definition: log_configs.hpp:36
bool cmpXchngWeakRecoveryLogPointer(LogStructure *expected, LogStructure *desired, std::memory_order success, std::memory_order failure)
Definition: log_mgr.hpp:106
void logEndDurable()
Definition: log_mgr.cpp:211
void signalLogReady()
Definition: log_mgr.hpp:96
static bool hasInstance()
Definition: log_mgr.hpp:80
void acquireStatsLock()
Definition: log_mgr.hpp:169
void logBeginDurable()
Definition: log_mgr.hpp:426
const uint64_t kHashTableSize
Definition: log_configs.hpp:24
void syncLogFlush()
const uint64_t kHashTableMask
Definition: log_configs.hpp:25
void logMemmove(void *dst, size_t sz)
Definition: log_mgr.hpp:458
LogStructure * getRecoveryLogPointer(std::memory_order mem_order) const
Definition: log_mgr.hpp:157
void deleteEntry(LogEntry *addr)
Definition: log_mgr.hpp:166
Definition: log_structure.hpp:30
void collectCacheLines(SetOfInts *cl_set, void *addr, size_t sz)
void acquireLogReadyLock()
Definition: log_mgr.hpp:88
Definition: log_configs.hpp:33
void releaseLock()
Definition: stats.hpp:51
std::map< void *, uint64_t > MapOfLockInfo
Definition: happens_before.hpp:27
LogType
Definition: log_configs.hpp:32
void releaseStatsLock()
Definition: log_mgr.hpp:171
void logMemcpy(void *dst, size_t sz)
Definition: log_mgr.hpp:450
void logRelease(void *)
Entry point into log manager for a lock release.
Definition: log_mgr.cpp:159
const uint32_t kMaxNumPRegions_
Definition: pregion_configs.hpp:29
void waitLogReady()
Definition: log_mgr.hpp:92
Definition: happens_before.hpp:61
Definition: stats.hpp:28
void acquireLock()
Definition: stats.hpp:49
uint32_t region_id_t
Definition: pregion_configs.hpp:22
void logRdLock(void *)
Definition: log_mgr.hpp:410
void logStore(void *addr, size_t sz)
Definition: log_mgr.hpp:434
int NVM_IsInOpenPR(void *addr, size_t sz)
Definition: pregion_mgr_api.cpp:90
void logAcquire(void *)
Definition: log_mgr.hpp:402
Definition: log_configs.hpp:35
const int32_t kFlushTableSize
Definition: cache_flush_configs.hpp:22
void print()
Definition: stats.cpp:44
void setRegionId(region_id_t id)
Definition: log_mgr.hpp:85
Definition: log_elision.hpp:24
bool areUserThreadsDone() const
Definition: log_mgr.hpp:154
static LogMgr & createInstance()
Definition: log_mgr.hpp:52
Definition: happens_before.hpp:33
void logRWUnlock(void *)
Definition: log_mgr.cpp:190
bool cmpXchngWeakLogPointer(LogStructure *expected, LogStructure *desired, std::memory_order success, std::memory_order failure)
Definition: log_mgr.hpp:99
LogStructure * createLogStructure(LogEntry *le)
Create a thread specific log header.
Definition: log_entry_create.cpp:226
void logNonTemporal(LogEntry *le, void *addr, size_t sz, LogType le_type)
#define NVM_FLUSH(p)
Definition: atlas_api.h:106
void logMemset(void *addr, size_t sz)
Definition: log_mgr.hpp:442
void psync(void *start_addr, size_t sz)
Definition: generic.cpp:42
void asyncLogFlush(void *p)
Definition: log_configs.hpp:33
void printStats() const
Definition: log_mgr.hpp:173
Definition: log_configs.hpp:34
Definition: log_configs.hpp:35
region_id_t getRegionId() const
Definition: log_mgr.hpp:86
Definition: circular_buffer.hpp:25
static LogMgr & getInstance()
Definition: log_mgr.hpp:75
void flushLogPointer()
Definition: log_mgr.hpp:151
void * helper(void *arg_lsp)
Definition: helper_driver.cpp:33
Definition: log_structure.hpp:77
Definition: circular_buffer.hpp:58
void flushCacheLinesUnconstrained(const SetOfInts &cl_set)
std::set< AddrSizePairType, CmpAddrSizePair > SetOfPairs
Definition: util.hpp:49
const uint32_t kShift
Definition: log_configs.hpp:26
int size(COW_AL *cal)
Definition: cow_array_list.c:183
std::set< uint64_t > SetOfInts
Definition: util.hpp:77
void logStrcat(void *dst, size_t sz)
Definition: log_mgr.hpp:474
void logAlloc(void *addr)
Definition: log_mgr.cpp:229
void releaseLogReadyLock()
Definition: log_mgr.hpp:90
Definition: log_configs.hpp:33
void asyncDataFlush(void *p)
void flushAtEndOfFase()
Definition: generic.cpp:48
void flushCacheLines(const SetOfInts &cl_set)
void syncDataFlush()
Definition: log_mgr.hpp:47
void deleteOwnerInfo(LogEntry *le)
Definition: happens_before.cpp:132
void logWrLock(void *)
Definition: log_mgr.hpp:418
void setRecoveryLogPointer(LogStructure *log_ptr, std::memory_order mem_order)
Definition: log_mgr.hpp:159
void asyncMemOpDataFlush(void *dst, size_t sz)
void flushLogUncond(void *)
Definition: log_mgr.hpp:496
Definition: log_configs.hpp:36
void flushRecoveryLogPointer()
Definition: log_mgr.hpp:152
static bool is_on_different_cache_line(void *p1, void *p2)
Definition: pmalloc_util.hpp:109
std::vector< LogEntry * > LogEntryVec
Definition: log_mgr.hpp:45
Definition: atlas_alloc_cpp.hpp:21
static void deleteInstance()
Definition: log_mgr.hpp:67
LogStructure * getLogPointer(std::memory_order mem_order) const
Definition: log_mgr.hpp:162
static LogMgr & createRecoveryInstance()
Definition: log_mgr.hpp:60
void psyncWithAcquireBarrier(void *start_addr, size_t sz)
Definition: generic.cpp:21
Definition: log_configs.hpp:35
void logStrcpy(void *dst, size_t sz)
Definition: log_mgr.hpp:466
void logFree(void *addr)
Definition: log_mgr.cpp:249
#define LAST_LOG_ELEM(p)
Definition: log_structure.hpp:71