Commit da0d48e6 authored by Jeromy Johnson

Add locking interface to blockstore


The addition of a locking interface to the blockstore allows us to
perform atomic operations on the underlying datastore without having to
worry about different operations happening in the background, such as
garbage collection.

License: MIT
Signed-off-by: Jeromy <jeromyj@gmail.com>
parent de50b215
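
For orientation before the diff: a minimal sketch, not part of this commit, of how the new locking interface is meant to be used. Writers hold the shared lock around their datastore operations while the garbage collector takes the exclusive lock for a sweep, so the two can never interleave. The helper names and import paths below are assumptions for illustration.

package example

import (
	blocks "github.com/ipfs/go-ipfs/blocks"
	bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
)

// putBlock writes one block while holding the shared (read) lock, so a GC
// sweep cannot begin halfway through the write.
func putBlock(bs bstore.GCBlockstore, blk *blocks.Block) error {
	unlock := bs.RLock()
	defer unlock()
	return bs.Put(blk)
}

// runGC holds the exclusive lock for the whole sweep, excluding every holder
// of the shared lock above.
func runGC(bs bstore.GCBlockstore, sweep func(bstore.GCBlockstore) error) error {
	unlock := bs.Lock()
	defer unlock()
	return sweep(bs)
}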
@@ -4,6 +4,7 @@ package blockstore
 import (
 	"errors"
+	"sync"
 	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
 	dsns "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/namespace"
@@ -35,7 +36,14 @@ type Blockstore interface {
 	AllKeysChan(ctx context.Context) (<-chan key.Key, error)
 }
 
-func NewBlockstore(d ds.ThreadSafeDatastore) Blockstore {
+type GCBlockstore interface {
+	Blockstore
+
+	Lock() func()
+	RLock() func()
+}
+
+func NewBlockstore(d ds.ThreadSafeDatastore) *blockstore {
 	dd := dsns.Wrap(d, BlockPrefix)
 	return &blockstore{
 		datastore: dd,
@@ -46,6 +54,8 @@ type blockstore struct {
 	datastore ds.Batching
 	// cant be ThreadSafeDatastore cause namespace.Datastore doesnt support it.
 	// we do check it on `NewBlockstore` though.
+
+	lk sync.RWMutex
 }
 
 func (bs *blockstore) Get(k key.Key) (*blocks.Block, error) {
@@ -172,3 +182,13 @@ func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan key.Key, error) {
 	return output, nil
 }
+
+func (bs *blockstore) Lock() func() {
+	bs.lk.Lock()
+	return bs.lk.Unlock
+}
+
+func (bs *blockstore) RLock() func() {
+	bs.lk.RLock()
+	return bs.lk.RUnlock
+}
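
Since Lock and RLock each hand back their matching unlock function, a call site can acquire the lock and schedule its release in a single statement. A small sketch against the package (the helper name is hypothetical, and the key import path is assumed):

package blockstore

import (
	key "github.com/ipfs/go-ipfs/blocks/key"
)

// deleteWithGCLock is a hypothetical helper: the defer line evaluates
// bs.Lock() immediately (taking the exclusive lock) and defers the returned
// unlock, so the delete cannot interleave with a concurrent GC sweep.
func deleteWithGCLock(bs GCBlockstore, k key.Key) error {
	defer bs.Lock()()
	return bs.DeleteBlock(k)
}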
@@ -8,7 +8,7 @@ import (
 )
 
 // WriteCached returns a blockstore that caches up to |size| unique writes (bs.Put).
-func WriteCached(bs Blockstore, size int) (Blockstore, error) {
+func WriteCached(bs Blockstore, size int) (*writecache, error) {
 	c, err := lru.New(size)
 	if err != nil {
 		return nil, err
@@ -58,3 +58,11 @@ func (w *writecache) PutMany(bs []*blocks.Block) error {
 func (w *writecache) AllKeysChan(ctx context.Context) (<-chan key.Key, error) {
 	return w.blockstore.AllKeysChan(ctx)
 }
+
+func (w *writecache) Lock() func() {
+	return w.blockstore.(GCBlockstore).Lock()
+}
+
+func (w *writecache) RLock() func() {
+	return w.blockstore.(GCBlockstore).RLock()
+}
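
Note that the write cache forwards Lock and RLock by type-asserting its wrapped Blockstore to GCBlockstore, so it should only wrap stores that actually implement the new interface; otherwise the assertion panics. A hypothetical wiring sketch:

package blockstore

import (
	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
)

// newCachedGCBlockstore shows the intended pairing: the *blockstore returned
// by NewBlockstore implements GCBlockstore, so the write cache wrapped around
// it can safely delegate Lock/RLock to it.
func newCachedGCBlockstore(d ds.ThreadSafeDatastore, size int) (GCBlockstore, error) {
	base := NewBlockstore(d)
	return WriteCached(base, size)
}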
 package key
 
-import (
-	"sync"
-)
-
 type KeySet interface {
 	Add(Key)
+	Has(Key) bool
 	Remove(Key)
 	Keys() []Key
 }
 
-type ks struct {
-	lock sync.RWMutex
-	data map[Key]struct{}
+type keySet struct {
+	keys map[Key]struct{}
 }
 
 func NewKeySet() KeySet {
-	return &ks{
-		data: make(map[Key]struct{}),
-	}
+	return &keySet{make(map[Key]struct{})}
 }
 
-func (wl *ks) Add(k Key) {
-	wl.lock.Lock()
-	defer wl.lock.Unlock()
-	wl.data[k] = struct{}{}
+func (gcs *keySet) Add(k Key) {
+	gcs.keys[k] = struct{}{}
 }
 
-func (wl *ks) Remove(k Key) {
-	wl.lock.Lock()
-	defer wl.lock.Unlock()
-	delete(wl.data, k)
+func (gcs *keySet) Has(k Key) bool {
+	_, has := gcs.keys[k]
+	return has
 }
 
-func (wl *ks) Keys() []Key {
-	wl.lock.RLock()
-	defer wl.lock.RUnlock()
-	keys := make([]Key, 0)
-	for k := range wl.data {
-		keys = append(keys, k)
+func (ks *keySet) Keys() []Key {
+	var out []Key
+	for k, _ := range ks.keys {
+		out = append(out, k)
 	}
-	return keys
+	return out
 }
+
+func (ks *keySet) Remove(k Key) {
+	delete(ks.keys, k)
+}
+
+// TODO: implement disk-backed keyset for working with massive DAGs
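
The reworked KeySet drops its internal mutex, so callers that share a set across goroutines must now synchronize access themselves. A brief usage sketch (import path assumed from the repository layout of the time):

package main

import (
	"fmt"

	key "github.com/ipfs/go-ipfs/blocks/key"
)

func main() {
	// Track keys marked as reachable, e.g. during a GC mark phase.
	seen := key.NewKeySet()

	k := key.Key("example-key") // illustrative raw key; real keys come from block multihashes
	seen.Add(k)

	if seen.Has(k) {
		fmt.Println("already marked:", k)
	}

	seen.Remove(k)
	fmt.Println("remaining keys:", seen.Keys())
}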