diff --git a/collections/codec/indexing.go b/collections/codec/indexing.go new file mode 100644 index 000000000000..01f3740bd588 --- /dev/null +++ b/collections/codec/indexing.go @@ -0,0 +1,102 @@ +package codec + +import ( + "encoding/json" + "fmt" + + "cosmossdk.io/schema" +) + +// HasSchemaCodec is an interface that all codec's should implement in order +// to properly support indexing. It is not required by KeyCodec or ValueCodec +// in order to preserve backwards compatibility, but a future version of collections +// may make it required and all codec's should aim to implement it. If it is not +// implemented, fallback defaults will be used for indexing that may be sub-optimal. +// +// Implementations of HasSchemaCodec should test that they are conformant using +// schema.ValidateObjectKey or schema.ValidateObjectValue depending on whether +// the codec is a KeyCodec or ValueCodec respectively. +type HasSchemaCodec[T any] interface { + // SchemaCodec returns the schema codec for the collections codec. + SchemaCodec() (SchemaCodec[T], error) +} + +// SchemaCodec is a codec that supports converting collection codec values to and +// from schema codec values. +type SchemaCodec[T any] struct { + // Fields are the schema fields that the codec represents. If this is empty, + // it will be assumed that this codec represents no value (such as an item key + // or key set value). + Fields []schema.Field + + // ToSchemaType converts a codec value of type T to a value corresponding to + // a schema object key or value (depending on whether this is a key or value + // codec). The returned value should pass validation with schema.ValidateObjectKey + // or schema.ValidateObjectValue with the fields specified in Fields. + // If this function is nil, it will be assumed that T already represents a + // value that conforms to a schema value without any further conversion. + ToSchemaType func(T) (any, error) + + // FromSchemaType converts a schema object key or value to T. + // If this function is nil, it will be assumed that T already represents a + // value that conforms to a schema value without any further conversion. + FromSchemaType func(any) (T, error) +} + +// KeySchemaCodec gets the schema codec for the provided KeyCodec either +// by casting to HasSchemaCodec or returning a fallback codec. +func KeySchemaCodec[K any](cdc KeyCodec[K]) (SchemaCodec[K], error) { + if indexable, ok := cdc.(HasSchemaCodec[K]); ok { + return indexable.SchemaCodec() + } else { + return FallbackSchemaCodec[K](), nil + } +} + +// ValueSchemaCodec gets the schema codec for the provided ValueCodec either +// by casting to HasSchemaCodec or returning a fallback codec. +func ValueSchemaCodec[V any](cdc ValueCodec[V]) (SchemaCodec[V], error) { + if indexable, ok := cdc.(HasSchemaCodec[V]); ok { + return indexable.SchemaCodec() + } else { + return FallbackSchemaCodec[V](), nil + } +} + +// FallbackSchemaCodec returns a fallback schema codec for T when one isn't explicitly +// specified with HasSchemaCodec. It maps all simple types directly to schema kinds +// and converts everything else to JSON String. 
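+// A rough sketch of what the fallback produces (MyStruct is a hypothetical
+// struct type; the exact kind chosen for simple types depends on
+// schema.KindForGoValue):
+//
+//	intCdc := codec.FallbackSchemaCodec[int64]()     // one field with an integer kind, nil conversion funcs
+//	jsonCdc := codec.FallbackSchemaCodec[MyStruct]() // one StringKind field, values round-tripped through JSON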
+func FallbackSchemaCodec[T any]() SchemaCodec[T] { + var t T + kind := schema.KindForGoValue(t) + if err := kind.Validate(); err == nil { + return SchemaCodec[T]{ + Fields: []schema.Field{{ + // we don't set any name so that this can be set to a good default by the caller + Name: "", + Kind: kind, + }}, + // these can be nil because T maps directly to a schema value for this kind + ToSchemaType: nil, + FromSchemaType: nil, + } + } else { + // we default to encoding everything to JSON String + return SchemaCodec[T]{ + Fields: []schema.Field{{Kind: schema.StringKind}}, + ToSchemaType: func(t T) (any, error) { + bz, err := json.Marshal(t) + return string(json.RawMessage(bz)), err + }, + FromSchemaType: func(a any) (T, error) { + var t T + sz, ok := a.(string) + if !ok { + return t, fmt.Errorf("expected string, got %T", a) + } + err := json.Unmarshal([]byte(sz), &t) + return t, err + }, + } + } +} diff --git a/collections/indexes/multi.go b/collections/indexes/multi.go new file mode 100644 index 000000000000..3554346906cb --- /dev/null +++ b/collections/indexes/multi.go @@ -0,0 +1,186 @@ +package indexes + +import ( + "context" + "errors" + + "cosmossdk.io/collections" + "cosmossdk.io/collections/codec" +) + +type multiOptions struct { + uncheckedValue bool +} + +// WithMultiUncheckedValue is an option that can be passed to NewMulti to +// ignore index values different from '[]byte{}' and continue with the operation. +// This should be used only to behave nicely in case you have used values different +// from '[]byte{}' in your storage before migrating to collections. Refer to +// WithKeySetUncheckedValue for more information. +func WithMultiUncheckedValue() func(*multiOptions) { + return func(o *multiOptions) { + o.uncheckedValue = true + } +} + +// Multi defines the most common index. It can be used to create a reference between +// a field of value and its primary key. Multiple primary keys can be mapped to the same +// reference key as the index does not enforce uniqueness constraints. +type Multi[ReferenceKey, PrimaryKey, Value any] struct { + getRefKey func(pk PrimaryKey, value Value) (ReferenceKey, error) + refKeys collections.KeySet[collections.Pair[ReferenceKey, PrimaryKey]] +} + +// NewMulti instantiates a new Multi instance given a schema, +// a Prefix, the humanized name for the index, the reference key key codec +// and the primary key key codec. The getRefKeyFunc is a function that +// given the primary key and value returns the referencing key. 
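+// Example (a sketch; Account, its Owner field and the schema builder sb are
+// hypothetical, while collections.NewPrefix, collections.StringKey and
+// collections.Uint64Key are assumed to be the usual collections primitives):
+//
+//	accountsByOwner := indexes.NewMulti(sb, collections.NewPrefix(1), "accounts_by_owner",
+//		collections.StringKey, collections.Uint64Key,
+//		func(id uint64, acc Account) (string, error) { return acc.Owner, nil },
+//	)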
+func NewMulti[ReferenceKey, PrimaryKey, Value any]( + schema *collections.SchemaBuilder, + prefix collections.Prefix, + name string, + refCodec codec.KeyCodec[ReferenceKey], + pkCodec codec.KeyCodec[PrimaryKey], + getRefKeyFunc func(pk PrimaryKey, value Value) (ReferenceKey, error), + options ...func(*multiOptions), +) *Multi[ReferenceKey, PrimaryKey, Value] { + o := new(multiOptions) + for _, opt := range options { + opt(o) + } + if o.uncheckedValue { + return &Multi[ReferenceKey, PrimaryKey, Value]{ + getRefKey: getRefKeyFunc, + refKeys: collections.NewKeySet( + schema, + prefix, + name, + collections.PairKeyCodec(refCodec, pkCodec), + collections.WithKeySetUncheckedValue(), + collections.WithKeySetSecondaryIndex(), + ), + } + } + + return &Multi[ReferenceKey, PrimaryKey, Value]{ + getRefKey: getRefKeyFunc, + refKeys: collections.NewKeySet( + schema, + prefix, + name, + collections.PairKeyCodec(refCodec, pkCodec), + collections.WithKeySetSecondaryIndex(), + ), + } +} + +func (m *Multi[ReferenceKey, PrimaryKey, Value]) Reference(ctx context.Context, pk PrimaryKey, newValue Value, lazyOldValue func() (Value, error)) error { + oldValue, err := lazyOldValue() + switch { + // if no error it means the value existed, and we need to remove the old indexes + case err == nil: + err = m.unreference(ctx, pk, oldValue) + if err != nil { + return err + } + // if error is ErrNotFound, it means that the object does not exist, so we're creating indexes for the first time. + // we do nothing. + case errors.Is(err, collections.ErrNotFound): + // default case means that there was some other error + default: + return err + } + // create new indexes + refKey, err := m.getRefKey(pk, newValue) + if err != nil { + return err + } + return m.refKeys.Set(ctx, collections.Join(refKey, pk)) +} + +func (m *Multi[ReferenceKey, PrimaryKey, Value]) Unreference(ctx context.Context, pk PrimaryKey, getValue func() (Value, error)) error { + value, err := getValue() + if err != nil { + return err + } + return m.unreference(ctx, pk, value) +} + +func (m *Multi[ReferenceKey, PrimaryKey, Value]) unreference(ctx context.Context, pk PrimaryKey, value Value) error { + refKey, err := m.getRefKey(pk, value) + if err != nil { + return err + } + return m.refKeys.Remove(ctx, collections.Join(refKey, pk)) +} + +func (m *Multi[ReferenceKey, PrimaryKey, Value]) Iterate(ctx context.Context, ranger collections.Ranger[collections.Pair[ReferenceKey, PrimaryKey]]) (MultiIterator[ReferenceKey, PrimaryKey], error) { + iter, err := m.refKeys.Iterate(ctx, ranger) + return (MultiIterator[ReferenceKey, PrimaryKey])(iter), err +} + +func (m *Multi[ReferenceKey, PrimaryKey, Value]) Walk( + ctx context.Context, + ranger collections.Ranger[collections.Pair[ReferenceKey, PrimaryKey]], + walkFunc func(indexingKey ReferenceKey, indexedKey PrimaryKey) (stop bool, err error), +) error { + return m.refKeys.Walk(ctx, ranger, func(key collections.Pair[ReferenceKey, PrimaryKey]) (bool, error) { + return walkFunc(key.K1(), key.K2()) + }) +} + +// MatchExact returns a MultiIterator containing all the primary keys referenced by the provided reference key. 
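+// Example (a sketch, reusing the hypothetical accountsByOwner index from the
+// NewMulti documentation):
+//
+//	iter, err := accountsByOwner.MatchExact(ctx, "cosmos1owner")
+//	if err != nil {
+//		return err
+//	}
+//	ids, err := iter.PrimaryKeys() // ids of every account referencing "cosmos1owner"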
+func (m *Multi[ReferenceKey, PrimaryKey, Value]) MatchExact(ctx context.Context, refKey ReferenceKey) (MultiIterator[ReferenceKey, PrimaryKey], error) { + return m.Iterate(ctx, collections.NewPrefixedPairRange[ReferenceKey, PrimaryKey](refKey)) +} + +func (m *Multi[K1, K2, Value]) KeyCodec() codec.KeyCodec[collections.Pair[K1, K2]] { + return m.refKeys.KeyCodec() +} + +// MultiIterator is just a KeySetIterator with key as Pair[ReferenceKey, PrimaryKey]. +type MultiIterator[ReferenceKey, PrimaryKey any] collections.KeySetIterator[collections.Pair[ReferenceKey, PrimaryKey]] + +// PrimaryKey returns the iterator's current primary key. +func (i MultiIterator[ReferenceKey, PrimaryKey]) PrimaryKey() (PrimaryKey, error) { + fullKey, err := i.FullKey() + return fullKey.K2(), err +} + +// PrimaryKeys fully consumes the iterator and returns the list of primary keys. +func (i MultiIterator[ReferenceKey, PrimaryKey]) PrimaryKeys() ([]PrimaryKey, error) { + fullKeys, err := i.FullKeys() + if err != nil { + return nil, err + } + pks := make([]PrimaryKey, len(fullKeys)) + for i, fullKey := range fullKeys { + pks[i] = fullKey.K2() + } + return pks, nil +} + +// FullKey returns the current full reference key as Pair[ReferenceKey, PrimaryKey]. +func (i MultiIterator[ReferenceKey, PrimaryKey]) FullKey() (collections.Pair[ReferenceKey, PrimaryKey], error) { + return (collections.KeySetIterator[collections.Pair[ReferenceKey, PrimaryKey]])(i).Key() +} + +// FullKeys fully consumes the iterator and returns all the list of full reference keys. +func (i MultiIterator[ReferenceKey, PrimaryKey]) FullKeys() ([]collections.Pair[ReferenceKey, PrimaryKey], error) { + return (collections.KeySetIterator[collections.Pair[ReferenceKey, PrimaryKey]])(i).Keys() +} + +// Next advances the iterator. +func (i MultiIterator[ReferenceKey, PrimaryKey]) Next() { + (collections.KeySetIterator[collections.Pair[ReferenceKey, PrimaryKey]])(i).Next() +} + +// Valid asserts if the iterator is still valid or not. +func (i MultiIterator[ReferenceKey, PrimaryKey]) Valid() bool { + return (collections.KeySetIterator[collections.Pair[ReferenceKey, PrimaryKey]])(i).Valid() +} + +// Close closes the iterator. +func (i MultiIterator[ReferenceKey, PrimaryKey]) Close() error { + return (collections.KeySetIterator[collections.Pair[ReferenceKey, PrimaryKey]])(i).Close() +} diff --git a/collections/indexes/reverse_pair.go b/collections/indexes/reverse_pair.go new file mode 100644 index 000000000000..4b85b62c6c97 --- /dev/null +++ b/collections/indexes/reverse_pair.go @@ -0,0 +1,170 @@ +package indexes + +import ( + "context" + + "cosmossdk.io/collections" + "cosmossdk.io/collections/codec" +) + +type reversePairOptions struct { + uncheckedValue bool +} + +// WithReversePairUncheckedValue is an option that can be passed to NewReversePair to +// ignore index values different from '[]byte{}' and continue with the operation. +// This should be used only if you are migrating to collections and have used a different +// placeholder value in your storage index keys. +// Refer to WithKeySetUncheckedValue for more information. +func WithReversePairUncheckedValue() func(*reversePairOptions) { + return func(o *reversePairOptions) { + o.uncheckedValue = true + } +} + +// ReversePair is an index that is used with collections.Pair keys. It indexes objects by their second part of the key. +// When the value is being indexed by collections.IndexedMap then ReversePair will create a relationship between +// the second part of the primary key and the first part. 
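+// Example (a sketch; Balance and sb are hypothetical and the IndexedMap is
+// assumed to be keyed by Pair[address, denom]):
+//
+//	balancesByDenom := indexes.NewReversePair[Balance](sb, collections.NewPrefix(2), "balances_by_denom",
+//		collections.PairKeyCodec(collections.StringKey, collections.StringKey),
+//	)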
+type ReversePair[K1, K2, Value any] struct { + refKeys collections.KeySet[collections.Pair[K2, K1]] // refKeys has the relationships between Join(K2, K1) +} + +// TODO(tip): this is an interface to cast a collections.KeyCodec +// to a pair codec. currently we return it as a KeyCodec[Pair[K1, K2]] +// to improve dev experience with type inference, which means we cannot +// get the concrete implementation which exposes KeyCodec1 and KeyCodec2. +type pairKeyCodec[K1, K2 any] interface { + KeyCodec1() codec.KeyCodec[K1] + KeyCodec2() codec.KeyCodec[K2] +} + +// NewReversePair instantiates a new ReversePair index. +// NOTE: when using this function you will need to type hint: doing NewReversePair[Value]() +// Example: if the value of the indexed map is string, you need to do NewReversePair[string](...) +func NewReversePair[Value, K1, K2 any]( + sb *collections.SchemaBuilder, + prefix collections.Prefix, + name string, + pairCodec codec.KeyCodec[collections.Pair[K1, K2]], + options ...func(*reversePairOptions), +) *ReversePair[K1, K2, Value] { + pkc := pairCodec.(pairKeyCodec[K1, K2]) + o := new(reversePairOptions) + for _, option := range options { + option(o) + } + if o.uncheckedValue { + return &ReversePair[K1, K2, Value]{ + refKeys: collections.NewKeySet( + sb, + prefix, + name, + collections.PairKeyCodec(pkc.KeyCodec2(), pkc.KeyCodec1()), + collections.WithKeySetUncheckedValue(), + collections.WithKeySetSecondaryIndex(), + ), + } + } + + mi := &ReversePair[K1, K2, Value]{ + refKeys: collections.NewKeySet( + sb, + prefix, + name, + collections.PairKeyCodec(pkc.KeyCodec2(), pkc.KeyCodec1()), + collections.WithKeySetSecondaryIndex(), + ), + } + + return mi +} + +// Iterate exposes the raw iterator API. +func (i *ReversePair[K1, K2, Value]) Iterate(ctx context.Context, ranger collections.Ranger[collections.Pair[K2, K1]]) (iter ReversePairIterator[K2, K1], err error) { + sIter, err := i.refKeys.Iterate(ctx, ranger) + if err != nil { + return + } + return (ReversePairIterator[K2, K1])(sIter), nil +} + +// MatchExact will return an iterator containing only the primary keys starting with the provided second part of the multipart pair key. 
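+// Example (a sketch, reusing the hypothetical balancesByDenom index above):
+//
+//	iter, err := balancesByDenom.MatchExact(ctx, "atom")
+//	if err != nil {
+//		return err
+//	}
+//	pairs, err := iter.PrimaryKeys() // every (address, "atom") primary key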
+func (i *ReversePair[K1, K2, Value]) MatchExact(ctx context.Context, key K2) (ReversePairIterator[K2, K1], error) { + return i.Iterate(ctx, collections.NewPrefixedPairRange[K2, K1](key)) +} + +// Reference implements collections.Index +func (i *ReversePair[K1, K2, Value]) Reference(ctx context.Context, pk collections.Pair[K1, K2], _ Value, _ func() (Value, error)) error { + return i.refKeys.Set(ctx, collections.Join(pk.K2(), pk.K1())) +} + +// Unreference implements collections.Index +func (i *ReversePair[K1, K2, Value]) Unreference(ctx context.Context, pk collections.Pair[K1, K2], _ func() (Value, error)) error { + return i.refKeys.Remove(ctx, collections.Join(pk.K2(), pk.K1())) +} + +func (i *ReversePair[K1, K2, Value]) Walk( + ctx context.Context, + ranger collections.Ranger[collections.Pair[K2, K1]], + walkFunc func(indexingKey K2, indexedKey K1) (stop bool, err error), +) error { + return i.refKeys.Walk(ctx, ranger, func(key collections.Pair[K2, K1]) (bool, error) { + return walkFunc(key.K1(), key.K2()) + }) +} + +func (i *ReversePair[K1, K2, Value]) IterateRaw( + ctx context.Context, start, end []byte, order collections.Order, +) ( + iter collections.Iterator[collections.Pair[K2, K1], collections.NoValue], err error, +) { + return i.refKeys.IterateRaw(ctx, start, end, order) +} + +func (i *ReversePair[K1, K2, Value]) KeyCodec() codec.KeyCodec[collections.Pair[K2, K1]] { + return i.refKeys.KeyCodec() +} + +// ReversePairIterator is a helper type around a collections.KeySetIterator when used to work +// with ReversePair indexes iterations. +type ReversePairIterator[K2, K1 any] collections.KeySetIterator[collections.Pair[K2, K1]] + +// PrimaryKey returns the primary key from the index. The index is composed like a reverse +// pair key. So we just fetch the pair key from the index and return the reverse. +func (m ReversePairIterator[K2, K1]) PrimaryKey() (pair collections.Pair[K1, K2], err error) { + reversePair, err := m.FullKey() + if err != nil { + return pair, err + } + pair = collections.Join(reversePair.K2(), reversePair.K1()) + return pair, nil +} + +// PrimaryKeys returns all the primary keys contained in the iterator. +func (m ReversePairIterator[K2, K1]) PrimaryKeys() (pairs []collections.Pair[K1, K2], err error) { + defer m.Close() + for ; m.Valid(); m.Next() { + pair, err := m.PrimaryKey() + if err != nil { + return nil, err + } + pairs = append(pairs, pair) + } + return pairs, err +} + +func (m ReversePairIterator[K2, K1]) FullKey() (p collections.Pair[K2, K1], err error) { + return (collections.KeySetIterator[collections.Pair[K2, K1]])(m).Key() +} + +func (m ReversePairIterator[K2, K1]) Next() { + (collections.KeySetIterator[collections.Pair[K2, K1]])(m).Next() +} + +func (m ReversePairIterator[K2, K1]) Valid() bool { + return (collections.KeySetIterator[collections.Pair[K2, K1]])(m).Valid() +} + +func (m ReversePairIterator[K2, K1]) Close() error { + return (collections.KeySetIterator[collections.Pair[K2, K1]])(m).Close() +} diff --git a/collections/indexing.go b/collections/indexing.go new file mode 100644 index 000000000000..bb039e7be256 --- /dev/null +++ b/collections/indexing.go @@ -0,0 +1,182 @@ +package collections + +import ( + "bytes" + "fmt" + "strings" + + "github.com/tidwall/btree" + + "cosmossdk.io/collections/codec" + "cosmossdk.io/schema" +) + +// IndexingOptions are indexing options for the collections schema. +type IndexingOptions struct { + // RetainDeletionsFor is the list of collections to retain deletions for. 
+ RetainDeletionsFor []string +} + +// ModuleCodec returns the ModuleCodec for this schema for the provided options. +func (s Schema) ModuleCodec(opts IndexingOptions) (schema.ModuleCodec, error) { + decoder := moduleDecoder{ + collectionLookup: &btree.Map[string, *collectionSchemaCodec]{}, + } + + retainDeletions := make(map[string]bool) + for _, collName := range opts.RetainDeletionsFor { + retainDeletions[collName] = true + } + + var types []schema.Type + for _, collName := range s.collectionsOrdered { + coll := s.collectionsByName[collName] + + // skip secondary indexes + if coll.isSecondaryIndex() { + continue + } + + cdc, err := coll.schemaCodec() + if err != nil { + return schema.ModuleCodec{}, err + } + + if retainDeletions[coll.GetName()] { + cdc.objectType.RetainDeletions = true + } + + types = append(types, cdc.objectType) + + decoder.collectionLookup.Set(string(coll.GetPrefix()), cdc) + } + + modSchema, err := schema.CompileModuleSchema(types...) + if err != nil { + return schema.ModuleCodec{}, err + } + + return schema.ModuleCodec{ + Schema: modSchema, + KVDecoder: decoder.decodeKV, + }, nil +} + +type moduleDecoder struct { + // collectionLookup lets us efficiently look the correct collection based on raw key bytes + collectionLookup *btree.Map[string, *collectionSchemaCodec] +} + +func (m moduleDecoder) decodeKV(update schema.KVPairUpdate) ([]schema.StateObjectUpdate, error) { + key := update.Key + ks := string(key) + var cd *collectionSchemaCodec + // we look for the collection whose prefix is less than this key + m.collectionLookup.Descend(ks, func(prefix string, cur *collectionSchemaCodec) bool { + bytesPrefix := cur.coll.GetPrefix() + if bytes.HasPrefix(key, bytesPrefix) { + cd = cur + return true + } + return false + }) + if cd == nil { + return nil, nil + } + + return cd.decodeKVPair(update) +} + +func (c collectionSchemaCodec) decodeKVPair(update schema.KVPairUpdate) ([]schema.StateObjectUpdate, error) { + // strip prefix + key := update.Key + key = key[len(c.coll.GetPrefix()):] + + k, err := c.keyDecoder(key) + if err != nil { + return []schema.StateObjectUpdate{ + {TypeName: c.coll.GetName()}, + }, err + } + + if update.Remove { + return []schema.StateObjectUpdate{ + {TypeName: c.coll.GetName(), Key: k, Delete: true}, + }, nil + } + + v, err := c.valueDecoder(update.Value) + if err != nil { + return []schema.StateObjectUpdate{ + {TypeName: c.coll.GetName(), Key: k}, + }, err + } + + return []schema.StateObjectUpdate{ + {TypeName: c.coll.GetName(), Key: k, Value: v}, + }, nil +} + +func (c collectionImpl[K, V]) schemaCodec() (*collectionSchemaCodec, error) { + res := &collectionSchemaCodec{ + coll: c, + } + res.objectType.Name = c.GetName() + + keyDecoder, err := codec.KeySchemaCodec(c.m.kc) + if err != nil { + return nil, err + } + res.objectType.KeyFields = keyDecoder.Fields + res.keyDecoder = func(i []byte) (any, error) { + _, x, err := c.m.kc.Decode(i) + if err != nil { + return nil, err + } + if keyDecoder.ToSchemaType == nil { + return x, nil + } + return keyDecoder.ToSchemaType(x) + } + ensureFieldNames(c.m.kc, "key", res.objectType.KeyFields) + + valueDecoder, err := codec.ValueSchemaCodec(c.m.vc) + if err != nil { + return nil, err + } + res.objectType.ValueFields = valueDecoder.Fields + res.valueDecoder = func(i []byte) (any, error) { + x, err := c.m.vc.Decode(i) + if err != nil { + return nil, err + } + return valueDecoder.ToSchemaType(x) + } + ensureFieldNames(c.m.vc, "value", res.objectType.ValueFields) + + return res, nil +} + +// ensureFieldNames makes 
sure that all fields have valid names - either the +// names were specified by user or they get filled +func ensureFieldNames(x any, defaultName string, cols []schema.Field) { + var names []string = nil + if hasName, ok := x.(interface{ Name() string }); ok { + name := hasName.Name() + if name != "" { + names = strings.Split(hasName.Name(), ",") + } + } + for i, col := range cols { + if names != nil && i < len(names) { + col.Name = names[i] + } else if col.Name == "" { + if i == 0 && len(cols) == 1 { + col.Name = defaultName + } else { + col.Name = fmt.Sprintf("%s%d", defaultName, i+1) + } + } + cols[i] = col + } +} diff --git a/collections/keyset.go b/collections/keyset.go new file mode 100644 index 000000000000..ee8e5ba011bc --- /dev/null +++ b/collections/keyset.go @@ -0,0 +1,152 @@ +package collections + +import ( + "bytes" + "context" + "fmt" + + "cosmossdk.io/collections/codec" +) + +// WithKeySetUncheckedValue changes the behavior of the KeySet when it encounters +// a value different from '[]byte{}', by default the KeySet errors when this happens. +// This option allows to ignore the value and continue with the operation, in turn +// the value will be cleared out and set to '[]byte{}'. +// You should never use this option if you're creating a new state object from scratch. +// This should be used only to behave nicely in case you have used values different +// from '[]byte{}' in your storage before migrating to collections. +func WithKeySetUncheckedValue() func(opt *keySetOptions) { + return func(opt *keySetOptions) { + opt.uncheckedValue = true + } +} + +// WithKeySetSecondaryIndex changes the behavior of the KeySet to be a secondary index. +func WithKeySetSecondaryIndex() func(opt *keySetOptions) { + return func(opt *keySetOptions) { + opt.isSecondaryIndex = true + } +} + +type keySetOptions struct { + uncheckedValue bool + isSecondaryIndex bool +} + +// KeySet builds on top of a Map and represents a collection retaining only a set +// of keys and no value. It can be used, for example, in an allow list. +type KeySet[K any] Map[K, NoValue] + +// NewKeySet returns a KeySet given a Schema, Prefix a human name for the collection +// and a KeyCodec for the key K. +func NewKeySet[K any]( + schema *SchemaBuilder, + prefix Prefix, + name string, + keyCodec codec.KeyCodec[K], + options ...func(opt *keySetOptions), +) KeySet[K] { + o := new(keySetOptions) + for _, opt := range options { + opt(o) + } + vc := noValueCodec + if o.uncheckedValue { + vc = codec.NewAltValueCodec(vc, func(_ []byte) (NoValue, error) { return NoValue{}, nil }) + } + return (KeySet[K])(NewMap(schema, prefix, name, keyCodec, vc, withMapSecondaryIndex(o.isSecondaryIndex))) +} + +// Set adds the key to the KeySet. Errors on encoding problems. +func (k KeySet[K]) Set(ctx context.Context, key K) error { + return (Map[K, NoValue])(k).Set(ctx, key, NoValue{}) +} + +// Has returns if the key is present in the KeySet. +// An error is returned only in case of encoding problems. +func (k KeySet[K]) Has(ctx context.Context, key K) (bool, error) { + return (Map[K, NoValue])(k).Has(ctx, key) +} + +// Remove removes the key for the KeySet. An error is returned in case of +// encoding error, it won't report through the error if the key was +// removed or not. +func (k KeySet[K]) Remove(ctx context.Context, key K) error { + return (Map[K, NoValue])(k).Remove(ctx, key) +} + +// Iterate iterates over the keys given the provided Ranger. If ranger is nil, +// the KeySetIterator will include all the existing keys within the KeySet. 
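+// Example (a sketch; allowList is a hypothetical KeySet[string]):
+//
+//	iter, err := allowList.Iterate(ctx, nil) // nil Ranger: every key, ascending
+//	if err != nil {
+//		return err
+//	}
+//	addrs, err := iter.Keys() // fully consumes the iterator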
+func (k KeySet[K]) Iterate(ctx context.Context, ranger Ranger[K]) (KeySetIterator[K], error) { + iter, err := (Map[K, NoValue])(k).Iterate(ctx, ranger) + if err != nil { + return KeySetIterator[K]{}, err + } + + return (KeySetIterator[K])(iter), nil +} + +func (k KeySet[K]) IterateRaw(ctx context.Context, start, end []byte, order Order) (Iterator[K, NoValue], error) { + return (Map[K, NoValue])(k).IterateRaw(ctx, start, end, order) +} + +// Walk provides the same functionality as Map.Walk, but callbacks the walk +// function only with the key. +func (k KeySet[K]) Walk(ctx context.Context, ranger Ranger[K], walkFunc func(key K) (stop bool, err error)) error { + return (Map[K, NoValue])(k).Walk(ctx, ranger, func(key K, value NoValue) (bool, error) { return walkFunc(key) }) +} + +// Clear clears the KeySet using the provided Ranger. Refer to Map.Clear for +// behavioral documentation. +func (k KeySet[K]) Clear(ctx context.Context, ranger Ranger[K]) error { + return (Map[K, NoValue])(k).Clear(ctx, ranger) +} + +func (k KeySet[K]) KeyCodec() codec.KeyCodec[K] { return (Map[K, NoValue])(k).KeyCodec() } +func (k KeySet[K]) ValueCodec() codec.ValueCodec[NoValue] { return (Map[K, NoValue])(k).ValueCodec() } + +// KeySetIterator works like an Iterator, but it does not expose any API to deal with values. +type KeySetIterator[K any] Iterator[K, NoValue] + +func (i KeySetIterator[K]) Key() (K, error) { return (Iterator[K, NoValue])(i).Key() } +func (i KeySetIterator[K]) Keys() ([]K, error) { return (Iterator[K, NoValue])(i).Keys() } +func (i KeySetIterator[K]) Next() { (Iterator[K, NoValue])(i).Next() } +func (i KeySetIterator[K]) Valid() bool { return (Iterator[K, NoValue])(i).Valid() } +func (i KeySetIterator[K]) Close() error { return (Iterator[K, NoValue])(i).Close() } + +var noValueCodec codec.ValueCodec[NoValue] = NoValue{} + +const noValueValueType = "no_value" + +// NoValue is a type that can be used to represent a non-existing value. +type NoValue struct{} + +func (n NoValue) EncodeJSON(_ NoValue) ([]byte, error) { + return nil, nil +} + +func (n NoValue) DecodeJSON(b []byte) (NoValue, error) { + if b != nil { + return NoValue{}, fmt.Errorf("%w: expected nil json bytes, got: %x", ErrEncoding, b) + } + return NoValue{}, nil +} + +func (NoValue) Encode(_ NoValue) ([]byte, error) { + return []byte{}, nil +} + +func (NoValue) Decode(b []byte) (NoValue, error) { + if !bytes.Equal(b, []byte{}) { + return NoValue{}, fmt.Errorf("%w: invalid value, wanted an empty non-nil byte slice, got: %x", ErrEncoding, b) + } + return NoValue{}, nil +} + +func (NoValue) Stringify(_ NoValue) string { + return noValueValueType +} + +func (n NoValue) ValueType() string { + return noValueValueType +} diff --git a/collections/map.go b/collections/map.go new file mode 100644 index 000000000000..3fda806df380 --- /dev/null +++ b/collections/map.go @@ -0,0 +1,290 @@ +package collections + +import ( + "bytes" + "context" + "fmt" + + "cosmossdk.io/collections/codec" + "cosmossdk.io/core/store" +) + +// Map represents the basic collections object. +// It is used to map arbitrary keys to arbitrary +// objects. 
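+// Example (a sketch; sb is a hypothetical *SchemaBuilder and
+// collections.StringKey / collections.Uint64Value are assumed to be the usual
+// primitive codecs):
+//
+//	balances := collections.NewMap(sb, collections.NewPrefix(0), "balances",
+//		collections.StringKey, collections.Uint64Value)
+//	if err := balances.Set(ctx, "addr1", 100); err != nil {
+//		return err
+//	}
+//	amount, err := balances.Get(ctx, "addr1") // ErrNotFound if missing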
+type Map[K, V any] struct { + kc codec.KeyCodec[K] + vc codec.ValueCodec[V] + + // store accessor + sa func(context.Context) store.KVStore + prefix []byte + name string + + // isSecondaryIndex indicates that this map represents a secondary index + // on another collection and that it should be skipped when generating + // a user facing schema + isSecondaryIndex bool +} + +// withMapSecondaryIndex changes the behavior of the Map to be a secondary index. +func withMapSecondaryIndex(isSecondaryIndex bool) func(opt *mapOptions) { + return func(opt *mapOptions) { + opt.isSecondaryIndex = isSecondaryIndex + } +} + +type mapOptions struct { + isSecondaryIndex bool +} + +// NewMap returns a Map given a StoreKey, a Prefix, human-readable name and the relative value and key encoders. +// Name and prefix must be unique within the schema and name must match the format specified by NameRegex, or +// else this method will panic. +func NewMap[K, V any]( + schemaBuilder *SchemaBuilder, + prefix Prefix, + name string, + keyCodec codec.KeyCodec[K], + valueCodec codec.ValueCodec[V], + options ...func(opt *mapOptions), +) Map[K, V] { + o := new(mapOptions) + for _, opt := range options { + opt(o) + } + m := Map[K, V]{ + kc: keyCodec, + vc: valueCodec, + sa: schemaBuilder.schema.storeAccessor, + prefix: prefix.Bytes(), + name: name, + isSecondaryIndex: o.isSecondaryIndex, + } + schemaBuilder.addCollection(collectionImpl[K, V]{m}) + return m +} + +func (m Map[K, V]) GetName() string { + return m.name +} + +func (m Map[K, V]) GetPrefix() []byte { + return m.prefix +} + +// Set maps the provided value to the provided key in the store. +// Errors with ErrEncoding if key or value encoding fails. +func (m Map[K, V]) Set(ctx context.Context, key K, value V) error { + bytesKey, err := EncodeKeyWithPrefix(m.prefix, m.kc, key) + if err != nil { + return err + } + + valueBytes, err := m.vc.Encode(value) + if err != nil { + return fmt.Errorf("%w: value encode: %w", ErrEncoding, err) + } + + kvStore := m.sa(ctx) + return kvStore.Set(bytesKey, valueBytes) +} + +// Get returns the value associated with the provided key, +// errors with ErrNotFound if the key does not exist, or +// with ErrEncoding if the key or value decoding fails. +func (m Map[K, V]) Get(ctx context.Context, key K) (v V, err error) { + bytesKey, err := EncodeKeyWithPrefix(m.prefix, m.kc, key) + if err != nil { + return v, err + } + + kvStore := m.sa(ctx) + valueBytes, err := kvStore.Get(bytesKey) + if err != nil { + return v, err + } + if valueBytes == nil { + return v, fmt.Errorf("%w: key '%s' of type %s", ErrNotFound, m.kc.Stringify(key), m.vc.ValueType()) + } + + v, err = m.vc.Decode(valueBytes) + if err != nil { + return v, fmt.Errorf("%w: value decode: %w", ErrEncoding, err) + } + return v, nil +} + +// Has reports whether the key is present in storage or not. +// Errors with ErrEncoding if key encoding fails. +func (m Map[K, V]) Has(ctx context.Context, key K) (bool, error) { + bytesKey, err := EncodeKeyWithPrefix(m.prefix, m.kc, key) + if err != nil { + return false, err + } + kvStore := m.sa(ctx) + return kvStore.Has(bytesKey) +} + +// Remove removes the key from the storage. +// Errors with ErrEncoding if key encoding fails. +// If the key does not exist then this is a no-op. +func (m Map[K, V]) Remove(ctx context.Context, key K) error { + bytesKey, err := EncodeKeyWithPrefix(m.prefix, m.kc, key) + if err != nil { + return err + } + kvStore := m.sa(ctx) + return kvStore.Delete(bytesKey) +} + +// Iterate provides an Iterator over K and V. 
It accepts a Ranger interface. +// A nil ranger equals to iterate over all the keys in ascending order. +func (m Map[K, V]) Iterate(ctx context.Context, ranger Ranger[K]) (Iterator[K, V], error) { + return iteratorFromRanger(ctx, m, ranger) +} + +// Walk iterates over the Map with the provided range, calls the provided +// walk function with the decoded key and value. If the callback function +// returns true then the walking is stopped. +// A nil ranger equals to walking over the entire key and value set. +func (m Map[K, V]) Walk(ctx context.Context, ranger Ranger[K], walkFunc func(key K, value V) (stop bool, err error)) error { + iter, err := m.Iterate(ctx, ranger) + if err != nil { + return err + } + defer iter.Close() + + for ; iter.Valid(); iter.Next() { + kv, err := iter.KeyValue() + if err != nil { + return err + } + stop, err := walkFunc(kv.Key, kv.Value) + if err != nil { + return err + } + if stop { + return nil + } + } + return nil +} + +// Clear clears the collection contained within the provided key range. +// A nil ranger equals to clearing the whole collection. +// NOTE: this API needs to be used with care, considering that as of today +// cosmos-sdk stores the deletion records to be committed in a memory cache, +// clearing a lot of data might make the node go OOM. +func (m Map[K, V]) Clear(ctx context.Context, ranger Ranger[K]) error { + startBytes, endBytes, _, err := parseRangeInstruction(m.prefix, m.kc, ranger) + if err != nil { + return err + } + return deleteDomain(m.sa(ctx), startBytes, endBytes) +} + +const clearBatchSize = 10000 + +// deleteDomain deletes the domain of an iterator, the key difference +// is that it uses batches to clear the store meaning that it will read +// the keys within the domain close the iterator and then delete them. +func deleteDomain(s store.KVStore, start, end []byte) error { + for { + iter, err := s.Iterator(start, end) + if err != nil { + return err + } + + keys := make([][]byte, 0, clearBatchSize) + for ; iter.Valid() && len(keys) < clearBatchSize; iter.Next() { + keys = append(keys, iter.Key()) + } + + // we close the iterator here instead of deferring + err = iter.Close() + if err != nil { + return err + } + + for _, key := range keys { + err = s.Delete(key) + if err != nil { + return err + } + } + + // If we've retrieved less than the batchSize, we're done. + if len(keys) < clearBatchSize { + break + } + } + + return nil +} + +// IterateRaw iterates over the collection. The iteration range is untyped, it uses raw +// bytes. The resulting Iterator is typed. +// A nil start iterates from the first key contained in the collection. +// A nil end iterates up to the last key contained in the collection. +// A nil start and a nil end iterates over every key contained in the collection. +// TODO(tip): simplify after https://github.com/cosmos/cosmos-sdk/pull/14310 is merged +func (m Map[K, V]) IterateRaw(ctx context.Context, start, end []byte, order Order) (Iterator[K, V], error) { + prefixedStart := append(m.prefix, start...) + var prefixedEnd []byte + if end == nil { + prefixedEnd = nextBytesPrefixKey(m.prefix) + } else { + prefixedEnd = append(m.prefix, end...) 
+ } + + if bytes.Compare(prefixedStart, prefixedEnd) == 1 { + return Iterator[K, V]{}, ErrInvalidIterator + } + + s := m.sa(ctx) + var ( + storeIter store.Iterator + err error + ) + switch order { + case OrderAscending: + storeIter, err = s.Iterator(prefixedStart, prefixedEnd) + case OrderDescending: + storeIter, err = s.ReverseIterator(prefixedStart, prefixedEnd) + default: + return Iterator[K, V]{}, errOrder + } + if err != nil { + return Iterator[K, V]{}, err + } + + return Iterator[K, V]{ + kc: m.kc, + vc: m.vc, + iter: storeIter, + prefixLength: len(m.prefix), + }, nil +} + +// KeyCodec returns the Map's KeyCodec. +func (m Map[K, V]) KeyCodec() codec.KeyCodec[K] { return m.kc } + +// ValueCodec returns the Map's ValueCodec. +func (m Map[K, V]) ValueCodec() codec.ValueCodec[V] { return m.vc } + +// EncodeKeyWithPrefix returns how the collection would store the key in storage given +// prefix, key codec and the concrete key. +func EncodeKeyWithPrefix[K any](prefix []byte, kc codec.KeyCodec[K], key K) ([]byte, error) { + prefixLen := len(prefix) + // preallocate buffer + keyBytes := make([]byte, prefixLen+kc.Size(key)) + // put prefix + copy(keyBytes, prefix) + // put key + _, err := kc.Encode(keyBytes[prefixLen:], key) + if err != nil { + return nil, fmt.Errorf("%w: key encode: %w", ErrEncoding, err) + } + return keyBytes, nil +} diff --git a/collections/pair.go b/collections/pair.go new file mode 100644 index 000000000000..955cfe3d22b7 --- /dev/null +++ b/collections/pair.go @@ -0,0 +1,332 @@ +package collections + +import ( + "encoding/json" + "fmt" + "strings" + + "cosmossdk.io/collections/codec" + "cosmossdk.io/schema" +) + +// Pair defines a key composed of two keys. +type Pair[K1, K2 any] struct { + key1 *K1 + key2 *K2 +} + +// K1 returns the first part of the key. +// If not present the zero value is returned. +func (p Pair[K1, K2]) K1() (k1 K1) { + if p.key1 == nil { + return + } + return *p.key1 +} + +// K2 returns the second part of the key. +// If not present the zero value is returned. +func (p Pair[K1, K2]) K2() (k2 K2) { + if p.key2 == nil { + return + } + return *p.key2 +} + +// Join creates a new Pair instance composed of the two provided keys, in order. +func Join[K1, K2 any](key1 K1, key2 K2) Pair[K1, K2] { + return Pair[K1, K2]{ + key1: &key1, + key2: &key2, + } +} + +// PairPrefix creates a new Pair instance composed only of the first part of the key. +func PairPrefix[K1, K2 any](key K1) Pair[K1, K2] { + return Pair[K1, K2]{key1: &key} +} + +// PairKeyCodec instantiates a new KeyCodec instance that can encode the Pair, given the KeyCodec of the +// first part of the key and the KeyCodec of the second part of the key. +func PairKeyCodec[K1, K2 any](keyCodec1 codec.KeyCodec[K1], keyCodec2 codec.KeyCodec[K2]) codec.KeyCodec[Pair[K1, K2]] { + return pairKeyCodec[K1, K2]{ + keyCodec1: keyCodec1, + keyCodec2: keyCodec2, + } +} + +// NamedPairKeyCodec instantiates a new KeyCodec instance that can encode the Pair, given the KeyCodec of the +// first part of the key and the KeyCodec of the second part of the key. +// It also provides names for the keys which are used for indexing purposes. 
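+// Example (a sketch; "owner" and "denom" become the schema field names of the
+// two key parts when the collection is indexed):
+//
+//	kc := collections.NamedPairKeyCodec("owner", collections.StringKey, "denom", collections.StringKey)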
+func NamedPairKeyCodec[K1, K2 any](key1Name string, keyCodec1 codec.KeyCodec[K1], key2Name string, keyCodec2 codec.KeyCodec[K2]) codec.KeyCodec[Pair[K1, K2]] { + return pairKeyCodec[K1, K2]{ + key1Name: key1Name, + key2Name: key2Name, + keyCodec1: keyCodec1, + keyCodec2: keyCodec2, + } +} + +type pairKeyCodec[K1, K2 any] struct { + key1Name, key2Name string + keyCodec1 codec.KeyCodec[K1] + keyCodec2 codec.KeyCodec[K2] +} + +func (p pairKeyCodec[K1, K2]) KeyCodec1() codec.KeyCodec[K1] { return p.keyCodec1 } + +func (p pairKeyCodec[K1, K2]) KeyCodec2() codec.KeyCodec[K2] { return p.keyCodec2 } + +func (p pairKeyCodec[K1, K2]) Encode(buffer []byte, pair Pair[K1, K2]) (int, error) { + writtenTotal := 0 + if pair.key1 != nil { + written, err := p.keyCodec1.EncodeNonTerminal(buffer, *pair.key1) + if err != nil { + return 0, err + } + writtenTotal += written + } + if pair.key2 != nil { + written, err := p.keyCodec2.Encode(buffer[writtenTotal:], *pair.key2) + if err != nil { + return 0, err + } + writtenTotal += written + } + return writtenTotal, nil +} + +func (p pairKeyCodec[K1, K2]) Decode(buffer []byte) (int, Pair[K1, K2], error) { + readTotal := 0 + read, key1, err := p.keyCodec1.DecodeNonTerminal(buffer) + if err != nil { + return 0, Pair[K1, K2]{}, err + } + readTotal += read + read, key2, err := p.keyCodec2.Decode(buffer[read:]) + if err != nil { + return 0, Pair[K1, K2]{}, err + } + + readTotal += read + return readTotal, Join(key1, key2), nil +} + +func (p pairKeyCodec[K1, K2]) Size(key Pair[K1, K2]) int { + size := 0 + if key.key1 != nil { + size += p.keyCodec1.SizeNonTerminal(*key.key1) + } + if key.key2 != nil { + size += p.keyCodec2.Size(*key.key2) + } + return size +} + +func (p pairKeyCodec[K1, K2]) Stringify(key Pair[K1, K2]) string { + b := new(strings.Builder) + b.WriteByte('(') + if key.key1 != nil { + b.WriteByte('"') + b.WriteString(p.keyCodec1.Stringify(*key.key1)) + b.WriteByte('"') + } else { + b.WriteString("") + } + b.WriteString(", ") + if key.key2 != nil { + b.WriteByte('"') + b.WriteString(p.keyCodec2.Stringify(*key.key2)) + b.WriteByte('"') + } else { + b.WriteString("") + } + b.WriteByte(')') + return b.String() +} + +func (p pairKeyCodec[K1, K2]) KeyType() string { + return fmt.Sprintf("Pair[%s, %s]", p.keyCodec1.KeyType(), p.keyCodec2.KeyType()) +} + +func (p pairKeyCodec[K1, K2]) EncodeNonTerminal(buffer []byte, pair Pair[K1, K2]) (int, error) { + writtenTotal := 0 + if pair.key1 != nil { + written, err := p.keyCodec1.EncodeNonTerminal(buffer, *pair.key1) + if err != nil { + return 0, err + } + writtenTotal += written + } + if pair.key2 != nil { + written, err := p.keyCodec2.EncodeNonTerminal(buffer[writtenTotal:], *pair.key2) + if err != nil { + return 0, err + } + writtenTotal += written + } + return writtenTotal, nil +} + +func (p pairKeyCodec[K1, K2]) DecodeNonTerminal(buffer []byte) (int, Pair[K1, K2], error) { + readTotal := 0 + read, key1, err := p.keyCodec1.DecodeNonTerminal(buffer) + if err != nil { + return 0, Pair[K1, K2]{}, err + } + readTotal += read + read, key2, err := p.keyCodec2.DecodeNonTerminal(buffer[read:]) + if err != nil { + return 0, Pair[K1, K2]{}, err + } + + readTotal += read + return readTotal, Join(key1, key2), nil +} + +func (p pairKeyCodec[K1, K2]) SizeNonTerminal(key Pair[K1, K2]) int { + size := 0 + if key.key1 != nil { + size += p.keyCodec1.SizeNonTerminal(*key.key1) + } + if key.key2 != nil { + size += p.keyCodec2.SizeNonTerminal(*key.key2) + } + return size +} + +// GENESIS + +type jsonPairKey [2]json.RawMessage + +func (p 
pairKeyCodec[K1, K2]) EncodeJSON(v Pair[K1, K2]) ([]byte, error) { + k1Json, err := p.keyCodec1.EncodeJSON(v.K1()) + if err != nil { + return nil, err + } + k2Json, err := p.keyCodec2.EncodeJSON(v.K2()) + if err != nil { + return nil, err + } + return json.Marshal(jsonPairKey{k1Json, k2Json}) +} + +func (p pairKeyCodec[K1, K2]) DecodeJSON(b []byte) (Pair[K1, K2], error) { + pairJSON := jsonPairKey{} + err := json.Unmarshal(b, &pairJSON) + if err != nil { + return Pair[K1, K2]{}, err + } + + k1, err := p.keyCodec1.DecodeJSON(pairJSON[0]) + if err != nil { + return Pair[K1, K2]{}, err + } + k2, err := p.keyCodec2.DecodeJSON(pairJSON[1]) + if err != nil { + return Pair[K1, K2]{}, err + } + + return Join(k1, k2), nil +} + +func (p pairKeyCodec[K1, K2]) Name() string { + return fmt.Sprintf("%s,%s", p.key1Name, p.key2Name) +} + +func (p pairKeyCodec[K1, K2]) SchemaCodec() (codec.SchemaCodec[Pair[K1, K2]], error) { + field1, err := getNamedKeyField(p.keyCodec1, p.key1Name) + if err != nil { + return codec.SchemaCodec[Pair[K1, K2]]{}, fmt.Errorf("error getting key1 field: %w", err) + } + + field2, err := getNamedKeyField(p.keyCodec2, p.key2Name) + if err != nil { + return codec.SchemaCodec[Pair[K1, K2]]{}, fmt.Errorf("error getting key2 field: %w", err) + } + + return codec.SchemaCodec[Pair[K1, K2]]{ + Fields: []schema.Field{field1, field2}, + ToSchemaType: func(pair Pair[K1, K2]) (any, error) { + return []interface{}{pair.K1(), pair.K2()}, nil + }, + FromSchemaType: func(a any) (Pair[K1, K2], error) { + aSlice, ok := a.([]interface{}) + if !ok || len(aSlice) != 2 { + return Pair[K1, K2]{}, fmt.Errorf("expected slice of length 2, got %T", a) + } + return Join(aSlice[0].(K1), aSlice[1].(K2)), nil + }, + }, nil +} + +func getNamedKeyField[T any](keyCdc codec.KeyCodec[T], name string) (schema.Field, error) { + keySchema, err := codec.KeySchemaCodec(keyCdc) + if err != nil { + return schema.Field{}, err + } + if len(keySchema.Fields) != 1 { + return schema.Field{}, fmt.Errorf("key schema in composite key has more than one field, got %v", keySchema.Fields) + } + field := keySchema.Fields[0] + field.Name = name + return field, nil +} + +// NewPrefixUntilPairRange defines a collection query which ranges until the provided Pair prefix. +// Unstable: this API might change in the future. +func NewPrefixUntilPairRange[K1, K2 any](prefix K1) *PairRange[K1, K2] { + return &PairRange[K1, K2]{end: RangeKeyPrefixEnd(PairPrefix[K1, K2](prefix))} +} + +// NewPrefixedPairRange creates a new PairRange which will prefix over all the keys +// starting with the provided prefix. +func NewPrefixedPairRange[K1, K2 any](prefix K1) *PairRange[K1, K2] { + return &PairRange[K1, K2]{ + start: RangeKeyExact(PairPrefix[K1, K2](prefix)), + end: RangeKeyPrefixEnd(PairPrefix[K1, K2](prefix)), + } +} + +// PairRange is an API that facilitates working with Pair iteration. +// It implements the Ranger API. +// Unstable: API and methods are currently unstable. 
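+// Example (a sketch): range over every pair whose first part is "owner1" and
+// whose second part falls in [10, 20), in descending order:
+//
+//	rng := collections.NewPrefixedPairRange[string, uint64]("owner1").
+//		StartInclusive(10).
+//		EndExclusive(20).
+//		Descending()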
+type PairRange[K1, K2 any] struct { + start *RangeKey[Pair[K1, K2]] + end *RangeKey[Pair[K1, K2]] + order Order + + err error +} + +func (p *PairRange[K1, K2]) StartInclusive(k2 K2) *PairRange[K1, K2] { + p.start = RangeKeyExact(Join(*p.start.key.key1, k2)) + return p +} + +func (p *PairRange[K1, K2]) StartExclusive(k2 K2) *PairRange[K1, K2] { + p.start = RangeKeyNext(Join(*p.start.key.key1, k2)) + return p +} + +func (p *PairRange[K1, K2]) EndInclusive(k2 K2) *PairRange[K1, K2] { + p.end = RangeKeyNext(Join(*p.end.key.key1, k2)) + return p +} + +func (p *PairRange[K1, K2]) EndExclusive(k2 K2) *PairRange[K1, K2] { + p.end = RangeKeyExact(Join(*p.end.key.key1, k2)) + return p +} + +func (p *PairRange[K1, K2]) Descending() *PairRange[K1, K2] { + p.order = OrderDescending + return p +} + +func (p *PairRange[K1, K2]) RangeValues() (start, end *RangeKey[Pair[K1, K2]], order Order, err error) { + if p.err != nil { + return nil, nil, 0, err + } + return p.start, p.end, p.order, nil +} diff --git a/collections/quad.go b/collections/quad.go new file mode 100644 index 000000000000..48b98a526538 --- /dev/null +++ b/collections/quad.go @@ -0,0 +1,435 @@ +package collections + +import ( + "encoding/json" + "fmt" + "strings" + + "cosmossdk.io/collections/codec" + "cosmossdk.io/schema" +) + +// Quad defines a multipart key composed of four keys. +type Quad[K1, K2, K3, K4 any] struct { + k1 *K1 + k2 *K2 + k3 *K3 + k4 *K4 +} + +// Join4 instantiates a new Quad instance composed of the four provided keys, in order. +func Join4[K1, K2, K3, K4 any](k1 K1, k2 K2, k3 K3, k4 K4) Quad[K1, K2, K3, K4] { + return Quad[K1, K2, K3, K4]{&k1, &k2, &k3, &k4} +} + +// K1 returns the first part of the key. If nil, the zero value is returned. +func (t Quad[K1, K2, K3, K4]) K1() (x K1) { + if t.k1 != nil { + return *t.k1 + } + return x +} + +// K2 returns the second part of the key. If nil, the zero value is returned. +func (t Quad[K1, K2, K3, K4]) K2() (x K2) { + if t.k2 != nil { + return *t.k2 + } + return x +} + +// K3 returns the third part of the key. If nil, the zero value is returned. +func (t Quad[K1, K2, K3, K4]) K3() (x K3) { + if t.k3 != nil { + return *t.k3 + } + return x +} + +// K4 returns the fourth part of the key. If nil, the zero value is returned. +func (t Quad[K1, K2, K3, K4]) K4() (x K4) { + if t.k4 != nil { + return *t.k4 + } + return x +} + +// QuadPrefix creates a new Quad instance composed only of the first part of the key. +func QuadPrefix[K1, K2, K3, K4 any](k1 K1) Quad[K1, K2, K3, K4] { + return Quad[K1, K2, K3, K4]{k1: &k1} +} + +// QuadSuperPrefix creates a new Quad instance composed only of the first two parts of the key. +func QuadSuperPrefix[K1, K2, K3, K4 any](k1 K1, k2 K2) Quad[K1, K2, K3, K4] { + return Quad[K1, K2, K3, K4]{k1: &k1, k2: &k2} +} + +// QuadSuperPrefix3 creates a new Quad instance composed only of the first three parts of the key. +func QuadSuperPrefix3[K1, K2, K3, K4 any](k1 K1, k2 K2, k3 K3) Quad[K1, K2, K3, K4] { + return Quad[K1, K2, K3, K4]{k1: &k1, k2: &k2, k3: &k3} +} + +// QuadKeyCodec instantiates a new KeyCodec instance that can encode the Quad, given +// the KeyCodecs of the four parts of the key, in order. 
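+// Example (a sketch; the primitive key codecs are assumed to come from the
+// collections package):
+//
+//	kc := collections.QuadKeyCodec(collections.StringKey, collections.StringKey, collections.Uint64Key, collections.BytesKey)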
+func QuadKeyCodec[K1, K2, K3, K4 any](keyCodec1 codec.KeyCodec[K1], keyCodec2 codec.KeyCodec[K2], keyCodec3 codec.KeyCodec[K3], keyCodec4 codec.KeyCodec[K4]) codec.KeyCodec[Quad[K1, K2, K3, K4]] { + return quadKeyCodec[K1, K2, K3, K4]{ + keyCodec1: keyCodec1, + keyCodec2: keyCodec2, + keyCodec3: keyCodec3, + keyCodec4: keyCodec4, + } +} + +// NamedQuadKeyCodec instantiates a new KeyCodec instance that can encode the Quad, given +// the KeyCodecs of the four parts of the key, in order. +// The provided names are used to identify the parts of the key in the schema for indexing. +func NamedQuadKeyCodec[K1, K2, K3, K4 any](key1Name string, keyCodec1 codec.KeyCodec[K1], key2Name string, keyCodec2 codec.KeyCodec[K2], key3Name string, keyCodec3 codec.KeyCodec[K3], key4Name string, keyCodec4 codec.KeyCodec[K4]) codec.KeyCodec[Quad[K1, K2, K3, K4]] { + return quadKeyCodec[K1, K2, K3, K4]{ + name1: key1Name, + keyCodec1: keyCodec1, + name2: key2Name, + keyCodec2: keyCodec2, + name3: key3Name, + keyCodec3: keyCodec3, + name4: key4Name, + keyCodec4: keyCodec4, + } +} + +type quadKeyCodec[K1, K2, K3, K4 any] struct { + name1, name2, name3, name4 string + keyCodec1 codec.KeyCodec[K1] + keyCodec2 codec.KeyCodec[K2] + keyCodec3 codec.KeyCodec[K3] + keyCodec4 codec.KeyCodec[K4] +} + +type jsonQuadKey [4]json.RawMessage + +// EncodeJSON encodes Quads to json +func (t quadKeyCodec[K1, K2, K3, K4]) EncodeJSON(value Quad[K1, K2, K3, K4]) ([]byte, error) { + json1, err := t.keyCodec1.EncodeJSON(*value.k1) + if err != nil { + return nil, err + } + + json2, err := t.keyCodec2.EncodeJSON(*value.k2) + if err != nil { + return nil, err + } + + json3, err := t.keyCodec3.EncodeJSON(*value.k3) + if err != nil { + return nil, err + } + + json4, err := t.keyCodec4.EncodeJSON(*value.k4) + if err != nil { + return nil, err + } + + return json.Marshal(jsonQuadKey{json1, json2, json3, json4}) +} + +// DecodeJSON decodes json to Quads +func (t quadKeyCodec[K1, K2, K3, K4]) DecodeJSON(b []byte) (Quad[K1, K2, K3, K4], error) { + var jsonKey jsonQuadKey + err := json.Unmarshal(b, &jsonKey) + if err != nil { + return Quad[K1, K2, K3, K4]{}, err + } + + key1, err := t.keyCodec1.DecodeJSON(jsonKey[0]) + if err != nil { + return Quad[K1, K2, K3, K4]{}, err + } + + key2, err := t.keyCodec2.DecodeJSON(jsonKey[1]) + if err != nil { + return Quad[K1, K2, K3, K4]{}, err + } + + key3, err := t.keyCodec3.DecodeJSON(jsonKey[2]) + if err != nil { + return Quad[K1, K2, K3, K4]{}, err + } + + key4, err := t.keyCodec4.DecodeJSON(jsonKey[3]) + if err != nil { + return Quad[K1, K2, K3, K4]{}, err + } + + return Join4(key1, key2, key3, key4), nil +} + +// Stringify converts Quads to string +func (t quadKeyCodec[K1, K2, K3, K4]) Stringify(key Quad[K1, K2, K3, K4]) string { + b := new(strings.Builder) + b.WriteByte('(') + if key.k1 != nil { + b.WriteByte('"') + b.WriteString(t.keyCodec1.Stringify(*key.k1)) + b.WriteByte('"') + } else { + b.WriteString("") + } + + b.WriteString(", ") + if key.k2 != nil { + b.WriteByte('"') + b.WriteString(t.keyCodec2.Stringify(*key.k2)) + b.WriteByte('"') + } else { + b.WriteString("") + } + + b.WriteString(", ") + if key.k3 != nil { + b.WriteByte('"') + b.WriteString(t.keyCodec3.Stringify(*key.k3)) + b.WriteByte('"') + } else { + b.WriteString("") + } + + b.WriteString(", ") + if key.k4 != nil { + b.WriteByte('"') + b.WriteString(t.keyCodec4.Stringify(*key.k4)) + b.WriteByte('"') + } else { + b.WriteString("") + } + + b.WriteByte(')') + return b.String() +} + +func (t quadKeyCodec[K1, K2, K3, K4]) KeyType() string 
{ + return fmt.Sprintf("Quad[%s,%s,%s,%s]", t.keyCodec1.KeyType(), t.keyCodec2.KeyType(), t.keyCodec3.KeyType(), t.keyCodec4.KeyType()) +} + +func (t quadKeyCodec[K1, K2, K3, K4]) Encode(buffer []byte, key Quad[K1, K2, K3, K4]) (int, error) { + writtenTotal := 0 + if key.k1 != nil { + written, err := t.keyCodec1.EncodeNonTerminal(buffer, *key.k1) + if err != nil { + return 0, err + } + writtenTotal += written + } + if key.k2 != nil { + written, err := t.keyCodec2.EncodeNonTerminal(buffer[writtenTotal:], *key.k2) + if err != nil { + return 0, err + } + writtenTotal += written + } + if key.k3 != nil { + written, err := t.keyCodec3.EncodeNonTerminal(buffer[writtenTotal:], *key.k3) + if err != nil { + return 0, err + } + writtenTotal += written + } + if key.k4 != nil { + written, err := t.keyCodec4.Encode(buffer[writtenTotal:], *key.k4) + if err != nil { + return 0, err + } + writtenTotal += written + } + return writtenTotal, nil +} + +func (t quadKeyCodec[K1, K2, K3, K4]) Decode(buffer []byte) (int, Quad[K1, K2, K3, K4], error) { + readTotal := 0 + read, key1, err := t.keyCodec1.DecodeNonTerminal(buffer) + if err != nil { + return 0, Quad[K1, K2, K3, K4]{}, err + } + readTotal += read + read, key2, err := t.keyCodec2.DecodeNonTerminal(buffer[readTotal:]) + if err != nil { + return 0, Quad[K1, K2, K3, K4]{}, err + } + readTotal += read + read, key3, err := t.keyCodec3.DecodeNonTerminal(buffer[readTotal:]) + if err != nil { + return 0, Quad[K1, K2, K3, K4]{}, err + } + readTotal += read + read, key4, err := t.keyCodec4.Decode(buffer[readTotal:]) + if err != nil { + return 0, Quad[K1, K2, K3, K4]{}, err + } + readTotal += read + return readTotal, Join4(key1, key2, key3, key4), nil +} + +func (t quadKeyCodec[K1, K2, K3, K4]) Size(key Quad[K1, K2, K3, K4]) int { + size := 0 + if key.k1 != nil { + size += t.keyCodec1.SizeNonTerminal(*key.k1) + } + if key.k2 != nil { + size += t.keyCodec2.SizeNonTerminal(*key.k2) + } + if key.k3 != nil { + size += t.keyCodec3.SizeNonTerminal(*key.k3) + } + if key.k4 != nil { + size += t.keyCodec4.Size(*key.k4) + } + return size +} + +func (t quadKeyCodec[K1, K2, K3, K4]) EncodeNonTerminal(buffer []byte, key Quad[K1, K2, K3, K4]) (int, error) { + writtenTotal := 0 + if key.k1 != nil { + written, err := t.keyCodec1.EncodeNonTerminal(buffer, *key.k1) + if err != nil { + return 0, err + } + writtenTotal += written + } + if key.k2 != nil { + written, err := t.keyCodec2.EncodeNonTerminal(buffer[writtenTotal:], *key.k2) + if err != nil { + return 0, err + } + writtenTotal += written + } + if key.k3 != nil { + written, err := t.keyCodec3.EncodeNonTerminal(buffer[writtenTotal:], *key.k3) + if err != nil { + return 0, err + } + writtenTotal += written + } + if key.k4 != nil { + written, err := t.keyCodec4.EncodeNonTerminal(buffer[writtenTotal:], *key.k4) + if err != nil { + return 0, err + } + writtenTotal += written + } + return writtenTotal, nil +} + +func (t quadKeyCodec[K1, K2, K3, K4]) DecodeNonTerminal(buffer []byte) (int, Quad[K1, K2, K3, K4], error) { + readTotal := 0 + read, key1, err := t.keyCodec1.DecodeNonTerminal(buffer) + if err != nil { + return 0, Quad[K1, K2, K3, K4]{}, err + } + readTotal += read + read, key2, err := t.keyCodec2.DecodeNonTerminal(buffer[readTotal:]) + if err != nil { + return 0, Quad[K1, K2, K3, K4]{}, err + } + readTotal += read + read, key3, err := t.keyCodec3.DecodeNonTerminal(buffer[readTotal:]) + if err != nil { + return 0, Quad[K1, K2, K3, K4]{}, err + } + readTotal += read + read, key4, err := 
t.keyCodec4.DecodeNonTerminal(buffer[readTotal:]) + if err != nil { + return 0, Quad[K1, K2, K3, K4]{}, err + } + readTotal += read + return readTotal, Join4(key1, key2, key3, key4), nil +} + +func (t quadKeyCodec[K1, K2, K3, K4]) SizeNonTerminal(key Quad[K1, K2, K3, K4]) int { + size := 0 + if key.k1 != nil { + size += t.keyCodec1.SizeNonTerminal(*key.k1) + } + if key.k2 != nil { + size += t.keyCodec2.SizeNonTerminal(*key.k2) + } + if key.k3 != nil { + size += t.keyCodec3.SizeNonTerminal(*key.k3) + } + if key.k4 != nil { + size += t.keyCodec4.SizeNonTerminal(*key.k4) + } + return size +} + +func (t quadKeyCodec[K1, K2, K3, K4]) SchemaCodec() (codec.SchemaCodec[Quad[K1, K2, K3, K4]], error) { + field1, err := getNamedKeyField(t.keyCodec1, t.name1) + if err != nil { + return codec.SchemaCodec[Quad[K1, K2, K3, K4]]{}, fmt.Errorf("error getting key1 field: %w", err) + } + + field2, err := getNamedKeyField(t.keyCodec2, t.name2) + if err != nil { + return codec.SchemaCodec[Quad[K1, K2, K3, K4]]{}, fmt.Errorf("error getting key2 field: %w", err) + } + + field3, err := getNamedKeyField(t.keyCodec3, t.name3) + if err != nil { + return codec.SchemaCodec[Quad[K1, K2, K3, K4]]{}, fmt.Errorf("error getting key3 field: %w", err) + } + + field4, err := getNamedKeyField(t.keyCodec4, t.name4) + if err != nil { + return codec.SchemaCodec[Quad[K1, K2, K3, K4]]{}, fmt.Errorf("error getting key4 field: %w", err) + } + + return codec.SchemaCodec[Quad[K1, K2, K3, K4]]{ + Fields: []schema.Field{field1, field2, field3, field4}, + ToSchemaType: func(q Quad[K1, K2, K3, K4]) (any, error) { + return []interface{}{q.K1(), q.K2(), q.K3(), q.K4()}, nil + }, + FromSchemaType: func(a any) (Quad[K1, K2, K3, K4], error) { + aSlice, ok := a.([]interface{}) + if !ok || len(aSlice) != 4 { + return Quad[K1, K2, K3, K4]{}, fmt.Errorf("expected slice of length 4, got %T", a) + } + return Join4(aSlice[0].(K1), aSlice[1].(K2), aSlice[2].(K3), aSlice[3].(K4)), nil + }, + }, nil +} + +// NewPrefixUntilQuadRange defines a collection query which ranges until the provided Quad prefix. +// Unstable: this API might change in the future. +func NewPrefixUntilQuadRange[K1, K2, K3, K4 any](k1 K1) Ranger[Quad[K1, K2, K3, K4]] { + key := QuadPrefix[K1, K2, K3, K4](k1) + return &Range[Quad[K1, K2, K3, K4]]{ + end: RangeKeyPrefixEnd(key), + } +} + +// NewPrefixedQuadRange provides a Range for all keys prefixed with the given +// first part of the Quad key. +func NewPrefixedQuadRange[K1, K2, K3, K4 any](k1 K1) Ranger[Quad[K1, K2, K3, K4]] { + key := QuadPrefix[K1, K2, K3, K4](k1) + return &Range[Quad[K1, K2, K3, K4]]{ + start: RangeKeyExact(key), + end: RangeKeyPrefixEnd(key), + } +} + +// NewSuperPrefixedQuadRange provides a Range for all keys prefixed with the given +// first and second parts of the Quad key. +func NewSuperPrefixedQuadRange[K1, K2, K3, K4 any](k1 K1, k2 K2) Ranger[Quad[K1, K2, K3, K4]] { + key := QuadSuperPrefix[K1, K2, K3, K4](k1, k2) + return &Range[Quad[K1, K2, K3, K4]]{ + start: RangeKeyExact(key), + end: RangeKeyPrefixEnd(key), + } +} + +// NewSuperPrefixedQuadRange3 provides a Range for all keys prefixed with the given +// first, second and third parts of the Quad key. 
+func NewSuperPrefixedQuadRange3[K1, K2, K3, K4 any](k1 K1, k2 K2, k3 K3) Ranger[Quad[K1, K2, K3, K4]] { + key := QuadSuperPrefix3[K1, K2, K3, K4](k1, k2, k3) + return &Range[Quad[K1, K2, K3, K4]]{ + start: RangeKeyExact(key), + end: RangeKeyPrefixEnd(key), + } +} diff --git a/collections/triple.go b/collections/triple.go new file mode 100644 index 000000000000..d7dcb36056be --- /dev/null +++ b/collections/triple.go @@ -0,0 +1,354 @@ +package collections + +import ( + "encoding/json" + "fmt" + "strings" + + "cosmossdk.io/collections/codec" + "cosmossdk.io/schema" +) + +// Triple defines a multipart key composed of three keys. +type Triple[K1, K2, K3 any] struct { + k1 *K1 + k2 *K2 + k3 *K3 +} + +// Join3 instantiates a new Triple instance composed of the three provided keys, in order. +func Join3[K1, K2, K3 any](k1 K1, k2 K2, k3 K3) Triple[K1, K2, K3] { + return Triple[K1, K2, K3]{&k1, &k2, &k3} +} + +// K1 returns the first part of the key. If nil, the zero value is returned. +func (t Triple[K1, K2, K3]) K1() (x K1) { + if t.k1 != nil { + return *t.k1 + } + return x +} + +// K2 returns the second part of the key. If nil, the zero value is returned. +func (t Triple[K1, K2, K3]) K2() (x K2) { + if t.k2 != nil { + return *t.k2 + } + return x +} + +// K3 returns the third part of the key. If nil, the zero value is returned. +func (t Triple[K1, K2, K3]) K3() (x K3) { + if t.k3 != nil { + return *t.k3 + } + return x +} + +// TriplePrefix creates a new Triple instance composed only of the first part of the key. +func TriplePrefix[K1, K2, K3 any](k1 K1) Triple[K1, K2, K3] { + return Triple[K1, K2, K3]{k1: &k1} +} + +// TripleSuperPrefix creates a new Triple instance composed only of the first two parts of the key. +func TripleSuperPrefix[K1, K2, K3 any](k1 K1, k2 K2) Triple[K1, K2, K3] { + return Triple[K1, K2, K3]{k1: &k1, k2: &k2} +} + +// TripleKeyCodec instantiates a new KeyCodec instance that can encode the Triple, given +// the KeyCodecs of the three parts of the key, in order. 
+func TripleKeyCodec[K1, K2, K3 any](keyCodec1 codec.KeyCodec[K1], keyCodec2 codec.KeyCodec[K2], keyCodec3 codec.KeyCodec[K3]) codec.KeyCodec[Triple[K1, K2, K3]] { + return tripleKeyCodec[K1, K2, K3]{ + keyCodec1: keyCodec1, + keyCodec2: keyCodec2, + keyCodec3: keyCodec3, + } +} + +// NamedTripleKeyCodec instantiates a new KeyCodec for Triple, like TripleKeyCodec, but also associates a name with each part of the key; these names are used as schema field names when the collection is indexed. +func NamedTripleKeyCodec[K1, K2, K3 any](key1Name string, keyCodec1 codec.KeyCodec[K1], key2Name string, keyCodec2 codec.KeyCodec[K2], key3Name string, keyCodec3 codec.KeyCodec[K3]) codec.KeyCodec[Triple[K1, K2, K3]] { + return tripleKeyCodec[K1, K2, K3]{ + key1Name: key1Name, + key2Name: key2Name, + key3Name: key3Name, + keyCodec1: keyCodec1, + keyCodec2: keyCodec2, + keyCodec3: keyCodec3, + } +} + +type tripleKeyCodec[K1, K2, K3 any] struct { + key1Name, key2Name, key3Name string + keyCodec1 codec.KeyCodec[K1] + keyCodec2 codec.KeyCodec[K2] + keyCodec3 codec.KeyCodec[K3] +} + +type jsonTripleKey [3]json.RawMessage + +// EncodeJSON converts triple keys to JSON +func (t tripleKeyCodec[K1, K2, K3]) EncodeJSON(value Triple[K1, K2, K3]) ([]byte, error) { + json1, err := t.keyCodec1.EncodeJSON(*value.k1) + if err != nil { + return nil, err + } + + json2, err := t.keyCodec2.EncodeJSON(*value.k2) + if err != nil { + return nil, err + } + + json3, err := t.keyCodec3.EncodeJSON(*value.k3) + if err != nil { + return nil, err + } + + return json.Marshal(jsonTripleKey{json1, json2, json3}) +} + +// DecodeJSON converts JSON to triple keys +func (t tripleKeyCodec[K1, K2, K3]) DecodeJSON(b []byte) (Triple[K1, K2, K3], error) { + var jsonKey jsonTripleKey + err := json.Unmarshal(b, &jsonKey) + if err != nil { + return Triple[K1, K2, K3]{}, err + } + + key1, err := t.keyCodec1.DecodeJSON(jsonKey[0]) + if err != nil { + return Triple[K1, K2, K3]{}, err + } + + key2, err := t.keyCodec2.DecodeJSON(jsonKey[1]) + if err != nil { + return Triple[K1, K2, K3]{}, err + } + + key3, err := t.keyCodec3.DecodeJSON(jsonKey[2]) + if err != nil { + return Triple[K1, K2, K3]{}, err + } + + return Join3(key1, key2, key3), nil +} + +// Stringify converts triple keys to a string; nil parts are rendered as "<nil>" +func (t tripleKeyCodec[K1, K2, K3]) Stringify(key Triple[K1, K2, K3]) string { + b := new(strings.Builder) + b.WriteByte('(') + if key.k1 != nil { + b.WriteByte('"') + b.WriteString(t.keyCodec1.Stringify(*key.k1)) + b.WriteByte('"') + } else { + b.WriteString("<nil>") + } + + b.WriteString(", ") + if key.k2 != nil { + b.WriteByte('"') + b.WriteString(t.keyCodec2.Stringify(*key.k2)) + b.WriteByte('"') + } else { + b.WriteString("<nil>") + } + + b.WriteString(", ") + if key.k3 != nil { + b.WriteByte('"') + b.WriteString(t.keyCodec3.Stringify(*key.k3)) + b.WriteByte('"') + } else { + b.WriteString("<nil>") + } + + b.WriteByte(')') + return b.String() +} + +func (t tripleKeyCodec[K1, K2, K3]) KeyType() string { + return fmt.Sprintf("Triple[%s,%s,%s]", t.keyCodec1.KeyType(), t.keyCodec2.KeyType(), t.keyCodec3.KeyType()) +} + +func (t tripleKeyCodec[K1, K2, K3]) Encode(buffer []byte, key Triple[K1, K2, K3]) (int, error) { + writtenTotal := 0 + if key.k1 != nil { + written, err := t.keyCodec1.EncodeNonTerminal(buffer, *key.k1) + if err != nil { + return 0, err + } + writtenTotal += written + } + if key.k2 != nil { + written, err := t.keyCodec2.EncodeNonTerminal(buffer[writtenTotal:], *key.k2) + if err != nil { + return 0, err + } + writtenTotal += written + } + if key.k3 != nil { + written, err := t.keyCodec3.Encode(buffer[writtenTotal:], *key.k3) + if err != nil { + return 0, err + } + writtenTotal += written + } + return writtenTotal, nil +} + +func (t tripleKeyCodec[K1, K2, K3]) Decode(buffer 
[]byte) (int, Triple[K1, K2, K3], error) { + readTotal := 0 + read, key1, err := t.keyCodec1.DecodeNonTerminal(buffer) + if err != nil { + return 0, Triple[K1, K2, K3]{}, err + } + readTotal += read + read, key2, err := t.keyCodec2.DecodeNonTerminal(buffer[readTotal:]) + if err != nil { + return 0, Triple[K1, K2, K3]{}, err + } + readTotal += read + read, key3, err := t.keyCodec3.Decode(buffer[readTotal:]) + if err != nil { + return 0, Triple[K1, K2, K3]{}, err + } + readTotal += read + return readTotal, Join3(key1, key2, key3), nil +} + +func (t tripleKeyCodec[K1, K2, K3]) Size(key Triple[K1, K2, K3]) int { + size := 0 + if key.k1 != nil { + size += t.keyCodec1.SizeNonTerminal(*key.k1) + } + if key.k2 != nil { + size += t.keyCodec2.SizeNonTerminal(*key.k2) + } + if key.k3 != nil { + size += t.keyCodec3.Size(*key.k3) + } + return size +} + +func (t tripleKeyCodec[K1, K2, K3]) EncodeNonTerminal(buffer []byte, key Triple[K1, K2, K3]) (int, error) { + writtenTotal := 0 + if key.k1 != nil { + written, err := t.keyCodec1.EncodeNonTerminal(buffer, *key.k1) + if err != nil { + return 0, err + } + writtenTotal += written + } + if key.k2 != nil { + written, err := t.keyCodec2.EncodeNonTerminal(buffer[writtenTotal:], *key.k2) + if err != nil { + return 0, err + } + writtenTotal += written + } + if key.k3 != nil { + written, err := t.keyCodec3.EncodeNonTerminal(buffer[writtenTotal:], *key.k3) + if err != nil { + return 0, err + } + writtenTotal += written + } + return writtenTotal, nil +} + +func (t tripleKeyCodec[K1, K2, K3]) DecodeNonTerminal(buffer []byte) (int, Triple[K1, K2, K3], error) { + readTotal := 0 + read, key1, err := t.keyCodec1.DecodeNonTerminal(buffer) + if err != nil { + return 0, Triple[K1, K2, K3]{}, err + } + readTotal += read + read, key2, err := t.keyCodec2.DecodeNonTerminal(buffer[readTotal:]) + if err != nil { + return 0, Triple[K1, K2, K3]{}, err + } + readTotal += read + read, key3, err := t.keyCodec3.DecodeNonTerminal(buffer[readTotal:]) + if err != nil { + return 0, Triple[K1, K2, K3]{}, err + } + readTotal += read + return readTotal, Join3(key1, key2, key3), nil +} + +func (t tripleKeyCodec[K1, K2, K3]) SizeNonTerminal(key Triple[K1, K2, K3]) int { + size := 0 + if key.k1 != nil { + size += t.keyCodec1.SizeNonTerminal(*key.k1) + } + if key.k2 != nil { + size += t.keyCodec2.SizeNonTerminal(*key.k2) + } + if key.k3 != nil { + size += t.keyCodec3.SizeNonTerminal(*key.k3) + } + return size +} + +func (t tripleKeyCodec[K1, K2, K3]) Name() string { + return fmt.Sprintf("%s,%s,%s", t.key1Name, t.key2Name, t.key3Name) +} + +func (t tripleKeyCodec[K1, K2, K3]) SchemaCodec() (codec.SchemaCodec[Triple[K1, K2, K3]], error) { + field1, err := getNamedKeyField(t.keyCodec1, t.key1Name) + if err != nil { + return codec.SchemaCodec[Triple[K1, K2, K3]]{}, fmt.Errorf("error getting key1 field: %w", err) + } + + field2, err := getNamedKeyField(t.keyCodec2, t.key2Name) + if err != nil { + return codec.SchemaCodec[Triple[K1, K2, K3]]{}, fmt.Errorf("error getting key2 field: %w", err) + } + + field3, err := getNamedKeyField(t.keyCodec3, t.key3Name) + if err != nil { + return codec.SchemaCodec[Triple[K1, K2, K3]]{}, fmt.Errorf("error getting key3 field: %w", err) + } + + return codec.SchemaCodec[Triple[K1, K2, K3]]{ + Fields: []schema.Field{field1, field2, field3}, + ToSchemaType: func(t Triple[K1, K2, K3]) (any, error) { + return []interface{}{t.K1(), t.K2(), t.K3()}, nil + }, + FromSchemaType: func(a any) (Triple[K1, K2, K3], error) { + aSlice, ok := a.([]interface{}) + if !ok || len(aSlice) 
!= 3 { + return Triple[K1, K2, K3]{}, fmt.Errorf("expected slice of length 3, got %T", a) + } + return Join3(aSlice[0].(K1), aSlice[1].(K2), aSlice[2].(K3)), nil + }, + }, nil +} + +// NewPrefixUntilTripleRange defines a collection query which ranges until the provided Triple prefix. +// Unstable: this API might change in the future. +func NewPrefixUntilTripleRange[K1, K2, K3 any](k1 K1) Ranger[Triple[K1, K2, K3]] { + key := TriplePrefix[K1, K2, K3](k1) + return &Range[Triple[K1, K2, K3]]{ + end: RangeKeyPrefixEnd(key), + } +} + +// NewPrefixedTripleRange provides a Range for all keys prefixed with the given +// first part of the Triple key. +func NewPrefixedTripleRange[K1, K2, K3 any](k1 K1) Ranger[Triple[K1, K2, K3]] { + key := TriplePrefix[K1, K2, K3](k1) + return &Range[Triple[K1, K2, K3]]{ + start: RangeKeyExact(key), + end: RangeKeyPrefixEnd(key), + } +} + +// NewSuperPrefixedTripleRange provides a Range for all keys prefixed with the given +// first and second parts of the Triple key. +func NewSuperPrefixedTripleRange[K1, K2, K3 any](k1 K1, k2 K2) Ranger[Triple[K1, K2, K3]] { + key := TripleSuperPrefix[K1, K2, K3](k1, k2) + return &Range[Triple[K1, K2, K3]]{ + start: RangeKeyExact(key), + end: RangeKeyPrefixEnd(key), + } +} diff --git a/runtime/builder.go b/runtime/builder.go index 37be02fa769e..f3de4da2dcbf 100644 --- a/runtime/builder.go +++ b/runtime/builder.go @@ -99,7 +99,14 @@ func (a *AppBuilder) registerIndexer() error { if indexerOpts := a.appOptions.Get("indexer"); indexerOpts != nil { moduleSet := map[string]any{} for modName, mod := range a.app.ModuleManager.Modules { - moduleSet[modName] = mod + storeKey := modName + for _, cfg := range a.app.config.OverrideStoreKeys { + if cfg.ModuleName == modName { + storeKey = cfg.KvStoreKey + break + } + } + moduleSet[storeKey] = mod } return a.app.EnableIndexer(indexerOpts, a.kvStoreKeys(), moduleSet) diff --git a/simapp/app_di.go b/simapp/app_di.go index 5a248d70821d..84fc11b063a4 100644 --- a/simapp/app_di.go +++ b/simapp/app_di.go @@ -7,12 +7,15 @@ import ( "fmt" "io" + _ "github.com/jackc/pgx/v5/stdlib" // Import and register pgx driver + clienthelpers "cosmossdk.io/client/v2/helpers" "cosmossdk.io/core/address" "cosmossdk.io/core/appmodule" "cosmossdk.io/core/registry" corestore "cosmossdk.io/core/store" "cosmossdk.io/depinject" + _ "cosmossdk.io/indexer/postgres" // register the postgres indexer "cosmossdk.io/log" "cosmossdk.io/x/accounts" basedepinject "cosmossdk.io/x/accounts/defaults/base/depinject" diff --git a/simapp/go.mod b/simapp/go.mod index 0d026b8b214f..abe231639672 100644 --- a/simapp/go.mod +++ b/simapp/go.mod @@ -9,6 +9,7 @@ require ( cosmossdk.io/core v1.0.0-alpha.5 // main cosmossdk.io/core/testing v0.0.0-20240923163230-04da382a9f29 // main cosmossdk.io/depinject v1.1.0 + cosmossdk.io/indexer/postgres v0.1.0 cosmossdk.io/log v1.4.1 cosmossdk.io/math v1.3.0 cosmossdk.io/store v1.1.1-0.20240909133312-50288938d1b6 // main @@ -50,6 +51,8 @@ require ( google.golang.org/protobuf v1.35.1 ) +require github.com/jackc/pgx/v5 v5.7.1 + require ( buf.build/gen/go/cometbft/cometbft/protocolbuffers/go v1.35.1-20240701160653-fedbb9acfd2f.1 // indirect buf.build/gen/go/cosmos/gogo-proto/protocolbuffers/go v1.35.1-20240130113600-88ef6483f90f.1 // indirect @@ -147,6 +150,9 @@ require ( github.com/huandu/skiplist v1.2.1 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile 
v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/klauspost/compress v1.17.10 // indirect @@ -240,8 +246,10 @@ replace ( // pseudo version lower than the latest tag cosmossdk.io/api => cosmossdk.io/api v0.7.3-0.20240924065902-eb7653cfecdf // main cosmossdk.io/client/v2 => ../client/v2 // pseudo version lower than the latest tag cosmossdk.io/store => cosmossdk.io/store v1.0.0-rc.0.0.20241009154331-597e0fac1173 // main + cosmossdk.io/collections => ../collections + cosmossdk.io/indexer/postgres => ../indexer/postgres cosmossdk.io/tools/confix => ../tools/confix cosmossdk.io/x/accounts => ../x/accounts cosmossdk.io/x/accounts/defaults/base => ../x/accounts/defaults/base diff --git a/simapp/go.sum b/simapp/go.sum index f1a96bb04bc4..978bf7aa6e2d 100644 --- a/simapp/go.sum +++ b/simapp/go.sum @@ -208,6 +208,7 @@ cosmossdk.io/log v1.4.1 h1:wKdjfDRbDyZRuWa8M+9nuvpVYxrEOwbD/CA8hvhU8QM= cosmossdk.io/log v1.4.1/go.mod h1:k08v0Pyq+gCP6phvdI6RCGhLf/r425UT6Rk/m+o74rU= cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= +cosmossdk.io/schema v0.3.0/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= cosmossdk.io/schema v0.3.1-0.20241010135032-192601639cac h1:3joNZZWZ3k7fMsrBDL1ktuQ2xQwYLZOaDhkruadDFmc= cosmossdk.io/schema v0.3.1-0.20241010135032-192601639cac/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= cosmossdk.io/store v1.0.0-rc.0.0.20241009154331-597e0fac1173 h1:MlvTcx2h4zmZZtIDg35B6bovbb5iUAExPmvaPE1Zci4= @@ -608,6 +609,14 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= +github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= diff --git a/tests/go.mod b/tests/go.mod index a2155a20c777..2500d8e90fc4 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -63,6 +63,8 @@ require ( cloud.google.com/go/storage v1.42.0 // indirect cosmossdk.io/client/v2 v2.0.0-20230630094428-02b760776860 // indirect cosmossdk.io/errors v1.0.1 // indirect
+ cosmossdk.io/errors/v2 v2.0.0-20240731132947-df72853b3ca5 // indirect + cosmossdk.io/indexer/postgres v0.1.0 // indirect cosmossdk.io/schema v0.3.1-0.20241010135032-192601639cac // indirect cosmossdk.io/x/circuit v0.0.0-20230613133644-0a778132a60f // indirect cosmossdk.io/x/epochs v0.0.0-20240522060652-a1ae4c3e0337 // indirect @@ -148,6 +153,10 @@ require ( github.com/huandu/skiplist v1.2.1 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/pgx/v5 v5.7.1 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/klauspost/compress v1.17.10 // indirect @@ -234,8 +243,14 @@ replace ( // pseudo version lower than the latest tag cosmossdk.io/api => cosmossdk.io/api v0.7.3-0.20240924065902-eb7653cfecdf // main cosmossdk.io/client/v2 => ../client/v2 // pseudo version lower than the latest tag cosmossdk.io/store => cosmossdk.io/store v1.0.0-rc.0.0.20240913190136-3bc707a5a214 // main + cosmossdk.io/collections => ../collections + cosmossdk.io/indexer/postgres => ../indexer/postgres + cosmossdk.io/runtime/v2 => ../runtime/v2 + cosmossdk.io/server/v2/appmanager => ../server/v2/appmanager + cosmossdk.io/server/v2/stf => ../server/v2/stf + cosmossdk.io/store/v2 => ../store/v2 cosmossdk.io/x/accounts => ../x/accounts cosmossdk.io/x/accounts/defaults/base => ../x/accounts/defaults/base cosmossdk.io/x/accounts/defaults/lockup => ../x/accounts/defaults/lockup diff --git a/tests/go.sum b/tests/go.sum index be06d8c7ad8b..2477ca4bbf01 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -208,6 +208,7 @@ cosmossdk.io/log v1.4.1 h1:wKdjfDRbDyZRuWa8M+9nuvpVYxrEOwbD/CA8hvhU8QM= cosmossdk.io/log v1.4.1/go.mod h1:k08v0Pyq+gCP6phvdI6RCGhLf/r425UT6Rk/m+o74rU= cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= +cosmossdk.io/schema v0.3.0/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= cosmossdk.io/schema v0.3.1-0.20241010135032-192601639cac h1:3joNZZWZ3k7fMsrBDL1ktuQ2xQwYLZOaDhkruadDFmc= cosmossdk.io/schema v0.3.1-0.20241010135032-192601639cac/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= cosmossdk.io/store v1.0.0-rc.0.0.20240913190136-3bc707a5a214 h1:UUW0+2UgbDwQ452o2aw4DrVSWmowcad7DB7Vln+N94I= @@ -603,6 +604,14 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile 
v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= +github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= diff --git a/x/bank/keeper/view.go b/x/bank/keeper/view.go index bf5b03ee9678..bd43ee7be09c 100644 --- a/x/bank/keeper/view.go +++ b/x/bank/keeper/view.go @@ -9,7 +9,6 @@ import ( "cosmossdk.io/core/appmodule" errorsmod "cosmossdk.io/errors" "cosmossdk.io/math" - "cosmossdk.io/schema" "cosmossdk.io/x/bank/types" "github.com/cosmos/cosmos-sdk/codec" @@ -254,9 +253,3 @@ func (k BaseViewKeeper) ValidateBalance(ctx context.Context, addr sdk.AccAddress return nil } - -// ModuleCodec implements `schema.HasModuleCodec` interface. -// It allows the indexer to decode the module's KVPairUpdate. -func (k BaseViewKeeper) ModuleCodec() (schema.ModuleCodec, error) { - return k.Schema.ModuleCodec(collections.IndexingOptions{}) -} diff --git a/x/bank/module.go b/x/bank/module.go index 49aa2981b918..f387903c5886 100644 --- a/x/bank/module.go +++ b/x/bank/module.go @@ -9,8 +9,10 @@ import ( "github.com/spf13/cobra" "google.golang.org/grpc" + "cosmossdk.io/collections" "cosmossdk.io/core/appmodule" "cosmossdk.io/core/registry" + "cosmossdk.io/schema" "cosmossdk.io/x/bank/client/cli" "cosmossdk.io/x/bank/keeper" "cosmossdk.io/x/bank/simulation" @@ -175,3 +177,9 @@ func (am AppModule) WeightedOperationsX(weights simsx.WeightSource, reg simsx.Re reg.Add(weights.Get("msg_send", 100), simulation.MsgSendFactory()) reg.Add(weights.Get("msg_multisend", 10), simulation.MsgMultiSendFactory()) } + +// ModuleCodec implements `schema.HasModuleCodec` interface. +// It allows the indexer to decode the module's KVPairUpdate. +func (am AppModule) ModuleCodec() (schema.ModuleCodec, error) { + return am.keeper.(keeper.BaseKeeper).Schema.ModuleCodec(collections.IndexingOptions{}) +}
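
Usage note, not part of the patch: the Named*KeyCodec constructors added above are what give the indexer meaningful column names, because the per-key names flow through SchemaCodec() into schema.Field entries that codec.KeySchemaCodec exposes. The following is a minimal sketch assuming the collections and collections/codec packages at the versions introduced by this change; the "address"/"denom"/"height" names and the sample values are invented purely for illustration.

package main

import (
	"fmt"

	"cosmossdk.io/collections"
	"cosmossdk.io/collections/codec"
)

func main() {
	// The key names given here become schema field names for the indexer.
	kc := collections.NamedTripleKeyCodec(
		"address", collections.StringKey,
		"denom", collections.StringKey,
		"height", collections.Uint64Key,
	)

	// Sample values, made up for this example.
	key := collections.Join3("cosmos1example", "stake", uint64(42))

	// Round-trip through the binary encoding used for store keys.
	buf := make([]byte, kc.Size(key))
	if _, err := kc.Encode(buf, key); err != nil {
		panic(err)
	}
	_, decoded, err := kc.Decode(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(kc.Stringify(decoded)) // ("cosmos1example", "stake", "42")

	// The schema codec exposes one schema.Field per key part, carrying the
	// names given above; this is what the indexer consumes.
	sc, err := codec.KeySchemaCodec(kc)
	if err != nil {
		panic(err)
	}
	for _, f := range sc.Fields {
		fmt.Printf("%s: %v\n", f.Name, f.Kind)
	}
}

The same pattern applies to the Quad codec. At the module level, the indexer reaches these schema codecs through the schema.HasModuleCodec hook shown in the x/bank changes above, and registerIndexer now keys the module set by store key so that OverrideStoreKeys configurations resolve to the right module.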