diff --git a/Makefile b/Makefile
index edffb61feca4e592fbd263981a3ebcf5ea682477..9a64367046d3ac93fdf4ec05437cfb15ffc43777 100644
--- a/Makefile
+++ b/Makefile
@@ -1,3 +1,7 @@
+.PHONY: proto logging mocks .FORCE
+
+# Empty rule so that targets listing .FORCE as a prerequisite are always considered out of date
+.FORCE:
+
+SHELL = bash
+
 PROTODIR=perxis-proto/proto
 DSTDIR=./proto
 ALLPROTO?=$(shell find $(PROTODIR) -name '*.proto' )
@@ -6,6 +10,10 @@ PROTOFILES=	$(filter-out $(PROTODIR)/status/status.proto, $(ALLPROTO))
 PROTOGOFILES=$(PROTOFILES:.proto=.pb.go)
 PROTOGOGRPCFILES=$(PROTOFILES:.proto=_grpc.pb.go)
 
+PKGDIR=pkg
+ACCESSLOGGING=$(shell find $(PKGDIR) -name "logging_middleware.go" -type f)
+ERRORLOGGING=$(shell find $(PKGDIR) -name "error_logging_middleware.go" -type f)
+
 # Generate gRPC clients for Go
 proto: protoc-check protoc-gen-go-check $(PROTOGOFILES)
 	@echo "Generated all protobuf Go files"
@@ -33,11 +41,18 @@ ifeq (,$(wildcard $(GOPATH)/bin/protoc-gen-go))
 	or visit \"https://github.com/golang/protobuf/tree/v1.3.2#installation\" for more.\n")
 endif
 
+# Generate logging (access & error) middleware for all services. Each service is expected to provide
+# `logging_middleware.go`/`error_logging_middleware.go` files with a go:generate directive and the code
+# generation command inside its `/pkg` directory.
+# To install the generator run `go get -u github.com/hexdigest/gowrap/cmd/gowrap`
+# (on Go 1.17+ use `go install github.com/hexdigest/gowrap/cmd/gowrap@latest`).
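+# Illustrative example of such a directive (paths and interface name are per-service and must be adjusted),
+# placed inside the service's middleware package:
+#   //go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/clients -i Clients -t ../../../assets/templates/middleware/access_log -o logging_middleware.go
+#   //go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/clients -i Clients -t ../../../assets/templates/middleware/error_log -o error_logging_middleware.go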
+logging: $(ERRORLOGGING) $(ACCESSLOGGING)
 
+%/middleware/logging_middleware.go: .FORCE
+	@echo "$@"
+	@go generate "$@"
 
-#MICROGENFILES?=$(shell find $(SERVICESDIR) -name "service.go" -exec grep -Ril "microgen" {} \;)
-#SERVICEDIRS?=$(shell find $(SERVICESDIR) -name "service" -type d -exec dirname {} \;)
-#SERVICEFILES?=$(shell find $(SERVICESDIR) -name "service.go" -exec grep -Ril "go:generate" {} \;)
+%/middleware/error_logging_middleware.go: .FORCE
+	@echo "$@"
+	@go generate "$@"
 
 # Generate mocks for all interfaces found in the directory. The generated mocks are written to `./mocks`
 MOCKSDIRS?=$(shell find . -name "service.go" -exec dirname {} \;)
diff --git a/assets/templates/middleware/access_log b/assets/templates/middleware/access_log
new file mode 100644
index 0000000000000000000000000000000000000000..a8587b82d5a72130690a61c81e9f78e5eeb6e726
--- /dev/null
+++ b/assets/templates/middleware/access_log
@@ -0,0 +1,64 @@
+import (
+  "context"
+  "fmt"
+  "time"
+
+  "go.uber.org/zap"
+  "go.uber.org/zap/zapcore"
+)
+
+// NOTE: the generated code also calls auth.GetPrincipal; the import path of that auth package is project-specific.
+
+{{ $funcName := (or .Vars.FuncName ("LoggingMiddleware")) }}
+{{ $decorator := (or .Vars.DecoratorName ("loggingMiddleware")) }}
+
+// {{$decorator}} implements {{.Interface.Type}} that is instrumented with logging
+type {{$decorator}} struct {
+  logger *zap.Logger
+  next {{.Interface.Type}}
+}
+
+// {{$funcName}} instruments an implementation of the {{.Interface.Type}} with simple logging
+func {{$funcName}}(logger *zap.Logger) Middleware {
+  return func(next {{.Interface.Type}}) {{.Interface.Type}} {
+    return &{{$decorator}}{
+      next: next,
+      logger: logger,
+    }
+  }
+}
+
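+// NOTE: the generated methods below assume every wrapped method takes a context parameter named "ctx"
+// and declares a named error result "err".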
+{{range $method := .Interface.Methods}}
+    func (m *{{$decorator}}) {{$method.Declaration}} {
+        begin := time.Now()
+        var fields []zapcore.Field
+        {{- if $method.HasParams}}
+        for k, v := range {{$method.ParamsMap}} {
+            if k == "ctx" {
+                fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+                continue
+            }
+            fields = append(fields, zap.Reflect(k, v))
+        }
+        {{end}}
+
+        m.logger.Debug("{{$method.Name}}.Request", fields...)
+
+        {{ $method.ResultsNames }} = m.next.{{ $method.Call }}
+
+        fields = []zapcore.Field{
+            zap.Duration("time", time.Since(begin)),
+            zap.Error(err),
+        }
+
+        {{ if $method.HasResults}}
+        for k, v := range {{$method.ResultsMap}} {
+            if k == "err" {
+                continue
+            }
+            fields = append(fields, zap.Reflect(k,v))
+        }
+        {{end}}
+
+		m.logger.Debug("{{$method.Name}}.Response", fields...)
+
+        return {{ $method.ResultsNames }}
+    }
+{{end}}
diff --git a/assets/templates/middleware/error_log b/assets/templates/middleware/error_log
new file mode 100755
index 0000000000000000000000000000000000000000..9455e907b738801eb7f2d43d428d98cc620370a0
--- /dev/null
+++ b/assets/templates/middleware/error_log
@@ -0,0 +1,40 @@
+import (
+  "go.uber.org/zap"
+)
+
+{{ $funcName := (or .Vars.FuncName ("ErrorLoggingMiddleware")) }}
+{{ $decorator := (or .Vars.DecoratorName ("errorLoggingMiddleware")) }}
+
+// {{$decorator}} implements {{.Interface.Type}} that is instrumented with logging
+type {{$decorator}} struct {
+  logger *zap.Logger
+  next {{.Interface.Type}}
+}
+
+// {{$funcName}} instruments an implementation of the {{.Interface.Type}} with simple logging
+func {{$funcName}}(logger *zap.Logger) Middleware {
+  return func(next {{.Interface.Type}}) {{.Interface.Type}} {
+    return &{{$decorator}}{
+      next: next,
+      logger: logger,
+    }
+  }
+}
+
+{{range $method := .Interface.Methods}}
+    func (m *{{$decorator}}) {{$method.Declaration}} {
+        logger := m.logger
+        {{- if $method.ReturnsError}}
+            defer func() {
+                if err != nil {
+      		        logger.Warn("response error", zap.Error(err))
+      		    }
+      	    }()
+        {{end -}}
+
+        {{ $method.Pass "m.next." }}
+    }
+{{end}}
diff --git a/assets/templates/middleware/middleware b/assets/templates/middleware/middleware
new file mode 100755
index 0000000000000000000000000000000000000000..89877774c933840c2bdd569f2beed8105588aae2
--- /dev/null
+++ b/assets/templates/middleware/middleware
@@ -0,0 +1,21 @@
+import (
+	"go.uber.org/zap"
+)
+
+type Middleware func({{.Interface.Type}}) {{.Interface.Type}}
+
+
+func WithLog(s {{.Interface.Type}}, logger *zap.Logger, logAccess bool) {{.Interface.Type}} {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	logger = logger.Named("{{ .Interface.Name }}")
+	s = ErrorLoggingMiddleware(logger)(s)
+	if logAccess {
+		s = LoggingMiddleware(logger)(s)
+	}
+	s = RecoveringMiddleware(logger)(s)
+	return s
+}
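+
+// Example wiring (illustrative):
+//
+//	var svc {{.Interface.Type}} = ... // concrete implementation
+//	svc = WithLog(svc, logger, true)  // error + access logging and panic recovery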
+
diff --git a/assets/templates/middleware/recovery b/assets/templates/middleware/recovery
new file mode 100644
index 0000000000000000000000000000000000000000..a84fa3f913e885a1c9b8f1ed71848856137a92fe
--- /dev/null
+++ b/assets/templates/middleware/recovery
@@ -0,0 +1,38 @@
+import (
+	"fmt"
+
+	"go.uber.org/zap"
+)
+
+{{ $funcName := (or .Vars.FuncName ("RecoveringMiddleware")) }}
+{{ $decorator := (or .Vars.DecoratorName ("recoveringMiddleware")) }}
+
+// {{$decorator}} implements {{.Interface.Type}} that is instrumented with logging
+type {{$decorator}} struct {
+  logger *zap.Logger
+  next {{.Interface.Type}}
+}
+
+// {{$funcName}} instruments an implementation of the {{.Interface.Type}} with simple logging
+func {{$funcName}}(logger *zap.Logger) Middleware {
+  return func(next {{.Interface.Type}}) {{.Interface.Type}} {
+    return &{{$decorator}}{
+      next: next,
+      logger: logger,
+    }
+  }
+}
+
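+// NOTE: assigning to err inside the deferred recover assumes the generated method declaration
+// uses a named error result.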
+{{range $method := .Interface.Methods}}
+func (m *{{$decorator}}) {{$method.Declaration}} {
+    logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			{{- if $method.ReturnsError}}
+			err = fmt.Errorf("%v", r)
+			{{end -}}
+		}
+	}()
+
+	{{ $method.Pass "m.next." }}
+}
+{{end}}
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 14a85cd72aebd444fa384bc1788849052a937c4c..c4bc532e1b85714da9e5a283f5b60f65748fa1f5 100644
--- a/go.mod
+++ b/go.mod
@@ -11,31 +11,44 @@ require (
 	github.com/hashicorp/golang-lru v0.5.4
 	github.com/json-iterator/go v1.1.12
 	github.com/mitchellh/mapstructure v1.4.2
+	github.com/nats-io/nats.go v1.23.0
 	github.com/pkg/errors v0.9.1
 	github.com/rs/xid v1.4.0
 	github.com/stretchr/testify v1.8.0
 	go.mongodb.org/mongo-driver v1.11.4
 	go.uber.org/zap v1.19.1
-	golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d
-	golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2
-	google.golang.org/grpc v1.45.0
-	google.golang.org/protobuf v1.28.0
+	golang.org/x/crypto v0.8.0
+	golang.org/x/net v0.9.0
+	google.golang.org/grpc v1.54.0
+	google.golang.org/protobuf v1.28.1
 	gopkg.in/yaml.v3 v3.0.1
 )
 
 require (
+	github.com/Masterminds/goutils v1.1.1 // indirect
+	github.com/Masterminds/semver/v3 v3.2.1 // indirect
+	github.com/Masterminds/sprig/v3 v3.2.3 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/go-kit/log v0.2.0 // indirect
 	github.com/go-logfmt/logfmt v0.5.1 // indirect
 	github.com/golang/snappy v0.0.1 // indirect
-	github.com/google/go-cmp v0.5.7 // indirect
+	github.com/google/uuid v1.3.0 // indirect
 	github.com/gosimple/unidecode v1.0.1 // indirect
 	github.com/hashicorp/errwrap v1.0.0 // indirect
+	github.com/hexdigest/gowrap v1.3.2 // indirect
+	github.com/huandu/xstrings v1.4.0 // indirect
+	github.com/imdario/mergo v0.3.15 // indirect
 	github.com/klauspost/compress v1.13.6 // indirect
+	github.com/mitchellh/copystructure v1.2.0 // indirect
+	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect
+	github.com/nats-io/nkeys v0.3.0 // indirect
+	github.com/nats-io/nuid v1.0.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/shopspring/decimal v1.3.1 // indirect
+	github.com/spf13/cast v1.5.0 // indirect
 	github.com/stretchr/objx v0.4.0 // indirect
 	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
 	github.com/xdg-go/scram v1.1.1 // indirect
@@ -43,8 +56,10 @@ require (
 	github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
 	go.uber.org/atomic v1.9.0 // indirect
 	go.uber.org/multierr v1.7.0 // indirect
-	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
-	golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac // indirect
-	golang.org/x/text v0.3.7 // indirect
-	google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 // indirect
+	golang.org/x/mod v0.10.0 // indirect
+	golang.org/x/sync v0.1.0 // indirect
+	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
+	golang.org/x/tools v0.8.0 // indirect
+	google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
 )
diff --git a/go.sum b/go.sum
index 511239a6e719ed8e4be2d67f6fc16c582aad70ef..53493f56d5ddbe17ec5350541663552f0dcb311c 100644
--- a/go.sum
+++ b/go.sum
@@ -1,86 +1,59 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
+github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
+github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
 github.com/antonmedv/expr v1.9.0 h1:j4HI3NHEdgDnN9p6oI6Ndr0G5QryMY0FNxT4ONrFDGU=
 github.com/antonmedv/expr v1.9.0/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
 github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4=
 github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
 github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw=
 github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
 github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gosimple/slug v1.13.1 h1:bQ+kpX9Qa6tHRaK+fZR0A0M2Kd7Pa5eHPPsb1JpHD+Q=
 github.com/gosimple/slug v1.13.1/go.mod h1:UiRaFH+GEilHstLUmcBgWcI42viBN7mAb818JrYOeFQ=
 github.com/gosimple/unidecode v1.0.1 h1:hZzFTMMqSswvf0LBJZCZgThIZrpDHFXux9KeGmn6T/o=
 github.com/gosimple/unidecode v1.0.1/go.mod h1:CP0Cr1Y1kogOtx0bJblKzsVWrqYaqfNOnHzpgWw4Awc=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
 github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hexdigest/gowrap v1.3.2 h1:ZDhDFhrbAHYRdt9ZnULKZyggC/3+W9EpfX6R8DjlggY=
+github.com/hexdigest/gowrap v1.3.2/go.mod h1:g8N2jI4n9AKrf843erksNTrt4sdkG+TGVfhWe8dWrJQ=
+github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
+github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
+github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
@@ -99,8 +72,15 @@ github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1
 github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
 github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=
 github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -108,24 +88,36 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0=
 github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/nats-io/jwt/v2 v2.0.3 h1:i/O6cmIsjpcQyWDYNcq2JyZ3/VTF8SJ4JWluI5OhpvI=
+github.com/nats-io/nats-server/v2 v2.5.0 h1:wsnVaaXH9VRSg+A2MVg5Q727/CqxnmPLGFQ3YZYKTQg=
+github.com/nats-io/nats.go v1.23.0 h1:lR28r7IX44WjYgdiKz9GmUeW0uh/m33uD3yEjLZ2cOE=
+github.com/nats-io/nats.go v1.23.0/go.mod h1:ki/Scsa23edbh8IRZbCuNXR9TDcbvfaSijKtaqQgw+Q=
+github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
+github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
+github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84=
 github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
 github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
 github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
+github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -144,9 +136,9 @@ github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgk
 github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
 github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 go.mongodb.org/mongo-driver v1.11.4 h1:4ayjakA013OdpGyL2K3ZqylTac/rMjrJOMZ1EHizXas=
 go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
 go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
@@ -159,115 +151,94 @@ go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
 go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY=
+golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
+golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
+golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw=
-golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
+golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 h1:ysnBoUyeL/H6RCvNRhWHjKoDEmguI+mPU+qHgK8qv/w=
-google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
-google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=
+google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
+google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/pkg/clients/middleware/caching_middleware.go b/pkg/clients/middleware/caching_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..1733c0de4a8dde8f011d46b2b77005f8924e60bb
--- /dev/null
+++ b/pkg/clients/middleware/caching_middleware.go
@@ -0,0 +1,165 @@
+package middleware
+
+import (
+	"context"
+	"strings"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	service "git.perx.ru/perxis/perxis-go/pkg/clients"
+)
+
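+// makeKey builds a composite cache key by joining the parts with "-",
+// e.g. makeKey("space", "api-key", "abc") == "space-api-key-abc".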
+func makeKey(ss ...string) string {
+	return strings.Join(ss, "-")
+}
+
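+// CachingMiddleware caches the results of Get, GetBy and List and invalidates
+// the affected entries on Create, Update, Delete and Enable.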
+func CachingMiddleware(cache *cache.Cache) Middleware {
+	return func(next service.Clients) service.Clients {
+		return &cachingMiddleware{
+			cache: cache,
+			next:  next,
+		}
+	}
+}
+
+type cachingMiddleware struct {
+	cache *cache.Cache
+	next  service.Clients
+}
+
+func (m cachingMiddleware) Create(ctx context.Context, client *service.Client) (cl *service.Client, err error) {
+
+	cl, err = m.next.Create(ctx, client)
+	if err == nil {
+		m.cache.Remove(cl.SpaceID)
+	}
+	return cl, err
+}
+
+func (m cachingMiddleware) Get(ctx context.Context, spaceId string, id string) (cl *service.Client, err error) {
+
+	key := makeKey(spaceId, id)
+	value, e := m.cache.Get(key)
+	if e == nil {
+		return value.(*service.Client), err
+	}
+	cl, err = m.next.Get(ctx, spaceId, id)
+	if err == nil {
+		m.cache.Set(key, cl)
+		for _, key := range keysFromIdentities(spaceId, cl) {
+			m.cache.Set(key, cl)
+		}
+	}
+	return cl, err
+}
+
+func (m cachingMiddleware) GetBy(ctx context.Context, spaceId string, params *service.GetByParams) (cl *service.Client, err error) {
+	if params == nil {
+		return m.next.GetBy(ctx, spaceId, params)
+	}
+
+	key := getIdentKey(spaceId, params)
+	value, e := m.cache.Get(key)
+	if e == nil {
+		return value.(*service.Client), err
+	}
+	cl, err = m.next.GetBy(ctx, spaceId, params)
+	if err == nil {
+		m.cache.Set(makeKey(spaceId, cl.ID), cl)
+		for _, key := range keysFromIdentities(spaceId, cl) {
+			m.cache.Set(key, cl)
+		}
+	}
+	return cl, err
+}
+
+func (m cachingMiddleware) List(ctx context.Context, spaceId string) (clients []*service.Client, err error) {
+
+	value, e := m.cache.Get(spaceId)
+	if e == nil {
+		return value.([]*service.Client), err
+	}
+	clients, err = m.next.List(ctx, spaceId)
+	if err == nil {
+		m.cache.Set(spaceId, clients)
+	}
+	return clients, err
+}
+
+func (m cachingMiddleware) Update(ctx context.Context, client *service.Client) (err error) {
+
+	err = m.next.Update(ctx, client)
+
+	if err == nil {
+		m.cache.Remove(client.SpaceID)
+		value, e := m.cache.Get(makeKey(client.SpaceID, client.ID))
+		if e == nil {
+			client := value.(*service.Client)
+			m.cache.Remove(makeKey(client.SpaceID, client.ID))
+			for _, key := range keysFromIdentities(client.SpaceID, client) {
+				m.cache.Remove(key)
+			}
+		}
+	}
+	return err
+}
+
+func (m cachingMiddleware) Delete(ctx context.Context, spaceId string, id string) (err error) {
+
+	err = m.next.Delete(ctx, spaceId, id)
+	if err == nil {
+		value, e := m.cache.Get(makeKey(spaceId, id))
+		if e == nil {
+			client := value.(*service.Client)
+			m.cache.Remove(makeKey(client.SpaceID, client.ID))
+			for _, key := range keysFromIdentities(client.SpaceID, client) {
+				m.cache.Remove(key)
+			}
+		}
+		m.cache.Remove(spaceId)
+	}
+	return err
+}
+
+func (m cachingMiddleware) Enable(ctx context.Context, spaceId string, id string, enable bool) (err error) {
+
+	err = m.next.Enable(ctx, spaceId, id, enable)
+	if err == nil {
+		value, e := m.cache.Get(makeKey(spaceId, id))
+		if e == nil {
+			client := value.(*service.Client)
+			m.cache.Remove(makeKey(client.SpaceID, client.ID))
+			for _, key := range keysFromIdentities(client.SpaceID, client) {
+				m.cache.Remove(key)
+			}
+		}
+		m.cache.Remove(spaceId)
+	}
+	return err
+}
+
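+// keysFromIdentities returns the additional cache keys a client is stored under,
+// one per configured identity (API key, TLS subject, OAuth client ID).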
+func keysFromIdentities(spaceID string, client *service.Client) []string {
+	res := make([]string, 0)
+	if client.APIKey != nil && client.APIKey.Key != "" {
+		res = append(res, makeKey(spaceID, "api-key", client.APIKey.Key))
+	}
+	if client.TLS != nil && client.TLS.Subject != "" {
+		res = append(res, makeKey(spaceID, "tls", client.TLS.Subject))
+	}
+	if client.OAuth != nil && client.OAuth.ClientID != "" {
+		res = append(res, makeKey(spaceID, "oauth", client.OAuth.ClientID))
+	}
+	return res
+}
+
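+// getIdentKey maps GetByParams onto the matching identity cache key (see keysFromIdentities);
+// it returns "" when no identity is set in params.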
+func getIdentKey(spaceID string, params *service.GetByParams) string {
+	switch {
+	case params.APIKey != "":
+		return makeKey(spaceID, "api-key", params.APIKey)
+	case params.TLSSubject != "":
+		return makeKey(spaceID, "tls", params.TLSSubject)
+	case params.OAuthClientID != "":
+		return makeKey(spaceID, "oauth", params.OAuthClientID)
+	default:
+		return ""
+	}
+}
diff --git a/pkg/clients/middleware/caching_middleware_test.go b/pkg/clients/middleware/caching_middleware_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b842e1183a5425a4b3aec8d59b0720e5627767ed
--- /dev/null
+++ b/pkg/clients/middleware/caching_middleware_test.go
@@ -0,0 +1,382 @@
+package middleware
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	"git.perx.ru/perxis/perxis-go/pkg/clients"
+	csmocks "git.perx.ru/perxis/perxis-go/pkg/clients/mocks"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestClientsCache(t *testing.T) {
+
+	const (
+		cltID    = "cltID"
+		spaceID  = "spaceID"
+		clientID = "123@client"
+		size     = 5
+		ttl      = 20 * time.Millisecond
+	)
+
+	errNotFound := errors.NotFound(errors.New("not found"))
+
+	ctx := context.Background()
+
+	t.Run("Get from cache", func(t *testing.T) {
+		cs := &csmocks.Clients{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+		cs.On("Get", mock.Anything, spaceID, cltID).Return(&clients.Client{ID: cltID, SpaceID: spaceID, Name: "client_1", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}}, nil).Once()
+
+		v1, err := svc.Get(ctx, spaceID, cltID)
+		require.NoError(t, err)
+
+		v2, err := svc.Get(ctx, spaceID, cltID)
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Expected to get the object from the cache on the repeated request.")
+
+		v3, err := svc.GetBy(ctx, spaceID, &clients.GetByParams{OAuthClientID: clientID})
+		require.NoError(t, err)
+		assert.Same(t, v2, v3, "Expected to get the object from the cache when requesting by ClientID.")
+
+		cs.AssertExpectations(t)
+	})
+
+	t.Run("GetBy from cache", func(t *testing.T) {
+		cs := &csmocks.Clients{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+		cs.On("GetBy", mock.Anything, spaceID, &clients.GetByParams{OAuthClientID: clientID}).Return(&clients.Client{ID: cltID, SpaceID: spaceID, Name: "client_1", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}}, nil).Once()
+
+		v1, err := svc.GetBy(ctx, spaceID, &clients.GetByParams{OAuthClientID: clientID})
+		require.NoError(t, err)
+
+		v2, err := svc.GetBy(ctx, spaceID, &clients.GetByParams{OAuthClientID: clientID})
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Expected to get the object from the cache on the repeated request.")
+
+		v3, err := svc.Get(ctx, spaceID, cltID)
+		require.NoError(t, err)
+		assert.Same(t, v2, v3, "Expected to get the object from the cache after the Get request.")
+
+		cs.AssertExpectations(t)
+	})
+
+	t.Run("List", func(t *testing.T) {
+		cs := &csmocks.Clients{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+		cs.On("List", mock.Anything, spaceID).Return([]*clients.Client{{ID: cltID, SpaceID: spaceID, Name: "client_1", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}}}, nil).Once()
+
+		vl1, err := svc.List(ctx, spaceID)
+		require.NoError(t, err)
+
+		vl2, err := svc.List(ctx, spaceID)
+		require.NoError(t, err)
+		assert.Same(t, vl1[0], vl2[0], "Expected to get the objects from the cache on the repeated request.")
+
+		cs.AssertExpectations(t)
+	})
+
+	t.Run("Invalidate cache", func(t *testing.T) {
+
+		t.Run("After Update", func(t *testing.T) {
+			cs := &csmocks.Clients{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+			cs.On("Get", mock.Anything, spaceID, cltID).Return(&clients.Client{ID: cltID, SpaceID: spaceID, Name: "client_1", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}}, nil).Once()
+			cs.On("List", mock.Anything, spaceID).Return([]*clients.Client{{ID: cltID, SpaceID: spaceID, Name: "client_1", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, cltID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, cltID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected to get the object from the cache.")
+
+			v3, err := svc.GetBy(ctx, spaceID, &clients.GetByParams{OAuthClientID: clientID})
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Expected to get the object from the cache by ClientID.")
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Len(t, vl2, 1)
+			assert.Same(t, vl1[0], vl2[0], "Expected to get the objects from the cache on the repeated request.")
+
+			cs.On("Update", mock.Anything, mock.Anything).Return(nil).Once()
+
+			err = svc.Update(ctx, &clients.Client{ID: cltID, SpaceID: spaceID, Name: "client_2", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}})
+			require.NoError(t, err)
+
+			cs.On("Get", mock.Anything, spaceID, cltID).Return(&clients.Client{ID: cltID, SpaceID: spaceID, Name: "client_2", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}}, nil).Once()
+			cs.On("List", mock.Anything, spaceID).Return([]*clients.Client{{ID: cltID, SpaceID: spaceID, Name: "client_2", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}}}, nil).Once()
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.NotSame(t, vl2[0], vl3[0], "Expected the list cache to be invalidated after the update and the objects re-fetched from the service.")
+
+			v4, err := svc.Get(ctx, spaceID, cltID)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v4, "Expected the object to be evicted from the cache after the update and re-fetched from the service.")
+
+			v5, err := svc.GetBy(ctx, spaceID, &clients.GetByParams{OAuthClientID: clientID})
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v5)
+			assert.Same(t, v4, v5, "Expected the object to be evicted from the cache after the update, and the preceding Get to have re-populated the cache with the object fetched from the service.")
+
+			cs.AssertExpectations(t)
+		})
+
+		t.Run("After Update(List)", func(t *testing.T) {
+			cs := &csmocks.Clients{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+			cs.On("List", mock.Anything, spaceID).Return([]*clients.Client{{ID: cltID, SpaceID: spaceID, Name: "client_1", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}}}, nil).Once()
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Len(t, vl2, 1)
+			assert.Same(t, vl1[0], vl2[0], "Expected to get the objects from the cache on the repeated request.")
+
+			cs.On("Update", mock.Anything, mock.Anything).Return(nil).Once()
+
+			err = svc.Update(ctx, &clients.Client{ID: cltID, SpaceID: spaceID, Name: "client_2", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}})
+			require.NoError(t, err)
+
+			cs.On("List", mock.Anything, spaceID).Return([]*clients.Client{{ID: cltID, SpaceID: spaceID, Name: "client_2", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}}}, nil).Once()
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.NotSame(t, vl2[0], vl3[0], "Expected the list cache to be invalidated after the update and the objects re-fetched from the service.")
+
+			cs.AssertExpectations(t)
+		})
+
+		t.Run("After Delete", func(t *testing.T) {
+			cs := &csmocks.Clients{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+			cs.On("Get", mock.Anything, spaceID, cltID).Return(&clients.Client{ID: cltID, SpaceID: spaceID, Name: "client_1", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}}, nil).Once()
+			cs.On("List", mock.Anything, spaceID).Return([]*clients.Client{{ID: cltID, SpaceID: spaceID, Name: "client_1", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, cltID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, cltID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected to get the object from the cache.")
+
+			v3, err := svc.GetBy(ctx, spaceID, &clients.GetByParams{OAuthClientID: clientID})
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Expected to get the object from the cache by ClientID.")
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected to get the objects from the cache on the repeated request.")
+
+			cs.On("Delete", mock.Anything, spaceID, cltID).Return(nil).Once()
+
+			err = svc.Delete(ctx, spaceID, cltID)
+			require.NoError(t, err)
+
+			cs.On("Get", mock.Anything, spaceID, cltID).Return(nil, errNotFound).Once()
+			cs.On("GetBy", mock.Anything, spaceID, &clients.GetByParams{OAuthClientID: clientID}).Return(nil, errNotFound).Once()
+			cs.On("List", mock.Anything, spaceID).Return(nil, errNotFound).Once()
+
+			_, err = svc.Get(ctx, spaceID, cltID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the object to be evicted from the cache after deletion from the store, so the error comes from the service.")
+
+			_, err = svc.GetBy(ctx, spaceID, &clients.GetByParams{OAuthClientID: clientID})
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the object to be evicted from the cache after deletion from the store, so the error comes from the service.")
+
+			_, err = svc.List(ctx, spaceID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the objects to be evicted from the cache after deletion from the store.")
+
+			cs.AssertExpectations(t)
+		})
+
+		t.Run("After Delete(List)", func(t *testing.T) {
+			cs := &csmocks.Clients{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+			cs.On("List", mock.Anything, spaceID).Return([]*clients.Client{{ID: cltID, SpaceID: spaceID, Name: "client_1", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}}}, nil).Once()
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected objects to be served from the cache on a repeated request.")
+
+			cs.On("Delete", mock.Anything, spaceID, cltID).Return(nil).Once()
+
+			err = svc.Delete(ctx, spaceID, cltID)
+			require.NoError(t, err)
+
+			cs.On("List", mock.Anything, spaceID).Return(nil, errNotFound).Once()
+
+			_, err = svc.List(ctx, spaceID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the objects to be evicted from the cache after deletion from storage.")
+
+			cs.AssertExpectations(t)
+		})
+
+		t.Run("After Create", func(t *testing.T) {
+			cs := &csmocks.Clients{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+			cs.On("List", mock.Anything, spaceID).Return([]*clients.Client{{ID: cltID, SpaceID: spaceID, Name: "client_1"}}, nil).Once()
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected objects to be served from the cache on a repeated request.")
+			assert.Len(t, vl2, 1, "Expected objects to be served from the cache.")
+
+			cs.On("Create", mock.Anything, mock.Anything).Return(&clients.Client{ID: "cltID2", SpaceID: spaceID, Name: "client_2"}, nil).Once()
+
+			_, err = svc.Create(ctx, &clients.Client{ID: "cltID2", SpaceID: spaceID, Name: "client_2"})
+			require.NoError(t, err)
+
+			cs.On("List", mock.Anything, spaceID).Return([]*clients.Client{{ID: cltID, SpaceID: spaceID, Name: "client_1"}, {ID: "cltID2", SpaceID: spaceID, Name: "client_2"}}, nil).Once()
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Len(t, vl3, 2, "Expected the cache to be cleared after creating a new object, so objects are re-fetched from the service.")
+
+			cs.AssertExpectations(t)
+		})
+
+		t.Run("After Enable", func(t *testing.T) {
+			cs := &csmocks.Clients{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+			tr := true
+			cs.On("Get", mock.Anything, spaceID, cltID).Return(&clients.Client{ID: cltID, SpaceID: spaceID, OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}, Disabled: &tr}, nil).Once()
+			cs.On("List", mock.Anything, spaceID).Return([]*clients.Client{{ID: cltID, SpaceID: spaceID, OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}, Disabled: &tr}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, cltID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, cltID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be served from the cache.")
+
+			v3, err := svc.GetBy(ctx, spaceID, &clients.GetByParams{OAuthClientID: clientID})
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Expected the object to be served from the cache by ClientID.")
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected objects to be served from the cache on a repeated request.")
+
+			cs.On("Enable", mock.Anything, spaceID, cltID, tr).Return(nil).Once()
+
+			err = svc.Enable(ctx, spaceID, cltID, tr)
+			require.NoError(t, err)
+
+			fl := false
+			cs.On("Get", mock.Anything, spaceID, cltID).Return(&clients.Client{ID: cltID, SpaceID: spaceID, Name: "client_1", OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}, Disabled: &fl}, nil).Once()
+			cs.On("List", mock.Anything, spaceID).Return([]*clients.Client{{ID: cltID, SpaceID: spaceID, OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}, Disabled: &fl}}, nil).Once()
+
+			v4, err := svc.Get(ctx, spaceID, cltID)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v4, "Expected the object to be evicted from the cache after enabling and re-fetched from the service.")
+
+			v5, err := svc.GetBy(ctx, spaceID, &clients.GetByParams{OAuthClientID: clientID})
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v5, "Expected the object to be evicted from the cache after enabling, so GetBy returns the object freshly fetched by the preceding Get.")
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.NotSame(t, vl2[0], vl3[0], "Expected the cache to be cleared after enabling the object, so objects are re-fetched from the service.")
+
+			cs.AssertExpectations(t)
+		})
+
+		t.Run("After Enable(List)", func(t *testing.T) {
+			cs := &csmocks.Clients{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+			tr := true
+			cs.On("List", mock.Anything, spaceID).Return([]*clients.Client{{ID: cltID, SpaceID: spaceID, OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}, Disabled: &tr}}, nil).Once()
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected objects to be served from the cache on a repeated request.")
+
+			cs.On("Enable", mock.Anything, spaceID, cltID, tr).Return(nil).Once()
+
+			err = svc.Enable(ctx, spaceID, cltID, tr)
+			require.NoError(t, err)
+
+			fl := false
+			cs.On("List", mock.Anything, spaceID).Return([]*clients.Client{{ID: cltID, SpaceID: spaceID, OAuth: &clients.OAuth{ClientID: clientID, AuthID: "authID"}, Disabled: &fl}}, nil).Once()
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.NotSame(t, vl2[0], vl3[0], "Expected the cache to be cleared after enabling the object, so objects are re-fetched from the service.")
+
+			cs.AssertExpectations(t)
+		})
+
+		t.Run("After TTL expired", func(t *testing.T) {
+			cs := &csmocks.Clients{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+			cs.On("Get", mock.Anything, spaceID, cltID).Return(&clients.Client{ID: cltID, SpaceID: spaceID, Name: "client_1", OAuth: &clients.OAuth{ClientID: clientID}}, nil).Once()
+			cs.On("Get", mock.Anything, spaceID, cltID).Return(&clients.Client{ID: cltID, SpaceID: spaceID, Name: "client_1", OAuth: &clients.OAuth{ClientID: clientID}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, cltID)
+			require.NoError(t, err)
+			v2, err := svc.Get(ctx, spaceID, cltID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be served from the cache on a repeated request.")
+
+			time.Sleep(2 * ttl)
+
+			v3, err := svc.Get(ctx, spaceID, cltID)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v3, "Expected the item to be evicted from the cache after the TTL expired and re-fetched from the service.")
+
+			cs.AssertExpectations(t)
+		})
+	})
+}
diff --git a/pkg/clients/middleware/error_logging_middleware.go b/pkg/clients/middleware/error_logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..0b96827a0c620ad1ca1aa5aaf6b93a821af6279d
--- /dev/null
+++ b/pkg/clients/middleware/error_logging_middleware.go
@@ -0,0 +1,100 @@
+package middleware
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/error_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/clients -i Clients -t ../../../assets/templates/middleware/error_log -o error_logging_middleware.go -l ""
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/clients"
+	"go.uber.org/zap"
+)
+
+// errorLoggingMiddleware implements clients.Clients that is instrumented with logging
+type errorLoggingMiddleware struct {
+	logger *zap.Logger
+	next   clients.Clients
+}
+
+// ErrorLoggingMiddleware instruments an implementation of the clients.Clients with simple logging
+func ErrorLoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next clients.Clients) clients.Clients {
+		return &errorLoggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *errorLoggingMiddleware) Create(ctx context.Context, client *clients.Client) (created *clients.Client, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Create(ctx, client)
+}
+
+func (m *errorLoggingMiddleware) Delete(ctx context.Context, spaceId string, id string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Delete(ctx, spaceId, id)
+}
+
+func (m *errorLoggingMiddleware) Enable(ctx context.Context, spaceId string, id string, enable bool) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Enable(ctx, spaceId, id, enable)
+}
+
+func (m *errorLoggingMiddleware) Get(ctx context.Context, spaceId string, id string) (client *clients.Client, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Get(ctx, spaceId, id)
+}
+
+func (m *errorLoggingMiddleware) GetBy(ctx context.Context, spaceId string, params *clients.GetByParams) (client *clients.Client, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.GetBy(ctx, spaceId, params)
+}
+
+func (m *errorLoggingMiddleware) List(ctx context.Context, spaceId string) (clients []*clients.Client, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.List(ctx, spaceId)
+}
+
+func (m *errorLoggingMiddleware) Update(ctx context.Context, client *clients.Client) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Update(ctx, client)
+}
diff --git a/pkg/clients/middleware/logging_middleware.go b/pkg/clients/middleware/logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..ef3ea5947a637bfa9caffa3af64f1f7cd2b8e019
--- /dev/null
+++ b/pkg/clients/middleware/logging_middleware.go
@@ -0,0 +1,288 @@
+package middleware
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/access_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/clients -i Clients -t ../../../assets/templates/middleware/access_log -o logging_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/auth"
+	"git.perx.ru/perxis/perxis-go/pkg/clients"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// loggingMiddleware implements clients.Clients that is instrumented with logging
+type loggingMiddleware struct {
+	logger *zap.Logger
+	next   clients.Clients
+}
+
+// LoggingMiddleware instruments an implementation of the clients.Clients with simple logging
+func LoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next clients.Clients) clients.Clients {
+		return &loggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *loggingMiddleware) Create(ctx context.Context, client *clients.Client) (created *clients.Client, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":    ctx,
+		"client": client} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Request", fields...)
+
+	created, err = m.next.Create(ctx, client)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"created": created,
+		"err":     err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Response", fields...)
+
+	return created, err
+}
+
+func (m *loggingMiddleware) Delete(ctx context.Context, spaceId string, id string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"id":      id} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Request", fields...)
+
+	err = m.next.Delete(ctx, spaceId, id)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Enable(ctx context.Context, spaceId string, id string, enable bool) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"id":      id,
+		"enable":  enable} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Enable.Request", fields...)
+
+	err = m.next.Enable(ctx, spaceId, id, enable)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Enable.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Get(ctx context.Context, spaceId string, id string) (client *clients.Client, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"id":      id} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Request", fields...)
+
+	client, err = m.next.Get(ctx, spaceId, id)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"client": client,
+		"err":    err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Response", fields...)
+
+	return client, err
+}
+
+func (m *loggingMiddleware) GetBy(ctx context.Context, spaceId string, params *clients.GetByParams) (client *clients.Client, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"params":  params} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("GetBy.Request", fields...)
+
+	client, err = m.next.GetBy(ctx, spaceId, params)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"client": client,
+		"err":    err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("GetBy.Response", fields...)
+
+	return client, err
+}
+
+func (m *loggingMiddleware) List(ctx context.Context, spaceId string) (clients []*clients.Client, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("List.Request", fields...)
+
+	clients, err = m.next.List(ctx, spaceId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"clients": clients,
+		"err":     err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("List.Response", fields...)
+
+	return clients, err
+}
+
+func (m *loggingMiddleware) Update(ctx context.Context, client *clients.Client) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":    ctx,
+		"client": client} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Request", fields...)
+
+	err = m.next.Update(ctx, client)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Response", fields...)
+
+	return err
+}
diff --git a/pkg/clients/middleware/middleware.go b/pkg/clients/middleware/middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..a49c9b3ebb0c041c23178d457ea5ddf2d2357d91
--- /dev/null
+++ b/pkg/clients/middleware/middleware.go
@@ -0,0 +1,28 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/middleware
+// gowrap: http://github.com/hexdigest/gowrap
+
+package middleware
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/clients -i Clients -t ../../../assets/templates/middleware/middleware -o middleware.go -l ""
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/clients"
+	"go.uber.org/zap"
+)
+
+type Middleware func(clients.Clients) clients.Clients
+
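+// WithLog wraps s so that a call passes through panic recovery, then access logging
+// (when log_access is true), then error logging, before reaching the wrapped
+// implementation. A minimal usage sketch, assuming some concrete clients.Clients
+// implementation named impl (hypothetical) and an application *zap.Logger:
+//
+//	svc := WithLog(impl, logger, true)
+//	_, _ = svc.Get(ctx, "spaceID", "clientID") // access and errors are logged, panics recovered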
+func WithLog(s clients.Clients, logger *zap.Logger, log_access bool) clients.Clients {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	logger = logger.Named("Clients")
+	s = ErrorLoggingMiddleware(logger)(s)
+	if log_access {
+		s = LoggingMiddleware(logger)(s)
+	}
+	s = RecoveringMiddleware(logger)(s)
+	return s
+}
diff --git a/pkg/clients/middleware/recovering_middleware.go b/pkg/clients/middleware/recovering_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..2406ca994112a148e6a204589ee1844a782922fc
--- /dev/null
+++ b/pkg/clients/middleware/recovering_middleware.go
@@ -0,0 +1,115 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/recovery
+// gowrap: http://github.com/hexdigest/gowrap
+
+package middleware
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/clients -i Clients -t ../../../assets/templates/middleware/recovery -o recovering_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+
+	"git.perx.ru/perxis/perxis-go/pkg/clients"
+	"go.uber.org/zap"
+)
+
+// recoveringMiddleware implements clients.Clients that is instrumented with logging
+type recoveringMiddleware struct {
+	logger *zap.Logger
+	next   clients.Clients
+}
+
+// RecoveringMiddleware instruments an implementation of the clients.Clients with simple logging
+func RecoveringMiddleware(logger *zap.Logger) Middleware {
+	return func(next clients.Clients) clients.Clients {
+		return &recoveringMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *recoveringMiddleware) Create(ctx context.Context, client *clients.Client) (created *clients.Client, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Create(ctx, client)
+}
+
+func (m *recoveringMiddleware) Delete(ctx context.Context, spaceId string, id string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Delete(ctx, spaceId, id)
+}
+
+func (m *recoveringMiddleware) Enable(ctx context.Context, spaceId string, id string, enable bool) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Enable(ctx, spaceId, id, enable)
+}
+
+func (m *recoveringMiddleware) Get(ctx context.Context, spaceId string, id string) (client *clients.Client, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Get(ctx, spaceId, id)
+}
+
+func (m *recoveringMiddleware) GetBy(ctx context.Context, spaceId string, params *clients.GetByParams) (client *clients.Client, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.GetBy(ctx, spaceId, params)
+}
+
+func (m *recoveringMiddleware) List(ctx context.Context, spaceId string) (clients []*clients.Client, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.List(ctx, spaceId)
+}
+
+func (m *recoveringMiddleware) Update(ctx context.Context, client *clients.Client) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Update(ctx, client)
+}
diff --git a/pkg/collaborators/middleware/caching_middleware.go b/pkg/collaborators/middleware/caching_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..f57995acf8387c80edc622313ba4729ae11bc6f6
--- /dev/null
+++ b/pkg/collaborators/middleware/caching_middleware.go
@@ -0,0 +1,88 @@
+package service
+
+import (
+	"context"
+	"strings"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	service "git.perx.ru/perxis/perxis-go/pkg/collaborators"
+)
+
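+// makeKey joins its parts with "-". The middleware caches Get results under
+// "<spaceId>-<subject>" and the list results under the raw spaceId (ListCollaborators)
+// or subject (ListSpaces); invalidation removes the matching keys.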
+func makeKey(ss ...string) string {
+	return strings.Join(ss, "-")
+}
+
+func CachingMiddleware(cache *cache.Cache) Middleware {
+	return func(next service.Collaborators) service.Collaborators {
+		return &cachingMiddleware{
+			cache: cache,
+			next:  next,
+		}
+	}
+}
+
+type cachingMiddleware struct {
+	cache *cache.Cache
+	next  service.Collaborators
+}
+
+func (m cachingMiddleware) Set(ctx context.Context, spaceId, subject, role string) (err error) {
+
+	err = m.next.Set(ctx, spaceId, subject, role)
+	if err == nil {
+		m.cache.Remove(spaceId)
+		m.cache.Remove(subject)
+	}
+	return err
+}
+
+func (m cachingMiddleware) Get(ctx context.Context, spaceId, subject string) (role string, err error) {
+
+	key := makeKey(spaceId, subject)
+	value, e := m.cache.Get(key)
+	if e == nil {
+		return value.(string), err
+	}
+	role, err = m.next.Get(ctx, spaceId, subject)
+	if err == nil {
+		m.cache.Set(key, role)
+	}
+	return role, err
+}
+
+func (m cachingMiddleware) Remove(ctx context.Context, spaceId, subject string) (err error) {
+
+	err = m.next.Remove(ctx, spaceId, subject)
+	if err == nil {
+		m.cache.Remove(makeKey(spaceId, subject))
+		m.cache.Remove(spaceId)
+		m.cache.Remove(subject)
+	}
+	return err
+}
+
+func (m cachingMiddleware) ListCollaborators(ctx context.Context, spaceId string) (collaborators []*service.Collaborator, err error) {
+
+	value, e := m.cache.Get(spaceId)
+	if e == nil {
+		return value.([]*service.Collaborator), err
+	}
+	collaborators, err = m.next.ListCollaborators(ctx, spaceId)
+	if err == nil {
+		m.cache.Set(spaceId, collaborators)
+	}
+	return collaborators, err
+}
+
+func (m cachingMiddleware) ListSpaces(ctx context.Context, subject string) (collaborators []*service.Collaborator, err error) {
+
+	value, e := m.cache.Get(subject)
+	if e == nil {
+		return value.([]*service.Collaborator), err
+	}
+	collaborators, err = m.next.ListSpaces(ctx, subject)
+	if err == nil {
+		m.cache.Set(subject, collaborators)
+	}
+	return collaborators, err
+}
diff --git a/pkg/collaborators/middleware/caching_middleware_test.go b/pkg/collaborators/middleware/caching_middleware_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..da1d6e842f542e1c664f988f38c4797ea31c38f1
--- /dev/null
+++ b/pkg/collaborators/middleware/caching_middleware_test.go
@@ -0,0 +1,190 @@
+package service
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	"git.perx.ru/perxis/perxis-go/pkg/collaborators"
+	csmocks "git.perx.ru/perxis/perxis-go/pkg/collaborators/mocks"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestCollaboratorsCache(t *testing.T) {
+
+	const (
+		userID    = "userID"
+		spaceID   = "spaceID"
+		spaceRole = "spaceRole"
+		size      = 5
+		ttl       = 20 * time.Millisecond
+	)
+
+	errNotFound := errors.NotFound(errors.New("not found"))
+
+	ctx := context.Background()
+
+	t.Run("Get from cache", func(t *testing.T) {
+		cs := &csmocks.Collaborators{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+		cs.On("Get", mock.Anything, spaceID, userID).Return(spaceRole, nil).Once()
+
+		_, err := svc.Get(ctx, spaceID, userID)
+		require.NoError(t, err)
+
+		rl, err := svc.Get(ctx, spaceID, userID)
+		require.NoError(t, err)
+		assert.Equal(t, spaceRole, rl)
+
+		cs.AssertExpectations(t)
+	})
+
+	t.Run("ListCollaborators from cache", func(t *testing.T) {
+		cs := &csmocks.Collaborators{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+		cs.On("ListCollaborators", mock.Anything, spaceID).Return([]*collaborators.Collaborator{{SpaceID: spaceID, Subject: userID, Role: spaceRole}}, nil).Once()
+
+		v1, err := svc.ListCollaborators(ctx, spaceID)
+		require.NoError(t, err)
+		v2, err := svc.ListCollaborators(ctx, spaceID)
+		require.NoError(t, err)
+		assert.Same(t, v1[0], v2[0], "Expected objects to be served from the cache on a repeated request.")
+
+		cs.AssertExpectations(t)
+	})
+
+	t.Run("ListSpaces from cache", func(t *testing.T) {
+		cs := &csmocks.Collaborators{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+		cs.On("ListSpaces", mock.Anything, userID).Return([]*collaborators.Collaborator{{SpaceID: spaceID, Subject: userID, Role: spaceRole}}, nil).Once()
+
+		v1, err := svc.ListSpaces(ctx, userID)
+		require.NoError(t, err)
+		v2, err := svc.ListSpaces(ctx, userID)
+		require.NoError(t, err)
+		assert.Same(t, v1[0], v2[0], "Expected objects to be served from the cache on a repeated request.")
+
+		cs.AssertExpectations(t)
+	})
+
+	t.Run("Invalidate cache", func(t *testing.T) {
+		t.Run("After Remove", func(t *testing.T) {
+			cs := &csmocks.Collaborators{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+			cs.On("Get", mock.Anything, spaceID, userID).Return(spaceRole, nil).Once()
+			cs.On("ListCollaborators", mock.Anything, spaceID).Return([]*collaborators.Collaborator{{SpaceID: spaceID, Subject: userID, Role: spaceRole}}, nil).Once()
+			cs.On("ListSpaces", mock.Anything, userID).Return([]*collaborators.Collaborator{{SpaceID: spaceID, Subject: userID, Role: spaceRole}}, nil).Once()
+
+			_, err := svc.Get(ctx, spaceID, userID)
+			require.NoError(t, err)
+
+			rl, err := svc.Get(ctx, spaceID, userID)
+			require.NoError(t, err)
+			assert.Equal(t, spaceRole, rl, "Expected the data to be served from the cache.")
+
+			lc1, err := svc.ListCollaborators(ctx, spaceID)
+			require.NoError(t, err)
+			lc2, err := svc.ListCollaborators(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, lc1[0], lc2[0], "Expected objects to be served from the cache.")
+
+			ls1, err := svc.ListSpaces(ctx, userID)
+			require.NoError(t, err)
+			ls2, err := svc.ListSpaces(ctx, userID)
+			require.NoError(t, err)
+			assert.Same(t, ls1[0], ls2[0], "Expected objects to be served from the cache.")
+
+			cs.On("Remove", mock.Anything, spaceID, userID).Return(nil).Once()
+
+			cs.On("Get", mock.Anything, spaceID, userID).Return("", errNotFound).Once()
+			cs.On("ListCollaborators", mock.Anything, spaceID).Return(nil, errNotFound).Once()
+			cs.On("ListSpaces", mock.Anything, userID).Return(nil, errNotFound).Once()
+
+			err = svc.Remove(ctx, spaceID, userID)
+			require.NoError(t, err)
+
+			rl, err = svc.Get(ctx, spaceID, userID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the data to be evicted from the cache and the error to be returned by the service.")
+			assert.Empty(t, rl)
+
+			lc, err := svc.ListCollaborators(ctx, spaceID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the data to be evicted from the cache and the error to be returned by the service.")
+			assert.Nil(t, lc)
+
+			ls, err := svc.ListSpaces(ctx, userID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the data to be evicted from the cache and the error to be returned by the service.")
+			assert.Nil(t, ls)
+
+			cs.AssertExpectations(t)
+		})
+
+		t.Run("After TTL expired", func(t *testing.T) {
+			cs := &csmocks.Collaborators{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(cs)
+
+			cs.On("Get", mock.Anything, spaceID, userID).Return(spaceRole, nil).Once()
+			cs.On("ListCollaborators", mock.Anything, spaceID).Return([]*collaborators.Collaborator{{SpaceID: spaceID, Subject: userID, Role: spaceRole}}, nil).Once()
+			cs.On("ListSpaces", mock.Anything, userID).Return([]*collaborators.Collaborator{{SpaceID: spaceID, Subject: userID, Role: spaceRole}}, nil).Once()
+
+			_, err := svc.Get(ctx, spaceID, userID)
+			require.NoError(t, err)
+
+			rl, err := svc.Get(ctx, spaceID, userID)
+			require.NoError(t, err)
+			assert.Equal(t, spaceRole, rl, "Expected the data to be served from the cache.")
+
+			lc1, err := svc.ListCollaborators(ctx, spaceID)
+			require.NoError(t, err)
+			lc2, err := svc.ListCollaborators(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, lc1[0], lc2[0], "Expected objects to be served from the cache.")
+
+			ls1, err := svc.ListSpaces(ctx, userID)
+			require.NoError(t, err)
+			ls2, err := svc.ListSpaces(ctx, userID)
+			require.NoError(t, err)
+			assert.Same(t, ls1[0], ls2[0], "Expected objects to be served from the cache.")
+
+			cs.On("Remove", mock.Anything, spaceID, userID).Return(nil).Once()
+
+			cs.On("Get", mock.Anything, spaceID, userID).Return("", errNotFound).Once()
+			cs.On("ListCollaborators", mock.Anything, spaceID).Return(nil, errNotFound).Once()
+			cs.On("ListSpaces", mock.Anything, userID).Return(nil, errNotFound).Once()
+
+			err = svc.Remove(ctx, spaceID, userID)
+
+			rl, err = svc.Get(ctx, spaceID, userID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the cache entry to expire after the TTL, with the error returned by the service.")
+			assert.Empty(t, rl)
+
+			lc, err := svc.ListCollaborators(ctx, spaceID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the cache entry to expire after the TTL, with the error returned by the service.")
+			assert.Nil(t, lc)
+
+			ls, err := svc.ListSpaces(ctx, userID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the cache entry to expire after the TTL, with the error returned by the service.")
+			assert.Nil(t, ls)
+
+			cs.AssertExpectations(t)
+		})
+	})
+
+}
diff --git a/pkg/collaborators/middleware/error_logging_middleware.go b/pkg/collaborators/middleware/error_logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..a45dfd8053e1718fdc139f1792a9f0b1547c08d1
--- /dev/null
+++ b/pkg/collaborators/middleware/error_logging_middleware.go
@@ -0,0 +1,80 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/error_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/collaborators -i Collaborators -t ../../../assets/templates/middleware/error_log -o error_logging_middleware.go -l ""
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/collaborators"
+	"go.uber.org/zap"
+)
+
+// errorLoggingMiddleware implements collaborators.Collaborators that is instrumented with logging
+type errorLoggingMiddleware struct {
+	logger *zap.Logger
+	next   collaborators.Collaborators
+}
+
+// ErrorLoggingMiddleware instruments an implementation of the collaborators.Collaborators with simple logging
+func ErrorLoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next collaborators.Collaborators) collaborators.Collaborators {
+		return &errorLoggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *errorLoggingMiddleware) Get(ctx context.Context, spaceId string, subject string) (role string, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Get(ctx, spaceId, subject)
+}
+
+func (m *errorLoggingMiddleware) ListCollaborators(ctx context.Context, spaceId string) (collaborators []*collaborators.Collaborator, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.ListCollaborators(ctx, spaceId)
+}
+
+func (m *errorLoggingMiddleware) ListSpaces(ctx context.Context, subject string) (spaces []*collaborators.Collaborator, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.ListSpaces(ctx, subject)
+}
+
+func (m *errorLoggingMiddleware) Remove(ctx context.Context, spaceId string, subject string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Remove(ctx, spaceId, subject)
+}
+
+func (m *errorLoggingMiddleware) Set(ctx context.Context, spaceId string, subject string, role string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Set(ctx, spaceId, subject, role)
+}
diff --git a/pkg/collaborators/middleware/logging_middleware.go b/pkg/collaborators/middleware/logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..41f541e6cc0e09f2fc6240d585159bda09c10a74
--- /dev/null
+++ b/pkg/collaborators/middleware/logging_middleware.go
@@ -0,0 +1,216 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/access_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/collaborators -i Collaborators -t ../../../assets/templates/middleware/access_log -o logging_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/auth"
+	"git.perx.ru/perxis/perxis-go/pkg/collaborators"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// loggingMiddleware implements collaborators.Collaborators that is instrumented with logging
+type loggingMiddleware struct {
+	logger *zap.Logger
+	next   collaborators.Collaborators
+}
+
+// LoggingMiddleware instruments an implementation of the collaborators.Collaborators with simple logging
+func LoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next collaborators.Collaborators) collaborators.Collaborators {
+		return &loggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *loggingMiddleware) Get(ctx context.Context, spaceId string, subject string) (role string, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"subject": subject} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Request", fields...)
+
+	role, err = m.next.Get(ctx, spaceId, subject)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"role": role,
+		"err":  err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Response", fields...)
+
+	return role, err
+}
+
+func (m *loggingMiddleware) ListCollaborators(ctx context.Context, spaceId string) (collaborators []*collaborators.Collaborator, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("ListCollaborators.Request", fields...)
+
+	collaborators, err = m.next.ListCollaborators(ctx, spaceId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"collaborators": collaborators,
+		"err":           err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("ListCollaborators.Response", fields...)
+
+	return collaborators, err
+}
+
+func (m *loggingMiddleware) ListSpaces(ctx context.Context, subject string) (spaces []*collaborators.Collaborator, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"subject": subject} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("ListSpaces.Request", fields...)
+
+	spaces, err = m.next.ListSpaces(ctx, subject)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"spaces": spaces,
+		"err":    err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("ListSpaces.Response", fields...)
+
+	return spaces, err
+}
+
+func (m *loggingMiddleware) Remove(ctx context.Context, spaceId string, subject string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"subject": subject} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Remove.Request", fields...)
+
+	err = m.next.Remove(ctx, spaceId, subject)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Remove.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Set(ctx context.Context, spaceId string, subject string, role string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"subject": subject,
+		"role":    role} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Set.Request", fields...)
+
+	err = m.next.Set(ctx, spaceId, subject, role)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Set.Response", fields...)
+
+	return err
+}
diff --git a/pkg/collaborators/middleware/middleware.go b/pkg/collaborators/middleware/middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..682fd963ae290298adaec9001b7f60215c80d4db
--- /dev/null
+++ b/pkg/collaborators/middleware/middleware.go
@@ -0,0 +1,28 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../../assets/templates/middleware/middleware
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/collaborators -i Collaborators -t ../../../assets/templates/middleware/middleware -o middleware.go -l ""
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/collaborators"
+	"go.uber.org/zap"
+)
+
+type Middleware func(collaborators.Collaborators) collaborators.Collaborators
+
+func WithLog(s collaborators.Collaborators, logger *zap.Logger, log_access bool) collaborators.Collaborators {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	logger = logger.Named("Collaborators")
+	s = ErrorLoggingMiddleware(logger)(s)
+	if log_access {
+		s = LoggingMiddleware(logger)(s)
+	}
+	s = RecoveringMiddleware(logger)(s)
+	return s
+}
diff --git a/pkg/collaborators/middleware/recovering_middleware.go b/pkg/collaborators/middleware/recovering_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..84e9dfb59514b1a146c96f0239cd3c8e83d8e7ba
--- /dev/null
+++ b/pkg/collaborators/middleware/recovering_middleware.go
@@ -0,0 +1,91 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../../assets/templates/middleware/recovery
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/collaborators -i Collaborators -t ../../../assets/templates/middleware/recovery -o recovering_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+
+	"git.perx.ru/perxis/perxis-go/pkg/collaborators"
+	"go.uber.org/zap"
+)
+
+// recoveringMiddleware implements collaborators.Collaborators that is instrumented with logging
+type recoveringMiddleware struct {
+	logger *zap.Logger
+	next   collaborators.Collaborators
+}
+
+// RecoveringMiddleware instruments an implementation of the collaborators.Collaborators with simple logging
+func RecoveringMiddleware(logger *zap.Logger) Middleware {
+	return func(next collaborators.Collaborators) collaborators.Collaborators {
+		return &recoveringMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *recoveringMiddleware) Get(ctx context.Context, spaceId string, subject string) (role string, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Get(ctx, spaceId, subject)
+}
+
+func (m *recoveringMiddleware) ListCollaborators(ctx context.Context, spaceId string) (collaborators []*collaborators.Collaborator, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.ListCollaborators(ctx, spaceId)
+}
+
+func (m *recoveringMiddleware) ListSpaces(ctx context.Context, subject string) (spaces []*collaborators.Collaborator, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.ListSpaces(ctx, subject)
+}
+
+func (m *recoveringMiddleware) Remove(ctx context.Context, spaceId string, subject string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Remove(ctx, spaceId, subject)
+}
+
+func (m *recoveringMiddleware) Set(ctx context.Context, spaceId string, subject string, role string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Set(ctx, spaceId, subject, role)
+}
diff --git a/pkg/collections/collection.go b/pkg/collections/collection.go
new file mode 100644
index 0000000000000000000000000000000000000000..5b3c5dd310ac8af32ee11e9e18c4aa1a3d4c72f3
--- /dev/null
+++ b/pkg/collections/collection.go
@@ -0,0 +1,180 @@
+package collections
+
+import (
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/permission"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+)
+
+// Config holds references to the source space, environment, collection and schema.
+type Config struct {
+	SourceSpaceID      string
+	SourceEnvID        string
+	SourceCollectionID string
+	SourceSchema       *schema.Schema
+}
+
+// Access describes the current user's access restrictions
+// for the collection's items
+type Access struct {
+	Actions         []permission.Action // Allowed actions on the collection's items
+	HiddenFields    []string            // Fields hidden in the UI and not returned by the API
+	ReadonlyFields  []string            // Fields that cannot be edited and are not updated via the API
+	WriteonlyFields []string            // Fields shown in the UI but not returned by the API
+}
+
+func (a Access) Clone() *Access {
+
+	clone := &Access{
+		Actions:         make([]permission.Action, len(a.Actions)),
+		HiddenFields:    make([]string, len(a.HiddenFields)),
+		ReadonlyFields:  make([]string, len(a.ReadonlyFields)),
+		WriteonlyFields: make([]string, len(a.WriteonlyFields)),
+	}
+
+	copy(clone.Actions, a.Actions)
+	copy(clone.HiddenFields, a.HiddenFields)
+	copy(clone.ReadonlyFields, a.ReadonlyFields)
+	copy(clone.WriteonlyFields, a.WriteonlyFields)
+
+	return clone
+}
+
+func (a Access) Can(action permission.Action) bool {
+	for _, act := range a.Actions {
+		if act == action {
+			return true
+		}
+	}
+	return false
+}
+
+type Collection struct {
+	ID      string         `json:"id" bson:"id"`
+	SpaceID string         `json:"spaceId" bson:"-"`
+	EnvID   string         `json:"envId" bson:"-"`
+	Name    string         `json:"name" bson:"name"`
+	Single  *bool          `json:"single" bson:"single,omitempty"` // The collection may contain only a single document
+	System  *bool          `json:"system" bson:"system,omitempty"` // System collection
+	NoData  *bool          `json:"no_data" bson:"no_data"`         // The collection holds no items; the schema is only used for inclusion into other schemas
+	Hidden  bool           `json:"hidden" bson:"hidden"`           // The collection is hidden in the administrative interface
+	Schema  *schema.Schema `json:"schema" bson:"schema"`
+	Access  *Access        `json:"access" bson:"-"` // Access restrictions for the collection's items. A nil value means unrestricted access
+
+	// StateInfo reflects the collection's state:
+	// - State: the collection state identifier (new/preparing/ready/error/changed)
+	// - Info: additional details about the collection state (for example, the error
+	//   that occurred while applying the schema to the collection)
+	// - StartedAt: the time at which the collection entered the `Preparing` state
+	StateInfo *StateInfo `json:"state_info" bson:"state_info,omitempty"` // TODO: show as readonly in the UI
+
+	// View - if the field is non-empty, the collection is a View (a projection of
+	// part of another collection's data according to View.Filter)
+	View *View `json:"view,omitempty" bson:"view,omitempty"`
+
+	// Tags - the collection's tags. They are attached when sending events
+	Tags []string `json:"tags,omitempty" bson:"tags,omitempty"`
+
+	Config *Config `json:"-" bson:"-"`
+}
+
+type View struct {
+	SpaceID      string `json:"space_id" bson:"space_id"`             // SpaceID of the source collection
+	EnvID        string `json:"environment_id" bson:"environment_id"` // EnvID of the source collection
+	CollectionID string `json:"collection_id" bson:"collection_id"`   // CollectionID of the source collection
+	Filter       string `json:"filter" bson:"filter,omitempty"`       // Filter rules applied to the source collection's records
+}
+
+type StateInfo struct {
+	State     State     `json:"state" bson:"state"`
+	Info      string    `json:"info" bson:"info"`
+	StartedAt time.Time `json:"started_at,omitempty" bson:"started_at,omitempty"`
+}
+
+type State int
+
+func (s State) String() string {
+	var state string
+
+	switch s {
+	case StateNew:
+		state = "New"
+	case StatePreparing:
+		state = "Preparing"
+	case StateReady:
+		state = "Ready"
+	case StateError:
+		state = "Error"
+	case StateChanged:
+		state = "Changed"
+	default:
+		state = "Unknown"
+	}
+
+	return state
+}
+
+const (
+	StateNew State = iota
+	StatePreparing
+	StateReady
+	StateError
+	StateChanged
+)
+
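+// Clone returns a copy of the collection, duplicating the Single, System, Schema,
+// Access, StateInfo, View, Config and Tags fields.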
+func (c Collection) Clone() *Collection {
+
+	clone := &Collection{
+		ID:      c.ID,
+		SpaceID: c.SpaceID,
+		EnvID:   c.EnvID,
+		Name:    c.Name,
+		NoData:  c.NoData,
+		Hidden:  c.Hidden,
+	}
+
+	if c.Single != nil {
+		single := *c.Single
+		clone.Single = &single
+	}
+	if c.System != nil {
+		system := *c.System
+		clone.System = &system
+	}
+	if c.Schema != nil {
+		clone.Schema = c.Schema.Clone(false)
+	}
+	if c.Access != nil {
+		clone.Access = c.Access.Clone()
+	}
+	if c.StateInfo != nil {
+		info := *c.StateInfo
+		clone.StateInfo = &info
+	}
+	if c.View != nil {
+		view := *c.View
+		clone.View = &view
+	}
+	if c.Config != nil {
+		cfg := *c.Config
+		clone.Config = &cfg
+	}
+	if c.Tags != nil {
+		clone.Tags = append([]string{}, c.Tags...)
+	}
+
+	return clone
+}
+
+func (c Collection) IsSingle() bool {
+	return c.Single != nil && *c.Single
+}
+
+func (c Collection) IsNoData() bool {
+	return c.NoData != nil && *c.NoData
+}
+
+func (c Collection) IsView() bool {
+	return c.View != nil
+}
diff --git a/pkg/collections/middleware/caching_middleware.go b/pkg/collections/middleware/caching_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..0bb41c9e28027e8d9c4cfa5e305b91ac76f44165
--- /dev/null
+++ b/pkg/collections/middleware/caching_middleware.go
@@ -0,0 +1,136 @@
+package service
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	service "git.perx.ru/perxis/perxis-go/pkg/collections"
+	envService "git.perx.ru/perxis/perxis-go/pkg/environments"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+)
+
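+// makeKey builds the cache key "<spaceId>-<envId>-<collectionId>-<flag>", where the
+// trailing flag is "1" when schema includes are disabled for the cached value and "0" otherwise.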
+func makeKey(spaceId, envId, collectionId string, disableSchemaIncludes bool) string {
+	s := spaceId + "-" + envId + "-" + collectionId + "-"
+	if disableSchemaIncludes {
+		s += "1"
+	} else {
+		s += "0"
+	}
+	return s
+}
+
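+// CachingMiddleware returns a middleware that caches Collections.Get results; it uses
+// the Environments service to resolve environment aliases for cache keys and invalidation.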
+func CachingMiddleware(cache *cache.Cache, envs envService.Environments) Middleware {
+	return func(next service.Collections) service.Collections {
+		return &cachingMiddleware{
+			cache: cache,
+			next:  next,
+			envs:  envs,
+		}
+	}
+}
+
+type cachingMiddleware struct {
+	cache *cache.Cache
+	next  service.Collections
+	envs  envService.Environments
+}
+
+func (m cachingMiddleware) Create(ctx context.Context, collection *service.Collection) (coll *service.Collection, err error) {
+	return m.next.Create(ctx, collection)
+}
+
+func (m cachingMiddleware) Get(ctx context.Context, spaceId string, envId string, collectionId string, options ...*service.GetOptions) (coll *service.Collection, err error) {
+
+	opts := service.MergeGetOptions(options...)
+	value, e := m.cache.Get(makeKey(spaceId, envId, collectionId, opts.DisableSchemaIncludes))
+	if e == nil {
+		return value.(*service.Collection), err
+	}
+	coll, err = m.next.Get(ctx, spaceId, envId, collectionId, options...)
+	if err == nil {
+		env, err := m.envs.Get(ctx, coll.SpaceID, coll.EnvID)
+		if err != nil {
+			return nil, err
+		}
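+		// Cache the collection under the canonical environment ID and under each of its
+		// aliases, so later lookups by either form are served from the cache.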
+		m.cache.Set(makeKey(coll.SpaceID, env.ID, coll.ID, opts.DisableSchemaIncludes), coll)
+		for _, al := range env.Aliases {
+			m.cache.Set(makeKey(coll.SpaceID, al, coll.ID, opts.DisableSchemaIncludes), coll)
+		}
+
+	}
+	return coll, err
+}
+
+func (m cachingMiddleware) List(ctx context.Context, spaceId, envId string, filter *service.Filter) (collections []*service.Collection, err error) {
+	return m.next.List(ctx, spaceId, envId, filter)
+}
+
+func (m cachingMiddleware) Update(ctx context.Context, coll *service.Collection) (err error) {
+
+	err = m.next.Update(ctx, coll)
+	if err == nil {
+		env, err := m.envs.Get(ctx, coll.SpaceID, coll.EnvID)
+		if err != nil {
+			return err
+		}
+		m.cache.Remove(makeKey(env.SpaceID, env.ID, coll.ID, true))
+		m.cache.Remove(makeKey(env.SpaceID, env.ID, coll.ID, false))
+		for _, al := range env.Aliases {
+			m.cache.Remove(makeKey(env.SpaceID, al, coll.ID, true))
+			m.cache.Remove(makeKey(env.SpaceID, al, coll.ID, false))
+		}
+	}
+	return err
+}
+
+func (m cachingMiddleware) SetSchema(ctx context.Context, spaceId, envId, collectionId string, schema *schema.Schema) (err error) {
+	err = m.next.SetSchema(ctx, spaceId, envId, collectionId, schema)
+	if err == nil {
+		env, err := m.envs.Get(ctx, spaceId, envId)
+		if err != nil {
+			return err
+		}
+		m.cache.Remove(makeKey(env.SpaceID, env.ID, collectionId, true))
+		m.cache.Remove(makeKey(env.SpaceID, env.ID, collectionId, false))
+		for _, al := range env.Aliases {
+			m.cache.Remove(makeKey(env.SpaceID, al, collectionId, true))
+			m.cache.Remove(makeKey(env.SpaceID, al, collectionId, false))
+		}
+	}
+	return err
+}
+
+func (m cachingMiddleware) SetState(ctx context.Context, spaceId, envId, collectionId string, state *service.StateInfo) (err error) {
+	err = m.next.SetState(ctx, spaceId, envId, collectionId, state)
+	if err == nil {
+		env, err := m.envs.Get(ctx, spaceId, envId)
+		if err != nil {
+			return err
+		}
+		m.cache.Remove(makeKey(env.SpaceID, env.ID, collectionId, true))
+		m.cache.Remove(makeKey(env.SpaceID, env.ID, collectionId, false))
+		for _, al := range env.Aliases {
+			m.cache.Remove(makeKey(env.SpaceID, al, collectionId, true))
+			m.cache.Remove(makeKey(env.SpaceID, al, collectionId, false))
+		}
+	}
+	return err
+}
+
+func (m cachingMiddleware) Delete(ctx context.Context, spaceId string, envId string, collectionId string) (err error) {
+
+	err = m.next.Delete(ctx, spaceId, envId, collectionId)
+	if err == nil {
+		env, err := m.envs.Get(ctx, spaceId, envId)
+		if err != nil {
+			return err
+		}
+		m.cache.Remove(makeKey(env.SpaceID, env.ID, collectionId, true))
+		m.cache.Remove(makeKey(env.SpaceID, env.ID, collectionId, false))
+		for _, al := range env.Aliases {
+			m.cache.Remove(makeKey(env.SpaceID, al, collectionId, true))
+			m.cache.Remove(makeKey(env.SpaceID, al, collectionId, false))
+		}
+	}
+	return err
+}
diff --git a/pkg/collections/middleware/caching_middleware_test.go b/pkg/collections/middleware/caching_middleware_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..967a75bce7fa7d933918edc9f05600f75a6c0ff5
--- /dev/null
+++ b/pkg/collections/middleware/caching_middleware_test.go
@@ -0,0 +1,458 @@
+package service
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	"git.perx.ru/perxis/perxis-go/pkg/collections"
+	colsmocks "git.perx.ru/perxis/perxis-go/pkg/collections/mocks"
+	"git.perx.ru/perxis/perxis-go/pkg/environments"
+	envmocks "git.perx.ru/perxis/perxis-go/pkg/environments/mocks"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestCollections_Cache(t *testing.T) {
+
+	const (
+		colID    = "colID"
+		spaceID  = "spaceID"
+		envID    = "envId"
+		envAlias = "envAlias"
+		size     = 5
+		ttl      = 20 * time.Millisecond
+	)
+
+	errNotFound := errors.NotFound(errors.New("not found"))
+
+	ctx := context.Background()
+
+	t.Run("Get from cache", func(t *testing.T) {
+		col := &colsmocks.Collections{}
+		env := &envmocks.Environments{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl), env)(col)
+
+		env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+		col.On("Get", mock.Anything, spaceID, envID, colID).Return(&collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}, nil).Once()
+
+		v1, err := svc.Get(ctx, spaceID, envID, colID)
+		require.NoError(t, err)
+
+		v2, err := svc.Get(ctx, spaceID, envID, colID)
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Expected the object to be served from the cache on a repeated request by environment ID.")
+
+		v3, err := svc.Get(ctx, spaceID, envAlias, colID)
+		require.NoError(t, err)
+		assert.Same(t, v3, v2, "Expected the object to be served from the cache when requesting the same object by environment alias.")
+
+		env.AssertExpectations(t)
+		col.AssertExpectations(t)
+	})
+
+	t.Run("Get from cache(by Alias)", func(t *testing.T) {
+		col := &colsmocks.Collections{}
+		env := &envmocks.Environments{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl), env)(col)
+
+		col.On("Get", mock.Anything, spaceID, envAlias, colID).Return(&collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}, nil).Once()
+		env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+		v1, err := svc.Get(ctx, spaceID, envAlias, colID)
+		require.NoError(t, err)
+
+		v2, err := svc.Get(ctx, spaceID, envAlias, colID)
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Expected the object to be served from cache on a repeated request by environment alias.")
+
+		v3, err := svc.Get(ctx, spaceID, envID, colID)
+		require.NoError(t, err)
+		assert.Same(t, v3, v2, "Expected the object to be served from cache when the same object is requested by environment ID.")
+
+		env.AssertExpectations(t)
+		col.AssertExpectations(t)
+	})
+
+	t.Run("Get from cache with options", func(t *testing.T) {
+		col := &colsmocks.Collections{}
+		env := &envmocks.Environments{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl), env)(col)
+
+		env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+		col.On("Get", mock.Anything, spaceID, envID, colID, mock.Anything).Run(func(args mock.Arguments) {
+			require.Len(t, args, 5)
+			opt := args.Get(4).(*collections.GetOptions)
+			assert.True(t, opt.DisableSchemaIncludes)
+		}).Return(&collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}, nil).Once()
+
+		_, err := svc.Get(ctx, spaceID, envID, colID, []*collections.GetOptions{{DisableSchemaIncludes: true}}...)
+		require.NoError(t, err)
+
+		env.AssertExpectations(t)
+		col.AssertExpectations(t)
+	})
+
+	//t.Run("List from cache", func(t *testing.T) {
+	//	col := &colsmocks.Collections{}
+	//	env := &envmocks.Environments{}
+	//
+	//	svc := CachingMiddleware(cache.NewCache(size, ttl), env)(col)
+	//
+	//	col.On("List", mock.Anything, spaceID, envID).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}}, nil).Once()
+	//	env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+	//
+	//	vl1, err := svc.List(ctx, spaceID, envID, nil)
+	//	require.NoError(t, err)
+	//
+	//	vl2, err := svc.List(ctx, spaceID, envID, nil)
+	//	require.NoError(t, err)
+	//	assert.Len(t, vl2, 1)
+	//	assert.Same(t, vl1[0], vl2[0], "Expected the list of objects to be served from cache on a repeated request by environment ID.")
+	//
+	//	vl3, err := svc.List(ctx, spaceID, envAlias, nil)
+	//	require.NoError(t, err)
+	//	assert.Len(t, vl3, 1)
+	//	assert.Same(t, vl3[0], vl2[0], "Expected the list of objects to be served from cache on a repeated request by environment alias.")
+	//
+	//	env.AssertExpectations(t)
+	//	col.AssertExpectations(t)
+	//})
+
+	t.Run("List", func(t *testing.T) {
+		col := &colsmocks.Collections{}
+		env := &envmocks.Environments{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl), env)(col)
+
+		col.On("List", mock.Anything, spaceID, envAlias, mock.Anything).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}}, nil).Once()
+		col.On("List", mock.Anything, spaceID, envID, mock.Anything).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}}, nil).Once()
+
+		_, err := svc.List(ctx, spaceID, envAlias, nil)
+		require.NoError(t, err)
+
+		_, err = svc.List(ctx, spaceID, envID, nil)
+		require.NoError(t, err)
+
+		env.AssertExpectations(t)
+		col.AssertExpectations(t)
+	})
+
+	t.Run("Invalidate cache", func(t *testing.T) {
+		t.Run("After Update", func(t *testing.T) {
+			col := &colsmocks.Collections{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), env)(col)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			col.On("Get", mock.Anything, spaceID, envID, colID).Return(&collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}, nil).Once()
+			col.On("List", mock.Anything, spaceID, envID, mock.Anything).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID, colID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID, colID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be served from cache by environment ID.")
+
+			v3, err := svc.Get(ctx, spaceID, envAlias, colID)
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Expected the object to be served from cache by environment alias.")
+
+			vl1, err := svc.List(ctx, spaceID, envID, nil)
+			require.NoError(t, err)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			col.On("Update", mock.Anything, mock.Anything).Return(nil).Once()
+			err = svc.Update(ctx, &collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "nameUPD"})
+			require.NoError(t, err)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			col.On("Get", mock.Anything, spaceID, envID, colID).Return(&collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "nameUPD"}, nil).Once()
+			col.On("List", mock.Anything, spaceID, envID, mock.Anything).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "nameUPD"}}, nil).Once()
+
+			v4, err := svc.Get(ctx, spaceID, envID, colID)
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v4, "Expected the item to be evicted from the cache after the update and fetched again from the service.")
+
+			v5, err := svc.Get(ctx, spaceID, envAlias, colID)
+			require.NoError(t, err)
+			assert.Same(t, v4, v5, "Expected the object to be served from cache by environment alias.")
+
+			vl2, err := svc.List(ctx, spaceID, envID, nil)
+			require.NoError(t, err)
+			assert.NotSame(t, vl1[0], vl2[0], "Expected the items to be fetched again from the service after the update.")
+
+			env.AssertExpectations(t)
+			col.AssertExpectations(t)
+		})
+
+		t.Run("After Update(by Alias)", func(t *testing.T) {
+			col := &colsmocks.Collections{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), env)(col)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			//env.On("Get", mock.Anything, spaceID, envAlias).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			col.On("Get", mock.Anything, spaceID, envAlias, colID).Return(&collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}, nil).Once()
+			col.On("List", mock.Anything, spaceID, envAlias, mock.Anything).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envAlias, colID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envAlias, colID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be served from cache by environment alias.")
+
+			v3, err := svc.Get(ctx, spaceID, envID, colID)
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Expected the object to be served from cache by environment ID.")
+
+			vl1, err := svc.List(ctx, spaceID, envAlias, nil)
+			require.NoError(t, err)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			col.On("Update", mock.Anything, mock.Anything).Return(nil).Once()
+			err = svc.Update(ctx, &collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "nameUPD"})
+			require.NoError(t, err)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			//env.On("Get", mock.Anything, spaceID, envAlias).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			col.On("Get", mock.Anything, spaceID, envAlias, colID).Return(&collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "nameUPD"}, nil).Once()
+			col.On("List", mock.Anything, spaceID, envAlias, mock.Anything).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "nameUPD"}}, nil).Once()
+
+			v4, err := svc.Get(ctx, spaceID, envAlias, colID)
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v4, "Expected the item to be evicted from the cache after the update and fetched again from the service.")
+
+			v5, err := svc.Get(ctx, spaceID, envID, colID)
+			require.NoError(t, err)
+			assert.Same(t, v4, v5, "Expected the object to be served from cache by environment ID.")
+
+			vl4, err := svc.List(ctx, spaceID, envAlias, nil)
+			require.NoError(t, err)
+			assert.NotSame(t, vl1[0], vl4[0], "Expected the items to be fetched again from the service after the update.")
+
+			env.AssertExpectations(t)
+			col.AssertExpectations(t)
+		})
+
+		t.Run("After Set Schema", func(t *testing.T) {
+			col := &colsmocks.Collections{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), env)(col)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Twice()
+			col.On("Get", mock.Anything, spaceID, envID, colID).Return(&collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}, nil).Once()
+			col.On("List", mock.Anything, spaceID, envID, mock.Anything).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}}, nil).Twice()
+			col.On("List", mock.Anything, spaceID, envAlias, mock.Anything).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID, colID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID, colID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be served from cache by environment ID.")
+
+			v3, err := svc.Get(ctx, spaceID, envAlias, colID)
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Expected the object to be served from cache by environment alias.")
+
+			vl1, err := svc.List(ctx, spaceID, envID, nil)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID, envID, nil)
+			require.NoError(t, err)
+			assert.Len(t, vl2, 1)
+			assert.Same(t, vl1[0], vl2[0], "Expected the objects to be served from cache by environment ID.")
+
+			vl3, err := svc.List(ctx, spaceID, envAlias, nil)
+			require.NoError(t, err)
+			assert.Len(t, vl3, 1)
+			assert.Equal(t, vl2[0], vl3[0], "Expected the objects to be served from cache by environment alias.")
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			col.On("SetSchema", mock.Anything, spaceID, envID, colID, mock.Anything).Return(nil).Once()
+			err = svc.SetSchema(ctx, spaceID, envID, colID, &schema.Schema{})
+			require.NoError(t, err)
+
+			//env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			col.On("Get", mock.Anything, spaceID, envID, colID).Return(&collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "nameUPD"}, nil).Once()
+			col.On("List", mock.Anything, spaceID, envID, mock.Anything).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "nameUPD"}}, nil).Once()
+			col.On("List", mock.Anything, spaceID, envAlias, mock.Anything).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "nameUPD"}}, nil).Once()
+
+			v4, err := svc.Get(ctx, spaceID, envID, colID)
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v4, "Expected the item to be evicted from the cache after the schema update and fetched again from the service.")
+
+			v5, err := svc.Get(ctx, spaceID, envAlias, colID)
+			require.NoError(t, err)
+			assert.Same(t, v4, v5, "Expected the object to be served from cache by environment alias.")
+
+			vl4, err := svc.List(ctx, spaceID, envID, nil)
+			require.NoError(t, err)
+			assert.NotSame(t, vl4[0], vl3[0], "Expected the items to be fetched again from the service after the schema update.")
+
+			vl5, err := svc.List(ctx, spaceID, envAlias, nil)
+			require.NoError(t, err)
+			assert.Equal(t, vl4[0], vl5[0], "Expected the objects to be served from cache by environment alias.")
+
+			env.AssertExpectations(t)
+			col.AssertExpectations(t)
+		})
+
+		t.Run("After Delete", func(t *testing.T) {
+			col := &colsmocks.Collections{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), env)(col)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Twice()
+			col.On("Get", mock.Anything, spaceID, envID, colID).Return(&collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}, nil).Once()
+			col.On("List", mock.Anything, spaceID, envID, mock.Anything).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}}, nil).Twice()
+			col.On("List", mock.Anything, spaceID, envAlias, mock.Anything).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID, colID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID, colID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be served from cache by environment ID.")
+
+			v3, err := svc.Get(ctx, spaceID, envAlias, colID)
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Expected the object to be served from cache by environment alias.")
+
+			vl1, err := svc.List(ctx, spaceID, envID, nil)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID, envID, nil)
+			require.NoError(t, err)
+			assert.Len(t, vl2, 1)
+			assert.Same(t, vl1[0], vl2[0], "Expected the objects to be served from cache by environment ID.")
+
+			vl3, err := svc.List(ctx, spaceID, envAlias, nil)
+			require.NoError(t, err)
+			assert.Len(t, vl3, 1)
+			assert.Equal(t, vl2[0], vl3[0], "Expected the objects to be served from cache by environment alias.")
+
+			//env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			col.On("Delete", mock.Anything, spaceID, envID, colID).Return(nil).Once()
+			err = svc.Delete(ctx, spaceID, envID, colID)
+			require.NoError(t, err)
+
+			//env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			col.On("Get", mock.Anything, spaceID, envID, colID).Return(nil, errNotFound).Once()
+			col.On("List", mock.Anything, spaceID, envID, mock.Anything).Return([]*collections.Collection{}, nil).Once()
+
+			_, err = svc.Get(ctx, spaceID, envID, colID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the item to be evicted from the cache and the error to be returned from the service.")
+
+			vl4, err := svc.List(ctx, spaceID, envID, nil)
+			require.NoError(t, err)
+			assert.Len(t, vl4, 0, "Expected the items to be evicted from the cache.")
+
+			col.On("Get", mock.Anything, spaceID, envAlias, colID).Return(nil, errNotFound).Once()
+
+			_, err = svc.Get(ctx, spaceID, envAlias, colID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the item to be evicted from the cache and the error to be returned from the service.")
+
+			env.AssertExpectations(t)
+			col.AssertExpectations(t)
+		})
+
+		t.Run("After Create", func(t *testing.T) {
+			col := &colsmocks.Collections{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), env)(col)
+
+			//env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			col.On("List", mock.Anything, spaceID, envID, mock.Anything).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}}, nil).Twice()
+			col.On("List", mock.Anything, spaceID, envAlias, mock.Anything).Return([]*collections.Collection{{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}}, nil).Once()
+
+			vl1, err := svc.List(ctx, spaceID, envID, nil)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID, envID, nil)
+			require.NoError(t, err)
+			assert.Len(t, vl2, 1)
+			assert.Same(t, vl1[0], vl2[0], "Expected the objects to be served from cache by environment ID.")
+
+			vl3, err := svc.List(ctx, spaceID, envAlias, nil)
+			require.NoError(t, err)
+			assert.Len(t, vl3, 1)
+			assert.Equal(t, vl2[0], vl3[0], "Expected the objects to be served from cache by environment alias.")
+
+			//env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			col.On("Create", mock.Anything, mock.Anything).Return(&collections.Collection{ID: "colID2", SpaceID: spaceID, EnvID: envID, Name: "name2"}, nil).Once()
+			_, err = svc.Create(ctx, &collections.Collection{ID: "colID2", SpaceID: spaceID, EnvID: envID, Name: "name2"})
+			require.NoError(t, err)
+
+			//env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			col.On("List", mock.Anything, spaceID, envID, mock.Anything).Return([]*collections.Collection{
+				{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"},
+				{ID: "colID2", SpaceID: spaceID, EnvID: envID, Name: "name2"},
+			}, nil).Once()
+			col.On("List", mock.Anything, spaceID, envAlias, mock.Anything).Return([]*collections.Collection{
+				{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"},
+				{ID: "colID2", SpaceID: spaceID, EnvID: envID, Name: "name2"},
+			}, nil).Once()
+
+			vl4, err := svc.List(ctx, spaceID, envID, nil)
+			require.NoError(t, err)
+			assert.Len(t, vl4, 2, "Expected the items to be evicted from the cache and fetched again from the service.")
+
+			vl5, err := svc.List(ctx, spaceID, envAlias, nil)
+			require.NoError(t, err)
+			assert.Len(t, vl5, 2)
+			assert.Equal(t, vl4[0], vl5[0], "Expected the objects to be served from cache by environment alias.")
+
+			env.AssertExpectations(t)
+			col.AssertExpectations(t)
+		})
+
+		t.Run("After TTL expired", func(t *testing.T) {
+			col := &colsmocks.Collections{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), env)(col)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil)
+			col.On("Get", mock.Anything, spaceID, envID, colID).Return(&collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID, colID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID, colID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be served from cache.")
+
+			time.Sleep(2 * ttl)
+
+			col.On("Get", mock.Anything, spaceID, envID, colID).Return(&collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "name"}, nil).Once()
+
+			v3, err := svc.Get(ctx, spaceID, envID, colID)
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v2, "Expected the item to be evicted from the cache and fetched again from the service.")
+
+			env.AssertExpectations(t)
+			col.AssertExpectations(t)
+		})
+	})
+
+}
diff --git a/pkg/collections/middleware/error_logging_middleware.go b/pkg/collections/middleware/error_logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..d1be7f66c8564cc162d93a926652c63d0a670e1d
--- /dev/null
+++ b/pkg/collections/middleware/error_logging_middleware.go
@@ -0,0 +1,101 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/error_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/collections -i Collections -t ../../../assets/templates/middleware/error_log -o error_logging_middleware.go -l ""
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/collections"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	"go.uber.org/zap"
+)
+
+// errorLoggingMiddleware implements collections.Collections that is instrumented with logging
+type errorLoggingMiddleware struct {
+	logger *zap.Logger
+	next   collections.Collections
+}
+
+// ErrorLoggingMiddleware instruments an implementation of the collections.Collections with simple logging
+func ErrorLoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next collections.Collections) collections.Collections {
+		return &errorLoggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *errorLoggingMiddleware) Create(ctx context.Context, collection *collections.Collection) (created *collections.Collection, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Create(ctx, collection)
+}
+
+func (m *errorLoggingMiddleware) Delete(ctx context.Context, spaceId string, envId string, collectionId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Delete(ctx, spaceId, envId, collectionId)
+}
+
+func (m *errorLoggingMiddleware) Get(ctx context.Context, spaceId string, envId string, collectionId string, options ...*collections.GetOptions) (collection *collections.Collection, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Get(ctx, spaceId, envId, collectionId, options...)
+}
+
+func (m *errorLoggingMiddleware) List(ctx context.Context, spaceId string, envId string, filter *collections.Filter) (collections []*collections.Collection, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.List(ctx, spaceId, envId, filter)
+}
+
+func (m *errorLoggingMiddleware) SetSchema(ctx context.Context, spaceId string, envId string, collectionId string, schema *schema.Schema) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.SetSchema(ctx, spaceId, envId, collectionId, schema)
+}
+
+func (m *errorLoggingMiddleware) SetState(ctx context.Context, spaceId string, envId string, collectionId string, state *collections.StateInfo) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.SetState(ctx, spaceId, envId, collectionId, state)
+}
+
+func (m *errorLoggingMiddleware) Update(ctx context.Context, coll *collections.Collection) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Update(ctx, coll)
+}
diff --git a/pkg/collections/middleware/logging_middleware.go b/pkg/collections/middleware/logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd43cb9ffd3e662241a1bb08e7174cfd1c5677df
--- /dev/null
+++ b/pkg/collections/middleware/logging_middleware.go
@@ -0,0 +1,296 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/access_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/collections -i Collections -t ../../../assets/templates/middleware/access_log -o logging_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/auth"
+	"git.perx.ru/perxis/perxis-go/pkg/collections"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// loggingMiddleware implements collections.Collections that is instrumented with logging
+type loggingMiddleware struct {
+	logger *zap.Logger
+	next   collections.Collections
+}
+
+// LoggingMiddleware instruments an implementation of the collections.Collections with simple logging
+func LoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next collections.Collections) collections.Collections {
+		return &loggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *loggingMiddleware) Create(ctx context.Context, collection *collections.Collection) (created *collections.Collection, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":        ctx,
+		"collection": collection} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Request", fields...)
+
+	created, err = m.next.Create(ctx, collection)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"created": created,
+		"err":     err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Response", fields...)
+
+	return created, err
+}
+
+func (m *loggingMiddleware) Delete(ctx context.Context, spaceId string, envId string, collectionId string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Request", fields...)
+
+	err = m.next.Delete(ctx, spaceId, envId, collectionId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Get(ctx context.Context, spaceId string, envId string, collectionId string, options ...*collections.GetOptions) (collection *collections.Collection, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId,
+		"options":      options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Request", fields...)
+
+	collection, err = m.next.Get(ctx, spaceId, envId, collectionId, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"collection": collection,
+		"err":        err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Response", fields...)
+
+	return collection, err
+}
+
+func (m *loggingMiddleware) List(ctx context.Context, spaceId string, envId string, filter *collections.Filter) (collections []*collections.Collection, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"envId":   envId,
+		"filter":  filter} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("List.Request", fields...)
+
+	collections, err = m.next.List(ctx, spaceId, envId, filter)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"collections": collections,
+		"err":         err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("List.Response", fields...)
+
+	return collections, err
+}
+
+func (m *loggingMiddleware) SetSchema(ctx context.Context, spaceId string, envId string, collectionId string, schema *schema.Schema) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId,
+		"schema":       schema} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("SetSchema.Request", fields...)
+
+	err = m.next.SetSchema(ctx, spaceId, envId, collectionId, schema)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("SetSchema.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) SetState(ctx context.Context, spaceId string, envId string, collectionId string, state *collections.StateInfo) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId,
+		"state":        state} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("SetState.Request", fields...)
+
+	err = m.next.SetState(ctx, spaceId, envId, collectionId, state)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("SetState.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Update(ctx context.Context, coll *collections.Collection) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":  ctx,
+		"coll": coll} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Request", fields...)
+
+	err = m.next.Update(ctx, coll)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Response", fields...)
+
+	return err
+}
diff --git a/pkg/collections/middleware/middleware.go b/pkg/collections/middleware/middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..911368f0f1df019e0e9b4b22afac9863cd4e523b
--- /dev/null
+++ b/pkg/collections/middleware/middleware.go
@@ -0,0 +1,28 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/middleware
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/collections -i Collections -t ../../../assets/templates/middleware/middleware -o middleware.go -l ""
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/collections"
+	"go.uber.org/zap"
+)
+
+type Middleware func(collections.Collections) collections.Collections
+
+func WithLog(s collections.Collections, logger *zap.Logger, log_access bool) collections.Collections {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	logger = logger.Named("Collections")
+	s = ErrorLoggingMiddleware(logger)(s)
+	if log_access {
+		s = LoggingMiddleware(logger)(s)
+	}
+	s = RecoveringMiddleware(logger)(s)
+	return s
+}
diff --git a/pkg/collections/middleware/recovering_middleware.go b/pkg/collections/middleware/recovering_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb61326fd05df2473219bccacea776246a68376e
--- /dev/null
+++ b/pkg/collections/middleware/recovering_middleware.go
@@ -0,0 +1,116 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/recovery
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/collections -i Collections -t ../../../assets/templates/middleware/recovery -o recovering_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+
+	"git.perx.ru/perxis/perxis-go/pkg/collections"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	"go.uber.org/zap"
+)
+
+// recoveringMiddleware implements collections.Collections that is instrumented with logging
+type recoveringMiddleware struct {
+	logger *zap.Logger
+	next   collections.Collections
+}
+
+// RecoveringMiddleware instruments an implementation of the collections.Collections with simple logging
+func RecoveringMiddleware(logger *zap.Logger) Middleware {
+	return func(next collections.Collections) collections.Collections {
+		return &recoveringMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *recoveringMiddleware) Create(ctx context.Context, collection *collections.Collection) (created *collections.Collection, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Create(ctx, collection)
+}
+
+func (m *recoveringMiddleware) Delete(ctx context.Context, spaceId string, envId string, collectionId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Delete(ctx, spaceId, envId, collectionId)
+}
+
+func (m *recoveringMiddleware) Get(ctx context.Context, spaceId string, envId string, collectionId string, options ...*collections.GetOptions) (collection *collections.Collection, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Get(ctx, spaceId, envId, collectionId, options...)
+}
+
+func (m *recoveringMiddleware) List(ctx context.Context, spaceId string, envId string, filter *collections.Filter) (collections []*collections.Collection, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.List(ctx, spaceId, envId, filter)
+}
+
+func (m *recoveringMiddleware) SetSchema(ctx context.Context, spaceId string, envId string, collectionId string, schema *schema.Schema) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.SetSchema(ctx, spaceId, envId, collectionId, schema)
+}
+
+func (m *recoveringMiddleware) SetState(ctx context.Context, spaceId string, envId string, collectionId string, state *collections.StateInfo) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.SetState(ctx, spaceId, envId, collectionId, state)
+}
+
+func (m *recoveringMiddleware) Update(ctx context.Context, coll *collections.Collection) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Update(ctx, coll)
+}
diff --git a/pkg/collections/mocks/Collections.go b/pkg/collections/mocks/Collections.go
new file mode 100644
index 0000000000000000000000000000000000000000..e52dffabfd0c8764ab1edd08979b873666d1935e
--- /dev/null
+++ b/pkg/collections/mocks/Collections.go
@@ -0,0 +1,163 @@
+// Code generated by mockery v2.14.0. DO NOT EDIT.
+
+package mocks
+
+import (
+	context "context"
+
+	collections "git.perx.ru/perxis/perxis-go/pkg/collections"
+	schema "git.perx.ru/perxis/perxis-go/pkg/schema"
+	mock "github.com/stretchr/testify/mock"
+)
+
+// Collections is an autogenerated mock type for the Collections type
+type Collections struct {
+	mock.Mock
+}
+
+// Create provides a mock function with given fields: ctx, collection
+func (_m *Collections) Create(ctx context.Context, collection *collections.Collection) (*collections.Collection, error) {
+	ret := _m.Called(ctx, collection)
+
+	var r0 *collections.Collection
+	if rf, ok := ret.Get(0).(func(context.Context, *collections.Collection) *collections.Collection); ok {
+		r0 = rf(ctx, collection)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*collections.Collection)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, *collections.Collection) error); ok {
+		r1 = rf(ctx, collection)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// Delete provides a mock function with given fields: ctx, spaceId, envId, collectionId
+func (_m *Collections) Delete(ctx context.Context, spaceId string, envId string, collectionId string) error {
+	ret := _m.Called(ctx, spaceId, envId, collectionId)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string) error); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// Get provides a mock function with given fields: ctx, spaceId, envId, collectionId, options
+func (_m *Collections) Get(ctx context.Context, spaceId string, envId string, collectionId string, options ...*collections.GetOptions) (*collections.Collection, error) {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, spaceId, envId, collectionId)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 *collections.Collection
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, ...*collections.GetOptions) *collections.Collection); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId, options...)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*collections.Collection)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, string, ...*collections.GetOptions) error); ok {
+		r1 = rf(ctx, spaceId, envId, collectionId, options...)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// List provides a mock function with given fields: ctx, spaceId, envId, filter
+func (_m *Collections) List(ctx context.Context, spaceId string, envId string, filter *collections.Filter) ([]*collections.Collection, error) {
+	ret := _m.Called(ctx, spaceId, envId, filter)
+
+	var r0 []*collections.Collection
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, *collections.Filter) []*collections.Collection); ok {
+		r0 = rf(ctx, spaceId, envId, filter)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).([]*collections.Collection)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, *collections.Filter) error); ok {
+		r1 = rf(ctx, spaceId, envId, filter)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// SetSchema provides a mock function with given fields: ctx, spaceId, envId, collectionId, _a4
+func (_m *Collections) SetSchema(ctx context.Context, spaceId string, envId string, collectionId string, _a4 *schema.Schema) error {
+	ret := _m.Called(ctx, spaceId, envId, collectionId, _a4)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, *schema.Schema) error); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId, _a4)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// SetState provides a mock function with given fields: ctx, spaceId, envId, collectionId, state
+func (_m *Collections) SetState(ctx context.Context, spaceId string, envId string, collectionId string, state *collections.StateInfo) error {
+	ret := _m.Called(ctx, spaceId, envId, collectionId, state)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, *collections.StateInfo) error); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId, state)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// Update provides a mock function with given fields: ctx, coll
+func (_m *Collections) Update(ctx context.Context, coll *collections.Collection) error {
+	ret := _m.Called(ctx, coll)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, *collections.Collection) error); ok {
+		r0 = rf(ctx, coll)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+type mockConstructorTestingTNewCollections interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewCollections creates a new instance of Collections. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewCollections(t mockConstructorTestingTNewCollections) *Collections {
+	mock := &Collections{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/pkg/collections/options.go b/pkg/collections/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..7bdf2b70bef9349d90cf012c5cf2e996698f13f0
--- /dev/null
+++ b/pkg/collections/options.go
@@ -0,0 +1,15 @@
+package collections
+
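+// GetOptions carries optional parameters for Collections.Get.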
+type GetOptions struct {
+	DisableSchemaIncludes bool
+}
+
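+// MergeGetOptions collapses several GetOptions values into one: a flag is set in the
+// result if it is set in any of the inputs.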
+func MergeGetOptions(opts ...*GetOptions) *GetOptions {
+	o := &GetOptions{}
+	for _, opt := range opts {
+		if opt.DisableSchemaIncludes {
+			o.DisableSchemaIncludes = true
+		}
+	}
+	return o
+}
diff --git a/pkg/collections/service.go b/pkg/collections/service.go
new file mode 100644
index 0000000000000000000000000000000000000000..e73133e7c5ecbf6f58d29b2f07343f05bb13d5b0
--- /dev/null
+++ b/pkg/collections/service.go
@@ -0,0 +1,30 @@
+package collections
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+)
+
+// @microgen grpc
+// @protobuf git.perx.ru/perxis/perxis-go/proto/collections
+// @grpc-addr content.collections.Collections
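+// Collections is the collection management service: it creates, lists, updates and
+// deletes collections of a space environment and manages their schemas and state.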
+type Collections interface {
+	Create(ctx context.Context, collection *Collection) (created *Collection, err error)
+	Get(ctx context.Context, spaceId, envId, collectionId string, options ...*GetOptions) (collection *Collection, err error)
+	List(ctx context.Context, spaceId, envId string, filter *Filter) (collections []*Collection, err error)
+	Update(ctx context.Context, coll *Collection) (err error)
+	SetSchema(ctx context.Context, spaceId, envId, collectionId string, schema *schema.Schema) (err error)
+
+	// @microgen -
+	SetState(ctx context.Context, spaceId, envId, collectionId string, state *StateInfo) (err error)
+	Delete(ctx context.Context, spaceId, envId, collectionId string) (err error)
+}
+
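+// Filter restricts the set of collections returned by List.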
+type Filter struct {
+	IncludeNoData bool     `json:"include_no_data,omitempty"`
+	IncludeHidden bool     `json:"include_hidden,omitempty"`
+	ExcludeSystem bool     `json:"exclude_system,omitempty"`
+	Name          []string `json:"name,omitempty"`
+	ID            []string `json:"id,omitempty"`
+}
diff --git a/pkg/collections/transport/client.microgen.go b/pkg/collections/transport/client.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..7fd5b53f360a927e48c00edcd3f4dc7606dd47fe
--- /dev/null
+++ b/pkg/collections/transport/client.microgen.go
@@ -0,0 +1,107 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transport
+
+import (
+	"context"
+	"errors"
+
+	collections "git.perx.ru/perxis/perxis-go/pkg/collections"
+	schema "git.perx.ru/perxis/perxis-go/pkg/schema"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+func (set EndpointsSet) Create(arg0 context.Context, arg1 *collections.Collection) (res0 *collections.Collection, res1 error) {
+	request := CreateRequest{Collection: arg1}
+	response, res1 := set.CreateEndpoint(arg0, &request)
+	if res1 != nil {
+		if e, ok := status.FromError(res1); ok || e.Code() == codes.Internal || e.Code() == codes.Unknown {
+			res1 = errors.New(e.Message())
+		}
+		return
+	}
+	return response.(*CreateResponse).Created, res1
+}
+
+func (set EndpointsSet) Get(arg0 context.Context, arg1 string, arg2 string, arg3 string, arg4 ...*collections.GetOptions) (res0 *collections.Collection, res1 error) {
+	request := GetRequest{
+		CollectionId: arg3,
+		EnvId:        arg2,
+		Options:      arg4,
+		SpaceId:      arg1,
+	}
+	response, res1 := set.GetEndpoint(arg0, &request)
+	if res1 != nil {
+		if e, ok := status.FromError(res1); ok || e.Code() == codes.Internal || e.Code() == codes.Unknown {
+			res1 = errors.New(e.Message())
+		}
+		return
+	}
+	return response.(*GetResponse).Collection, res1
+}
+
+func (set EndpointsSet) List(arg0 context.Context, arg1 string, arg2 string, arg3 *collections.Filter) (res0 []*collections.Collection, res1 error) {
+	request := ListRequest{
+		EnvId:   arg2,
+		Filter:  arg3,
+		SpaceId: arg1,
+	}
+	response, res1 := set.ListEndpoint(arg0, &request)
+	if res1 != nil {
+		if e, ok := status.FromError(res1); ok || e.Code() == codes.Internal || e.Code() == codes.Unknown {
+			res1 = errors.New(e.Message())
+		}
+		return
+	}
+	return response.(*ListResponse).Collections, res1
+}
+
+func (set EndpointsSet) Update(arg0 context.Context, arg1 *collections.Collection) (res0 error) {
+	request := UpdateRequest{Coll: arg1}
+	_, res0 = set.UpdateEndpoint(arg0, &request)
+	if res0 != nil {
+		if e, ok := status.FromError(res0); ok || e.Code() == codes.Internal || e.Code() == codes.Unknown {
+			res0 = errors.New(e.Message())
+		}
+		return
+	}
+	return res0
+}
+
+func (set EndpointsSet) SetSchema(arg0 context.Context, arg1 string, arg2 string, arg3 string, arg4 *schema.Schema) (res0 error) {
+	request := SetSchemaRequest{
+		CollectionId: arg3,
+		EnvId:        arg2,
+		Schema:       arg4,
+		SpaceId:      arg1,
+	}
+	_, res0 = set.SetSchemaEndpoint(arg0, &request)
+	if res0 != nil {
+		if e, ok := status.FromError(res0); ok || e.Code() == codes.Internal || e.Code() == codes.Unknown {
+			res0 = errors.New(e.Message())
+		}
+		return
+	}
+	return res0
+}
+
+func (set EndpointsSet) SetState(arg0 context.Context, arg1 string, arg2 string, arg3 string, arg4 *collections.StateInfo) (res0 error) {
+	return
+}
+
+func (set EndpointsSet) Delete(arg0 context.Context, arg1 string, arg2 string, arg3 string) (res0 error) {
+	request := DeleteRequest{
+		CollectionId: arg3,
+		EnvId:        arg2,
+		SpaceId:      arg1,
+	}
+	_, res0 = set.DeleteEndpoint(arg0, &request)
+	if res0 != nil {
+		if e, ok := status.FromError(res0); ok || e.Code() == codes.Internal || e.Code() == codes.Unknown {
+			res0 = errors.New(e.Message())
+		}
+		return
+	}
+	return res0
+}
diff --git a/pkg/collections/transport/endpoints.microgen.go b/pkg/collections/transport/endpoints.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..0b4643bbf83e24b8f751d5dbaac676ea2a4f2f5b
--- /dev/null
+++ b/pkg/collections/transport/endpoints.microgen.go
@@ -0,0 +1,15 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transport
+
+import endpoint "github.com/go-kit/kit/endpoint"
+
+// EndpointsSet implements the Collections API and is used for transport purposes.
+type EndpointsSet struct {
+	CreateEndpoint    endpoint.Endpoint
+	GetEndpoint       endpoint.Endpoint
+	ListEndpoint      endpoint.Endpoint
+	UpdateEndpoint    endpoint.Endpoint
+	SetSchemaEndpoint endpoint.Endpoint
+	DeleteEndpoint    endpoint.Endpoint
+}
diff --git a/pkg/collections/transport/exchanges.microgen.go b/pkg/collections/transport/exchanges.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..3aabcbc890d2e5691fe6167024254bba71f5fe2d
--- /dev/null
+++ b/pkg/collections/transport/exchanges.microgen.go
@@ -0,0 +1,59 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transport
+
+import (
+	collections "git.perx.ru/perxis/perxis-go/pkg/collections"
+	schema "git.perx.ru/perxis/perxis-go/pkg/schema"
+)
+
+type (
+	CreateRequest struct {
+		Collection *collections.Collection `json:"collection"`
+	}
+	CreateResponse struct {
+		Created *collections.Collection `json:"created"`
+	}
+
+	GetRequest struct {
+		SpaceId      string                    `json:"space_id"`
+		EnvId        string                    `json:"env_id"`
+		CollectionId string                    `json:"collection_id"`
+		Options      []*collections.GetOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	GetResponse struct {
+		Collection *collections.Collection `json:"collection"`
+	}
+
+	ListRequest struct {
+		SpaceId string              `json:"space_id"`
+		EnvId   string              `json:"env_id"`
+		Filter  *collections.Filter `json:"filter"`
+	}
+	ListResponse struct {
+		Collections []*collections.Collection `json:"collections"`
+	}
+
+	UpdateRequest struct {
+		Coll *collections.Collection `json:"coll"`
+	}
+	// Formal exchange type, please do not delete.
+	UpdateResponse struct{}
+
+	SetSchemaRequest struct {
+		SpaceId      string         `json:"space_id"`
+		EnvId        string         `json:"env_id"`
+		CollectionId string         `json:"collection_id"`
+		Schema       *schema.Schema `json:"schema"`
+	}
+	// Formal exchange type, please do not delete.
+	SetSchemaResponse struct{}
+
+	DeleteRequest struct {
+		SpaceId      string `json:"space_id"`
+		EnvId        string `json:"env_id"`
+		CollectionId string `json:"collection_id"`
+	}
+	// Formal exchange type, please do not delete.
+	DeleteResponse struct{}
+)
diff --git a/pkg/collections/transport/grpc/client.microgen.go b/pkg/collections/transport/grpc/client.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..0f41276cea0c2112aa1eb316b83d745c88208b07
--- /dev/null
+++ b/pkg/collections/transport/grpc/client.microgen.go
@@ -0,0 +1,61 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transportgrpc
+
+import (
+	transport "git.perx.ru/perxis/perxis-go/pkg/collections/transport"
+	pb "git.perx.ru/perxis/perxis-go/proto/collections"
+	grpckit "github.com/go-kit/kit/transport/grpc"
+	empty "github.com/golang/protobuf/ptypes/empty"
+	grpc "google.golang.org/grpc"
+)
+
+func NewGRPCClient(conn *grpc.ClientConn, addr string, opts ...grpckit.ClientOption) transport.EndpointsSet {
+	if addr == "" {
+		addr = "content.collections.Collections"
+	}
+	return transport.EndpointsSet{
+		CreateEndpoint: grpckit.NewClient(
+			conn, addr, "Create",
+			_Encode_Create_Request,
+			_Decode_Create_Response,
+			pb.CreateResponse{},
+			opts...,
+		).Endpoint(),
+		DeleteEndpoint: grpckit.NewClient(
+			conn, addr, "Delete",
+			_Encode_Delete_Request,
+			_Decode_Delete_Response,
+			empty.Empty{},
+			opts...,
+		).Endpoint(),
+		GetEndpoint: grpckit.NewClient(
+			conn, addr, "Get",
+			_Encode_Get_Request,
+			_Decode_Get_Response,
+			pb.GetResponse{},
+			opts...,
+		).Endpoint(),
+		ListEndpoint: grpckit.NewClient(
+			conn, addr, "List",
+			_Encode_List_Request,
+			_Decode_List_Response,
+			pb.ListResponse{},
+			opts...,
+		).Endpoint(),
+		SetSchemaEndpoint: grpckit.NewClient(
+			conn, addr, "SetSchema",
+			_Encode_SetSchema_Request,
+			_Decode_SetSchema_Response,
+			empty.Empty{},
+			opts...,
+		).Endpoint(),
+		UpdateEndpoint: grpckit.NewClient(
+			conn, addr, "Update",
+			_Encode_Update_Request,
+			_Decode_Update_Response,
+			empty.Empty{},
+			opts...,
+		).Endpoint(),
+	}
+}
diff --git a/pkg/collections/transport/grpc/protobuf_endpoint_converters.microgen.go b/pkg/collections/transport/grpc/protobuf_endpoint_converters.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..34c541f8f4e05556c58c2a0a8547812a3afe4e1e
--- /dev/null
+++ b/pkg/collections/transport/grpc/protobuf_endpoint_converters.microgen.go
@@ -0,0 +1,273 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+// Please, do not change functions names!
+package transportgrpc
+
+import (
+	"context"
+	"errors"
+
+	transport "git.perx.ru/perxis/perxis-go/pkg/collections/transport"
+	pb "git.perx.ru/perxis/perxis-go/proto/collections"
+	empty "github.com/golang/protobuf/ptypes/empty"
+)
+
+func _Encode_Create_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil CreateRequest")
+	}
+	req := request.(*transport.CreateRequest)
+	reqCollection, err := PtrCollectionToProto(req.Collection)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.CreateRequest{Collection: reqCollection}, nil
+}
+
+func _Encode_Get_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil GetRequest")
+	}
+	req := request.(*transport.GetRequest)
+	return &pb.GetRequest{
+		CollectionId: req.CollectionId,
+		EnvId:        req.EnvId,
+		SpaceId:      req.SpaceId,
+		Options:      GetOptionsToProto(req.Options),
+	}, nil
+}
+
+func _Encode_List_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil ListRequest")
+	}
+	req := request.(*transport.ListRequest)
+	reqFilter, err := PtrFilterToProto(req.Filter)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.ListRequest{
+		EnvId:   req.EnvId,
+		Filter:  reqFilter,
+		SpaceId: req.SpaceId,
+	}, nil
+}
+
+func _Encode_Update_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil UpdateRequest")
+	}
+	req := request.(*transport.UpdateRequest)
+	reqColl, err := PtrCollectionToProto(req.Coll)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.UpdateRequest{Collection: reqColl}, nil
+}
+
+func _Encode_SetSchema_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil SetSchemaRequest")
+	}
+	req := request.(*transport.SetSchemaRequest)
+	reqSchema, err := PtrSchemaSchemaToProto(req.Schema)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.SetSchemaRequest{
+		CollectionId: req.CollectionId,
+		EnvId:        req.EnvId,
+		Schema:       reqSchema,
+		SpaceId:      req.SpaceId,
+	}, nil
+}
+
+func _Encode_Delete_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil DeleteRequest")
+	}
+	req := request.(*transport.DeleteRequest)
+	return &pb.DeleteRequest{
+		CollectionId: req.CollectionId,
+		EnvId:        req.EnvId,
+		SpaceId:      req.SpaceId,
+	}, nil
+}
+
+func _Encode_Create_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil CreateResponse")
+	}
+	resp := response.(*transport.CreateResponse)
+	respCreated, err := PtrCollectionToProto(resp.Created)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.CreateResponse{Created: respCreated}, nil
+}
+
+func _Encode_Get_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil GetResponse")
+	}
+	resp := response.(*transport.GetResponse)
+	respCollection, err := PtrCollectionToProto(resp.Collection)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.GetResponse{Collection: respCollection}, nil
+}
+
+func _Encode_List_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil ListResponse")
+	}
+	resp := response.(*transport.ListResponse)
+	respCollections, err := ListPtrCollectionToProto(resp.Collections)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.ListResponse{Collections: respCollections}, nil
+}
+
+func _Encode_Update_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Encode_SetSchema_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Encode_Delete_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Decode_Create_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil CreateRequest")
+	}
+	req := request.(*pb.CreateRequest)
+	reqCollection, err := ProtoToPtrCollection(req.Collection)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.CreateRequest{Collection: reqCollection}, nil
+}
+
+func _Decode_Get_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil GetRequest")
+	}
+	req := request.(*pb.GetRequest)
+	return &transport.GetRequest{
+		CollectionId: string(req.CollectionId),
+		EnvId:        string(req.EnvId),
+		SpaceId:      string(req.SpaceId),
+		Options:      ProtoToGetOptions(req.Options),
+	}, nil
+}
+
+func _Decode_List_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil ListRequest")
+	}
+	req := request.(*pb.ListRequest)
+	reqFilter, err := ProtoToPtrFilter(req.Filter)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.ListRequest{
+		EnvId:   string(req.EnvId),
+		Filter:  reqFilter,
+		SpaceId: string(req.SpaceId),
+	}, nil
+}
+
+func _Decode_Update_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil UpdateRequest")
+	}
+	req := request.(*pb.UpdateRequest)
+	reqColl, err := ProtoToPtrCollection(req.Collection)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.UpdateRequest{Coll: reqColl}, nil
+}
+
+func _Decode_SetSchema_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil SetSchemaRequest")
+	}
+	req := request.(*pb.SetSchemaRequest)
+	reqSchema, err := ProtoToPtrSchemaSchema(req.Schema)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.SetSchemaRequest{
+		CollectionId: string(req.CollectionId),
+		EnvId:        string(req.EnvId),
+		Schema:       reqSchema,
+		SpaceId:      string(req.SpaceId),
+	}, nil
+}
+
+func _Decode_Delete_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil DeleteRequest")
+	}
+	req := request.(*pb.DeleteRequest)
+	return &transport.DeleteRequest{
+		CollectionId: string(req.CollectionId),
+		EnvId:        string(req.EnvId),
+		SpaceId:      string(req.SpaceId),
+	}, nil
+}
+
+func _Decode_Create_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil CreateResponse")
+	}
+	resp := response.(*pb.CreateResponse)
+	respCreated, err := ProtoToPtrCollection(resp.Created)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.CreateResponse{Created: respCreated}, nil
+}
+
+func _Decode_Get_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil GetResponse")
+	}
+	resp := response.(*pb.GetResponse)
+	respCollection, err := ProtoToPtrCollection(resp.Collection)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.GetResponse{Collection: respCollection}, nil
+}
+
+func _Decode_List_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil ListResponse")
+	}
+	resp := response.(*pb.ListResponse)
+	respCollections, err := ProtoToListPtrCollection(resp.Collections)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.ListResponse{Collections: respCollections}, nil
+}
+
+func _Decode_Update_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Decode_SetSchema_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Decode_Delete_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
diff --git a/pkg/collections/transport/grpc/protobuf_type_converters.microgen.go b/pkg/collections/transport/grpc/protobuf_type_converters.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..d19bee8b078380d6af962eac8e7738ba0170cd98
--- /dev/null
+++ b/pkg/collections/transport/grpc/protobuf_type_converters.microgen.go
@@ -0,0 +1,229 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+// It is better for you if you do not change functions names!
+// This file will never be overwritten.
+package transportgrpc
+
+import (
+	"fmt"
+
+	service "git.perx.ru/perxis/perxis-go/pkg/collections"
+	"git.perx.ru/perxis/perxis-go/pkg/permission"
+	schema "git.perx.ru/perxis/perxis-go/pkg/schema"
+	pb "git.perx.ru/perxis/perxis-go/proto/collections"
+	commonpb "git.perx.ru/perxis/perxis-go/proto/common"
+	jsoniter "github.com/json-iterator/go"
+	"google.golang.org/protobuf/types/known/timestamppb"
+)
+
+func PtrSchemaSchemaToProto(schema *schema.Schema) (string, error) {
+	if schema == nil {
+		return "", nil
+	}
+	res, err := jsoniter.MarshalToString(schema)
+	if err != nil {
+		return "", err
+	}
+	return res, nil
+}
+
+func ProtoToPtrSchemaSchema(protoSchema string) (*schema.Schema, error) {
+	if protoSchema == "" {
+		return nil, nil
+	}
+	sch := schema.New()
+	err := sch.UnmarshalJSON([]byte(protoSchema))
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode schema. err: %s", err.Error())
+	}
+	return sch, nil
+}
+
+func PtrCollectionToProto(coll *service.Collection) (*pb.Collection, error) {
+	if coll == nil {
+		return nil, nil
+	}
+
+	var access *pb.Access
+
+	if coll.Access != nil {
+		actions := make([]commonpb.Action, len(coll.Access.Actions))
+		for i, a := range coll.Access.Actions {
+			actions[i] = commonpb.Action(a)
+		}
+		access = &pb.Access{
+			Actions:         actions,
+			HiddenFields:    coll.Access.HiddenFields,
+			ReadonlyFields:  coll.Access.ReadonlyFields,
+			WriteonlyFields: coll.Access.WriteonlyFields,
+		}
+	}
+	protoCollection := &pb.Collection{
+		Id:      coll.ID,
+		SpaceId: coll.SpaceID,
+		EnvId:   coll.EnvID,
+		Name:    coll.Name,
+		Single:  coll.Single,
+		System:  coll.System,
+		NoData:  coll.NoData,
+		Access:  access,
+		Hidden:  coll.Hidden,
+		Tags:    coll.Tags,
+	}
+
+	if coll.StateInfo != nil {
+		protoCollection.StateInfo = &pb.Collection_StateInfo{
+			State:     pb.Collection_State(coll.StateInfo.State),
+			Info:      coll.StateInfo.Info,
+			StartedAt: timestamppb.New(coll.StateInfo.StartedAt),
+		}
+	}
+
+	sch, err := PtrSchemaSchemaToProto(coll.Schema)
+	if err != nil {
+		return nil, err
+	}
+	protoCollection.Schema = sch
+
+	if coll.View != nil {
+		protoCollection.View = &pb.Collection_View{
+			SpaceId:      coll.View.SpaceID,
+			EnvId:        coll.View.EnvID,
+			CollectionId: coll.View.CollectionID,
+			Filter:       coll.View.Filter,
+		}
+	}
+
+	return protoCollection, nil
+}
+
+func ProtoToPtrCollection(protoCollection *pb.Collection) (*service.Collection, error) {
+	if protoCollection == nil {
+		return nil, nil
+	}
+
+	var access *service.Access
+
+	if protoCollection.Access != nil {
+		actions := make([]permission.Action, len(protoCollection.Access.Actions))
+		for i, a := range protoCollection.Access.Actions {
+			actions[i] = permission.Action(a)
+		}
+		access = &service.Access{
+			Actions:         actions,
+			HiddenFields:    protoCollection.Access.HiddenFields,
+			ReadonlyFields:  protoCollection.Access.ReadonlyFields,
+			WriteonlyFields: protoCollection.Access.WriteonlyFields,
+		}
+	}
+	collection := &service.Collection{
+		ID:      protoCollection.Id,
+		SpaceID: protoCollection.SpaceId,
+		EnvID:   protoCollection.EnvId,
+		Name:    protoCollection.Name,
+		Single:  protoCollection.Single,
+		System:  protoCollection.System,
+		NoData:  protoCollection.NoData,
+		Access:  access,
+		Hidden:  protoCollection.Hidden,
+		Tags:    protoCollection.Tags,
+	}
+
+	if protoCollection.StateInfo != nil {
+		collection.StateInfo = &service.StateInfo{
+			State:     service.State(protoCollection.StateInfo.State),
+			Info:      protoCollection.StateInfo.Info,
+			StartedAt: protoCollection.StateInfo.StartedAt.AsTime(),
+		}
+	}
+
+	schm, err := ProtoToPtrSchemaSchema(protoCollection.Schema)
+	if err != nil {
+		return nil, err
+	}
+	collection.Schema = schm
+
+	if protoCollection.View != nil {
+		collection.View = &service.View{
+			SpaceID:      protoCollection.View.SpaceId,
+			EnvID:        protoCollection.View.EnvId,
+			CollectionID: protoCollection.View.CollectionId,
+			Filter:       protoCollection.View.Filter,
+		}
+	}
+
+	return collection, nil
+}
+
+func ListPtrCollectionToProto(collections []*service.Collection) ([]*pb.Collection, error) {
+	protoCollections := make([]*pb.Collection, 0, len(collections))
+	for _, collection := range collections {
+		protoCollection, err := PtrCollectionToProto(collection)
+		if err != nil {
+			return nil, err
+		}
+		protoCollections = append(protoCollections, protoCollection)
+	}
+	return protoCollections, nil
+}
+
+func ProtoToListPtrCollection(protoCollections []*pb.Collection) ([]*service.Collection, error) {
+	collections := make([]*service.Collection, 0, len(protoCollections))
+	for _, protoCollection := range protoCollections {
+		collection, err := ProtoToPtrCollection(protoCollection)
+		if err != nil {
+			return nil, err
+		}
+		collections = append(collections, collection)
+	}
+	return collections, nil
+}
+
+func ProtoToPtrBool(protoSingle *bool) (*bool, error) {
+	panic("function not provided") // TODO: provide converter
+}
+
+func PtrBoolToProto(single *bool) (*bool, error) {
+	panic("function not provided") // TODO: provide converter
+}
+
+func PtrFilterToProto(filter *service.Filter) (*pb.ListRequest_Filter, error) {
+	if filter == nil {
+		return nil, nil
+	}
+	return &pb.ListRequest_Filter{
+		ExcludeSystem: filter.ExcludeSystem,
+		IncludeNoData: filter.IncludeNoData,
+		IncludeHidden: filter.IncludeHidden,
+		Name:          filter.Name,
+		Id:            filter.ID,
+	}, nil
+}
+
+func ProtoToPtrFilter(protoFilter *pb.ListRequest_Filter) (*service.Filter, error) {
+	if protoFilter == nil {
+		return nil, nil
+	}
+	return &service.Filter{
+		IncludeNoData: protoFilter.IncludeNoData,
+		IncludeHidden: protoFilter.IncludeHidden,
+		ExcludeSystem: protoFilter.ExcludeSystem,
+		Name:          protoFilter.Name,
+		ID:            protoFilter.Id,
+	}, nil
+}
+
+func GetOptionsToProto(options []*service.GetOptions) *pb.GetOptions {
+	opts := service.MergeGetOptions(options...)
+	return &pb.GetOptions{DisableSchemaIncludes: opts.DisableSchemaIncludes}
+}
+
+func ProtoToGetOptions(protoOptions *pb.GetOptions) []*service.GetOptions {
+	if protoOptions == nil {
+		return nil
+	}
+	opts := &service.GetOptions{
+		DisableSchemaIncludes: protoOptions.DisableSchemaIncludes,
+	}
+	return []*service.GetOptions{opts}
+}
diff --git a/pkg/collections/transport/grpc/server.microgen.go b/pkg/collections/transport/grpc/server.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..169e726d32ec7cfbbc465a7100d10aad2b80d481
--- /dev/null
+++ b/pkg/collections/transport/grpc/server.microgen.go
@@ -0,0 +1,112 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+// DO NOT EDIT.
+package transportgrpc
+
+import (
+	transport "git.perx.ru/perxis/perxis-go/pkg/collections/transport"
+	pb "git.perx.ru/perxis/perxis-go/proto/collections"
+	grpc "github.com/go-kit/kit/transport/grpc"
+	empty "github.com/golang/protobuf/ptypes/empty"
+	context "golang.org/x/net/context"
+)
+
+type collectionsServer struct {
+	create    grpc.Handler
+	get       grpc.Handler
+	list      grpc.Handler
+	update    grpc.Handler
+	setSchema grpc.Handler
+	delete    grpc.Handler
+
+	pb.UnimplementedCollectionsServer
+}
+
+func NewGRPCServer(endpoints *transport.EndpointsSet, opts ...grpc.ServerOption) pb.CollectionsServer {
+	return &collectionsServer{
+		create: grpc.NewServer(
+			endpoints.CreateEndpoint,
+			_Decode_Create_Request,
+			_Encode_Create_Response,
+			opts...,
+		),
+		delete: grpc.NewServer(
+			endpoints.DeleteEndpoint,
+			_Decode_Delete_Request,
+			_Encode_Delete_Response,
+			opts...,
+		),
+		get: grpc.NewServer(
+			endpoints.GetEndpoint,
+			_Decode_Get_Request,
+			_Encode_Get_Response,
+			opts...,
+		),
+		list: grpc.NewServer(
+			endpoints.ListEndpoint,
+			_Decode_List_Request,
+			_Encode_List_Response,
+			opts...,
+		),
+		setSchema: grpc.NewServer(
+			endpoints.SetSchemaEndpoint,
+			_Decode_SetSchema_Request,
+			_Encode_SetSchema_Response,
+			opts...,
+		),
+		update: grpc.NewServer(
+			endpoints.UpdateEndpoint,
+			_Decode_Update_Request,
+			_Encode_Update_Response,
+			opts...,
+		),
+	}
+}
+
+func (S *collectionsServer) Create(ctx context.Context, req *pb.CreateRequest) (*pb.CreateResponse, error) {
+	_, resp, err := S.create.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.CreateResponse), nil
+}
+
+func (S *collectionsServer) Get(ctx context.Context, req *pb.GetRequest) (*pb.GetResponse, error) {
+	_, resp, err := S.get.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.GetResponse), nil
+}
+
+func (S *collectionsServer) List(ctx context.Context, req *pb.ListRequest) (*pb.ListResponse, error) {
+	_, resp, err := S.list.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.ListResponse), nil
+}
+
+func (S *collectionsServer) Update(ctx context.Context, req *pb.UpdateRequest) (*empty.Empty, error) {
+	_, resp, err := S.update.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*empty.Empty), nil
+}
+
+func (S *collectionsServer) SetSchema(ctx context.Context, req *pb.SetSchemaRequest) (*empty.Empty, error) {
+	_, resp, err := S.setSchema.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*empty.Empty), nil
+}
+
+func (S *collectionsServer) Delete(ctx context.Context, req *pb.DeleteRequest) (*empty.Empty, error) {
+	_, resp, err := S.delete.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*empty.Empty), nil
+}
diff --git a/pkg/collections/transport/server.microgen.go b/pkg/collections/transport/server.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..958e28ab78913b23389469ce53316be45dd82c48
--- /dev/null
+++ b/pkg/collections/transport/server.microgen.go
@@ -0,0 +1,69 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transport
+
+import (
+	"context"
+
+	collections "git.perx.ru/perxis/perxis-go/pkg/collections"
+	endpoint "github.com/go-kit/kit/endpoint"
+)
+
+func Endpoints(svc collections.Collections) EndpointsSet {
+	return EndpointsSet{
+		CreateEndpoint:    CreateEndpoint(svc),
+		DeleteEndpoint:    DeleteEndpoint(svc),
+		GetEndpoint:       GetEndpoint(svc),
+		ListEndpoint:      ListEndpoint(svc),
+		SetSchemaEndpoint: SetSchemaEndpoint(svc),
+		UpdateEndpoint:    UpdateEndpoint(svc),
+	}
+}
+
+func CreateEndpoint(svc collections.Collections) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*CreateRequest)
+		res0, res1 := svc.Create(arg0, req.Collection)
+		return &CreateResponse{Created: res0}, res1
+	}
+}
+
+func GetEndpoint(svc collections.Collections) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*GetRequest)
+		res0, res1 := svc.Get(arg0, req.SpaceId, req.EnvId, req.CollectionId, req.Options...)
+		return &GetResponse{Collection: res0}, res1
+	}
+}
+
+func ListEndpoint(svc collections.Collections) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*ListRequest)
+		res0, res1 := svc.List(arg0, req.SpaceId, req.EnvId, req.Filter)
+		return &ListResponse{Collections: res0}, res1
+	}
+}
+
+func UpdateEndpoint(svc collections.Collections) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*UpdateRequest)
+		res0 := svc.Update(arg0, req.Coll)
+		return &UpdateResponse{}, res0
+	}
+}
+
+func SetSchemaEndpoint(svc collections.Collections) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*SetSchemaRequest)
+		res0 := svc.SetSchema(arg0, req.SpaceId, req.EnvId, req.CollectionId, req.Schema)
+		return &SetSchemaResponse{}, res0
+	}
+}
+
+func DeleteEndpoint(svc collections.Collections) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*DeleteRequest)
+		res0 := svc.Delete(arg0, req.SpaceId, req.EnvId, req.CollectionId)
+		return &DeleteResponse{}, res0
+	}
+}
diff --git a/pkg/data/data.go b/pkg/data/data.go
new file mode 100644
index 0000000000000000000000000000000000000000..0540055ad4e213f666b0cf72019b9b4b9c39fbe8
--- /dev/null
+++ b/pkg/data/data.go
@@ -0,0 +1,294 @@
+package data
+
+import (
+	"strconv"
+	"strings"
+)
+
+const DefaultFieldDelimiter = "."
+
+type DeleteValueType struct{}
+
+var DeleteValue DeleteValueType
+
+// TODO: везде добавить поддержку массивов и массивов объектов
+
+// Сделано на базе библиотеки https://github.com/knadh/koanf
+
+// Flatten takes a map[string]interface{} and traverses it and flattens
+// nested children into keys delimited by delim.
+//
+// It's important to note that all nested maps should be
+// map[string]interface{} and not map[interface{}]interface{}.
+// Use IntfaceKeysToStrings() to convert if necessary.
+//
+// eg: `{ "parent": { "child": 123 }}` becomes `{ "parent.child": 123 }`
+// In addition, it keeps track of and returns a map of the delimited keypaths with
+// a slice of key parts, for eg: { "parent.child": ["parent", "child"] }. This
+// parts list is used to remember the key path's original structure to
+// unflatten later.
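+//
+// A minimal usage sketch (values follow the example above):
+//
+//	flat, parts := Flatten(map[string]interface{}{"parent": map[string]interface{}{"child": 123}}, nil, ".")
+//	// flat  == map[string]interface{}{"parent.child": 123}
+//	// parts == map[string][]string{"parent.child": {"parent", "child"}}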
+func Flatten(m map[string]interface{}, keys []string, delim string) (map[string]interface{}, map[string][]string) {
+	var (
+		out    = make(map[string]interface{})
+		keyMap = make(map[string][]string)
+	)
+
+	flatten(m, keys, delim, out, keyMap)
+	return out, keyMap
+}
+
+func flatten(m map[string]interface{}, keys []string, delim string, out map[string]interface{}, keyMap map[string][]string) {
+	for key, val := range m {
+		// Copy the incoming key paths into a fresh list
+		// and append the current key in the iteration.
+		kp := make([]string, 0, len(keys)+1)
+		kp = append(kp, keys...)
+		kp = append(kp, key)
+
+		switch cur := val.(type) {
+		case map[string]interface{}:
+			// Empty map.
+			if len(cur) == 0 {
+				newKey := strings.Join(kp, delim)
+				out[newKey] = val
+				keyMap[newKey] = kp
+				continue
+			}
+
+			// It's a nested map. Flatten it recursively.
+			flatten(cur, kp, delim, out, keyMap)
+		default:
+			newKey := strings.Join(kp, delim)
+			out[newKey] = val
+			keyMap[newKey] = kp
+		}
+	}
+}
+
+// Unflatten takes a flattened key:value map (non-nested with delimited keys)
+// and returns a nested map where the keys are split into hierarchies by the given
+// delimiter. For instance, `parent.child.key: 1` to `{parent: {child: {key: 1}}}`
+//
+// It's important to note that all nested maps should be
+// map[string]interface{} and not map[interface{}]interface{}.
+// Use IntfaceKeysToStrings() to convert if necessary.
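+//
+// A minimal usage sketch (values follow the example above):
+//
+//	nested := Unflatten(map[string]interface{}{"parent.child.key": 1}, ".")
+//	// nested == map[string]interface{}{"parent": map[string]interface{}{"child": map[string]interface{}{"key": 1}}}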
+func Unflatten(m map[string]interface{}, delim string) map[string]interface{} {
+	out := make(map[string]interface{})
+
+	// Iterate through the flat conf map.
+	for k, v := range m {
+		var (
+			keys = strings.Split(k, delim)
+			next = out
+		)
+
+		// Iterate through key parts, for eg:, parent.child.key
+		// will be ["parent", "child", "key"]
+		for _, k := range keys[:len(keys)-1] {
+			sub, ok := next[k]
+			if !ok {
+				// If the key does not exist in the map, create it.
+				sub = make(map[string]interface{})
+				next[k] = sub
+			}
+			if n, ok := sub.(map[string]interface{}); ok {
+				next = n
+			}
+		}
+
+		// Assign the value.
+		next[keys[len(keys)-1]] = v
+	}
+	return out
+}
+
+// Delete removes the entry at the given path from the data
+// if it is an object or an array.
+// The path is a delimited string, e.g. "parent.child.key"; an optional
+// delimiter may be passed, otherwise DefaultFieldDelimiter is used.
+// Empty maps left on the path are not removed.
+//
+// It's important to note that all nested maps should be
+// map[string]interface{} and not map[interface{}]interface{}.
+// Use IntfaceKeysToStrings() to convert if necessary.
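+//
+// A minimal usage sketch (mirrors the package tests):
+//
+//	data := map[string]interface{}{"a": map[string]interface{}{"a": "1", "z": "2"}}
+//	_ = Delete("a.a", data)
+//	// data == map[string]interface{}{"a": map[string]interface{}{"z": "2"}}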
+func Delete(field string, data any, delim ...string) error {
+	return set(getPath(field, delim...), data, DeleteValue)
+}
+
+// DeleteMany removes the entries at the given paths from the data.
+func DeleteMany(paths []string, value any, delim ...string) {
+	if value == nil || len(paths) == 0 {
+		return
+	}
+	for _, path := range paths {
+		Delete(path, value, delim...)
+	}
+}
+
+// Search recursively searches the data for the given path. The path is
+// a slice of keys, e.g. parent.child.key -> [parent child key].
+//
+// It's important to note that all nested maps should be
+// map[string]interface{} and not map[interface{}]interface{}.
+// Use IntfaceKeysToStrings() to convert if necessary.
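+//
+// A minimal usage sketch (mirrors the package tests):
+//
+//	v := Search(map[string]interface{}{"a": map[string]interface{}{"a": "1", "z": "2"}}, []string{"a", "a"})
+//	// v == "1"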
+func Search(in interface{}, path []string) interface{} {
+	switch val := in.(type) {
+
+	case map[string]interface{}:
+		next, ok := val[path[0]]
+		if ok {
+			if len(path) == 1 {
+				return next
+			}
+			switch v := next.(type) {
+			case map[string]interface{}, []interface{}:
+				return Search(v, path[1:])
+			}
+		}
+	case []interface{}:
+		out := make([]interface{}, len(val))
+		for i, e := range val {
+			out[i] = Search(e, path)
+		}
+		return out
+	}
+	return nil
+}
+
+func getPath(field string, delim ...string) []string {
+	if field == "" {
+		return nil
+	}
+
+	d := DefaultFieldDelimiter
+	if len(delim) > 0 {
+		d = delim[0]
+	}
+	return strings.Split(field, d)
+}
+
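+// Set assigns value to the entry at the given delimited path inside data,
+// creating intermediate maps as needed. Numeric path segments index into
+// slices, non-numeric segments over a slice are applied to every element,
+// and passing DeleteValue removes the target key.
+// A minimal sketch: Set("a.b", map[string]interface{}{}, 1) results in {"a": {"b": 1}}.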
+func Set(field string, data, value any, delim ...string) error {
+	return set(getPath(field, delim...), data, value)
+}
+
+func set(path []string, data, value any) error {
+	if len(path) == 0 {
+		return nil
+	}
+
+	switch v := data.(type) {
+	case map[string]interface{}:
+		if len(path) == 1 {
+
+			if _, ok := value.(DeleteValueType); ok {
+				delete(v, path[0])
+				return nil
+			}
+
+			v[path[0]] = value
+			return nil
+		}
+
+		next, ok := v[path[0]]
+		if !ok {
+			next = make(map[string]interface{})
+			v[path[0]] = next
+		}
+		return set(path[1:], next, value)
+
+	case []interface{}:
+		idx, err := strconv.Atoi(path[0])
+		if err != nil {
+			// Non-numeric path segment: apply the value to every element of the slice.
+			for _, vv := range v {
+				if err = set(path, vv, value); err != nil {
+					return err
+				}
+			}
+			return nil
+		}
+		if idx < 0 || idx >= len(v) {
+			return nil
+		}
+		return set(path[1:], v[idx], value)
+	}
+
+	return nil
+}
+
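+// Get returns the value at the given delimited path inside data and a flag
+// indicating whether the path was found. Numeric path segments index into
+// slices, e.g. Get("a.1.b", data) reads field "b" of the second element of slice "a".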
+func Get(field string, data any, delim ...string) (any, bool) {
+	return get(getPath(field, delim...), data)
+}
+
+func get(path []string, data any) (any, bool) {
+	if len(path) == 0 {
+		return data, true
+	}
+
+	switch v := data.(type) {
+	case map[string]interface{}:
+		val, ok := v[path[0]]
+		if !ok {
+			return nil, false
+		}
+		return get(path[1:], val)
+	case []interface{}:
+		idx, err := strconv.Atoi(path[0])
+		if err != nil || idx >= len(v) {
+			return nil, false
+		}
+		return get(path[1:], v[idx])
+	}
+
+	return nil, false
+}
+
+// Keep keeps only the entries at the given paths in the data and removes the rest,
+// if it is an object or an array.
+// Each path is a delimited string, e.g. "parent.child.key".
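+//
+// A minimal usage sketch (mirrors the package tests):
+//
+//	data := map[string]interface{}{"a": map[string]interface{}{"a": "1", "z": "2"}}
+//	Keep([]string{"a.a"}, data)
+//	// data == map[string]interface{}{"a": map[string]interface{}{"a": "1"}}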
+func Keep(paths []string, data any, delim ...string) {
+	if len(paths) == 0 {
+		data = nil
+		return
+	}
+	switch val := data.(type) {
+	case map[string]interface{}:
+		for k, v := range val {
+			if Contains(k, paths) {
+				continue
+			}
+			p := getObjectPaths(k, paths, delim...)
+			if len(p) == 0 {
+				delete(val, k)
+			}
+			Keep(p, v, delim...)
+		}
+	case []interface{}:
+		for _, ar := range val {
+			Keep(paths, ar, delim...)
+		}
+	}
+}
+
+func getObjectPaths(prefix string, arr []string, delim ...string) []string {
+	var res []string
+	d := DefaultFieldDelimiter
+	if len(delim) > 0 {
+		d = delim[0]
+	}
+	for _, v := range arr {
+		if strings.HasPrefix(v, prefix+d) {
+			res = append(res, strings.TrimPrefix(v, prefix+d))
+		}
+	}
+	return res
+}
+
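+// CloneMap returns a shallow copy of m: top-level keys are copied, while nested
+// values are still shared with the original map. A nil map is returned as nil.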
+func CloneMap(m map[string]interface{}) map[string]interface{} {
+	if m == nil {
+		return m
+	}
+
+	c := make(map[string]interface{}, len(m))
+	for k, v := range m {
+		c[k] = v
+	}
+	return c
+}
diff --git a/pkg/data/data_test.go b/pkg/data/data_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..785eefbb868a68c9d8c6b2f75b8f861ab2041e11
--- /dev/null
+++ b/pkg/data/data_test.go
@@ -0,0 +1,374 @@
+package data
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestDelete(t *testing.T) {
+	tests := []struct {
+		name  string
+		in    interface{}
+		field string
+		out   interface{}
+	}{
+		{
+			"simple",
+			map[string]interface{}{"a": "1", "z": "2"},
+			"a",
+			map[string]interface{}{"z": "2"},
+		},
+		{
+			"object",
+			map[string]interface{}{"a": map[string]interface{}{"a": "1", "z": "2"}},
+			"a",
+			map[string]interface{}{},
+		},
+		{
+			"object field",
+			map[string]interface{}{"a": map[string]interface{}{"a": "1", "z": "2"}},
+			"a.a",
+			map[string]interface{}{"a": map[string]interface{}{"z": "2"}},
+		},
+		{
+			"object field from map with array",
+			map[string]interface{}{"a": []interface{}{
+				map[string]interface{}{"a": "1", "b": "2"},
+				map[string]interface{}{"a": "3", "b": "4"},
+			}, "z": "2"},
+			"a.a",
+			map[string]interface{}{"a": []interface{}{
+				map[string]interface{}{"b": "2"},
+				map[string]interface{}{"b": "4"},
+			}, "z": "2"},
+		},
+		{
+			"object field from map with array of arrays",
+			map[string]interface{}{"a": []interface{}{
+				[]interface{}{
+					map[string]interface{}{"a": "1", "b": "2"},
+				}, []interface{}{
+					map[string]interface{}{"a": "3", "b": "4"},
+				},
+			}, "z": "2"},
+			"a.a",
+			map[string]interface{}{"a": []interface{}{
+				[]interface{}{
+					map[string]interface{}{"b": "2"},
+				}, []interface{}{
+					map[string]interface{}{"b": "4"},
+				},
+			}, "z": "2"},
+		},
+		// Решили что автоматически удалять пустые объекты/слайсы не нужно
+		//{
+		//	"empty object",
+		//	map[string]interface{}{"a": map[string]interface{}{"a": map[string]interface{}{}}},
+		//	[]string{"a", "a"},
+		//	map[string]interface{}{},
+		//}, {
+		//	"empty array",
+		//	map[string]interface{}{"a": map[string]interface{}{"a": []interface{}{}}},
+		//	[]string{"a", "a"},
+		//	map[string]interface{}{},
+		//},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			Delete(tt.field, tt.in)
+			assert.Equal(t, tt.out, tt.in)
+		})
+	}
+}
+
+func TestDeleteMany(t *testing.T) {
+	tests := []struct {
+		name  string
+		in    interface{}
+		paths []string
+		out   interface{}
+	}{
+		{
+			"simple",
+			map[string]interface{}{"a": "1", "z": "2", "d": "2"},
+			[]string{"a", "d"},
+			map[string]interface{}{"z": "2"},
+		},
+		{
+			"object",
+			map[string]interface{}{"a": map[string]interface{}{"a": "1", "z": "2"}},
+			[]string{"a"},
+			map[string]interface{}{},
+		},
+		{
+			"object field",
+			map[string]interface{}{"a": map[string]interface{}{"a": "1", "z": "2", "b": "4"}},
+			[]string{"a.a", "a.b"},
+			map[string]interface{}{"a": map[string]interface{}{"z": "2"}},
+		},
+		{
+			"object field from map with array",
+			map[string]interface{}{"a": []interface{}{
+				map[string]interface{}{"a": "1", "b": "2", "c": 0},
+				map[string]interface{}{"a": "3", "b": "4", "c": 0},
+			}, "z": "2"},
+			[]string{"a.a", "a.c"},
+			map[string]interface{}{"a": []interface{}{
+				map[string]interface{}{"b": "2"},
+				map[string]interface{}{"b": "4"},
+			}, "z": "2"},
+		},
+		{
+			"object field from map with array of arrays",
+			map[string]interface{}{"a": []interface{}{
+				[]interface{}{
+					map[string]interface{}{"a": "1", "b": "2"},
+				}, []interface{}{
+					map[string]interface{}{"a": "3", "b": "4"},
+				},
+			}, "z": "2"},
+			[]string{"a.a"},
+			map[string]interface{}{"a": []interface{}{
+				[]interface{}{
+					map[string]interface{}{"b": "2"},
+				}, []interface{}{
+					map[string]interface{}{"b": "4"},
+				},
+			}, "z": "2"},
+		},
+		{
+			"empty object",
+			map[string]interface{}{"a": map[string]interface{}{"a": map[string]interface{}{}}},
+			[]string{"a.a", "a"},
+			map[string]interface{}{},
+		},
+		{
+			"field not exist in object",
+			map[string]interface{}{"a": map[string]interface{}{"a": map[string]interface{}{}}},
+			[]string{"a.b"},
+			map[string]interface{}{"a": map[string]interface{}{"a": map[string]interface{}{}}},
+		},
+		{
+			"empty array",
+			map[string]interface{}{"a": map[string]interface{}{"a": []interface{}{}}},
+			[]string{"a.a", "a"},
+			map[string]interface{}{},
+		},
+		{
+			"field not exist in array",
+			map[string]interface{}{"a": map[string]interface{}{"a": []interface{}{}}},
+			[]string{"a.b"},
+			map[string]interface{}{"a": map[string]interface{}{"a": []interface{}{}}},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			DeleteMany(tt.paths, tt.in)
+			assert.Equal(t, tt.out, tt.in)
+		})
+	}
+}
+
+func TestSearch(t *testing.T) {
+	tests := []struct {
+		name string
+		in   interface{}
+		path []string
+		out  interface{}
+	}{
+		{
+			"simple",
+			map[string]interface{}{"a": "1", "z": "2"},
+			[]string{"a"},
+			"1",
+		},
+		{
+			"object",
+			map[string]interface{}{
+				"a": map[string]interface{}{"a": "1", "z": "2"},
+				"b": map[string]interface{}{"c": "1", "d": "2"},
+			},
+			[]string{"a"},
+			map[string]interface{}{"a": "1", "z": "2"},
+		},
+		{
+			"object field",
+			map[string]interface{}{"a": map[string]interface{}{"a": "1", "z": "2"}},
+			[]string{"a", "a"},
+			"1",
+		},
+		{
+			"object field from map with array",
+			map[string]interface{}{"a": []interface{}{
+				map[string]interface{}{"a": "1", "b": "2"},
+				map[string]interface{}{"a": "3", "b": "4"},
+			}, "z": "2"},
+			[]string{"a", "a"},
+			[]interface{}{"1", "3"},
+		},
+		{
+			"object field from array of arrays",
+			[]interface{}{
+				[]interface{}{
+					map[string]interface{}{"a": "1", "b": "2"},
+				}, []interface{}{
+					map[string]interface{}{"a": "3", "b": "4"},
+				},
+			},
+			[]string{"a"},
+			[]interface{}{[]interface{}{"1"}, []interface{}{"3"}},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			out := Search(tt.in, tt.path)
+			assert.Equal(t, tt.out, out)
+		})
+	}
+}
+
+func TestSet(t *testing.T) {
+	type args struct {
+		field string
+		data  any
+		value any
+	}
+	tests := []struct {
+		name     string
+		args     args
+		wantData any
+		wantErr  assert.ErrorAssertionFunc
+	}{
+		{"Simple", args{"a", map[string]interface{}{"a": "0"}, "a"}, map[string]interface{}{"a": "a"}, assert.NoError},
+		{"New key", args{"b", map[string]interface{}{"a": "0"}, "a"}, map[string]interface{}{"a": "0", "b": "a"}, assert.NoError},
+		{"Path", args{"a.b.c", map[string]interface{}{"a": map[string]any{"b": map[string]any{"c": "0"}}}, "c"}, map[string]any{"a": map[string]any{"b": map[string]any{"c": "c"}}}, assert.NoError},
+		{"Delete", args{"a.b", map[string]interface{}{"a": map[string]any{"b": map[string]any{"c": "0"}}}, DeleteValue}, map[string]any{"a": map[string]any{}}, assert.NoError},
+		{"Create map", args{"b.a", map[string]interface{}{"a": "0"}, "a"}, map[string]interface{}{"a": "0", "b": map[string]interface{}{"a": "a"}}, assert.NoError},
+		{"Map value", args{"a", map[string]interface{}{"a": "0"}, map[string]interface{}{"a": "a"}}, map[string]interface{}{"a": map[string]interface{}{"a": "a"}}, assert.NoError},
+		{"Slice", args{"a.a", map[string]interface{}{"a": []any{map[string]any{"a": "0"}, map[string]any{"a": "0", "b": "b"}}}, "a"}, map[string]interface{}{"a": []any{map[string]any{"a": "a"}, map[string]any{"a": "a", "b": "b"}}}, assert.NoError},
+		{"Slice", args{"a.0.a", map[string]interface{}{"a": []any{map[string]any{"a": "0"}, map[string]any{"a": "0", "b": "b"}}}, "a"}, map[string]interface{}{"a": []any{map[string]any{"a": "a"}, map[string]any{"a": "0", "b": "b"}}}, assert.NoError},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			data := tt.args.data
+			tt.wantErr(t, Set(tt.args.field, data, tt.args.value), fmt.Sprintf("Set(%v, %v, %v)", tt.args.field, data, tt.args.value))
+			assert.Equal(t, tt.wantData, data)
+		})
+	}
+}
+
+func TestGet(t *testing.T) {
+	type args struct {
+		field string
+		data  any
+	}
+	tests := []struct {
+		name  string
+		args  args
+		want  any
+		found bool
+	}{
+		{"Direct value", args{"", 100}, 100, true},
+		{"Not found", args{"a", 100}, nil, false},
+		{"Simple", args{"a", map[string]any{"a": "0"}}, "0", true},
+		{"Path", args{"a.b.c", map[string]any{"a": map[string]any{"b": map[string]any{"c": "c"}}}}, "c", true},
+		{"Incorrect path", args{"a.b.wrong", map[string]any{"a": map[string]any{"b": map[string]any{"c": "c"}}}}, nil, false},
+		{"Map value", args{"a.b", map[string]any{"a": map[string]any{"b": map[string]any{"c": "c"}}}}, map[string]any{"c": "c"}, true},
+		{"Slice", args{"a.1.b", map[string]any{"a": []any{map[string]any{"b": "0"}, map[string]any{"b": "1"}}}}, "1", true},
+		{"Slice out of range", args{"a.2.b", map[string]any{"a": []any{map[string]any{"b": "0"}, map[string]any{"b": "1"}}}}, nil, false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, got1 := Get(tt.args.field, tt.args.data)
+			assert.Equalf(t, tt.want, got, "Get(%v, %v)", tt.args.field, tt.args.data)
+			assert.Equalf(t, tt.found, got1, "Get(%v, %v)", tt.args.field, tt.args.data)
+		})
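+// PtrSchemaSchemaToProto serializes the schema into a JSON string for the
+// protobuf field; a nil schema is encoded as an empty string.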
+	}
+}
+
+func TestKeep(t *testing.T) {
+	tests := []struct {
+		name string
+		in   interface{}
+		path []string
+		out  interface{}
+	}{
+		{
+			"simple",
+			map[string]interface{}{"a": "1", "z": "2"},
+			[]string{"a"},
+			map[string]interface{}{"a": "1"},
+		},
+		{
+			"object",
+			map[string]interface{}{"a": map[string]interface{}{"a": "1", "z": "2"}},
+			[]string{"a"},
+			map[string]interface{}{"a": map[string]interface{}{"a": "1", "z": "2"}},
+		},
+		{
+			"no field",
+			map[string]interface{}{"a": map[string]interface{}{"a": "1", "z": "2"}},
+			[]string{"z"},
+			map[string]interface{}{},
+		},
+		{
+			"object field",
+			map[string]interface{}{"a": map[string]interface{}{"a": "1", "z": "2"}},
+			[]string{"a.a"},
+			map[string]interface{}{"a": map[string]interface{}{"a": "1"}},
+		},
+		{
+			"object field from map with array",
+			map[string]interface{}{"a": []interface{}{
+				map[string]interface{}{"a": "1", "b": "2"},
+				map[string]interface{}{"a": "3", "b": "4"},
+			}, "z": "2"},
+			[]string{"a.a", "z"},
+			map[string]interface{}{"a": []interface{}{
+				map[string]interface{}{"a": "1"},
+				map[string]interface{}{"a": "3"},
+			}, "z": "2"},
+		},
+		{
+			"object field from map with array of arrays",
+			map[string]interface{}{"a": []interface{}{
+				[]interface{}{
+					map[string]interface{}{"a": "1", "b": "2"},
+				}, []interface{}{
+					map[string]interface{}{"a": "3", "b": "4"},
+				},
+			}, "z": "2"},
+			[]string{"a.b", "z"},
+			map[string]interface{}{"a": []interface{}{
+				[]interface{}{
+					map[string]interface{}{"b": "2"},
+				}, []interface{}{
+					map[string]interface{}{"b": "4"},
+				},
+			}, "z": "2"},
+		},
+		{
+			"empty object",
+			map[string]interface{}{"a": map[string]interface{}{"a": map[string]interface{}{}}},
+			[]string{"a.b"},
+			map[string]interface{}{"a": map[string]interface{}{}},
+		}, {
+			"empty array",
+			map[string]interface{}{"a": map[string]interface{}{"a": []interface{}{}}},
+			[]string{"a.b"},
+			map[string]interface{}{"a": map[string]interface{}{}},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			Keep(tt.path, tt.in)
+			assert.Equal(t, tt.out, tt.in)
+		})
+	}
+}
diff --git a/pkg/environments/middleware/caching_middleware.go b/pkg/environments/middleware/caching_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..18b594bc3a193d962d63ef3d5eae6217408d61bd
--- /dev/null
+++ b/pkg/environments/middleware/caching_middleware.go
@@ -0,0 +1,167 @@
+package service
+
+import (
+	"context"
+	"strings"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	service "git.perx.ru/perxis/perxis-go/pkg/environments"
+)
+
+func makeKey(ss ...string) string {
+	return strings.Join(ss, "-")
+}
+
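+// CachingMiddleware returns a middleware that caches Environments responses:
+// Get results are stored per space under both the environment ID and each of its
+// aliases, List results are stored under the space ID. Create, Update, Delete,
+// SetAlias, RemoveAlias and Migrate invalidate the affected cache entries.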
+func CachingMiddleware(cache *cache.Cache) Middleware {
+	return func(next service.Environments) service.Environments {
+		return &cachingMiddleware{
+			cache: cache,
+			next:  next,
+		}
+	}
+}
+
+type cachingMiddleware struct {
+	cache *cache.Cache
+	next  service.Environments
+}
+
+func (m cachingMiddleware) Create(ctx context.Context, env *service.Environment) (environment *service.Environment, err error) {
+
+	environment, err = m.next.Create(ctx, env)
+	if err == nil {
+		m.cache.Remove(environment.SpaceID)
+	}
+	return environment, err
+}
+
+func (m cachingMiddleware) Get(ctx context.Context, spaceId string, envId string) (environment *service.Environment, err error) {
+
+	value, e := m.cache.Get(makeKey(spaceId, envId))
+	if e == nil {
+		return value.(*service.Environment), err
+	}
+	environment, err = m.next.Get(ctx, spaceId, envId)
+	if err == nil {
+		m.cache.Set(makeKey(spaceId, environment.ID), environment)
+		for _, a := range environment.Aliases {
+			m.cache.Set(makeKey(spaceId, a), environment)
+		}
+	}
+	return environment, err
+}
+
+func (m cachingMiddleware) List(ctx context.Context, spaceId string) (environments []*service.Environment, err error) {
+
+	value, e := m.cache.Get(spaceId)
+	if e == nil {
+		return value.([]*service.Environment), err
+	}
+	environments, err = m.next.List(ctx, spaceId)
+	if err == nil {
+		m.cache.Set(spaceId, environments)
+	}
+	return environments, err
+}
+
+func (m cachingMiddleware) Update(ctx context.Context, env *service.Environment) (err error) {
+
+	err = m.next.Update(ctx, env)
+	if err == nil {
+		value, e := m.cache.Get(makeKey(env.SpaceID, env.ID))
+		if e == nil {
+			env := value.(*service.Environment)
+			m.cache.Remove(makeKey(env.SpaceID, env.ID))
+			for _, a := range env.Aliases {
+				m.cache.Remove(makeKey(env.SpaceID, a))
+			}
+		}
+		m.cache.Remove(env.SpaceID)
+	}
+	return err
+}
+
+func (m cachingMiddleware) Delete(ctx context.Context, spaceId string, envId string) (err error) {
+
+	err = m.next.Delete(ctx, spaceId, envId)
+	if err == nil {
+		value, e := m.cache.Get(makeKey(spaceId, envId))
+		if e == nil {
+			env := value.(*service.Environment)
+			m.cache.Remove(makeKey(env.SpaceID, env.ID))
+			for _, a := range env.Aliases {
+				m.cache.Remove(makeKey(env.SpaceID, a))
+			}
+		}
+		m.cache.Remove(spaceId)
+	}
+	return err
+}
+
+func (m cachingMiddleware) SetAlias(ctx context.Context, spaceId string, envId string, alias string) (err error) {
+
+	err = m.next.SetAlias(ctx, spaceId, envId, alias)
+	if err == nil {
+		value, e := m.cache.Get(makeKey(spaceId, alias))
+		if e == nil {
+			env := value.(*service.Environment)
+			m.cache.Remove(makeKey(env.SpaceID, env.ID))
+			for _, a := range env.Aliases {
+				m.cache.Remove(makeKey(env.SpaceID, a))
+			}
+		}
+
+		value, e = m.cache.Get(makeKey(spaceId, envId))
+		if e == nil {
+			env := value.(*service.Environment)
+			m.cache.Remove(makeKey(env.SpaceID, env.ID))
+			for _, a := range env.Aliases {
+				m.cache.Remove(makeKey(env.SpaceID, a))
+			}
+		}
+		m.cache.Remove(spaceId)
+	}
+	return err
+}
+
+func (m cachingMiddleware) RemoveAlias(ctx context.Context, spaceId string, envId string, alias string) (err error) {
+
+	err = m.next.RemoveAlias(ctx, spaceId, envId, alias)
+	if err == nil {
+		m.cache.Remove(spaceId)
+		value, e := m.cache.Get(makeKey(spaceId, alias))
+		if e == nil {
+			env := value.(*service.Environment)
+			m.cache.Remove(makeKey(env.SpaceID, env.ID))
+			for _, a := range env.Aliases {
+				m.cache.Remove(makeKey(env.SpaceID, a))
+			}
+		}
+
+		value, e = m.cache.Get(makeKey(spaceId, envId))
+		if e == nil {
+			env := value.(*service.Environment)
+			m.cache.Remove(makeKey(env.SpaceID, env.ID))
+			for _, a := range env.Aliases {
+				m.cache.Remove(makeKey(env.SpaceID, a))
+			}
+		}
+	}
+	return err
+}
+
+func (m cachingMiddleware) Migrate(ctx context.Context, spaceId, envId string, options ...*service.MigrateOptions) (err error) {
+	err = m.next.Migrate(ctx, spaceId, envId, options...)
+
+	// значение из кэша удалить вне зависимости от наличия ошибки, поскольку состояние окружения могло измениться
+	value, e := m.cache.Get(makeKey(spaceId, envId))
+	if e == nil {
+		env := value.(*service.Environment)
+		m.cache.Remove(makeKey(env.SpaceID, env.ID))
+		for _, a := range env.Aliases {
+			m.cache.Remove(makeKey(env.SpaceID, a))
+		}
+	}
+	m.cache.Remove(spaceId)
+	return err
+}
diff --git a/pkg/environments/middleware/caching_middleware_test.go b/pkg/environments/middleware/caching_middleware_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f5bfa7a9b5d207ed0c00d60cd126950a6da5aba0
--- /dev/null
+++ b/pkg/environments/middleware/caching_middleware_test.go
@@ -0,0 +1,387 @@
+package service
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	"git.perx.ru/perxis/perxis-go/pkg/environments"
+	mocksenvironments "git.perx.ru/perxis/perxis-go/pkg/environments/mocks"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestEnvironmentsCache(t *testing.T) {
+
+	const (
+		envID    = "envID"
+		spaceID  = "spaceID"
+		envAlias = "envAlias"
+		size     = 5
+		ttl      = 20 * time.Millisecond
+	)
+
+	errNotFound := errors.NotFound(errors.New("not found"))
+
+	ctx := context.Background()
+
+	t.Run("Get from cache", func(t *testing.T) {
+		envs := &mocksenvironments.Environments{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(envs)
+
+		envs.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{envAlias}}, nil).Once()
+
+		v1, err := svc.Get(ctx, spaceID, envID)
+		require.NoError(t, err)
+
+		v2, err := svc.Get(ctx, spaceID, envID)
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Ожидается получение объекта из кэша при повторном запросе по ID.")
+
+		v3, err := svc.Get(ctx, spaceID, envAlias)
+		require.NoError(t, err)
+		assert.Same(t, v3, v2, "Ожидается получение объекта из кеша, при запросе того же объекта по alias окружения.")
+
+		envs.AssertExpectations(t)
+	})
+
+	t.Run("Get from cache(by Alias)", func(t *testing.T) {
+		envs := &mocksenvironments.Environments{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(envs)
+
+		envs.On("Get", mock.Anything, spaceID, envAlias).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{envAlias}}, nil).Once()
+
+		v1, err := svc.Get(ctx, spaceID, envAlias)
+		require.NoError(t, err)
+
+		v2, err := svc.Get(ctx, spaceID, envAlias)
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Ожидается получение объекта из кэша по alias.")
+
+		v3, err := svc.Get(ctx, spaceID, envID)
+		require.NoError(t, err)
+		assert.Same(t, v3, v2, "Ожидается получение объекта из кеша, при запросе того же объекта по ID окружения.")
+
+		envs.AssertExpectations(t)
+	})
+
+	t.Run("List from cache", func(t *testing.T) {
+		envs := &mocksenvironments.Environments{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(envs)
+
+		envs.On("List", mock.Anything, spaceID).Return([]*environments.Environment{{ID: envID, SpaceID: spaceID, Description: "Environment"}}, nil).Once()
+
+		vl1, err := svc.List(ctx, spaceID)
+		require.NoError(t, err)
+
+		vl2, err := svc.List(ctx, spaceID)
+		require.NoError(t, err)
+		assert.Same(t, vl1[0], vl2[0], "Ожидается получение объектов из кэша.")
+
+		envs.AssertExpectations(t)
+	})
+
+	t.Run("Invalidate cache", func(t *testing.T) {
+		t.Run("After SetAlias", func(t *testing.T) {
+			envs := &mocksenvironments.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(envs)
+
+			envs.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{"envID2"}}, nil).Once()
+			envs.On("List", mock.Anything, spaceID).Return([]*environments.Environment{{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{"envID2"}}}, nil).Once()
+			envs.On("SetAlias", mock.Anything, spaceID, envID, envAlias).Return(nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кэша.")
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Ожидается получение объектов из кэша.")
+
+			err = svc.SetAlias(ctx, spaceID, envID, envAlias)
+			require.NoError(t, err)
+
+			envs.On("Get", mock.Anything, spaceID, envAlias).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{"envID2", envAlias}}, nil).Once()
+			envs.On("List", mock.Anything, spaceID).Return([]*environments.Environment{{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{"envID2", envAlias}}}, nil).Once()
+
+			v4, err := svc.Get(ctx, spaceID, envAlias)
+			require.NoError(t, err)
+			assert.Contains(t, v4.Aliases, envAlias, "Ожидает что элемент будет запрошен из сервиса по Alias.")
+
+			v5, err := svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+			assert.Same(t, v4, v5, "Ожидается получение объекта из кэша по ID.")
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.NotSame(t, vl2[0], vl3[0], "Ожидает что объекты будут удалены из кэша и запрошены из сервиса.")
+
+			envs.AssertExpectations(t)
+		})
+
+		t.Run("After RemoveAlias", func(t *testing.T) {
+			envs := &mocksenvironments.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(envs)
+
+			envs.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{"envID2", envAlias}}, nil).Once()
+			envs.On("List", mock.Anything, spaceID).Return([]*environments.Environment{{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{"envID2", envAlias}}}, nil).Once()
+			envs.On("RemoveAlias", mock.Anything, spaceID, envID, envAlias).Return(nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кэша по ID.")
+
+			v3, err := svc.Get(ctx, spaceID, envAlias)
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Ожидается получение объекта из кэша по Alias.")
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Ожидается получение объектов из кэша.")
+
+			err = svc.RemoveAlias(ctx, spaceID, envID, envAlias)
+			require.NoError(t, err)
+
+			envs.On("Get", mock.Anything, spaceID, envAlias).Return(nil, errNotFound).Once()
+			envs.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{"envID2"}}, nil).Once()
+			envs.On("List", mock.Anything, spaceID).Return([]*environments.Environment{{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{"envID2"}}}, nil).Once()
+
+			_, err = svc.Get(ctx, spaceID, envAlias)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидает что элемент был удален из кеша и сервис вернул ошибку на запрос по несуществующему Alias.")
+
+			v4, err := svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v4, "Ожидает что элемент был удален из кеша и получен из сервиса по ID.")
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.NotSame(t, vl2[0], vl3[0], "Ожидает что объекты будут удалены из кэша и запрошены из сервиса.")
+
+			envs.AssertExpectations(t)
+		})
+
+		t.Run("After Update", func(t *testing.T) {
+			envs := &mocksenvironments.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(envs)
+
+			envs.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{envAlias}}, nil).Once()
+			envs.On("List", mock.Anything, spaceID).Return([]*environments.Environment{{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{envAlias}}}, nil).Once()
+			envs.On("Update", mock.Anything, mock.Anything).Return(nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кэша.")
+
+			v3, err := svc.Get(ctx, spaceID, envAlias)
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Ожидается получение объекта из кэша по Alias.")
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Ожидается получение объектов из кэша.")
+
+			err = svc.Update(ctx, &environments.Environment{ID: envID, SpaceID: spaceID, Description: "EnvironmentUPD", Aliases: []string{envAlias}})
+			require.NoError(t, err)
+
+			envs.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Description: "EnvironmentUPD", Aliases: []string{envAlias}}, nil).Once()
+			envs.On("List", mock.Anything, spaceID).Return([]*environments.Environment{{ID: envID, SpaceID: spaceID, Description: "EnvironmentUPD", Aliases: []string{envAlias}}}, nil).Once()
+
+			_, err = svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+
+			v4, err := svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v4, "Ожидает что элемент был удален из кэша и будет запрошен заново из сервиса.")
+
+			v5, err := svc.Get(ctx, spaceID, envAlias)
+			require.NoError(t, err)
+			assert.Same(t, v4, v5, "Ожидается получение объекта из кэша по Alias после обновления объекта и получения по ID.")
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.NotSame(t, vl2[0], vl3[0], "Ожидает что объекты будут удалены из кэша и запрошены из сервиса.")
+
+			envs.AssertExpectations(t)
+		})
+
+		t.Run("After Update(List)", func(t *testing.T) {
+			envs := &mocksenvironments.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(envs)
+
+			envs.On("List", mock.Anything, spaceID).Return([]*environments.Environment{{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{envAlias}}}, nil).Once()
+			envs.On("Update", mock.Anything, mock.Anything).Return(nil).Once()
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Ожидается получение объектов из кэша.")
+
+			err = svc.Update(ctx, &environments.Environment{ID: envID, SpaceID: spaceID, Description: "EnvironmentUPD", Aliases: []string{envAlias}})
+			require.NoError(t, err)
+
+			envs.On("List", mock.Anything, spaceID).Return([]*environments.Environment{{ID: envID, SpaceID: spaceID, Description: "EnvironmentUPD", Aliases: []string{envAlias}}}, nil).Once()
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.NotSame(t, vl2[0], vl3[0], "Ожидает что объекты будут удалены из кэша и запрошены из сервиса.")
+
+			envs.AssertExpectations(t)
+		})
+
+		t.Run("After Delete", func(t *testing.T) {
+			envs := &mocksenvironments.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(envs)
+
+			envs.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{envAlias}}, nil).Once()
+			envs.On("List", mock.Anything, spaceID).Return([]*environments.Environment{{ID: envID, SpaceID: spaceID, Description: "Environment", Aliases: []string{envAlias}}}, nil).Once()
+			envs.On("Delete", mock.Anything, spaceID, envID).Return(nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кэша.")
+
+			v3, err := svc.Get(ctx, spaceID, envAlias)
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Ожидается получение объекта из кэша по Alias.")
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Ожидается получение объектов из кэша.")
+
+			err = svc.Delete(ctx, spaceID, envID)
+			require.NoError(t, err)
+
+			envs.On("Get", mock.Anything, spaceID, envID).Return(nil, errNotFound).Once()
+			envs.On("Get", mock.Anything, spaceID, envAlias).Return(nil, errNotFound).Once()
+			envs.On("List", mock.Anything, spaceID).Return([]*environments.Environment{}, nil).Once()
+
+			_, err = svc.Get(ctx, spaceID, envID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидает что элемент был удален из кэша по ID и получена ошибка от сервиса.")
+
+			_, err = svc.Get(ctx, spaceID, envAlias)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидает что элемент был удален из кэша по Alias и получена ошибка от сервиса.")
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Len(t, vl3, 0, "Ожидает что объекты будут удалены из кэша и запрошены из сервиса.")
+
+			envs.AssertExpectations(t)
+		})
+
+		t.Run("After Create", func(t *testing.T) {
+			envs := &mocksenvironments.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(envs)
+
+			envs.On("List", mock.Anything, spaceID).Return([]*environments.Environment{{ID: envID, SpaceID: spaceID, Description: "Environment"}}, nil).Once()
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Ожидается получение объектов из кэша.")
+
+			envs.On("Create", mock.Anything, mock.Anything).Return(&environments.Environment{ID: "envID2", SpaceID: spaceID, Description: "Environment2"}, nil).Once()
+			_, err = svc.Create(ctx, &environments.Environment{ID: "envID2", SpaceID: spaceID, Description: "Environment2"})
+			require.NoError(t, err)
+
+			envs.On("List", mock.Anything, spaceID).Return([]*environments.Environment{{ID: envID, SpaceID: spaceID, Description: "Environment"}, {ID: "envID2", SpaceID: spaceID, Description: "Environment2"}}, nil).Once()
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Len(t, vl3, 2, "Ожидает что объекты были удалены из кэша и запрошены заново из сервиса.")
+
+			envs.AssertExpectations(t)
+		})
+
+		t.Run("After size exceeded", func(t *testing.T) {
+			envs := &mocksenvironments.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(1, ttl))(envs)
+
+			envs.On("Get", mock.Anything, spaceID, "envID2").Return(&environments.Environment{ID: "envID2", SpaceID: spaceID, Description: "Environment2"}, nil).Once()
+			envs.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Description: "Environment"}, nil).Once()
+			envs.On("Get", mock.Anything, spaceID, "envID2").Return(&environments.Environment{ID: "envID2", SpaceID: spaceID, Description: "Environment2"}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, "envID2")
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, "envID2")
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кэша.")
+
+			_, err = svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+
+			v5, err := svc.Get(ctx, spaceID, "envID2")
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v5, "Expected the object to be removed from the cache and requested from the service again.")
+
+			envs.AssertExpectations(t)
+		})
+
+		t.Run("After TTL expired", func(t *testing.T) {
+			envs := &mocksenvironments.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(envs)
+
+			envs.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Description: "Environment"}, nil).Once()
+			v1, err := svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be returned from the cache.")
+
+			time.Sleep(2 * ttl)
+
+			envs.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Description: "Environment"}, nil).Once()
+			v3, err := svc.Get(ctx, spaceID, envID)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v3, "Expected the object to be removed from the cache and requested from the service again.")
+
+			envs.AssertExpectations(t)
+		})
+	})
+}
diff --git a/pkg/environments/middleware/error_logging_middleware.go b/pkg/environments/middleware/error_logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..6d6cb544f129f2925d33be91f6666dcf8ebb8936
--- /dev/null
+++ b/pkg/environments/middleware/error_logging_middleware.go
@@ -0,0 +1,110 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/error_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/environments -i Environments -t ../../../assets/templates/middleware/error_log -o error_logging_middleware.go -l ""
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/environments"
+	"go.uber.org/zap"
+)
+
+// errorLoggingMiddleware implements environments.Environments that is instrumented with logging
+type errorLoggingMiddleware struct {
+	logger *zap.Logger
+	next   environments.Environments
+}
+
+// ErrorLoggingMiddleware instruments an implementation of the environments.Environments with simple logging
+func ErrorLoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next environments.Environments) environments.Environments {
+		return &errorLoggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *errorLoggingMiddleware) Create(ctx context.Context, env *environments.Environment) (created *environments.Environment, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Create(ctx, env)
+}
+
+func (m *errorLoggingMiddleware) Delete(ctx context.Context, spaceId string, envId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Delete(ctx, spaceId, envId)
+}
+
+func (m *errorLoggingMiddleware) Get(ctx context.Context, spaceId string, envId string) (env *environments.Environment, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Get(ctx, spaceId, envId)
+}
+
+func (m *errorLoggingMiddleware) List(ctx context.Context, spaceId string) (envs []*environments.Environment, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.List(ctx, spaceId)
+}
+
+func (m *errorLoggingMiddleware) Migrate(ctx context.Context, spaceId string, envId string, options ...*environments.MigrateOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Migrate(ctx, spaceId, envId, options...)
+}
+
+func (m *errorLoggingMiddleware) RemoveAlias(ctx context.Context, spaceId string, envId string, alias string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.RemoveAlias(ctx, spaceId, envId, alias)
+}
+
+func (m *errorLoggingMiddleware) SetAlias(ctx context.Context, spaceId string, envId string, alias string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.SetAlias(ctx, spaceId, envId, alias)
+}
+
+func (m *errorLoggingMiddleware) Update(ctx context.Context, env *environments.Environment) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Update(ctx, env)
+}
diff --git a/pkg/environments/middleware/logging_middleware.go b/pkg/environments/middleware/logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..26f4eb279126e6a008db8998b83012c8a316ca2f
--- /dev/null
+++ b/pkg/environments/middleware/logging_middleware.go
@@ -0,0 +1,325 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/access_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/environments -i Environments -t ../../../assets/templates/middleware/access_log -o logging_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/auth"
+	"git.perx.ru/perxis/perxis-go/pkg/environments"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// loggingMiddleware implements environments.Environments that is instrumented with logging
+type loggingMiddleware struct {
+	logger *zap.Logger
+	next   environments.Environments
+}
+
+// LoggingMiddleware instruments an implementation of the environments.Environments with simple logging
+func LoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next environments.Environments) environments.Environments {
+		return &loggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *loggingMiddleware) Create(ctx context.Context, env *environments.Environment) (created *environments.Environment, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx": ctx,
+		"env": env} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Request", fields...)
+
+	created, err = m.next.Create(ctx, env)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"created": created,
+		"err":     err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Response", fields...)
+
+	return created, err
+}
+
+func (m *loggingMiddleware) Delete(ctx context.Context, spaceId string, envId string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"envId":   envId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Request", fields...)
+
+	err = m.next.Delete(ctx, spaceId, envId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Get(ctx context.Context, spaceId string, envId string) (env *environments.Environment, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"envId":   envId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Request", fields...)
+
+	env, err = m.next.Get(ctx, spaceId, envId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"env": env,
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Response", fields...)
+
+	return env, err
+}
+
+func (m *loggingMiddleware) List(ctx context.Context, spaceId string) (envs []*environments.Environment, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("List.Request", fields...)
+
+	envs, err = m.next.List(ctx, spaceId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"envs": envs,
+		"err":  err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("List.Response", fields...)
+
+	return envs, err
+}
+
+func (m *loggingMiddleware) Migrate(ctx context.Context, spaceId string, envId string, options ...*environments.MigrateOptions) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"envId":   envId,
+		"options": options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Migrate.Request", fields...)
+
+	err = m.next.Migrate(ctx, spaceId, envId, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Migrate.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) RemoveAlias(ctx context.Context, spaceId string, envId string, alias string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"envId":   envId,
+		"alias":   alias} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("RemoveAlias.Request", fields...)
+
+	err = m.next.RemoveAlias(ctx, spaceId, envId, alias)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("RemoveAlias.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) SetAlias(ctx context.Context, spaceId string, envId string, alias string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"envId":   envId,
+		"alias":   alias} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("SetAlias.Request", fields...)
+
+	err = m.next.SetAlias(ctx, spaceId, envId, alias)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("SetAlias.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Update(ctx context.Context, env *environments.Environment) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx": ctx,
+		"env": env} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Request", fields...)
+
+	err = m.next.Update(ctx, env)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Response", fields...)
+
+	return err
+}
diff --git a/pkg/environments/middleware/middleware.go b/pkg/environments/middleware/middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..b5e29a99db170df7d2cc5003b2d9a206a423fef2
--- /dev/null
+++ b/pkg/environments/middleware/middleware.go
@@ -0,0 +1,28 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/middleware
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/environments -i Environments -t ../../../assets/templates/middleware/middleware -o middleware.go -l ""
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/environments"
+	"go.uber.org/zap"
+)
+
+type Middleware func(environments.Environments) environments.Environments
+
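+// WithLog wraps the service with the logging middleware chain: error logging is
+// always applied, access logging is added when log_access is true, and the
+// recovering middleware is applied last, so it is the outermost wrapper and
+// recovers panics raised in any layer below it.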
+func WithLog(s environments.Environments, logger *zap.Logger, log_access bool) environments.Environments {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	logger = logger.Named("Environments")
+	s = ErrorLoggingMiddleware(logger)(s)
+	if log_access {
+		s = LoggingMiddleware(logger)(s)
+	}
+	s = RecoveringMiddleware(logger)(s)
+	return s
+}
diff --git a/pkg/environments/middleware/recovering_middleware.go b/pkg/environments/middleware/recovering_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..bf4bec7d46fa8b3e963c65330c6883c9c7fb0c20
--- /dev/null
+++ b/pkg/environments/middleware/recovering_middleware.go
@@ -0,0 +1,127 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/recovery
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/environments -i Environments -t ../../../assets/templates/middleware/recovery -o recovering_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+
+	"git.perx.ru/perxis/perxis-go/pkg/environments"
+	"go.uber.org/zap"
+)
+
+// recoveringMiddleware implements environments.Environments that is instrumented with logging
+type recoveringMiddleware struct {
+	logger *zap.Logger
+	next   environments.Environments
+}
+
+// RecoveringMiddleware instruments an implementation of the environments.Environments with simple logging
+func RecoveringMiddleware(logger *zap.Logger) Middleware {
+	return func(next environments.Environments) environments.Environments {
+		return &recoveringMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *recoveringMiddleware) Create(ctx context.Context, env *environments.Environment) (created *environments.Environment, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Create(ctx, env)
+}
+
+func (m *recoveringMiddleware) Delete(ctx context.Context, spaceId string, envId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Delete(ctx, spaceId, envId)
+}
+
+func (m *recoveringMiddleware) Get(ctx context.Context, spaceId string, envId string) (env *environments.Environment, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Get(ctx, spaceId, envId)
+}
+
+func (m *recoveringMiddleware) List(ctx context.Context, spaceId string) (envs []*environments.Environment, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.List(ctx, spaceId)
+}
+
+func (m *recoveringMiddleware) Migrate(ctx context.Context, spaceId string, envId string, options ...*environments.MigrateOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Migrate(ctx, spaceId, envId, options...)
+}
+
+func (m *recoveringMiddleware) RemoveAlias(ctx context.Context, spaceId string, envId string, alias string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.RemoveAlias(ctx, spaceId, envId, alias)
+}
+
+func (m *recoveringMiddleware) SetAlias(ctx context.Context, spaceId string, envId string, alias string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.SetAlias(ctx, spaceId, envId, alias)
+}
+
+func (m *recoveringMiddleware) Update(ctx context.Context, env *environments.Environment) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Update(ctx, env)
+}
diff --git a/pkg/events/events.go b/pkg/events/events.go
new file mode 100644
index 0000000000000000000000000000000000000000..454f690a196b08782d8110b0c2c13b186853a776
--- /dev/null
+++ b/pkg/events/events.go
@@ -0,0 +1,55 @@
+package events
+
+type Subscription interface {
+	Unsubscribe() error
+}
+
+type Connection interface {
+	Publish(subject string, msg any, opts ...PublishOption) error
+	Subscribe(subject string, handler any, opts ...SubscribeOption) (Subscription, error)
+	Close() error
+}
+
+type PublishOptions struct {
+	Tags []string
+}
+
+func NewPublishOptions(opts ...PublishOption) *PublishOptions {
+	o := &PublishOptions{}
+	for _, opt := range opts {
+		if opt != nil {
+			opt(o)
+		}
+	}
+	return o
+}
+
+type PublishOption func(options *PublishOptions)
+
+func Tag(tag ...string) PublishOption {
+	return func(o *PublishOptions) {
+		o.Tags = tag
+	}
+}
+
+type SubscribeOptions struct {
+	FilterTags []string
+}
+
+func NewSubscribeOptions(opts ...SubscribeOption) *SubscribeOptions {
+	o := &SubscribeOptions{}
+	for _, opt := range opts {
+		if opt != nil {
+			opt(o)
+		}
+	}
+	return o
+}
+
+type SubscribeOption func(options *SubscribeOptions)
+
+func FilterTag(tag ...string) SubscribeOption {
+	return func(o *SubscribeOptions) {
+		o.FilterTags = tag
+	}
+}
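+
+// Illustrative sketch of the intended usage (not part of the package API): the
+// subject name is arbitrary and the payload type must implement ProtoEncoder;
+// the Test type from nats_integration_test.go is used here as an example, and
+// Open and ProtobufEncoder are defined in nats.go and proto_encoder.go.
+//
+//	conn, err := Open("nats://localhost:4222", "perxis")
+//	if err != nil {
+//		// handle connection error
+//	}
+//	defer conn.Close()
+//
+//	sub, _ := conn.Subscribe("spaces.updated", func(t *Test) { /* handle event */ }, FilterTag("audit"))
+//	defer sub.Unsubscribe()
+//
+//	_ = conn.Publish("spaces.updated", &Test{Text: "hello"}, Tag("audit"))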
diff --git a/pkg/events/mocks/Connection.go b/pkg/events/mocks/Connection.go
new file mode 100644
index 0000000000000000000000000000000000000000..a295924662ba10243a788c219359d60720789759
--- /dev/null
+++ b/pkg/events/mocks/Connection.go
@@ -0,0 +1,96 @@
+// Code generated by mockery v2.20.0. DO NOT EDIT.
+
+package mocks
+
+import (
+	events "git.perx.ru/perxis/perxis-go/pkg/events"
+	mock "github.com/stretchr/testify/mock"
+)
+
+// Connection is an autogenerated mock type for the Connection type
+type Connection struct {
+	mock.Mock
+}
+
+// Close provides a mock function with given fields:
+func (_m *Connection) Close() error {
+	ret := _m.Called()
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func() error); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// Publish provides a mock function with given fields: subject, msg, opts
+func (_m *Connection) Publish(subject string, msg interface{}, opts ...events.PublishOption) error {
+	_va := make([]interface{}, len(opts))
+	for _i := range opts {
+		_va[_i] = opts[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, subject, msg)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(string, interface{}, ...events.PublishOption) error); ok {
+		r0 = rf(subject, msg, opts...)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// Subscribe provides a mock function with given fields: subject, handler, opts
+func (_m *Connection) Subscribe(subject string, handler interface{}, opts ...events.SubscribeOption) (events.Subscription, error) {
+	_va := make([]interface{}, len(opts))
+	for _i := range opts {
+		_va[_i] = opts[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, subject, handler)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 events.Subscription
+	var r1 error
+	if rf, ok := ret.Get(0).(func(string, interface{}, ...events.SubscribeOption) (events.Subscription, error)); ok {
+		return rf(subject, handler, opts...)
+	}
+	if rf, ok := ret.Get(0).(func(string, interface{}, ...events.SubscribeOption) events.Subscription); ok {
+		r0 = rf(subject, handler, opts...)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(events.Subscription)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(string, interface{}, ...events.SubscribeOption) error); ok {
+		r1 = rf(subject, handler, opts...)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+type mockConstructorTestingTNewConnection interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewConnection creates a new instance of Connection. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewConnection(t mockConstructorTestingTNewConnection) *Connection {
+	mock := &Connection{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/pkg/events/mocks/MsgFilter.go b/pkg/events/mocks/MsgFilter.go
new file mode 100644
index 0000000000000000000000000000000000000000..8e1340743309bfc3097e478e7aac7f1880bfb157
--- /dev/null
+++ b/pkg/events/mocks/MsgFilter.go
@@ -0,0 +1,44 @@
+// Code generated by mockery v2.20.0. DO NOT EDIT.
+
+package mocks
+
+import (
+	nats "github.com/nats-io/nats.go"
+	mock "github.com/stretchr/testify/mock"
+)
+
+// MsgFilter is an autogenerated mock type for the MsgFilter type
+type MsgFilter struct {
+	mock.Mock
+}
+
+// Execute provides a mock function with given fields: _a0
+func (_m *MsgFilter) Execute(_a0 *nats.Msg) *nats.Msg {
+	ret := _m.Called(_a0)
+
+	var r0 *nats.Msg
+	if rf, ok := ret.Get(0).(func(*nats.Msg) *nats.Msg); ok {
+		r0 = rf(_a0)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*nats.Msg)
+		}
+	}
+
+	return r0
+}
+
+type mockConstructorTestingTNewMsgFilter interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewMsgFilter creates a new instance of MsgFilter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewMsgFilter(t mockConstructorTestingTNewMsgFilter) *MsgFilter {
+	mock := &MsgFilter{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/pkg/events/mocks/ProtoEncoder.go b/pkg/events/mocks/ProtoEncoder.go
new file mode 100644
index 0000000000000000000000000000000000000000..f0916a3c720b41de88ba029c2d44e23bffbb42c7
--- /dev/null
+++ b/pkg/events/mocks/ProtoEncoder.go
@@ -0,0 +1,68 @@
+// Code generated by mockery v2.20.0. DO NOT EDIT.
+
+package mocks
+
+import (
+	mock "github.com/stretchr/testify/mock"
+	protoiface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// ProtoEncoder is an autogenerated mock type for the ProtoEncoder type
+type ProtoEncoder struct {
+	mock.Mock
+}
+
+// FromProto provides a mock function with given fields: message
+func (_m *ProtoEncoder) FromProto(message protoiface.MessageV1) error {
+	ret := _m.Called(message)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(protoiface.MessageV1) error); ok {
+		r0 = rf(message)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// ToProto provides a mock function with given fields:
+func (_m *ProtoEncoder) ToProto() (protoiface.MessageV1, error) {
+	ret := _m.Called()
+
+	var r0 protoiface.MessageV1
+	var r1 error
+	if rf, ok := ret.Get(0).(func() (protoiface.MessageV1, error)); ok {
+		return rf()
+	}
+	if rf, ok := ret.Get(0).(func() protoiface.MessageV1); ok {
+		r0 = rf()
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(protoiface.MessageV1)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func() error); ok {
+		r1 = rf()
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+type mockConstructorTestingTNewProtoEncoder interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewProtoEncoder creates a new instance of ProtoEncoder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewProtoEncoder(t mockConstructorTestingTNewProtoEncoder) *ProtoEncoder {
+	mock := &ProtoEncoder{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/pkg/events/mocks/PublishOption.go b/pkg/events/mocks/PublishOption.go
new file mode 100644
index 0000000000000000000000000000000000000000..f3517b7602f5b9714cc9de98ac6413bf324f91e8
--- /dev/null
+++ b/pkg/events/mocks/PublishOption.go
@@ -0,0 +1,33 @@
+// Code generated by mockery v2.20.0. DO NOT EDIT.
+
+package mocks
+
+import (
+	events "git.perx.ru/perxis/perxis-go/pkg/events"
+	mock "github.com/stretchr/testify/mock"
+)
+
+// PublishOption is an autogenerated mock type for the PublishOption type
+type PublishOption struct {
+	mock.Mock
+}
+
+// Execute provides a mock function with given fields: options
+func (_m *PublishOption) Execute(options *events.PublishOptions) {
+	_m.Called(options)
+}
+
+type mockConstructorTestingTNewPublishOption interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewPublishOption creates a new instance of PublishOption. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewPublishOption(t mockConstructorTestingTNewPublishOption) *PublishOption {
+	mock := &PublishOption{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/pkg/events/mocks/SubscribeOption.go b/pkg/events/mocks/SubscribeOption.go
new file mode 100644
index 0000000000000000000000000000000000000000..5b2a9449f517c4d0881a53a64194139d50203961
--- /dev/null
+++ b/pkg/events/mocks/SubscribeOption.go
@@ -0,0 +1,33 @@
+// Code generated by mockery v2.20.0. DO NOT EDIT.
+
+package mocks
+
+import (
+	events "git.perx.ru/perxis/perxis-go/pkg/events"
+	mock "github.com/stretchr/testify/mock"
+)
+
+// SubscribeOption is an autogenerated mock type for the SubscribeOption type
+type SubscribeOption struct {
+	mock.Mock
+}
+
+// Execute provides a mock function with given fields: options
+func (_m *SubscribeOption) Execute(options *events.SubscribeOptions) {
+	_m.Called(options)
+}
+
+type mockConstructorTestingTNewSubscribeOption interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewSubscribeOption creates a new instance of SubscribeOption. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewSubscribeOption(t mockConstructorTestingTNewSubscribeOption) *SubscribeOption {
+	mock := &SubscribeOption{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/pkg/events/mocks/Subscription.go b/pkg/events/mocks/Subscription.go
new file mode 100644
index 0000000000000000000000000000000000000000..b43ed0fa5e55b3d026a6d682b44facb0f60c1ed3
--- /dev/null
+++ b/pkg/events/mocks/Subscription.go
@@ -0,0 +1,39 @@
+// Code generated by mockery v2.20.0. DO NOT EDIT.
+
+package mocks
+
+import mock "github.com/stretchr/testify/mock"
+
+// Subscription is an autogenerated mock type for the Subscription type
+type Subscription struct {
+	mock.Mock
+}
+
+// Unsubscribe provides a mock function with given fields:
+func (_m *Subscription) Unsubscribe() error {
+	ret := _m.Called()
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func() error); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+type mockConstructorTestingTNewSubscription interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewSubscription creates a new instance of Subscription. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewSubscription(t mockConstructorTestingTNewSubscription) *Subscription {
+	mock := &Subscription{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/pkg/events/nats.go b/pkg/events/nats.go
new file mode 100644
index 0000000000000000000000000000000000000000..4540a48216432f2332c4a9913051b3d8934127ed
--- /dev/null
+++ b/pkg/events/nats.go
@@ -0,0 +1,197 @@
+package events
+
+import (
+	"reflect"
+
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"github.com/nats-io/nats.go"
+)
+
+type natsConnection struct {
+	Conn *nats.Conn
+	enc  nats.Encoder
+	// prefix prepended to all subjects
+	prefix string
+}
+
+func Open(url string, prefix string) (Connection, error) {
+	var err error
+	b := new(natsConnection)
+	b.Conn, err = nats.Connect(url)
+	if err != nil {
+		return nil, err
+	}
+	b.enc = &ProtobufEncoder{}
+	b.prefix = prefix
+	return b, nil
+}
+
+func (c *natsConnection) getSubject(subject string) string {
+	if c.prefix != "" {
+		subject = c.prefix + "." + subject
+	}
+	return subject
+}
+
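+// Publish sends msg to the given subject (with the connection prefix applied).
+// A *nats.Msg value is published as-is, a []byte becomes the raw payload, and
+// any other value is encoded with the connection encoder; publish filters built
+// from the options (e.g. Tag) may modify the message or drop it entirely.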
+func (c *natsConnection) Publish(subject string, msg any, opts ...PublishOption) error {
+	m := &nats.Msg{Subject: c.getSubject(subject)}
+	switch v := msg.(type) {
+	case *nats.Msg:
+		m = v
+	case []byte:
+		m.Data = v
+	default:
+		data, err := c.enc.Encode(subject, v)
+		if err != nil {
+			return err
+		}
+		m.Data = data
+	}
+
+	filters := PublishFilters(NewPublishOptions(opts...))
+	if len(filters) > 0 {
+		for _, f := range filters {
+			if m = f(m); m == nil {
+				return nil
+			}
+		}
+	}
+
+	return c.Conn.PublishMsg(m)
+}
+
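+// Subscribe registers handler for the given subject (with the connection prefix
+// applied). Messages are filtered according to the subscribe options (e.g.
+// FilterTag) before the handler is invoked. As with nats.EncodedConn, the
+// handler may take one of the following forms, where *T is any type decodable
+// by the connection encoder:
+//
+//	func(m *nats.Msg)
+//	func(payload *T)
+//	func(subject string, payload *T)
+//	func(subject string, reply string, payload *T)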
+func (c *natsConnection) Subscribe(subject string, handler any, opts ...SubscribeOption) (Subscription, error) {
+
+	subject = c.getSubject(subject)
+	return c.subscribe(subject, handler, SubscribeFilters(NewSubscribeOptions(opts...)))
+}
+
+func (c *natsConnection) Close() (err error) {
+	if err = c.Conn.Drain(); err != nil {
+		return err
+	}
+	c.Conn.Close()
+	return
+}
+
+// Dissect the cb Handler's signature
+func argInfo(cb nats.Handler) (reflect.Type, int) {
+	cbType := reflect.TypeOf(cb)
+	if cbType.Kind() != reflect.Func {
+		panic("handler needs to be a func")
+	}
+	numArgs := cbType.NumIn()
+	if numArgs == 0 {
+		return nil, numArgs
+	}
+	return cbType.In(numArgs - 1), numArgs
+}
+
+var emptyMsgType = reflect.TypeOf(&nats.Msg{})
+
+type MsgFilter func(*nats.Msg) *nats.Msg
+
+// Internal implementation that all public functions will use.
+func (c *natsConnection) subscribe(subject string, cb nats.Handler, filters []MsgFilter) (*nats.Subscription, error) {
+	if cb == nil {
+		return nil, errors.New("handler required for subscription")
+	}
+	argType, numArgs := argInfo(cb)
+	if argType == nil {
+		return nil, errors.New("handler requires at least one argument")
+	}
+
+	cbValue := reflect.ValueOf(cb)
+	wantsRaw := (argType == emptyMsgType)
+
+	natsCB := func(m *nats.Msg) {
+		if len(filters) > 0 {
+			for _, f := range filters {
+				if m = f(m); m == nil {
+					return
+				}
+			}
+		}
+
+		var oV []reflect.Value
+		if wantsRaw {
+			oV = []reflect.Value{reflect.ValueOf(m)}
+		} else {
+			var oPtr reflect.Value
+			if argType.Kind() != reflect.Ptr {
+				oPtr = reflect.New(argType)
+			} else {
+				oPtr = reflect.New(argType.Elem())
+			}
+			if err := c.enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil {
+				if errorHandler := c.Conn.ErrorHandler(); errorHandler != nil {
+					errorHandler(c.Conn, m.Sub, errors.Wrap(err, "Got an unmarshal error"))
+				}
+				return
+			}
+			if argType.Kind() != reflect.Ptr {
+				oPtr = reflect.Indirect(oPtr)
+			}
+
+			switch numArgs {
+			case 1:
+				oV = []reflect.Value{oPtr}
+			case 2:
+				subV := reflect.ValueOf(m.Subject)
+				oV = []reflect.Value{subV, oPtr}
+			case 3:
+				subV := reflect.ValueOf(m.Subject)
+				replyV := reflect.ValueOf(m.Reply)
+				oV = []reflect.Value{subV, replyV, oPtr}
+			}
+
+		}
+		cbValue.Call(oV)
+	}
+
+	return c.Conn.Subscribe(subject, natsCB)
+}
+
+func PublishFilters(opts *PublishOptions) []MsgFilter {
+	if opts == nil {
+		return nil
+	}
+	var filters []MsgFilter
+
+	if len(opts.Tags) > 0 {
+		filters = append(filters, func(msg *nats.Msg) *nats.Msg {
+			if msg.Header == nil {
+				msg.Header = make(nats.Header)
+			}
+			for _, v := range opts.Tags {
+				msg.Header.Add("Tag", v)
+			}
+			return msg
+		})
+	}
+
+	return filters
+}
+
+func SubscribeFilters(opts *SubscribeOptions) []MsgFilter {
+	if opts == nil {
+		return nil
+	}
+	var filters []MsgFilter
+
+	if len(opts.FilterTags) > 0 {
+		filters = append(filters, func(msg *nats.Msg) *nats.Msg {
+			tags := msg.Header.Values("Tag")
+			for _, tag := range tags {
+				for _, v := range opts.FilterTags {
+					if v == tag {
+						return msg
+					}
+				}
+			}
+			return nil
+		})
+	}
+
+	return filters
+}
diff --git a/pkg/events/nats_integration_test.go b/pkg/events/nats_integration_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..8fe38dbe1080d6a1ec7acdc9c565afd4c3852cdc
--- /dev/null
+++ b/pkg/events/nats_integration_test.go
@@ -0,0 +1,82 @@
+//go:build integration
+
+package events
+
+import (
+	"testing"
+	"time"
+
+	pb "git.perx.ru/perxis/perxis-go/pkg/events/test_proto"
+	"github.com/golang/protobuf/proto"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+type Test struct {
+	Text string
+}
+
+func (t *Test) ToProto() (proto.Message, error) {
+	return &pb.Test{Text: t.Text}, nil
+}
+
+func (t *Test) FromProto(message proto.Message) error {
+	t.Text = message.(*pb.Test).Text
+	return nil
+}
+
+func TestNatsBroker(t *testing.T) {
+
+	b, err := Open("nats://localhost:4222", "")
+	require.NoError(t, err)
+
+	resCh := make(chan string, 3)
+	_, err = b.Subscribe("a.*.c.>", func(t *Test) { resCh <- t.Text })
+	require.NoError(t, err)
+
+	require.NoError(t, b.Publish("a.b.c", &Test{Text: "1"}))
+	require.NoError(t, b.Publish("a.b.c.d", &Test{Text: "2"}))
+	require.NoError(t, b.Publish("a.b.c.d.e", &Test{Text: "3"}))
+	require.NoError(t, b.Publish("a.x.c", &Test{Text: "4"}))
+	require.NoError(t, b.Publish("a.x.c.d", &Test{Text: "5"}))
+
+	time.Sleep(200 * time.Millisecond)
+	require.NoError(t, b.Close())
+	close(resCh)
+	assert.ElementsMatch(t, []string{"2", "3", "5"}, func() []string {
+		var res []string
+		for v := range resCh {
+			res = append(res, v)
+		}
+		return res
+	}())
+}
+
+func TestTags(t *testing.T) {
+
+	b, err := Open("nats://localhost:4222", "")
+	require.NoError(t, err)
+
+	resCh := make(chan string, 3)
+	_, err = b.Subscribe("a.*.c.>", func(t *Test) { resCh <- t.Text }, FilterTag("one", "two", "three"))
+	require.NoError(t, err)
+
+	require.NoError(t, b.Publish("a.b.c", &Test{Text: "1"}))
+	require.NoError(t, b.Publish("a.b.c.d", &Test{Text: "2"}))
+	require.NoError(t, b.Publish("a.b.c.d.e", &Test{Text: "3"}, Tag("one")))
+	require.NoError(t, b.Publish("a.x.c", &Test{Text: "4"}))
+	require.NoError(t, b.Publish("a.x.c.d", &Test{Text: "5"}, Tag("two")))
+	require.NoError(t, b.Publish("a.x.c.d", &Test{Text: "6"}, Tag("two", "one")))
+	require.NoError(t, b.Publish("a.x.c.d", &Test{Text: "7"}, Tag("four")))
+
+	time.Sleep(200 * time.Millisecond)
+	require.NoError(t, b.Close())
+	close(resCh)
+	assert.ElementsMatch(t, []string{"3", "5", "6"}, func() []string {
+		var res []string
+		for v := range resCh {
+			res = append(res, v)
+		}
+		return res
+	}())
+}
diff --git a/pkg/events/proto_encoder.go b/pkg/events/proto_encoder.go
new file mode 100644
index 0000000000000000000000000000000000000000..f18c2f576daddd4af61c9ca8138bbbc1903ad280
--- /dev/null
+++ b/pkg/events/proto_encoder.go
@@ -0,0 +1,63 @@
+package events
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"github.com/golang/protobuf/proto"
+	"github.com/nats-io/nats.go"
+	"github.com/nats-io/nats.go/encoders/protobuf"
+)
+
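+// ProtoEncoder must be implemented by every value that is published or received
+// through the ProtobufEncoder: ToProto converts the value to its protobuf
+// message, FromProto fills the value from a decoded message. A minimal sketch
+// (the Item type and its pb counterpart are assumptions, not part of this package):
+//
+//	func (i *Item) ToProto() (proto.Message, error) { return &pb.Item{Id: i.ID}, nil }
+//	func (i *Item) FromProto(m proto.Message) error { i.ID = m.(*pb.Item).Id; return nil }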
+type ProtoEncoder interface {
+	ToProto() (proto.Message, error)
+	FromProto(message proto.Message) error
+}
+
+const (
+	ProtobufEncoderName = "protobuf"
+)
+
+func init() {
+	nats.RegisterEncoder(ProtobufEncoderName, &ProtobufEncoder{})
+}
+
+type ProtobufEncoder struct {
+	protobuf.ProtobufEncoder
+}
+
+var (
+	ErrInvalidProtoMsgEncode = errors.New("events: object passed to encode must implement ProtoEncoder")
+	ErrInvalidProtoMsgDecode = errors.New("events: object passed to decode must implement ProtoEncoder")
+)
+
+func (pb *ProtobufEncoder) Encode(subject string, v interface{}) ([]byte, error) {
+	if v == nil {
+		return nil, nil
+	}
+	e, ok := v.(ProtoEncoder)
+	if !ok {
+		return nil, ErrInvalidProtoMsgEncode
+	}
+
+	m, err := e.ToProto()
+	if err != nil {
+		return nil, errors.Wrap(err, "nats: encode to proto")
+	}
+
+	return pb.ProtobufEncoder.Encode(subject, m)
+}
+
+func (pb *ProtobufEncoder) Decode(subject string, data []byte, vPtr interface{}) error {
+
+	enc, ok := vPtr.(ProtoEncoder)
+	if !ok {
+		return ErrInvalidProtoMsgDecode
+	}
+
+	msg, err := enc.ToProto()
+	if err != nil {
+		return errors.Wrap(err, "nats: decode to proto")
+	}
+
+	if err := pb.ProtobufEncoder.Decode(subject, data, msg); err != nil {
+		return err
+	}
+
+	return enc.FromProto(msg)
+}
diff --git a/pkg/events/test_proto/test.pb.go b/pkg/events/test_proto/test.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..de333160b391355e2d56b0976e547d58f63a62e1
--- /dev/null
+++ b/pkg/events/test_proto/test.pb.go
@@ -0,0 +1,143 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.27.1
+// 	protoc        v3.21.5
+// source: test.proto
+
+package test_proto
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Test struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
+}
+
+func (x *Test) Reset() {
+	*x = Test{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_test_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Test) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Test) ProtoMessage() {}
+
+func (x *Test) ProtoReflect() protoreflect.Message {
+	mi := &file_test_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Test.ProtoReflect.Descriptor instead.
+func (*Test) Descriptor() ([]byte, []int) {
+	return file_test_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Test) GetText() string {
+	if x != nil {
+		return x.Text
+	}
+	return ""
+}
+
+var File_test_proto protoreflect.FileDescriptor
+
+var file_test_proto_rawDesc = []byte{
+	0x0a, 0x0a, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x74, 0x65,
+	0x73, 0x74, 0x22, 0x1a, 0x0a, 0x04, 0x54, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65,
+	0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x78, 0x74, 0x42, 0x38,
+	0x5a, 0x36, 0x67, 0x69, 0x74, 0x2e, 0x70, 0x65, 0x72, 0x78, 0x2e, 0x72, 0x75, 0x2f, 0x70, 0x65,
+	0x72, 0x78, 0x69, 0x73, 0x2f, 0x70, 0x65, 0x72, 0x78, 0x69, 0x73, 0x2f, 0x62, 0x72, 0x6f, 0x6b,
+	0x65, 0x72, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3b, 0x74, 0x65,
+	0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_test_proto_rawDescOnce sync.Once
+	file_test_proto_rawDescData = file_test_proto_rawDesc
+)
+
+func file_test_proto_rawDescGZIP() []byte {
+	file_test_proto_rawDescOnce.Do(func() {
+		file_test_proto_rawDescData = protoimpl.X.CompressGZIP(file_test_proto_rawDescData)
+	})
+	return file_test_proto_rawDescData
+}
+
+var file_test_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_test_proto_goTypes = []interface{}{
+	(*Test)(nil), // 0: test.Test
+}
+var file_test_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_test_proto_init() }
+func file_test_proto_init() {
+	if File_test_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_test_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Test); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_test_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_test_proto_goTypes,
+		DependencyIndexes: file_test_proto_depIdxs,
+		MessageInfos:      file_test_proto_msgTypes,
+	}.Build()
+	File_test_proto = out.File
+	file_test_proto_rawDesc = nil
+	file_test_proto_goTypes = nil
+	file_test_proto_depIdxs = nil
+}
diff --git a/pkg/events/test_proto/test.proto b/pkg/events/test_proto/test.proto
new file mode 100644
index 0000000000000000000000000000000000000000..fecbc9d39bf39c65d97dc8d21cba8933a4243450
--- /dev/null
+++ b/pkg/events/test_proto/test.proto
@@ -0,0 +1,9 @@
+syntax = "proto3";
+
+option go_package = "git.perx.ru/perxis/perxis-go/broker/test_proto;test_proto";
+
+package test;
+
+message Test {
+  string text = 1;
+}
diff --git a/pkg/filter/filter.go b/pkg/filter/filter.go
new file mode 100644
index 0000000000000000000000000000000000000000..ea2f1d436aba0ecced0a6473440e9fb4a782664d
--- /dev/null
+++ b/pkg/filter/filter.go
@@ -0,0 +1,410 @@
+package filter
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	"git.perx.ru/perxis/perxis-go/pkg/schema/field"
+	"git.perx.ru/perxis/perxis-go/pkg/schema/validate"
+	"github.com/hashicorp/go-multierror"
+	"github.com/mitchellh/mapstructure"
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/x/bsonx"
+)
+
+type Op string
+
+const (
+	Equal          Op = "eq"
+	NotEqual       Op = "neq"
+	Less           Op = "lt"
+	LessOrEqual    Op = "lte"
+	Greater        Op = "gt"
+	GreaterOrEqual Op = "gte"
+	In             Op = "in"
+	NotIn          Op = "nin"
+	Contains       Op = "contains"
+	NotContains    Op = "ncontains"
+	Or             Op = "or"
+	And            Op = "and"
+	Near           Op = "near"
+)
+
+type Filter struct {
+	Op    Op
+	Field string
+	Value interface{}
+}
+
+func (f Filter) Format(s fmt.State, verb rune) {
+	fmt.Fprintf(s, "{Op:%s Field:%s Value:%+v}", f.Op, f.Field, f.Value)
+}
+
+func NewFilter(op Op, field string, val interface{}) *Filter {
+	return &Filter{
+		Op:    op,
+		Field: field,
+		Value: val,
+	}
+}
+
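+// FilterHandler validates filters against one or more collection schemas and,
+// through the configured QueryBuilder, turns them into a storage-level query.
+// A minimal sketch of the intended flow (the schema fields are assumptions):
+//
+//	h := NewFilterHandler(sch).SetTrimPrefix("data")
+//	h.SetQueryBuilder(NewMongoQueryBuilder())
+//	f := NewFilter(Equal, "data.str", "zzz")
+//	if err := h.Validate(f); err != nil {
+//		// reject the request
+//	}
+//	query := h.Query(f) // bson.M, ready for the Mongo driver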
+type FilterHandler struct {
+	schemas  []*schema.Schema
+	qbuilder QueryBuilder
+	prefix   string
+}
+
+func NewFilterHandler(sch ...*schema.Schema) *FilterHandler {
+	return &FilterHandler{
+		schemas: sch,
+		//qbuilder: qb,
+	}
+}
+
+func (h *FilterHandler) SetTrimPrefix(prefix string) *FilterHandler {
+	h.prefix = prefix
+	return h
+}
+
+func (h *FilterHandler) removeFieldPrefix(f string) string {
+	if h.prefix != "" {
+		return strings.TrimPrefix(f, h.prefix+".")
+	}
+	return f
+}
+
+func (h *FilterHandler) AddSchema(sch ...*schema.Schema) *FilterHandler {
+	for _, s := range sch {
+		h.schemas = append(h.schemas, s)
+	}
+	return h
+}
+
+func (h *FilterHandler) SetQueryBuilder(qb QueryBuilder) {
+	h.qbuilder = qb
+}
+
+func (h *FilterHandler) Validate(filter ...*Filter) (err error) {
+	if len(h.schemas) == 0 {
+		return errors.New("no schema provided")
+	}
+
+	for _, sch := range h.schemas {
+		var merr *multierror.Error
+
+		for _, f := range filter {
+			if err := h.validate(sch, f); err != nil {
+				merr = multierror.Append(merr, err)
+			}
+		}
+		if merr != nil {
+			merr.ErrorFormat = func(i []error) string {
+				return fmt.Sprintf("%d validation error(s)", len(i))
+			}
+			return errors.WithField(merr, "filter")
+		}
+	}
+	return nil
+}
+
+// todo: '$elemMatch' - queries against an array field matching a condition, e.g. '{ results: { $elemMatch: { $gte: 80, $lt: 85 } } }' ?
+
+func (h *FilterHandler) validate(sch *schema.Schema, f *Filter) (err error) {
+	if f == nil {
+		return
+	}
+
+	fld := h.removeFieldPrefix(f.Field)
+
+	switch f.Op {
+	case Equal, NotEqual, Less, LessOrEqual, Greater, GreaterOrEqual:
+		fld := sch.GetField(fld)
+		if fld == nil {
+			return h.formatErr(f.Field, f.Op, errors.New("field not found in collection schema"))
+		}
+
+		if f.Value, err = schema.Decode(nil, fld, f.Value); err != nil {
+			return h.formatErr(f.Field, f.Op, err)
+		}
+		if err = validate.Validate(nil, fld, f.Value); err != nil {
+			return h.formatErr(f.Field, f.Op, err)
+		}
+	case In, NotIn:
+		fld := sch.GetField(fld)
+		if fld == nil {
+			return h.formatErr(f.Field, f.Op, errors.New("field not found in collection schema"))
+		}
+		val := reflect.ValueOf(f.Value)
+		if !val.IsValid() || val.IsZero() || (val.Kind() != reflect.Array && val.Kind() != reflect.Slice) {
+			return h.formatErr(f.Field, f.Op, errors.New("\"IN/NOT IN\" operations require array type for value"))
+		}
+
+		switch fld.GetType().(type) {
+		case *field.ArrayType:
+			f.Value, err = schema.Decode(nil, fld, f.Value)
+			if err != nil {
+				return h.formatErr(f.Field, f.Op, err)
+			}
+		default:
+			decodedVal := make([]interface{}, 0, val.Len())
+			for i := 0; i < val.Len(); i++ {
+				v, err := schema.Decode(nil, fld, val.Index(i).Interface())
+				if err != nil {
+					return h.formatErr(f.Field, f.Op, err)
+				}
+				decodedVal = append(decodedVal, v)
+			}
+
+			f.Value = decodedVal
+		}
+
+	case Contains, NotContains:
+		fld := sch.GetField(fld)
+		if fld == nil {
+			return h.formatErr(f.Field, f.Op, errors.New("field not found in collection schema"))
+		}
+
+		typ := fld.GetType()
+
+		if typ.Name() != "string" && typ.Name() != "array" {
+			return h.formatErr(f.Field, f.Op, errors.New("\"CONTAINS/NOT CONTAINS\" operations require field to be 'string' or 'string array'"))
+		}
+		if typ.Name() == "array" {
+			params := fld.Params.(*field.ArrayParameters)
+			if params.Item == nil || params.Item.GetType().Name() != "string" {
+				return h.formatErr(f.Field, f.Op, errors.New("\"CONTAINS/NOT CONTAINS\" operations require field to be 'string' or 'string array'"))
+			}
+		}
+
+		if reflect.TypeOf(f.Value).Kind() != reflect.String {
+			return h.formatErr(f.Field, f.Op, errors.New("\"CONTAINS/NOT CONTAINS\" operations require value to be 'string'"))
+		}
+
+	case Or, And:
+		fltrs, ok := f.Value.([]*Filter)
+		if !ok {
+			return h.formatErr(f.Field, f.Op, errors.New("an array of filters must be provided for \"AND/OR\" operations"))
+		}
+		for _, f := range fltrs {
+			err = h.validate(sch, f)
+			if err != nil {
+				return err
+			}
+		}
+
+	case Near:
+		fld := sch.GetField(fld)
+		if fld == nil {
+			return h.formatErr(f.Field, f.Op, errors.New("field not found in collection schema"))
+		}
+
+		_, ok := fld.Params.(*field.LocationParameters)
+		if !ok {
+			return h.formatErr(f.Field, f.Op, errors.New("field must be a location"))
+		}
+
+		value, ok := f.Value.(map[string]interface{})
+		if !ok {
+			return h.formatErr(f.Field, f.Op, errors.New("filter value should be map"))
+		}
+
+		point, ok := value["point"]
+		if !ok {
+			return h.formatErr(f.Field, f.Op, errors.New("filter value should have location"))
+		}
+
+		var p field.GeoJSON
+		if err := mapstructure.Decode(map[string]interface{}{"type": "Point", "coordinates": point}, &p); err != nil {
+			return h.formatErr(f.Field, f.Op, err)
+		}
+
+		maxD, ok := value["distance"]
+		if ok {
+			v := reflect.ValueOf(maxD)
+			if !v.Type().ConvertibleTo(reflect.TypeOf(float64(0))) {
+				return h.formatErr(f.Field, f.Op, errors.New("filter value distance must be a number"))
+			}
+			val := v.Convert(reflect.TypeOf(float64(0)))
+			if val.Float() < 0 {
+				return h.formatErr(f.Field, f.Op, errors.New("filter value distance should not be negative"))
+			}
+		}
+
+	default:
+		return h.formatErr(f.Field, f.Op, errors.New("unknown operation"))
+	}
+
+	return nil
+}
+
+func (*FilterHandler) formatErr(args ...interface{}) error {
+	var (
+		f   string
+		op  Op
+		err error
+	)
+	for _, arg := range args {
+		switch v := arg.(type) {
+		case string:
+			f = v
+		case Op:
+			op = v
+		case error:
+			err = v
+		}
+	}
+	return errors.WithField(fmt.Errorf("op: '%s' %s", op, err), f)
+}
+
+func (h *FilterHandler) Query(filter ...*Filter) interface{} {
+	return h.qbuilder.Query(filter...)
+}
+
+type QueryBuilder interface {
+	Query(filter ...*Filter) interface{}
+	SetFieldPrefix(string)
+}
+
+type mongoQueryBuilder struct {
+	m      map[Op]string
+	prefix string
+}
+
+func NewMongoQueryBuilder() QueryBuilder {
+	b := new(mongoQueryBuilder)
+	b.m = map[Op]string{
+		Equal:          "$eq",
+		NotEqual:       "$ne",
+		Less:           "$lt",
+		LessOrEqual:    "$lte",
+		Greater:        "$gt",
+		GreaterOrEqual: "$gte",
+		In:             "$in",
+		NotIn:          "$nin",
+		Contains:       "$regex",
+		NotContains:    "$not",
+		Or:             "$or",
+		And:            "$and",
+		Near:           "$near",
+	}
+	return b
+}
+
+func (b *mongoQueryBuilder) getOp(op Op) string {
+	return b.m[op]
+}
+
+func (b *mongoQueryBuilder) SetFieldPrefix(prefix string) {
+	b.prefix = prefix
+}
+
+func (b *mongoQueryBuilder) Query(filters ...*Filter) interface{} {
+	if len(filters) == 0 {
+		return bson.M{}
+	}
+	filter := &Filter{Op: And, Value: filters}
+	return b.query(filter)
+}
+
+func (b *mongoQueryBuilder) query(f *Filter) bson.M {
+	if f == nil {
+		return nil
+	}
+
+	switch f.Op {
+	case Equal, NotEqual, Less, LessOrEqual, Greater, GreaterOrEqual, In, NotIn:
+		return bson.M{
+			b.field(f.Field): bson.M{
+				b.getOp(f.Op): f.Value,
+			},
+		}
+	case Contains, NotContains:
+
+		val, _ := f.Value.(string)
+		return bson.M{
+			b.field(f.Field): bson.M{
+				b.getOp(f.Op): bsonx.Regex(val, ""),
+			},
+		}
+
+	case Or, And:
+		fltrs, ok := f.Value.([]*Filter)
+		if !ok {
+			return nil
+		}
+
+		arr := bson.A{}
+		for _, fltr := range fltrs {
+			arr = append(arr, b.query(fltr))
+		}
+		return bson.M{
+			b.getOp(f.Op): arr,
+		}
+	case Near:
+		val, ok := f.Value.(map[string]interface{})
+		if ok {
+			var p field.GeoJSON
+			c, ok := val["point"]
+			if !ok {
+				return nil
+			}
+			if err := mapstructure.Decode(map[string]interface{}{"type": "Point", "coordinates": c}, &p); err != nil {
+				return nil
+			}
+			q := bson.D{{Key: "$geometry", Value: p}}
+
+			if maxD, ok := val["distance"]; ok {
+				q = append(q, bson.E{Key: "$maxDistance", Value: maxD})
+			}
+
+			return bson.M{
+				b.field(f.Field + ".geometry"): bson.M{b.getOp(f.Op): q},
+			}
+		}
+	}
+
+	return nil
+}
+
+func (b *mongoQueryBuilder) field(f string) string {
+	if b.prefix == "" || strings.HasPrefix(f, b.prefix) {
+		return f
+	}
+	return b.prefix + "." + f
+}
+
+// $text search ??
+//func (b *mongoQueryBuilder) textSearchQuery(filters ...*Filter) string {
+//	cnt, notcnt := "", ""
+//	for _, f := range filters {
+//		val, ok := f.Value.(string)
+//		if !ok {
+//			continue
+//		}
+//		switch f.Op {
+//		case Contains:
+//			if len(cnt) > 0 {
+//				cnt += " "
+//			}
+//			cnt += val
+//		case NotContains:
+//			words := strings.Split(val, " ")
+//			for _, w := range words {
+//				if len(notcnt) > 0 {
+//					notcnt += " "
+//				}
+//				notcnt += "-" + w
+//			}
+//		}
+//	}
+//	if len(cnt) == 0 {
+//		return ""
+//	}
+//	if len(notcnt) > 0 {
+//		cnt += " " + notcnt
+//	}
+//	return cnt
+//}
diff --git a/pkg/filter/filter_test.go b/pkg/filter/filter_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..7283e26a0dfebcc5159211e49c30dc29150f12db
--- /dev/null
+++ b/pkg/filter/filter_test.go
@@ -0,0 +1,473 @@
+package filter
+
+import (
+	"testing"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	"git.perx.ru/perxis/perxis-go/pkg/schema/field"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+func TestFilterHandler(t *testing.T) {
+
+	sch := schema.New(
+		"str", field.String(),
+		"num", field.Number(field.NumberFormatInt),
+		"obj", field.Object(
+			"bool", field.Bool(),
+			"arr", field.Array(field.Time()),
+			"list", field.Array(
+				field.Object(
+					"num1", field.Number(field.NumberFormatFloat),
+					"str1", field.String(),
+				),
+			),
+		),
+		"date", field.Time(),
+		"geo", field.Location(),
+	)
+	h := NewFilterHandler(sch)
+	ph := NewFilterHandler(sch).SetTrimPrefix("data")
+
+	h.SetQueryBuilder(NewMongoQueryBuilder())
+	ph.SetQueryBuilder(NewMongoQueryBuilder())
+
+	var err error
+
+	t.Run("Validate", func(t *testing.T) {
+		t.Run("Simple", func(t *testing.T) {
+			t.Run("String", func(t *testing.T) {
+				f := &Filter{Op: Equal, Field: "str", Value: "zzz"}
+				err = h.Validate(f)
+				require.NoError(t, err)
+
+				f = &Filter{Op: Equal, Field: "data.str", Value: "zzz"}
+				err = ph.Validate(f)
+				require.NoError(t, err)
+			})
+			t.Run("Int", func(t *testing.T) {
+				f := &Filter{Op: NotEqual, Field: "num", Value: 5.0}
+				err = h.Validate(f)
+				require.NoError(t, err)
+				assert.IsType(t, int64(0), f.Value)
+
+				f = &Filter{Op: NotEqual, Field: "data.num", Value: 5.0}
+				err = ph.Validate(f)
+				require.NoError(t, err)
+				assert.IsType(t, int64(0), f.Value)
+			})
+			t.Run("Time", func(t *testing.T) {
+				f := &Filter{Op: LessOrEqual, Field: "date", Value: "22 Dec 1997"}
+				err = h.Validate(f)
+				require.Error(t, err)
+
+				f = &Filter{Op: LessOrEqual, Field: "data.date", Value: "22 Dec 1997"}
+				err = ph.Validate(f)
+				require.Error(t, err)
+			})
+			t.Run("Location", func(t *testing.T) {
+				f := &Filter{Op: Near, Field: "geo", Value: ""}
+				err = h.Validate(f)
+				require.Error(t, err)
+
+				f = &Filter{Op: Near, Field: "data.geo", Value: ""}
+				err = ph.Validate(f)
+				require.Error(t, err)
+
+				fv := map[string]interface{}{
+					"point":    []float64{55, 55},
+					"distance": 1000,
+				}
+
+				f = &Filter{Op: Near, Field: "data.geo", Value: fv}
+				err = ph.Validate(f)
+				require.NoError(t, err)
+
+				fv["distance"] = -1
+				f = &Filter{Op: Near, Field: "data.geo", Value: fv}
+				err = ph.Validate(f)
+				require.Error(t, err)
+
+			})
+		})
+		t.Run("Embedded array field", func(t *testing.T) {
+			w, _ := time.Parse(time.RFC3339, "2012-11-01T22:08:41Z")
+			f := &Filter{Op: In, Field: "obj.arr", Value: []interface{}{"2012-11-01T22:08:41Z"}}
+			err = h.Validate(f)
+			require.NoError(t, err)
+			assert.Equal(t, w, f.Value.([]interface{})[0])
+
+			f = &Filter{Op: In, Field: "data.obj.arr", Value: []interface{}{"2012-11-01T22:08:41Z"}}
+			err = ph.Validate(f)
+			require.NoError(t, err)
+			assert.Equal(t, w, f.Value.([]interface{})[0])
+		})
+		t.Run("Embedded string contains", func(t *testing.T) {
+			f := &Filter{Op: Contains, Field: "obj.list.str1", Value: "zzz"}
+			err = h.Validate(f)
+			require.NoError(t, err)
+
+			f = &Filter{Op: Contains, Field: "data.obj.list.str1", Value: "zzz"}
+			err = ph.Validate(f)
+			require.NoError(t, err)
+		})
+		t.Run("Compound filter with 'OR' operation", func(t *testing.T) {
+			t.Run("No Err", func(t *testing.T) {
+				w1, _ := time.Parse(time.RFC3339, "2012-11-01T22:08:41Z")
+				w2, _ := time.Parse(time.RFC3339, "2015-12-01T22:08:41Z")
+
+				ff := []*Filter{
+					{Op: In, Field: "date", Value: []interface{}{"2012-11-01T22:08:41Z", "2015-12-01T22:08:41Z"}},
+					{Op: Or, Field: "", Value: []*Filter{
+						{Op: And, Field: "", Value: []*Filter{
+							{Op: GreaterOrEqual, Field: "date", Value: "2012-11-01T22:08:41Z"},
+							{Op: LessOrEqual, Field: "date", Value: "2015-12-01T22:08:41Z"},
+						}},
+						{Op: Equal, Field: "obj.bool", Value: true},
+					}},
+				}
+				err = h.Validate(ff...)
+				require.NoError(t, err)
+				assert.ElementsMatch(t, []interface{}{w1, w2}, ff[0].Value.([]interface{}))
+				assert.Equal(t, w1, ff[1].Value.([]*Filter)[0].Value.([]*Filter)[0].Value)
+				assert.Equal(t, w2, ff[1].Value.([]*Filter)[0].Value.([]*Filter)[1].Value)
+
+				ff = []*Filter{
+					{Op: In, Field: "data.date", Value: []interface{}{"2012-11-01T22:08:41Z", "2015-12-01T22:08:41Z"}},
+					{Op: Or, Field: "", Value: []*Filter{
+						{Op: And, Field: "", Value: []*Filter{
+							{Op: GreaterOrEqual, Field: "data.date", Value: "2012-11-01T22:08:41Z"},
+							{Op: LessOrEqual, Field: "data.date", Value: "2015-12-01T22:08:41Z"},
+						}},
+						{Op: Equal, Field: "data.obj.bool", Value: true},
+					}},
+				}
+				err = ph.Validate(ff...)
+				require.NoError(t, err)
+				assert.ElementsMatch(t, []interface{}{w1, w2}, ff[0].Value.([]interface{}))
+				assert.Equal(t, w1, ff[1].Value.([]*Filter)[0].Value.([]*Filter)[0].Value)
+				assert.Equal(t, w2, ff[1].Value.([]*Filter)[0].Value.([]*Filter)[1].Value)
+			})
+			t.Run("Multiple Errors", func(t *testing.T) {
+				ff := []*Filter{
+					{Op: In, Field: "date", Value: []interface{}{"5 Jan 2020", "10 June 2020"}},
+					{Op: Or, Field: "", Value: []*Filter{
+						{Op: And, Field: "", Value: []*Filter{
+							{Op: GreaterOrEqual, Field: "date", Value: "2012-11-01T22:08:41Z"},
+							{Op: LessOrEqual, Field: "date", Value: "2015-12-01T22:08:41Z"},
+						}},
+						{Op: Equal, Field: "obj.bool", Value: 15},
+					}},
+				}
+				err = h.Validate(ff...)
+				require.Error(t, err)
+				assert.Equal(t, err.Error(), "2 validation error(s)")
+
+				ff = []*Filter{
+					{Op: In, Field: "data.date", Value: []interface{}{"5 Jan 2020", "10 June 2020"}},
+					{Op: Or, Field: "", Value: []*Filter{
+						{Op: And, Field: "", Value: []*Filter{
+							{Op: GreaterOrEqual, Field: "data.date", Value: "2012-11-01T22:08:41Z"},
+							{Op: LessOrEqual, Field: "data.date", Value: "2015-12-01T22:08:41Z"},
+						}},
+						{Op: Equal, Field: "data.obj.bool", Value: 15},
+					}},
+				}
+				err = h.Validate(ff...)
+				require.Error(t, err)
+				assert.Equal(t, err.Error(), "2 validation error(s)")
+			})
+		})
+	})
+
+	t.Run("Build Query", func(t *testing.T) {
+		t.Run("No Filters", func(t *testing.T) {
+			res := h.Query()
+			require.IsType(t, primitive.M{}, res)
+
+			pres := ph.Query()
+			assert.Equal(t, res, pres, "пустые запросы с префиксом и без должны быть одинаковыми")
+		})
+		t.Run("Equal String", func(t *testing.T) {
+			f := &Filter{Op: Equal, Field: "data.str", Value: "zzz"}
+			res := h.Query(f)
+			b, ok := res.(primitive.M)
+			require.True(t, ok)
+			assert.Equal(t, primitive.M{"$and": primitive.A{primitive.M{"data.str": primitive.M{"$eq": "zzz"}}}}, b)
+
+			pf := &Filter{Op: Equal, Field: "data.str", Value: "zzz"}
+			pres := ph.Query(pf)
+			assert.Equal(t, res, pres, "запросы в БД с полями с префиксом и без должны быть одинаковыми")
+		})
+		t.Run("In Array", func(t *testing.T) {
+			w, _ := time.Parse(time.RFC3339, "2012-11-01T22:08:41Z")
+			f := &Filter{Op: In, Field: "obj.arr", Value: []interface{}{w}}
+			res := h.Query(f)
+			b, ok := res.(primitive.M)
+			require.True(t, ok)
+			assert.Equal(t, primitive.M{"$and": primitive.A{primitive.M{"obj.arr": primitive.M{"$in": []interface{}{w}}}}}, b)
+		})
+		t.Run("Several ops for one field", func(t *testing.T) {
+			w, _ := time.Parse(time.RFC3339, "2012-11-01T22:08:41Z")
+			f := &Filter{Op: In, Field: "obj.arr", Value: []interface{}{w}}
+			res := h.Query(f)
+			b, ok := res.(primitive.M)
+			require.True(t, ok)
+			assert.Equal(t, primitive.M{"$and": primitive.A{primitive.M{"obj.arr": primitive.M{"$in": []interface{}{w}}}}}, b)
+		})
+	})
+}
+
+//func TestFilterHandler_Integration(t *testing.T) {
+//	ctx := context.Background()
+//
+//	uri := os.Getenv("MONGO_URL")
+//	if uri == "" {
+//		uri = "mongodb://localhost:27017"
+//	}
+//	opts := options.Client().SetConnectTimeout(15 * time.Second).ApplyURI(uri)
+//	client, err := mongo.Connect(context.Background(), opts)
+//	require.NoError(t, err)
+//	err = client.Ping(ctx, nil)
+//	require.NoError(t, err)
+//
+//	sch := schema.New(
+//		"name", field.String(validate.Required()),
+//		"color", field.String(),
+//		"qty", field.Number(field.NumberFormatInt),
+//		"info", field.Object(
+//			"is_fruit", field.Bool(),
+//			"similar", field.Array(
+//				field.Object(
+//					"name", field.Number(field.NumberFormatFloat),
+//					"color", field.String(),
+//				),
+//			),
+//			"desc", field.String(),
+//		),
+//		"produced", field.Time(),
+//		"shipment", field.Array(field.String()),
+//	)
+//
+//	w1, _ := time.Parse(time.RFC3339, "2020-01-01T10:08:41Z")
+//	w2, _ := time.Parse(time.RFC3339, "2020-05-01T10:08:41Z")
+//	w3, _ := time.Parse(time.RFC3339, "2020-10-01T10:08:41Z")
+//
+//	items := []map[string]interface{}{
+//		{
+//			"name":  "apple",
+//			"color": "red",
+//			"qty":   25,
+//			"info": map[string]interface{}{
+//				"is_fruit": true,
+//				"similar": []interface{}{
+//					map[string]interface{}{"name": "pear", "color": "yellow"},
+//					map[string]interface{}{"name": "lemon", "color": "yellow"},
+//				},
+//				"desc": "An apple is the edible fruit . Apple trees are cultivated worldwide and have religious and mythological " +
+//					"significance in many cultures. Apples are eaten with honey at the Jewish New Year of Rosh Hashanah to symbolize a sweet new year.",
+//			},
+//			"produced":   w1,
+//			"shipment":   []interface{}{"Russia", "Iran"},
+//			"storepoint": map[string]interface{}{"type": "Point", "coordinates": []float64{55.751472, 37.618727}},
+//		},
+//		{
+//			"name":  "orange",
+//			"color": "orange",
+//			"qty":   10,
+//			"info": map[string]interface{}{
+//				"is_fruit": true,
+//				"similar": []interface{}{
+//					map[string]interface{}{"name": "lemon", "color": "yellow"},
+//					map[string]interface{}{"name": "grapefruit", "color": "red"},
+//				},
+//				"desc": "The orange is the edible fruit of various citrus species; a hybrid between pomelo and mandarin. Orange trees are widely grown" +
+//					" in tropical and subtropical climates for their sweet fruit. The fruit of the orange tree can be eaten fresh, or processed for its juice or fragrant peel.",
+//			},
+//			"produced":   w2,
+//			"shipment":   []interface{}{"Egypt", "Iran"},
+//			"storepoint": map[string]interface{}{"type": "Point", "coordinates": []float64{55.716797, 37.552809}},
+//		},
+//		{
+//			"name":  "tomato",
+//			"color": "red",
+//			"qty":   1,
+//			"info": map[string]interface{}{
+//				"is_fruit": false,
+//				"similar": []interface{}{
+//					map[string]interface{}{"name": "cucumber", "color": "green"},
+//					map[string]interface{}{"name": "apple", "color": "yellow"},
+//				},
+//				"desc": "The tomato is the edible red berry. The tomato is consumed in diverse ways, raw or cooked, in many dishes, " +
+//					"sauces, salads, and drinks. Numerous varieties of the tomato plant are widely grown in temperate climates across the world.",
+//			},
+//			"produced":   w3,
+//			"shipment":   []interface{}{"Russia", "Italy"},
+//			"storepoint": map[string]interface{}{"type": "Point", "coordinates": []float64{55.760688, 37.619125}},
+//		},
+//	}
+//
+//	db := client.Database("perxis_test_filter")
+//	coll := db.Collection("items")
+//	coll.Drop(ctx)
+//
+//	for _, item := range items {
+//		_, err = coll.InsertOne(ctx, item)
+//		require.NoError(t, err)
+//	}
+//
+//	h := NewFilterHandler(sch)
+//	h.SetQueryBuilder(NewMongoQueryBuilder())
+//
+//	t.Run("By Color [Equal/NotEqual]", func(t *testing.T) {
+//		t.Run("Red", func(t *testing.T) {
+//			query := h.Query(&Filter{Op: Equal, Field: "color", Value: "red"})
+//			res, err := coll.Find(ctx, query)
+//			require.NoError(t, err)
+//
+//			var data []map[string]interface{}
+//			err = res.All(ctx, &data)
+//			require.NoError(t, err)
+//			require.Len(t, data, 2)
+//			assert.ElementsMatch(t, []interface{}{"apple", "tomato"}, []interface{}{data[0]["name"], data[1]["name"]})
+//		})
+//		t.Run("Not Red", func(t *testing.T) {
+//			query := h.Query(&Filter{Op: NotEqual, Field: "color", Value: "red"})
+//			res, err := coll.Find(ctx, query)
+//			require.NoError(t, err)
+//
+//			var data []map[string]interface{}
+//			err = res.All(ctx, &data)
+//			require.NoError(t, err)
+//			require.Len(t, data, 1)
+//			assert.Equal(t, "orange", data[0]["name"])
+//		})
+//	})
+//	t.Run("By Quantity [Less/Greater]", func(t *testing.T) {
+//		query := h.Query(&Filter{Op: LessOrEqual, Field: "qty", Value: 25}, &Filter{Op: Greater, Field: "qty", Value: 1})
+//		res, err := coll.Find(ctx, query)
+//		require.NoError(t, err)
+//
+//		var data []map[string]interface{}
+//		err = res.All(ctx, &data)
+//		require.NoError(t, err)
+//		require.Len(t, data, 2)
+//		assert.ElementsMatch(t, []interface{}{"apple", "orange"}, []interface{}{data[0]["name"], data[1]["name"]})
+//	})
+//	t.Run("Not Fruit [Equal embedded field]", func(t *testing.T) {
+//		query := h.Query(&Filter{Op: Equal, Field: "info.is_fruit", Value: false})
+//		res, err := coll.Find(ctx, query)
+//		require.NoError(t, err)
+//
+//		var data []map[string]interface{}
+//		err = res.All(ctx, &data)
+//		require.NoError(t, err)
+//		require.Len(t, data, 1)
+//		assert.Equal(t, "tomato", data[0]["name"])
+//	})
+//	t.Run("By Similar [In/NotIn]", func(t *testing.T) {
+//		t.Run("Similar to cucumber, pear", func(t *testing.T) {
+//			query := h.Query(&Filter{Op: In, Field: "info.similar.name", Value: []string{"cucumber", "pear"}})
+//			res, err := coll.Find(ctx, query)
+//			require.NoError(t, err)
+//
+//			var data []map[string]interface{}
+//			err = res.All(ctx, &data)
+//			require.NoError(t, err)
+//			require.Len(t, data, 2)
+//			assert.ElementsMatch(t, []interface{}{"apple", "tomato"}, []interface{}{data[0]["name"], data[1]["name"]})
+//		})
+//		t.Run("Not Similar to cucumber, pear", func(t *testing.T) {
+//			query := h.Query(&Filter{Op: NotIn, Field: "info.similar.name", Value: []string{"cucumber", "grapefruit"}})
+//			res, err := coll.Find(ctx, query)
+//			require.NoError(t, err)
+//
+//			var data []map[string]interface{}
+//			err = res.All(ctx, &data)
+//			require.NoError(t, err)
+//			require.Len(t, data, 1)
+//			assert.Equal(t, "apple", data[0]["name"])
+//		})
+//	})
+//	t.Run("By Description [Contains/NotContains]", func(t *testing.T) {
+//		t.Run("Contains", func(t *testing.T) {
+//			query := h.Query(&Filter{Op: And, Value: []*Filter{
+//				&Filter{Op: In, Field: "info.similar.color", Value: []string{"yellow"}},
+//				&Filter{Op: Contains, Field: "info.desc", Value: "edible fruit"},
+//			}})
+//			res, err := coll.Find(ctx, query)
+//			require.NoError(t, err)
+//			var data []map[string]interface{}
+//			err = res.All(ctx, &data)
+//			require.NoError(t, err)
+//			require.Len(t, data, 2)
+//			assert.ElementsMatch(t, []interface{}{"apple", "orange"}, []interface{}{data[0]["name"], data[1]["name"]})
+//		})
+//		t.Run("Not Contains", func(t *testing.T) {
+//			query := h.Query(&Filter{Op: NotContains, Field: "info.desc", Value: "fruit"})
+//			res, err := coll.Find(ctx, query)
+//			require.NoError(t, err)
+//
+//			var data []map[string]interface{}
+//			err = res.All(ctx, &data)
+//			require.NoError(t, err)
+//			for _, d := range data {
+//				fmt.Println(d["name"])
+//			}
+//			require.Len(t, data, 1)
+//			assert.Equal(t, "tomato", data[0]["name"])
+//		})
+//	})
+//	t.Run("By Shipment [Contains/NotContains]", func(t *testing.T) {
+//		t.Run("Contains", func(t *testing.T) {
+//			query := h.Query(
+//				&Filter{Op: Contains, Field: "shipment", Value: "Russia"},
+//			)
+//			res, err := coll.Find(ctx, query)
+//			require.NoError(t, err)
+//			var data []map[string]interface{}
+//			err = res.All(ctx, &data)
+//			require.NoError(t, err)
+//			require.Len(t, data, 2)
+//			assert.ElementsMatch(t, []interface{}{"apple", "tomato"}, []interface{}{data[0]["name"], data[1]["name"]})
+//		})
+//		t.Run("Not Contains", func(t *testing.T) {
+//			query := h.Query(&Filter{Op: NotContains, Field: "shipment", Value: "Iran"})
+//			res, err := coll.Find(ctx, query)
+//			require.NoError(t, err)
+//
+//			var data []map[string]interface{}
+//			err = res.All(ctx, &data)
+//			require.NoError(t, err)
+//			for _, d := range data {
+//				fmt.Println(d["name"])
+//			}
+//			require.Len(t, data, 1)
+//			assert.Equal(t, "tomato", data[0]["name"])
+//		})
+//	})
+//	t.Run("Compound Query", func(t *testing.T) {
+//		query := h.Query(&Filter{Op: Or, Value: []*Filter{
+//			&Filter{Op: And, Value: []*Filter{
+//				&Filter{Op: In, Field: "color", Value: []interface{}{"red", "yellow", "green"}},
+//				&Filter{Op: Less, Field: "qty", Value: 10},
+//			}}, // 1 - tomato
+//			&Filter{Op: Equal, Field: "name", Value: "pepper"}, // 0
+//			&Filter{Op: And, Value: []*Filter{
+//				&Filter{Op: GreaterOrEqual, Field: "produced", Value: w1},
+//				&Filter{Op: Less, Field: "produced", Value: w2}, // 1 - apple
+//			}},
+//		}})
+//		res, err := coll.Find(ctx, query)
+//		require.NoError(t, err)
+//
+//		var data []map[string]interface{}
+//		err = res.All(ctx, &data)
+//		require.NoError(t, err)
+//		require.Len(t, data, 2)
+//		assert.ElementsMatch(t, []interface{}{"apple", "tomato"}, []interface{}{data[0]["name"], data[1]["name"]})
+//	})
+//}
diff --git a/pkg/invitations/middleware/caching_middleware.go b/pkg/invitations/middleware/caching_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..97a1bcb3367fa0cf0ab0f1bc7c638191cb72911c
--- /dev/null
+++ b/pkg/invitations/middleware/caching_middleware.go
@@ -0,0 +1,62 @@
+package service
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	service "git.perx.ru/perxis/perxis-go/pkg/invitations"
+	services "git.perx.ru/perxis/perxis-go/pkg/options"
+)
+
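+// CachingMiddleware adds a read-through cache in front of Invitations: Get results are
+// cached by invitation ID, while Accept and Delete invalidate the cached entry.
+//
+// Usage (sketch):
+//
+//	svc = CachingMiddleware(cache.NewCache(1000, time.Minute))(svc)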
+func CachingMiddleware(cache *cache.Cache) Middleware {
+	return func(next service.Invitations) service.Invitations {
+		return &cachingMiddleware{
+			cache: cache,
+			next:  next,
+		}
+	}
+}
+
+type cachingMiddleware struct {
+	cache *cache.Cache
+	next  service.Invitations
+}
+
+func (m cachingMiddleware) Create(ctx context.Context, invitation *service.Invitation) (inv *service.Invitation, err error) {
+	return m.next.Create(ctx, invitation)
+}
+
+func (m cachingMiddleware) Get(ctx context.Context, invitationId string) (inv *service.Invitation, err error) {
+
+	value, e := m.cache.Get(invitationId)
+	if e == nil {
+		return value.(*service.Invitation), err
+	}
+	inv, err = m.next.Get(ctx, invitationId)
+	if err == nil {
+		m.cache.Set(invitationId, inv)
+	}
+	return inv, err
+}
+
+func (m cachingMiddleware) Accept(ctx context.Context, invitationId string, userId string) (err error) {
+
+	err = m.next.Accept(ctx, invitationId, userId)
+	if err == nil {
+		m.cache.Remove(invitationId)
+	}
+	return err
+}
+
+func (m cachingMiddleware) Find(ctx context.Context, filter *service.Filter, opts *services.FindOptions) (invitations []*service.Invitation, total int, err error) {
+	return m.next.Find(ctx, filter, opts)
+}
+
+func (m cachingMiddleware) Delete(ctx context.Context, invitationId string) (err error) {
+
+	err = m.next.Delete(ctx, invitationId)
+	if err == nil {
+		m.cache.Remove(invitationId)
+	}
+	return err
+}
diff --git a/pkg/invitations/middleware/caching_middleware_test.go b/pkg/invitations/middleware/caching_middleware_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b635ee0cd09b28245a8f29c67d7902a08392eba7
--- /dev/null
+++ b/pkg/invitations/middleware/caching_middleware_test.go
@@ -0,0 +1,129 @@
+package service
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/invitations"
+	invmocks "git.perx.ru/perxis/perxis-go/pkg/invitations/mocks"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestInvitationsCache(t *testing.T) {
+
+	const (
+		orgID = "orgID"
+		email = "123@321.ru"
+		invID = "invID"
+		usrID = "usrID"
+		size  = 5
+		ttl   = 20 * time.Millisecond
+	)
+
+	errNotFound := errors.NotFound(errors.New("not found"))
+
+	ctx := context.Background()
+
+	t.Run("Get from Cache", func(t *testing.T) {
+		inv := &invmocks.Invitations{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(inv)
+
+		inv.On("Get", mock.Anything, invID).Return(&invitations.Invitation{ID: invID, Email: email, OrgID: orgID}, nil).Once()
+
+		v1, err := svc.Get(ctx, invID)
+		require.NoError(t, err)
+
+		v2, err := svc.Get(ctx, invID)
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Ожидается что при повторном запросе объект будет получен из кэша.")
+
+		inv.AssertExpectations(t)
+	})
+
+	t.Run("Invalidate cache", func(t *testing.T) {
+		t.Run("Get from Accept", func(t *testing.T) {
+			inv := &invmocks.Invitations{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(inv)
+
+			inv.On("Get", mock.Anything, invID).Return(&invitations.Invitation{ID: invID, Email: email, OrgID: orgID}, nil).Once()
+
+			v1, err := svc.Get(ctx, invID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, invID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается что при повторном запросе объект будет получен из кэша.")
+
+			inv.On("Accept", mock.Anything, invID, usrID).Return(nil).Once()
+			inv.On("Get", mock.Anything, invID).Return(nil, errNotFound).Once()
+
+			err = svc.Accept(ctx, invID, usrID)
+			require.NoError(t, err)
+
+			_, err = svc.Get(ctx, invID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидается что после подтверждения объект будет удален из кэша и получена ошибка от сервиса.")
+
+			inv.AssertExpectations(t)
+		})
+
+		t.Run("Get from Delete", func(t *testing.T) {
+			inv := &invmocks.Invitations{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(inv)
+
+			inv.On("Get", mock.Anything, invID).Return(&invitations.Invitation{ID: invID, Email: email, OrgID: orgID}, nil).Once()
+
+			v1, err := svc.Get(ctx, invID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, invID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается что при повторном запросе объект будет получен из кэша.")
+
+			inv.On("Delete", mock.Anything, invID).Return(nil).Once()
+			inv.On("Get", mock.Anything, invID).Return(nil, errNotFound).Once()
+
+			err = svc.Delete(ctx, invID)
+			require.NoError(t, err)
+
+			_, err = svc.Get(ctx, invID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидается, что после удаления объект будет удален из кэша и получена ошибка от сервиса.")
+
+			inv.AssertExpectations(t)
+		})
+
+		t.Run("After TTL expired", func(t *testing.T) {
+			inv := &invmocks.Invitations{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(inv)
+
+			inv.On("Get", mock.Anything, invID).Return(&invitations.Invitation{ID: invID, Email: email, OrgID: orgID}, nil).Once()
+
+			v1, err := svc.Get(ctx, invID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, invID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается что при повторном запросе объект будет получен из кэша.")
+
+			time.Sleep(2 * ttl)
+
+			inv.On("Get", mock.Anything, invID).Return(&invitations.Invitation{ID: invID, Email: email, OrgID: orgID}, nil).Once()
+
+			v3, err := svc.Get(ctx, invID)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v3, "Ожидается, что при истечении ttl кэш будет очищен.")
+
+			inv.AssertExpectations(t)
+		})
+	})
+}
diff --git a/pkg/invitations/middleware/error_logging_middleware.go b/pkg/invitations/middleware/error_logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..39c823ae6fdf7fb7cb5c181c42099ca16c1a2bad
--- /dev/null
+++ b/pkg/invitations/middleware/error_logging_middleware.go
@@ -0,0 +1,81 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/error_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/invitations -i Invitations -t ../../../assets/templates/middleware/error_log -o error_logging_middleware.go -l ""
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/invitations"
+	"git.perx.ru/perxis/perxis-go/pkg/options"
+	"go.uber.org/zap"
+)
+
+// errorLoggingMiddleware implements invitations.Invitations that is instrumented with logging
+type errorLoggingMiddleware struct {
+	logger *zap.Logger
+	next   invitations.Invitations
+}
+
+// ErrorLoggingMiddleware instruments an implementation of the invitations.Invitations with simple logging
+func ErrorLoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next invitations.Invitations) invitations.Invitations {
+		return &errorLoggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *errorLoggingMiddleware) Accept(ctx context.Context, invitationId string, userId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Accept(ctx, invitationId, userId)
+}
+
+func (m *errorLoggingMiddleware) Create(ctx context.Context, invitation *invitations.Invitation) (created *invitations.Invitation, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Create(ctx, invitation)
+}
+
+func (m *errorLoggingMiddleware) Delete(ctx context.Context, invitationId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Delete(ctx, invitationId)
+}
+
+func (m *errorLoggingMiddleware) Find(ctx context.Context, filter *invitations.Filter, opts *options.FindOptions) (invitations []*invitations.Invitation, total int, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Find(ctx, filter, opts)
+}
+
+func (m *errorLoggingMiddleware) Get(ctx context.Context, invitationId string) (invitation *invitations.Invitation, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Get(ctx, invitationId)
+}
diff --git a/pkg/invitations/middleware/logging_middleware.go b/pkg/invitations/middleware/logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..8f1ceb9959497794cc4bc7a2d6a963a949d9b1a3
--- /dev/null
+++ b/pkg/invitations/middleware/logging_middleware.go
@@ -0,0 +1,216 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/access_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/invitations -i Invitations -t ../../../assets/templates/middleware/access_log -o logging_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/auth"
+	"git.perx.ru/perxis/perxis-go/pkg/invitations"
+	"git.perx.ru/perxis/perxis-go/pkg/options"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// loggingMiddleware implements invitations.Invitations that is instrumented with logging
+type loggingMiddleware struct {
+	logger *zap.Logger
+	next   invitations.Invitations
+}
+
+// LoggingMiddleware instruments an implementation of the invitations.Invitations with simple logging
+func LoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next invitations.Invitations) invitations.Invitations {
+		return &loggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *loggingMiddleware) Accept(ctx context.Context, invitationId string, userId string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"invitationId": invitationId,
+		"userId":       userId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Accept.Request", fields...)
+
+	err = m.next.Accept(ctx, invitationId, userId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Accept.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Create(ctx context.Context, invitation *invitations.Invitation) (created *invitations.Invitation, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":        ctx,
+		"invitation": invitation} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Request", fields...)
+
+	created, err = m.next.Create(ctx, invitation)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"created": created,
+		"err":     err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Response", fields...)
+
+	return created, err
+}
+
+func (m *loggingMiddleware) Delete(ctx context.Context, invitationId string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"invitationId": invitationId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Request", fields...)
+
+	err = m.next.Delete(ctx, invitationId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Find(ctx context.Context, filter *invitations.Filter, opts *options.FindOptions) (invitations []*invitations.Invitation, total int, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":    ctx,
+		"filter": filter,
+		"opts":   opts} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Find.Request", fields...)
+
+	invitations, total, err = m.next.Find(ctx, filter, opts)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"invitations": invitations,
+		"total":       total,
+		"err":         err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Find.Response", fields...)
+
+	return invitations, total, err
+}
+
+func (m *loggingMiddleware) Get(ctx context.Context, invitationId string) (invitation *invitations.Invitation, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"invitationId": invitationId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Request", fields...)
+
+	invitation, err = m.next.Get(ctx, invitationId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"invitation": invitation,
+		"err":        err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Response", fields...)
+
+	return invitation, err
+}
diff --git a/pkg/invitations/middleware/middleware.go b/pkg/invitations/middleware/middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..1c054d8ae96ab5d45f1dd83af9bb440b3d429817
--- /dev/null
+++ b/pkg/invitations/middleware/middleware.go
@@ -0,0 +1,28 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/middleware
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/invitations -i Invitations -t ../../../assets/templates/middleware/middleware -o middleware.go -l ""
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/invitations"
+	"go.uber.org/zap"
+)
+
+type Middleware func(invitations.Invitations) invitations.Invitations
+
+func WithLog(s invitations.Invitations, logger *zap.Logger, log_access bool) invitations.Invitations {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	logger = logger.Named("Invitations")
+	s = ErrorLoggingMiddleware(logger)(s)
+	if log_access {
+		s = LoggingMiddleware(logger)(s)
+	}
+	s = RecoveringMiddleware(logger)(s)
+	return s
+}
diff --git a/pkg/invitations/middleware/recovering_middleware.go b/pkg/invitations/middleware/recovering_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..195933ecce0d9766d8a92bbe1e52df6dc4d7064e
--- /dev/null
+++ b/pkg/invitations/middleware/recovering_middleware.go
@@ -0,0 +1,92 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/recovery
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/invitations -i Invitations -t ../../../assets/templates/middleware/recovery -o recovering_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+
+	"git.perx.ru/perxis/perxis-go/pkg/invitations"
+	"git.perx.ru/perxis/perxis-go/pkg/options"
+	"go.uber.org/zap"
+)
+
+// recoveringMiddleware implements invitations.Invitations that is instrumented with logging
+type recoveringMiddleware struct {
+	logger *zap.Logger
+	next   invitations.Invitations
+}
+
+// RecoveringMiddleware instruments an implementation of the invitations.Invitations with simple logging
+func RecoveringMiddleware(logger *zap.Logger) Middleware {
+	return func(next invitations.Invitations) invitations.Invitations {
+		return &recoveringMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *recoveringMiddleware) Accept(ctx context.Context, invitationId string, userId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Accept(ctx, invitationId, userId)
+}
+
+func (m *recoveringMiddleware) Create(ctx context.Context, invitation *invitations.Invitation) (created *invitations.Invitation, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Create(ctx, invitation)
+}
+
+func (m *recoveringMiddleware) Delete(ctx context.Context, invitationId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Delete(ctx, invitationId)
+}
+
+func (m *recoveringMiddleware) Find(ctx context.Context, filter *invitations.Filter, opts *options.FindOptions) (invitations []*invitations.Invitation, total int, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Find(ctx, filter, opts)
+}
+
+func (m *recoveringMiddleware) Get(ctx context.Context, invitationId string) (invitation *invitations.Invitation, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Get(ctx, invitationId)
+}
diff --git a/pkg/items/codec.go b/pkg/items/codec.go
new file mode 100644
index 0000000000000000000000000000000000000000..6264c3b582a2af08c1746c763807b315c0ae2fa9
--- /dev/null
+++ b/pkg/items/codec.go
@@ -0,0 +1,9 @@
+package items
+
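+// Encoder converts an Item into an arbitrary external representation.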
+type Encoder interface {
+	Encode(item *Item) (any, error)
+}
+
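+// Decoder restores an Item from an encoded value.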
+type Decoder interface {
+	Decode(value any, item *Item) error
+}
diff --git a/pkg/items/context.go b/pkg/items/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..87e600e5b40da50381245a626e8228ab20485de8
--- /dev/null
+++ b/pkg/items/context.go
@@ -0,0 +1,71 @@
+package items
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/clients"
+	"git.perx.ru/perxis/perxis-go/pkg/environments"
+	"git.perx.ru/perxis/perxis-go/pkg/spaces"
+)
+
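+// Context carries the scope of an item operation: the services involved, the addressed
+// space/environment/collection/item and the "view" identifiers, which default to the
+// base values (see WithContext).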
+type Context struct {
+	Items
+	Clients clients.Clients
+
+	SpaceID      string
+	EnvID        string
+	CollectionID string
+	ItemID       string
+	Item         *Item
+	Space        *spaces.Space
+	Environment  *environments.Environment
+
+	ViewSpaceID       string
+	ViewEnvironmentID string
+	ViewCollectionID  string
+	ViewSpace         *spaces.Space
+	ViewEnvironment   *environments.Environment
+}
+
+type itemsCtx struct{}
+
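+// WithContext stores itmCtx in ctx. Empty View* fields are defaulted from the
+// corresponding base fields; if a Context is already present in ctx it is updated
+// in place instead of wrapping the context again.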
+func WithContext(ctx context.Context, itmCtx *Context) context.Context {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	if itmCtx.ViewSpaceID == "" {
+		itmCtx.ViewSpaceID = itmCtx.SpaceID
+	}
+	if itmCtx.ViewEnvironmentID == "" {
+		itmCtx.ViewEnvironmentID = itmCtx.EnvID
+	}
+	if itmCtx.ViewCollectionID == "" {
+		itmCtx.ViewCollectionID = itmCtx.CollectionID
+	}
+	if itmCtx.ViewSpace == nil {
+		itmCtx.ViewSpace = itmCtx.Space
+	}
+	if itmCtx.ViewEnvironment == nil {
+		itmCtx.ViewEnvironment = itmCtx.Environment
+	}
+
+	p, _ := ctx.Value(itemsCtx{}).(*Context)
+	if p != nil {
+		*p = *itmCtx
+		return ctx
+	}
+
+	return context.WithValue(ctx, itemsCtx{}, itmCtx)
+}
+
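+// GetContext returns the Context stored in ctx, or a new empty Context if none is set.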
+func GetContext(ctx context.Context) *Context {
+	if ctx == nil {
+		return new(Context)
+	}
+	p, _ := ctx.Value(itemsCtx{}).(*Context)
+	if p == nil {
+		return new(Context)
+	}
+	return p
+}
diff --git a/pkg/items/events.go b/pkg/items/events.go
new file mode 100644
index 0000000000000000000000000000000000000000..14ff72216edb875ed3ca15cc879913fdb21a7ed7
--- /dev/null
+++ b/pkg/items/events.go
@@ -0,0 +1,140 @@
+package items
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	pb "git.perx.ru/perxis/perxis-go/proto/items"
+	"github.com/golang/protobuf/proto"
+)
+
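+// Event types emitted for item lifecycle changes. DefaultEventSubject is a
+// text/template pattern used to build the subject of a particular event.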
+const (
+	EventCreateItem    = "create_item"
+	EventUpdateItem    = "update_item"
+	EventPublishItem   = "publish_item"
+	EventUnpublishItem = "unpublish_item"
+	EventDeleteItem    = "delete_item"
+
+	DefaultEventSubject = "content.{{.EventType}}.{{.SpaceID}}.{{.EnvID}}.{{.CollectionID}}.{{.ItemID}}"
+)
+
+var (
+	ErrInvalidEventType = func(expected string, got any) error {
+		return errors.Errorf("invalid message type: expected '%s', got '%T'", expected, got)
+	}
+)
+
+type EventCreate struct {
+	SpaceID      string
+	EnvID        string
+	CollectionID string
+	ItemID       string
+}
+
+func (e EventCreate) ToProto() (proto.Message, error) {
+	return &pb.EventCreate{SpaceId: e.SpaceID, EnvId: e.EnvID, CollectionId: e.CollectionID, ItemId: e.ItemID}, nil
+}
+
+func (e *EventCreate) FromProto(message proto.Message) error {
+	p, ok := message.(*pb.EventCreate)
+	if !ok {
+		return ErrInvalidEventType("*pb.EventCreate", message)
+	}
+
+	e.SpaceID = p.SpaceId
+	e.EnvID = p.EnvId
+	e.CollectionID = p.CollectionId
+	e.ItemID = p.ItemId
+	return nil
+}
+
+type EventUpdate struct {
+	SpaceID      string
+	EnvID        string
+	CollectionID string
+	ItemID       string
+}
+
+func (e EventUpdate) ToProto() (proto.Message, error) {
+	return &pb.EventUpdate{SpaceId: e.SpaceID, EnvId: e.EnvID, CollectionId: e.CollectionID, ItemId: e.ItemID}, nil
+}
+
+func (e *EventUpdate) FromProto(message proto.Message) error {
+	p, ok := message.(*pb.EventUpdate)
+	if !ok {
+		return ErrInvalidEventType("*pb.EventUpdate", message)
+	}
+
+	e.SpaceID = p.SpaceId
+	e.EnvID = p.EnvId
+	e.CollectionID = p.CollectionId
+	e.ItemID = p.ItemId
+	return nil
+}
+
+type EventPublish struct {
+	SpaceID      string
+	EnvID        string
+	CollectionID string
+	ItemID       string
+}
+
+func (e EventPublish) ToProto() (proto.Message, error) {
+	return &pb.EventPublish{SpaceId: e.SpaceID, EnvId: e.EnvID, CollectionId: e.CollectionID, ItemId: e.ItemID}, nil
+}
+
+func (e *EventPublish) FromProto(message proto.Message) error {
+	p, ok := message.(*pb.EventPublish)
+	if !ok {
+		return ErrInvalidEventType("*pb.EventPublish", message)
+	}
+	e.SpaceID = p.SpaceId
+	e.EnvID = p.EnvId
+	e.CollectionID = p.CollectionId
+	e.ItemID = p.ItemId
+	return nil
+}
+
+type EventUnpublish struct {
+	SpaceID      string
+	EnvID        string
+	CollectionID string
+	ItemID       string
+}
+
+func (e EventUnpublish) ToProto() (proto.Message, error) {
+	return &pb.EventUnpublish{SpaceId: e.SpaceID, EnvId: e.EnvID, CollectionId: e.CollectionID, ItemId: e.ItemID}, nil
+}
+
+func (e *EventUnpublish) FromProto(message proto.Message) error {
+	p, ok := message.(*pb.EventUnpublish)
+	if !ok {
+		return ErrInvalidEventType("*pb.EventUnpublish", message)
+	}
+	e.SpaceID = p.SpaceId
+	e.EnvID = p.EnvId
+	e.CollectionID = p.CollectionId
+	e.ItemID = p.ItemId
+	return nil
+}
+
+type EventDelete struct {
+	SpaceID      string
+	EnvID        string
+	CollectionID string
+	ItemID       string
+}
+
+func (e EventDelete) ToProto() (proto.Message, error) {
+	return &pb.EventDelete{SpaceId: e.SpaceID, EnvId: e.EnvID, CollectionId: e.CollectionID, ItemId: e.ItemID}, nil
+}
+
+func (e *EventDelete) FromProto(message proto.Message) error {
+	p, ok := message.(*pb.EventDelete)
+	if !ok {
+		return ErrInvalidEventType("*pb.EventDelete", message)
+	}
+	e.SpaceID = p.SpaceId
+	e.EnvID = p.EnvId
+	e.CollectionID = p.CollectionId
+	e.ItemID = p.ItemId
+	return nil
+}
diff --git a/pkg/items/item.go b/pkg/items/item.go
new file mode 100644
index 0000000000000000000000000000000000000000..fc3a5154f621b601c02761c126be3ce72aa979ca
--- /dev/null
+++ b/pkg/items/item.go
@@ -0,0 +1,566 @@
+package items
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/data"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	"git.perx.ru/perxis/perxis-go/pkg/schema/field"
+	pb "git.perx.ru/perxis/perxis-go/proto/items"
+	"google.golang.org/protobuf/types/known/structpb"
+	"google.golang.org/protobuf/types/known/timestamppb"
+)
+
+var (
+	ErrNotSystemField = errors.New("not a system field")
+	ErrIncorrectValue = errors.New("incorrect value")
+	ErrIncorrectField = errors.New("incorrect field")
+)
+
+type State int
+
+func (s State) String() string {
+	switch s {
+	case StateDraft:
+		return "Draft"
+	case StateArchived:
+		return "Archived"
+	case StateChanged:
+		return "Changed"
+	case StatePublished:
+		return "Published"
+	}
+	return "Unknown"
+}
+
+const (
+	StateDraft State = iota
+	StatePublished
+	StateChanged
+	StateArchived
+
+	StateMax = StateArchived
+
+	SoftDeleteSeparator = "___"
+)
+
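+// PermissionsAllowAny is a Permissions value with every operation allowed.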
+var PermissionsAllowAny = &Permissions{
+	Edit:       true,
+	Archive:    true,
+	Publish:    true,
+	SoftDelete: true,
+	HardDelete: true,
+}
+
+// SystemFields - системные поля Item
+var SystemFields = []string{
+	"id",
+	"space_id",
+	"env_id",
+	"collection_id",
+	"state",
+	"created_rev_at",
+	"created_by",
+	"created_at",
+	"updated_at",
+	"updated_by",
+	"revision_id",
+	"published_at",
+	"published_by",
+	"archived_at",
+	"archived_by",
+	"data",
+	"translations",
+	"locale",
+	"deleted",
+	"hidden",
+	"template",
+}
+
+type Permissions struct {
+	Edit       bool
+	Archive    bool
+	Publish    bool
+	SoftDelete bool
+	HardDelete bool
+}
+
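+// Item is a single collection record together with its system attributes, data and
+// per-locale translations.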
+type Item struct {
+	ID           string                            `json:"id" bson:"_id"` // ID - Идентификатор записи. Автоматически генерируется системой при сохранении первой ревизии.
+	SpaceID      string                            `json:"spaceId" bson:"-"`
+	EnvID        string                            `json:"envId" bson:"-"`
+	CollectionID string                            `json:"collectionId" bson:"-"`
+	State        State                             `json:"state" bson:"state"`
+	CreatedRevAt time.Time                         `json:"createdRevAt,omitempty" bson:"created_rev_at,omitempty"`
+	CreatedBy    string                            `json:"createdBy,omitempty" bson:"created_by,omitempty"`
+	CreatedAt    time.Time                         `json:"createdAt,omitempty" bson:"created_at,omitempty"`
+	UpdatedAt    time.Time                         `json:"updatedAt,omitempty" bson:"updated_at,omitempty"`
+	UpdatedBy    string                            `json:"updatedBy,omitempty" bson:"updated_by,omitempty"`
+	Data         map[string]interface{}            `json:"data" bson:"data"`
+	Locale       string                            `json:"locale" bson:"-"`
+	Translations map[string]map[string]interface{} `json:"translations" bson:"translations,omitempty"`
+	RevisionID   string                            `json:"revId,omitempty" bson:"revision_id"`
+	PublishedAt  time.Time                         `json:"publishedAt,omitempty" bson:"published_at,omitempty"`
+	PublishedBy  string                            `json:"publishedBy,omitempty" bson:"published_by,omitempty"`
+	ArchivedAt   time.Time                         `json:"archivedAt,omitempty" bson:"archived_at,omitempty"`
+	ArchivedBy   string                            `json:"archivedBy,omitempty" bson:"archived_by,omitempty"`
+	Permissions  *Permissions                      `json:"permissions,omitempty" bson:"-"`
+
+	// Флаги записи
+	Deleted  bool `json:"deleted" bson:"deleted,omitempty"`
+	Hidden   bool `json:"hidden" bson:"hidden,omitempty"`
+	Template bool `json:"template" bson:"template,omitempty"`
+}
+
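+// NewItem creates an Item bound to the given space, environment and collection with
+// the provided data and translations.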
+func NewItem(spaceID, envID, collID, id string, data map[string]interface{}, translations map[string]map[string]interface{}) *Item {
+	return &Item{
+		ID:           id,
+		SpaceID:      spaceID,
+		EnvID:        envID,
+		CollectionID: collID,
+		Data:         data,
+		Translations: translations,
+	}
+}
+
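+// Clone returns a copy of the item with Data and every Translations map copied.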
+func (i *Item) Clone() *Item {
+	itm := *i
+	itm.Data = data.CloneMap(i.Data)
+
+	if i.Translations != nil {
+		itm.Translations = make(map[string]map[string]interface{}, len(i.Translations))
+		for t, m := range i.Translations {
+			itm.Translations[t] = data.CloneMap(m)
+		}
+	}
+
+	return &itm
+}
+
+func (i *Item) ToMap() map[string]interface{} {
+	return map[string]interface{}{
+		"id":             i.ID,
+		"space_id":       i.SpaceID,
+		"env_id":         i.EnvID,
+		"collection_id":  i.CollectionID,
+		"state":          i.State,
+		"created_rev_at": i.CreatedRevAt,
+		"created_by":     i.CreatedBy,
+		"created_at":     i.CreatedAt,
+		"updated_at":     i.UpdatedAt,
+		"updated_by":     i.UpdatedBy,
+		"revision_id":    i.RevisionID,
+		"published_at":   i.PublishedAt,
+		"published_by":   i.PublishedBy,
+		"archived_at":    i.ArchivedAt,
+		"archived_by":    i.ArchivedBy,
+		"data":           i.Data,
+		"translations":   i.Translations,
+		"locale":         i.Locale,
+		"deleted":        i.Deleted,
+		"hidden":         i.Hidden,
+		"template":       i.Template,
+	}
+}
+
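+// SetData sets the item data for the given locale; an empty locale sets the default
+// (untranslated) data.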
+func (i *Item) SetData(locale string, data map[string]interface{}) {
+	if locale != "" {
+		if i.Translations == nil {
+			i.Translations = make(map[string]map[string]interface{})
+		}
+		i.Translations[locale] = data
+		return
+	}
+	i.Data = data
+}
+
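+// GetData returns the item data for the given locale, merging the translation over
+// the default data; an empty locale returns the default data as-is.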
+func (i *Item) GetData(locale string) map[string]interface{} {
+	if locale != "" && i.Translations != nil {
+		translation := i.Translations[locale]
+		return MergeData(i.Data, translation)
+	}
+	return i.Data
+}
+
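+// Encode runs the item data and every translation through schema.Encode.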
+func (i Item) Encode(ctx context.Context, s *schema.Schema) (*Item, error) {
+	if i.Data != nil {
+		dt, err := schema.Encode(nil, s, i.Data)
+		if err != nil {
+			//return errors.WithField(err, "data")
+			return nil, err
+		}
+		i.Data = dt.(map[string]interface{})
+	}
+	if len(i.Translations) > 0 {
+		for l, v := range i.Translations {
+			dt, err := schema.Encode(nil, s, v)
+			if err != nil {
+				//return errors.WithField(err, fmt.Sprintf("translations.%s", l))
+				return nil, err
+			}
+			i.Translations[l] = dt.(map[string]interface{})
+		}
+	}
+	return &i, nil
+}
+
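+// Decode decodes the item data using the schema; translations are left unchanged.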
+func (i Item) Decode(ctx context.Context, s *schema.Schema) (res *Item, err error) {
+
+	if i.Data != nil {
+		i.Data, err = s.Decode(ctx, i.Data)
+		if err != nil {
+			return nil, err
+			//return errors.WithField(err, "data")
+		}
+	}
+
+	return &i, nil
+}
+
+// MergeData объединяет наборы данных; при совпадении ключей используется значение из последнего набора
+func MergeData(data ...map[string]interface{}) map[string]interface{} {
+	merge := make(map[string]interface{})
+	for _, d := range data {
+		for k, v := range d {
+			merge[k] = v
+		}
+	}
+	return merge
+}
+
+// ClearData убирает данные, которые не изменились по сравнению с оригинальными данными; изменяет первый переданный набор
+func ClearData(data ...map[string]interface{}) map[string]interface{} {
+	var clear map[string]interface{}
+
+	for _, d := range data {
+		if clear == nil {
+			clear = d
+			continue
+		}
+
+		for k, v := range d {
+			if reflect.DeepEqual(clear[k], v) {
+				delete(clear, k)
+			}
+		}
+	}
+
+	return clear
+}
+
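+// ProcessDataFunc transforms item data according to the schema. ProcessData applies
+// fn to the default data and, for each requested locale, to the merged locale data;
+// the result replaces Translations, so locales that are not requested are dropped.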
+type ProcessDataFunc func(ctx context.Context, sch *schema.Schema, data map[string]interface{}) (map[string]interface{}, error)
+
+func (i Item) ProcessData(ctx context.Context, sch *schema.Schema, fn ProcessDataFunc, locales ...string) (*Item, error) {
+	if i.Data != nil {
+		dt, err := fn(ctx, sch, i.Data)
+		if err != nil {
+			return nil, errors.WithField(err, "data")
+		}
+		i.Data = dt
+	}
+
+	tr := make(map[string]map[string]interface{})
+	for _, l := range locales {
+
+		data := i.GetData(l)
+
+		dt, err := fn(ctx, sch, data)
+		if err != nil {
+			return nil, errors.WithField(err, fmt.Sprintf("translations.%s", l))
+		}
+		tr[l] = dt
+
+	}
+
+	i.Translations = nil
+	if len(tr) > 0 {
+		i.Translations = tr
+	}
+
+	return &i, nil
+}
+
+// IsSystemField возвращает, является ли поле системным
+func IsSystemField(field string) bool {
+	return data.Contains(field, SystemFields)
+}
+
+// SetSystemField устанавливает значение системного поля
+func (i *Item) SetSystemField(field string, value interface{}) error {
+	ok := true
+	switch field {
+	case "id":
+		i.ID, ok = value.(string)
+	case "space_id":
+		i.SpaceID, ok = value.(string)
+	case "env_id":
+		i.EnvID, ok = value.(string)
+	case "collection_id":
+		i.CollectionID, ok = value.(string)
+	case "created_rev_at":
+		i.CreatedRevAt, ok = value.(time.Time)
+	case "created_by":
+		i.CreatedBy, ok = value.(string)
+	case "created_at":
+		i.CreatedAt, ok = value.(time.Time)
+	case "updated_by":
+		i.UpdatedBy, ok = value.(string)
+	case "updated_at":
+		i.UpdatedAt, ok = value.(time.Time)
+	case "revision_id":
+		i.RevisionID, ok = value.(string)
+	case "published_by":
+		i.PublishedBy, ok = value.(string)
+	case "published_at":
+		i.PublishedAt, ok = value.(time.Time)
+	case "hidden":
+		i.Hidden, ok = value.(bool)
+	case "deleted":
+		i.Deleted, ok = value.(bool)
+	case "template":
+		i.Template, ok = value.(bool)
+	default:
+		return ErrNotSystemField
+	}
+
+	if !ok {
+		return ErrIncorrectValue
+	}
+
+	return nil
+}
+
+// GetSystem возвращает значение системного поля
+func (i *Item) GetSystem(field string) (any, error) {
+	switch field {
+	case "id":
+		return i.ID, nil
+	case "space_id":
+		return i.SpaceID, nil
+	case "env_id":
+		return i.EnvID, nil
+	case "collection_id":
+		return i.CollectionID, nil
+	case "created_rev_at":
+		return i.CreatedRevAt, nil
+	case "created_by":
+		return i.CreatedBy, nil
+	case "created_at":
+		return i.CreatedAt, nil
+	case "updated_by":
+		return i.UpdatedBy, nil
+	case "updated_at":
+		return i.UpdatedAt, nil
+	case "revision_id":
+		return i.RevisionID, nil
+	case "published_by":
+		return i.PublishedBy, nil
+	case "published_at":
+		return i.PublishedAt, nil
+	case "hidden":
+		return i.Hidden, nil
+	case "deleted":
+		return i.Deleted, nil
+	case "template":
+		return i.Template, nil
+	}
+
+	return nil, ErrNotSystemField
+}
+
+func (i *Item) setItemData(field string, value interface{}) error {
+	if i.Data == nil {
+		i.Data = make(map[string]any)
+	}
+
+	return data.Set(field, i.Data, value)
+}
+
+func (i *Item) getItemData(field string) (any, error) {
+	if i.Data != nil {
+		if v, ok := data.Get(field, i.Data); ok {
+			return v, nil
+		}
+	}
+
+	return nil, ErrIncorrectField
+}
+
+// Set устанавливает значение поля
+func (i *Item) Set(field string, value interface{}) error {
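+	// System fields are handled by SetSystemField; any other field is written into Data.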
+	if err := i.SetSystemField(field, value); !errors.Is(err, ErrNotSystemField) {
+		return errors.Wrapf(err, "fail to set system field '%s' value", field)
+	}
+
+	return i.setItemData(field, value)
+}
+
+// Get возвращает значение поля
+func (i *Item) Get(field string) (any, error) {
+	if v, err := i.GetSystem(field); err == nil {
+		return v, err
+	}
+
+	return i.getItemData(field)
+}
+
+// GetSystemField возвращает описание поля для системных атрибутов Item
+func GetSystemField(fld string) (*field.Field, error) {
+	switch fld {
+	case "id", "space_id", "env_id", "collection_id", "revision_id":
+		return field.String(), nil
+	case "created_rev_at", "created_at", "updated_at", "published_at":
+		return field.Time(), nil
+	case "created_by", "updated_by", "published_by":
+		return field.String(), nil
+	case "hidden", "deleted", "template":
+		return field.Bool(), nil
+	}
+
+	return nil, ErrNotSystemField
+}
+
+// GetField возвращает описание поля: сначала среди системных полей, затем в схеме
+func GetField(field string, sch *schema.Schema) (*field.Field, error) {
+	if f, err := GetSystemField(field); err == nil {
+		return f, err
+	}
+
+	f := sch.GetField(field)
+	if f == nil {
+		return nil, ErrIncorrectField
+	}
+
+	return f, nil
+}
+
+// GetSystemNamedFields возвращает описание всех системных полей Item
+func GetSystemNamedFields() []field.NamedField {
+	fields := make([]field.NamedField, 0, len(SystemFields))
+	for _, n := range SystemFields {
+		f := field.NamedField{Name: n}
+		f.Field, _ = GetSystemField(n)
+		fields = append(fields, f)
+	}
+
+	return fields
+}
+
+func ItemToProto(item *Item) *pb.Item {
+	if item == nil {
+		return nil
+	}
+
+	protoItem := &pb.Item{
+		Id:           item.ID,
+		SpaceId:      item.SpaceID,
+		EnvId:        item.EnvID,
+		CollectionId: item.CollectionID,
+		State:        pb.Item_State(item.State),
+		CreatedBy:    item.CreatedBy,
+		UpdatedBy:    item.UpdatedBy,
+		RevisionId:   item.RevisionID,
+		PublishedBy:  item.PublishedBy,
+		ArchivedBy:   item.ArchivedBy,
+		Locale:       item.Locale,
+		Hidden:       item.Hidden,
+		Template:     item.Template,
+		Deleted:      item.Deleted,
+	}
+
+	if item.Data != nil {
+		protoItem.Data, _ = structpb.NewStruct(item.Data)
+	}
+	if item.Translations != nil {
+		protoItem.Translations = make(map[string]*structpb.Struct, len(item.Translations))
+		for k, v := range item.Translations {
+			protoItem.Translations[k], _ = structpb.NewStruct(v)
+		}
+	}
+
+	protoItem.CreatedRevAt = timestamppb.New(item.CreatedRevAt)
+	protoItem.PublishedAt = timestamppb.New(item.PublishedAt)
+	protoItem.ArchivedAt = timestamppb.New(item.ArchivedAt)
+	protoItem.CreatedAt = timestamppb.New(item.CreatedAt)
+	protoItem.UpdatedAt = timestamppb.New(item.UpdatedAt)
+
+	if item.Permissions != nil {
+		protoItem.Permissions = &pb.Permissions{
+			Edit:       item.Permissions.Edit,
+			Archive:    item.Permissions.Archive,
+			Publish:    item.Permissions.Publish,
+			SoftDelete: item.Permissions.SoftDelete,
+			HardDelete: item.Permissions.HardDelete,
+		}
+	}
+
+	return protoItem
+}
+
+func ItemFromProto(protoItem *pb.Item) *Item {
+
+	if protoItem == nil {
+		return nil
+	}
+
+	item := &Item{
+		ID:           protoItem.Id,
+		SpaceID:      protoItem.SpaceId,
+		EnvID:        protoItem.EnvId,
+		CollectionID: protoItem.CollectionId,
+		State:        State(protoItem.State),
+		CreatedBy:    protoItem.CreatedBy,
+		UpdatedBy:    protoItem.UpdatedBy,
+		RevisionID:   protoItem.RevisionId,
+		PublishedBy:  protoItem.PublishedBy,
+		ArchivedBy:   protoItem.ArchivedBy,
+		Locale:       protoItem.Locale,
+		Hidden:       protoItem.Hidden,
+		Template:     protoItem.Template,
+		Deleted:      protoItem.Deleted,
+	}
+
+	if protoItem.Data != nil {
+		item.Data = protoItem.Data.AsMap()
+	}
+
+	if protoItem.Translations != nil {
+		item.Translations = make(map[string]map[string]interface{}, len(protoItem.Translations))
+		for k, v := range protoItem.Translations {
+			item.Translations[k] = v.AsMap()
+		}
+	}
+
+	if protoItem.Permissions != nil {
+		item.Permissions = &Permissions{
+			Edit:       protoItem.Permissions.Edit,
+			Archive:    protoItem.Permissions.Archive,
+			Publish:    protoItem.Permissions.Publish,
+			SoftDelete: protoItem.Permissions.SoftDelete,
+			HardDelete: protoItem.Permissions.HardDelete,
+		}
+	}
+
+	item.CreatedRevAt = protoItem.CreatedRevAt.AsTime()
+	item.PublishedAt = protoItem.PublishedAt.AsTime()
+	item.ArchivedAt = protoItem.ArchivedAt.AsTime()
+	item.CreatedAt = protoItem.CreatedAt.AsTime()
+	item.UpdatedAt = protoItem.UpdatedAt.AsTime()
+
+	return item
+}
+
+func GetItemIDs(arr []*Item) []string {
+	res := make([]string, len(arr))
+	for i, e := range arr {
+		res[i] = e.ID
+	}
+	return res
+}
diff --git a/pkg/items/item_test.go b/pkg/items/item_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb54fc501f45281bbafd37983de3b8638d5692d4
--- /dev/null
+++ b/pkg/items/item_test.go
@@ -0,0 +1,61 @@
+package items
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	"git.perx.ru/perxis/perxis-go/pkg/schema/field"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestItem_Set(t *testing.T) {
+	item := &Item{}
+
+	item.Set("id", "id")
+	assert.Equal(t, "id", item.ID)
+	now := time.Now()
+
+	item.Set("created_at", now)
+	assert.Equal(t, now, item.CreatedAt)
+
+	item.Set("a.b.c", 101)
+	assert.Equal(t, map[string]any{"a": map[string]any{"b": map[string]any{"c": 101}}}, item.Data)
+
+}
+
+func TestGetField(t *testing.T) {
+	sch := schema.New(
+		"a", field.String(),
+		"obj", field.Object(
+			"a", field.Number(field.NumberFormatFloat),
+			"b", field.String(),
+		),
+		"arr", field.Array(field.Object("a", field.Time())),
+	)
+
+	tests := []struct {
+		name    string
+		field   string
+		want    *field.Field
+		wantErr assert.ErrorAssertionFunc
+	}{
+		{"Simple", "a", field.String(), assert.NoError},
+		{"Incorrect field", "b", nil, assert.Error},
+		{"Object", "obj", field.Object("a", field.Number(field.NumberFormatFloat), "b", field.String()), assert.NoError},
+		{"Object path", "obj.a", field.Number(field.NumberFormatFloat), assert.NoError},
+		{"Array", "arr", field.Array(field.Object("a", field.Time())), assert.NoError},
+		{"Array path", "arr.a", field.Time(), assert.NoError},
+		{"Array item", "arr.", field.Object("a", field.Time()), assert.NoError},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := GetField(tt.field, sch)
+			if !tt.wantErr(t, err, fmt.Sprintf("GetField(%v, sch)", tt.field)) {
+				return
+			}
+			assert.Equalf(t, tt.want, got, "GetField(%v, sch)", tt.field)
+		})
+	}
+}
diff --git a/pkg/items/middleware/caching_middleware.go b/pkg/items/middleware/caching_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..0455cb276c5ccae86aeb27619fe55369a735550a
--- /dev/null
+++ b/pkg/items/middleware/caching_middleware.go
@@ -0,0 +1,176 @@
+package service
+
+import (
+	"context"
+	"strings"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	envService "git.perx.ru/perxis/perxis-go/pkg/environments"
+	service "git.perx.ru/perxis/perxis-go/pkg/items"
+)
+
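+// makeKey builds a cache key by joining the given identifier parts with "-";
+// the same scheme is used for both the draft and the published caches below.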
+func makeKey(ss ...string) string {
+	return strings.Join(ss, "-")
+}
+
+func CachingMiddleware(cache, cachePublished *cache.Cache, envs envService.Environments) Middleware {
+	return func(next service.Items) service.Items {
+		return &cachingMiddleware{
+			cache:          cache,
+			cachePublished: cachePublished,
+			Items:          next,
+			envs:           envs,
+		}
+	}
+}
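+
+// Minimal wiring sketch (illustrative only; `raw` is an existing items client,
+// `envs` an environments client, and the cache size/TTL values are placeholders):
+//
+//	itemsCache := cache.NewCache(1000, time.Minute)
+//	publishedCache := cache.NewCache(1000, time.Minute)
+//	svc := CachingMiddleware(itemsCache, publishedCache, envs)(raw)
+//	// svc serves Get/GetPublished from the caches and, on mutations,
+//	// invalidates both caches by environment ID and by every alias.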
+
+type cachingMiddleware struct {
+	cache          *cache.Cache
+	cachePublished *cache.Cache
+	envs           envService.Environments
+	service.Items
+}
+
+func (m cachingMiddleware) Get(ctx context.Context, spaceId, envId, collectionId, itemId string, options ...*service.GetOptions) (itm *service.Item, err error) {
+
+	value, e := m.cache.Get(makeKey(spaceId, envId, collectionId, itemId))
+	if e == nil {
+		return value.(*service.Item), nil
+	}
+	itm, err = m.Items.Get(ctx, spaceId, envId, collectionId, itemId, options...)
+	if err == nil {
+		env, err := m.envs.Get(ctx, itm.SpaceID, itm.EnvID)
+		if err != nil {
+			return nil, err
+		}
+		m.cache.Set(makeKey(itm.SpaceID, env.ID, itm.CollectionID, itm.ID), itm)
+		for _, al := range env.Aliases {
+			m.cache.Set(makeKey(itm.SpaceID, al, itm.CollectionID, itm.ID), itm)
+		}
+	}
+	return itm, err
+}
+
+func (m cachingMiddleware) Update(ctx context.Context, item *service.Item, options ...*service.UpdateOptions) (err error) {
+
+	err = m.Items.Update(ctx, item, options...)
+	if err == nil {
+		env, err := m.envs.Get(ctx, item.SpaceID, item.EnvID)
+		if err != nil {
+			return err
+		}
+		m.cache.Remove(makeKey(item.SpaceID, env.ID, item.CollectionID, item.ID))
+		m.cachePublished.Remove(makeKey(item.SpaceID, env.ID, item.CollectionID, item.ID))
+		for _, al := range env.Aliases {
+			m.cache.Remove(makeKey(item.SpaceID, al, item.CollectionID, item.ID))
+			m.cachePublished.Remove(makeKey(item.SpaceID, al, item.CollectionID, item.ID))
+		}
+	}
+	return err
+}
+
+func (m cachingMiddleware) Delete(ctx context.Context, spaceId, envId, collectionId, itemId string, options ...*service.DeleteOptions) (err error) {
+
+	err = m.Items.Delete(ctx, spaceId, envId, collectionId, itemId, options...)
+	if err == nil {
+		env, err := m.envs.Get(ctx, spaceId, envId)
+		if err != nil {
+			return err
+		}
+		m.cache.Remove(makeKey(spaceId, env.ID, collectionId, itemId))
+		m.cachePublished.Remove(makeKey(spaceId, env.ID, collectionId, itemId))
+		for _, al := range env.Aliases {
+			m.cache.Remove(makeKey(spaceId, al, collectionId, itemId))
+			m.cachePublished.Remove(makeKey(spaceId, al, collectionId, itemId))
+		}
+
+	}
+	return err
+}
+
+func (m cachingMiddleware) Publish(ctx context.Context, item *service.Item, options ...*service.PublishOptions) (err error) {
+
+	err = m.Items.Publish(ctx, item, options...)
+	if err == nil {
+		env, err := m.envs.Get(ctx, item.SpaceID, item.EnvID)
+		if err != nil {
+			return err
+		}
+		m.cache.Remove(makeKey(item.SpaceID, env.ID, item.CollectionID, item.ID))
+		m.cachePublished.Remove(makeKey(item.SpaceID, env.ID, item.CollectionID, item.ID))
+		for _, al := range env.Aliases {
+			m.cache.Remove(makeKey(item.SpaceID, al, item.CollectionID, item.ID))
+			m.cachePublished.Remove(makeKey(item.SpaceID, al, item.CollectionID, item.ID))
+		}
+	}
+	return err
+}
+
+func (m cachingMiddleware) Unpublish(ctx context.Context, item *service.Item, options ...*service.UnpublishOptions) (err error) {
+
+	err = m.Items.Unpublish(ctx, item, options...)
+	if err == nil {
+		env, err := m.envs.Get(ctx, item.SpaceID, item.EnvID)
+		if err != nil {
+			return err
+		}
+		m.cache.Remove(makeKey(item.SpaceID, env.ID, item.CollectionID, item.ID))
+		m.cachePublished.Remove(makeKey(item.SpaceID, env.ID, item.CollectionID, item.ID))
+		for _, al := range env.Aliases {
+			m.cache.Remove(makeKey(item.SpaceID, al, item.CollectionID, item.ID))
+			m.cachePublished.Remove(makeKey(item.SpaceID, al, item.CollectionID, item.ID))
+		}
+	}
+	return err
+}
+
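+// GetPublished keeps one cache entry per item key; the entry is a map keyed by
+// LocaleID, so different locales of the same published item share the entry.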
+func (m cachingMiddleware) GetPublished(ctx context.Context, spaceId, envId, collectionId, itemId string, options ...*service.GetPublishedOptions) (itm *service.Item, err error) {
+
+	opts := service.MergeGetPublishedOptions(options...)
+
+	val, e := m.cachePublished.Get(makeKey(spaceId, envId, collectionId, itemId))
+	if e == nil {
+		value := val.(map[string]*service.Item)
+		if i, ok := value[opts.LocaleID]; ok {
+			return i, nil
+		}
+	}
+
+	itm, err = m.Items.GetPublished(ctx, spaceId, envId, collectionId, itemId, opts)
+
+	if err == nil {
+		env, err := m.envs.Get(ctx, itm.SpaceID, itm.EnvID)
+		if err != nil {
+			return nil, err
+		}
+		var value = make(map[string]*service.Item)
+		if val != nil {
+			value = val.(map[string]*service.Item)
+		}
+		value[opts.LocaleID] = itm
+		m.cachePublished.Set(makeKey(itm.SpaceID, env.ID, itm.CollectionID, itm.ID), value)
+		for _, al := range env.Aliases {
+			m.cachePublished.Set(makeKey(itm.SpaceID, al, itm.CollectionID, itm.ID), value)
+		}
+	}
+
+	return itm, err
+}
+
+func (m cachingMiddleware) Archive(ctx context.Context, item *service.Item, options ...*service.ArchiveOptions) (err error) {
+
+	err = m.Items.Archive(ctx, item, options...)
+	if err == nil {
+		env, err := m.envs.Get(ctx, item.SpaceID, item.EnvID)
+		if err != nil {
+			return err
+		}
+		m.cache.Remove(makeKey(item.SpaceID, env.ID, item.CollectionID, item.ID))
+		m.cachePublished.Remove(makeKey(item.SpaceID, env.ID, item.CollectionID, item.ID))
+		for _, al := range env.Aliases {
+			m.cache.Remove(makeKey(item.SpaceID, al, item.CollectionID, item.ID))
+			m.cachePublished.Remove(makeKey(item.SpaceID, al, item.CollectionID, item.ID))
+		}
+	}
+	return err
+}
diff --git a/pkg/items/middleware/caching_middleware_test.go b/pkg/items/middleware/caching_middleware_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..9e02b49beb80507ef7fbf25d3c67d5c8a022ce1b
--- /dev/null
+++ b/pkg/items/middleware/caching_middleware_test.go
@@ -0,0 +1,685 @@
+package service
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	"git.perx.ru/perxis/perxis-go/pkg/environments"
+	envmocks "git.perx.ru/perxis/perxis-go/pkg/environments/mocks"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	itmsmocks "git.perx.ru/perxis/perxis-go/pkg/items/mocks"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestItemsCache(t *testing.T) {
+
+	const (
+		colID    = "colID"
+		spaceID  = "spaceID"
+		envID    = "envID"
+		envAlias = "envAlias"
+		itemID   = "itemID"
+		locID    = "locID"
+		size     = 5
+		ttl      = 20 * time.Millisecond
+	)
+
+	errNotFound := errors.NotFound(errors.New("not found"))
+
+	ctx := context.Background()
+
+	t.Run("Get from cache", func(t *testing.T) {
+		itms := &itmsmocks.Items{}
+		env := &envmocks.Environments{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+		env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+		itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+
+		v1, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+		require.NoError(t, err)
+
+		v2, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Ожидается получение объекта из кеша, при повторном запросе.")
+
+		v3, err := svc.Get(ctx, spaceID, envAlias, colID, itemID)
+		assert.Same(t, v3, v2, "Ожидается получение объекта из кеша, при запросе того же объекта по alias окружения.")
+		require.NoError(t, err)
+
+		env.AssertExpectations(t)
+		itms.AssertExpectations(t)
+	})
+
+	t.Run("Get from cache(by Alias)", func(t *testing.T) {
+		itms := &itmsmocks.Items{}
+		env := &envmocks.Environments{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+		env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+		itms.On("Get", mock.Anything, spaceID, envAlias, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+
+		v1, err := svc.Get(ctx, spaceID, envAlias, colID, itemID)
+		require.NoError(t, err)
+
+		v2, err := svc.Get(ctx, spaceID, envAlias, colID, itemID)
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Ожидается получение объекта из кеша, при повторном запросе.")
+
+		v3, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+		assert.Same(t, v3, v2, "Ожидается получение объекта из кеша, при запросе того же объекта по ID окружения.")
+		require.NoError(t, err)
+
+		env.AssertExpectations(t)
+		itms.AssertExpectations(t)
+	})
+
+	t.Run("GetPublished from cache", func(t *testing.T) {
+		itms := &itmsmocks.Items{}
+		env := &envmocks.Environments{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+		env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+		itms.On("GetPublished", mock.Anything, spaceID, envID, colID, itemID, mock.Anything).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+
+		v1, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+		require.NoError(t, err)
+
+		v2, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Ожидается получение объекта из кеша.")
+
+		v3, err := svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+		require.NoError(t, err)
+		assert.Same(t, v2, v3, "Ожидается получение объекта из кеша, при запросе того же объекта по alias окружения.")
+
+		env.AssertExpectations(t)
+		itms.AssertExpectations(t)
+	})
+
+	t.Run("GetPublished from cache(by Alias)", func(t *testing.T) {
+		itms := &itmsmocks.Items{}
+		env := &envmocks.Environments{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+		env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+		itms.On("GetPublished", mock.Anything, spaceID, envAlias, colID, itemID, mock.Anything).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+
+		v1, err := svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+		require.NoError(t, err)
+
+		v2, err := svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Ожидается получение объекта из кеша.")
+
+		v3, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+		require.NoError(t, err)
+		assert.Same(t, v2, v3, "Ожидается получение объекта из кеша, при запросе того же объекта по ID окружения.")
+
+		env.AssertExpectations(t)
+		itms.AssertExpectations(t)
+	})
+
+	t.Run("GetPublished from cache (with different locales)", func(t *testing.T) {
+		const (
+			loc1 = "loc1"
+			loc2 = "loc2"
+		)
+
+		itms := &itmsmocks.Items{}
+		env := &envmocks.Environments{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+		env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Twice()
+		itms.On("GetPublished", mock.Anything, spaceID, envAlias, colID, itemID, mock.Anything).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+		itms.On("GetPublished", mock.Anything, spaceID, envAlias, colID, itemID, mock.Anything).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+
+		v1loc1, err := svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: loc1})
+		require.NoError(t, err, "Ожидается получение объекта из сервиса и добавление его в кеш с loc1.")
+
+		v1loc2, err := svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: loc2})
+		require.NoError(t, err, "Ожидается получение объекта из сервиса и добавление его в кеш с loc2 вместе с loc1.")
+
+		v2loc1, err := svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: loc1})
+		require.NoError(t, err)
+		assert.Same(t, v1loc1, v2loc1, "Ожидается получение объекта c локализацией loc1 из кеша.")
+
+		v2loc2, err := svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: loc2})
+		require.NoError(t, err)
+		assert.Same(t, v1loc2, v2loc2, "Ожидается получение объекта c локализацией loc2 из кеша.")
+
+		v3loc1, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: loc1})
+		require.NoError(t, err)
+		assert.Same(t, v2loc1, v3loc1, "Ожидается получение объекта c локализацией loc1 из кеша, при запросе того же объекта по ID окружения.")
+
+		v3loc2, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: loc2})
+		require.NoError(t, err)
+		assert.Same(t, v2loc2, v3loc2, "Ожидается получение объекта c локализацией loc2 из кеша, при запросе того же объекта по ID окружения.")
+
+		env.AssertExpectations(t)
+		itms.AssertExpectations(t)
+	})
+
+	t.Run("Invalidate cache", func(t *testing.T) {
+		t.Run("After Update(Get)", func(t *testing.T) {
+			itms := &itmsmocks.Items{}
+			env := &envmocks.Environments{}
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, Data: map[string]interface{}{"f1": "d1"}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кеша.")
+
+			itms.On("Update", mock.Anything, mock.Anything).Return(nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			err = svc.Update(ctx, &items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, Data: map[string]interface{}{"f1": "d2"}})
+			require.NoError(t, err)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, Data: map[string]interface{}{"f1": "d2"}}, nil).Once()
+
+			v3, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v2, "Ожидается удаление объекта из кэша после обновления и получение его заново из сервиса.")
+
+			env.AssertExpectations(t)
+			itms.AssertExpectations(t)
+		})
+
+		t.Run("After Archive(Get)", func(t *testing.T) {
+			itms := &itmsmocks.Items{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кеша.")
+
+			itms.On("Archive", mock.Anything, mock.Anything).Return(nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			err = svc.Archive(ctx, &items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft})
+			require.NoError(t, err)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateArchived}, nil).Once()
+
+			v3, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v2, "Ожидается удаление объекта из кэша после архивации и получение из сервиса.")
+
+			env.AssertExpectations(t)
+			itms.AssertExpectations(t)
+		})
+
+		t.Run("After Publish(Get)", func(t *testing.T) {
+			itms := &itmsmocks.Items{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кеша.")
+
+			itms.On("Publish", mock.Anything, mock.Anything).Return(nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			err = svc.Publish(ctx, &items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft})
+			require.NoError(t, err)
+
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StatePublished}, nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			v3, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v2, "Ожидается удаление объекта из кэша после публикации и получение заново из сервиса.")
+
+			env.AssertExpectations(t)
+			itms.AssertExpectations(t)
+		})
+
+		t.Run("After Delete", func(t *testing.T) {
+			itms := &itmsmocks.Items{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StatePublished}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кеша при повторном запросе.")
+
+			v3, err := svc.Get(ctx, spaceID, envAlias, colID, itemID)
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Ожидается получение объекта из кеша по alias окружения.")
+
+			itms.On("Delete", mock.Anything, spaceID, envID, colID, itemID).Return(nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			err = svc.Delete(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(nil, errNotFound).Once()
+			itms.On("Get", mock.Anything, spaceID, envAlias, colID, itemID).Return(nil, errNotFound).Once()
+			_, err = svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидается удаление из кэша после удаления объекта и получение ошибки от сервиса.")
+
+			_, err = svc.Get(ctx, spaceID, envAlias, colID, itemID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидается удаление из кэша после удаления объекта и получение ошибки от сервиса.")
+
+			env.AssertExpectations(t)
+			itms.AssertExpectations(t)
+		})
+
+		t.Run("After Unpublish(Get)", func(t *testing.T) {
+			itms := &itmsmocks.Items{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StatePublished}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кеша.")
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("Unpublish", mock.Anything, mock.Anything).Return(nil).Once()
+
+			err = svc.Unpublish(ctx, &items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StatePublished})
+			require.NoError(t, err)
+
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			v3, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v2, "Ожидается удаление объекта из кэша после снятия с публикации и получение заново из сервиса.")
+
+			env.AssertExpectations(t)
+			itms.AssertExpectations(t)
+		})
+
+		t.Run("After Publish(Get by Alias)", func(t *testing.T) {
+			itms := &itmsmocks.Items{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envAlias, colID, itemID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кеша по alias окружения.")
+
+			itms.On("Publish", mock.Anything, mock.Anything).Return(nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			err = svc.Publish(ctx, &items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft})
+			require.NoError(t, err)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("Get", mock.Anything, spaceID, envAlias, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StatePublished}, nil).Once()
+
+			v3, err := svc.Get(ctx, spaceID, envAlias, colID, itemID)
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v2, "Ожидается удаление объекта из кэша после публикации и получение из сервиса по alias окружения.")
+
+			env.AssertExpectations(t)
+			itms.AssertExpectations(t)
+		})
+
+		t.Run("After Update(Get by Alias)", func(t *testing.T) {
+			itms := &itmsmocks.Items{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, Data: map[string]interface{}{"f1": "d1"}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envAlias, colID, itemID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кеша по alias окружения.")
+
+			itms.On("Update", mock.Anything, mock.Anything).Return(nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			err = svc.Update(ctx, &items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, Data: map[string]interface{}{"f1": "d2"}})
+			require.NoError(t, err)
+
+			itms.On("Get", mock.Anything, spaceID, envAlias, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, Data: map[string]interface{}{"f1": "d2"}}, nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			v3, err := svc.Get(ctx, spaceID, envAlias, colID, itemID)
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v2, "Ожидается удаление объекта из кэша при обновлении и получение из сервиса по alias окружения.")
+
+			env.AssertExpectations(t)
+			itms.AssertExpectations(t)
+		})
+
+		t.Run("After Unpublish(Get by Alias)", func(t *testing.T) {
+			itms := &itmsmocks.Items{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StatePublished}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envAlias, colID, itemID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кеша по alias окружения.")
+
+			itms.On("Unpublish", mock.Anything, mock.Anything).Return(nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			err = svc.Unpublish(ctx, &items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StatePublished})
+			require.NoError(t, err)
+
+			itms.On("Get", mock.Anything, spaceID, envAlias, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			v3, err := svc.Get(ctx, spaceID, envAlias, colID, itemID)
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v2, "Ожидается удаление объекта из кэша после снятия с публикации и получение из сервиса по alias окружения.")
+
+			env.AssertExpectations(t)
+			itms.AssertExpectations(t)
+		})
+
+		t.Run("After Update(GetPublished)", func(t *testing.T) {
+			itms := &itmsmocks.Items{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("GetPublished", mock.Anything, spaceID, envID, colID, itemID, mock.Anything).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StatePublished, Data: map[string]interface{}{"f1": "d1"}}, nil).Once()
+
+			v1, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.NoError(t, err)
+
+			v2, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кеша.")
+
+			v3, err := svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Ожидается получение объекта из кеша по alias окружения.")
+
+			itms.On("Update", mock.Anything, mock.Anything).Return(nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			err = svc.Update(ctx, &items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StatePublished, Data: map[string]interface{}{"f1": "d2"}})
+			require.NoError(t, err)
+
+			itms.On("GetPublished", mock.Anything, spaceID, envID, colID, itemID, mock.Anything).Return(nil, errNotFound).Once()
+			itms.On("GetPublished", mock.Anything, spaceID, envAlias, colID, itemID, mock.Anything).Return(nil, errNotFound).Once()
+
+			_, err = svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидается удаление объекта из кэша по ID окружения после его обновления и получение ошибки от сервиса.")
+
+			_, err = svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидается удаление объекта из кэша по alias окружения после его обновления и получение ошибки от сервиса.")
+
+			env.AssertExpectations(t)
+			itms.AssertExpectations(t)
+		})
+
+		t.Run("After Archive(GetPublished)", func(t *testing.T) {
+			itms := &itmsmocks.Items{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("GetPublished", mock.Anything, spaceID, envID, colID, itemID, mock.Anything).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+
+			v1, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.NoError(t, err)
+
+			v2, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кеша.")
+
+			v3, err := svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Ожидается получение объекта из кеша по alias окружения.")
+
+			itms.On("Archive", mock.Anything, mock.Anything).Return(nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			err = svc.Archive(ctx, &items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft})
+			require.NoError(t, err)
+
+			itms.On("GetPublished", mock.Anything, spaceID, envID, colID, itemID, mock.Anything).Return(nil, errNotFound).Once()
+			itms.On("GetPublished", mock.Anything, spaceID, envAlias, colID, itemID, mock.Anything).Return(nil, errNotFound).Once()
+
+			_, err = svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидается удаление объекта из кэша по ID окружения после его архивации и получение ошибки от сервиса.")
+
+			_, err = svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидается удаление объекта из кэша по alias окружения после его архивации и получение ошибки от сервиса.")
+
+			env.AssertExpectations(t)
+			itms.AssertExpectations(t)
+		})
+
+		t.Run("After Delete(GetPublished)", func(t *testing.T) {
+			itms := &itmsmocks.Items{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("GetPublished", mock.Anything, spaceID, envID, colID, itemID, mock.Anything).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StatePublished}, nil).Once()
+
+			v1, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.NoError(t, err)
+
+			v2, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кеша.")
+
+			v3, err := svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Ожидается получение объекта из кеша по alias окружения.")
+
+			itms.On("Delete", mock.Anything, spaceID, envID, colID, itemID).Return(nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			err = svc.Delete(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+
+			itms.On("GetPublished", mock.Anything, spaceID, envID, colID, itemID, mock.Anything).Return(nil, errNotFound).Once()
+			itms.On("GetPublished", mock.Anything, spaceID, envAlias, colID, itemID, mock.Anything).Return(nil, errNotFound).Once()
+
+			_, err = svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидается удаление объекта из кэша после удаления из хранилища и получение ошибки от сервиса.")
+
+			_, err = svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидается очистка кеша по alias окружения после удаления объекта и получение ошибки от сервиса.")
+
+			env.AssertExpectations(t)
+			itms.AssertExpectations(t)
+		})
+
+		t.Run("After Unpublish(GetPublished)", func(t *testing.T) {
+			itms := &itmsmocks.Items{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("GetPublished", mock.Anything, spaceID, envID, colID, itemID, mock.Anything).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StatePublished}, nil).Once()
+
+			v1, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.NoError(t, err)
+
+			v2, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кеша.")
+
+			v3, err := svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.NoError(t, err)
+			assert.Same(t, v2, v3, "Ожидается получение объекта из кеша по alias окружения.")
+
+			itms.On("Unpublish", mock.Anything, mock.Anything).Return(nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			err = svc.Unpublish(ctx, &items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StatePublished})
+			require.NoError(t, err)
+
+			itms.On("GetPublished", mock.Anything, spaceID, envID, colID, itemID, mock.Anything).Return(nil, errNotFound).Once()
+			itms.On("GetPublished", mock.Anything, spaceID, envAlias, colID, itemID, mock.Anything).Return(nil, errNotFound).Once()
+
+			_, err = svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидается удаление объекта из кэша по ID окружения после снятия с публикации и получение ошибки от сервиса.")
+
+			_, err = svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидается удаление объекта из кэша по alias окружения после снятия с публикации и получение ошибки от сервиса.")
+
+			env.AssertExpectations(t)
+			itms.AssertExpectations(t)
+		})
+
+		t.Run("After Unpublish by Alias", func(t *testing.T) {
+			itms := &itmsmocks.Items{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			env.On("Get", mock.Anything, spaceID, envAlias).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StatePublished}, nil).Once()
+			itms.On("GetPublished", mock.Anything, spaceID, envID, colID, itemID, mock.Anything).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StatePublished}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кеша.")
+
+			v3, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.NoError(t, err)
+
+			v4, err := svc.GetPublished(ctx, spaceID, envID, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.NoError(t, err)
+			assert.Same(t, v3, v4, "Ожидается получение опубликованного объекта из кеша.")
+
+			itms.On("Unpublish", mock.Anything, mock.Anything).Return(nil).Once()
+			err = svc.Unpublish(ctx, &items.Item{ID: itemID, SpaceID: spaceID, EnvID: envAlias, CollectionID: colID, State: items.StatePublished})
+			require.NoError(t, err)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+			itms.On("GetPublished", mock.Anything, spaceID, envAlias, colID, itemID, mock.Anything).Return(nil, errNotFound).Once()
+
+			v5, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+			assert.NotSame(t, v5, v2, "Ожидается удаление объекта из кэша и получение заново из сервиса.")
+
+			_, err = svc.GetPublished(ctx, spaceID, envAlias, colID, itemID, &items.GetPublishedOptions{LocaleID: locID})
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Ожидается удаление объекта из кэша и получение ошибки от сервиса.")
+
+			env.AssertExpectations(t)
+			itms.AssertExpectations(t)
+		})
+
+		t.Run("After TTL expired", func(t *testing.T) {
+			itms := &itmsmocks.Items{}
+			env := &envmocks.Environments{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl), cache.NewCache(size, ttl), env)(itms)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Ожидается получение объекта из кеша.")
+
+			time.Sleep(2 * ttl)
+
+			env.On("Get", mock.Anything, spaceID, envID).Return(&environments.Environment{ID: envID, SpaceID: spaceID, Aliases: []string{envAlias}}, nil).Once()
+			itms.On("Get", mock.Anything, spaceID, envID, colID, itemID).Return(&items.Item{ID: itemID, SpaceID: spaceID, EnvID: envID, CollectionID: colID, State: items.StateDraft}, nil).Once()
+
+			v3, err := svc.Get(ctx, spaceID, envID, colID, itemID)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v3, "Ожидается удаление объекта из кэша и получение из сервиса.")
+
+			env.AssertExpectations(t)
+			itms.AssertExpectations(t)
+		})
+	})
+}
diff --git a/pkg/items/middleware/client_encode_middleware.go b/pkg/items/middleware/client_encode_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..3aaa6b94badd9726e74d1cb3b0abbd02893f5838
--- /dev/null
+++ b/pkg/items/middleware/client_encode_middleware.go
@@ -0,0 +1,317 @@
+package service
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/collections"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+)
+
+// ClientEncodeMiddleware выполняет операции encode/decode для передаваемых данных
+func ClientEncodeMiddleware(colls collections.Collections) Middleware {
+	return func(items items.Items) items.Items {
+		return &encodeDecodeMiddleware{
+			next:  items,
+			colls: colls,
+		}
+
+	}
+}
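+
+// Minimal composition sketch (illustrative only; `client` is an existing
+// items client and `colls` a collections client — both are placeholders):
+//
+//	svc := ClientEncodeMiddleware(colls)(client)
+//	// Data/Translations are encoded against the collection schema before
+//	// requests and decoded back on responses that return items.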
+
+type encodeDecodeMiddleware struct {
+	next  items.Items
+	colls collections.Collections
+}
+
+func (m *encodeDecodeMiddleware) Introspect(ctx context.Context, item *items.Item, opts ...*items.IntrospectOptions) (itm *items.Item, sch *schema.Schema, err error) {
+	coll, err := m.colls.Get(ctx, item.SpaceID, item.EnvID, item.CollectionID)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if item, err = item.Encode(ctx, coll.Schema); err != nil {
+		return
+	}
+
+	itm, sch, err = m.next.Introspect(ctx, item, opts...)
+	if itm != nil && sch != nil {
+		var err error
+		if itm, err = itm.Decode(ctx, sch); err != nil {
+			return nil, nil, err
+		}
+	}
+	return itm, sch, err
+
+}
+
+func (m *encodeDecodeMiddleware) Create(ctx context.Context, item *items.Item, opts ...*items.CreateOptions) (created *items.Item, err error) {
+
+	var col *collections.Collection
+
+	if item != nil && (item.Data != nil || item.Translations != nil) {
+
+		col, err = m.colls.Get(ctx, item.SpaceID, item.EnvID, item.CollectionID)
+		if err != nil {
+			return nil, err
+		}
+
+		if item, err = item.Encode(ctx, col.Schema); err != nil {
+			return nil, err
+		}
+	}
+
+	res, err := m.next.Create(ctx, item, opts...)
+	if err == nil && (res.Data != nil || res.Translations != nil) {
+
+		if col == nil {
+			col, err = m.colls.Get(ctx, item.SpaceID, item.EnvID, item.CollectionID)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		res, err = res.Decode(ctx, col.Schema)
+	}
+
+	return res, err
+}
+
+func (m *encodeDecodeMiddleware) Update(ctx context.Context, upd *items.Item, options ...*items.UpdateOptions) (err error) {
+	var col *collections.Collection
+	if upd != nil && (upd.Data != nil || upd.Translations != nil) {
+		col, err = m.colls.Get(ctx, upd.SpaceID, upd.EnvID, upd.CollectionID)
+		if err != nil {
+			return err
+		}
+		if upd, err = upd.Encode(ctx, col.Schema); err != nil {
+			return err
+		}
+	}
+	return m.next.Update(ctx, upd, options...)
+}
+
+func (m *encodeDecodeMiddleware) Find(ctx context.Context, spaceId, envId, collectionId string, filter *items.Filter, options ...*items.FindOptions) (items []*items.Item, total int, err error) {
+	items, total, err = m.next.Find(ctx, spaceId, envId, collectionId, filter, options...)
+	if err == nil && total > 0 {
+		col, err := m.colls.Get(ctx, spaceId, envId, collectionId)
+		if err != nil {
+			return nil, 0, err
+		}
+		for i, itm := range items {
+			itm, err = itm.Decode(ctx, col.Schema)
+			if err != nil {
+				return nil, 0, err
+			}
+
+			items[i] = itm
+		}
+	}
+	return
+}
+
+func (m *encodeDecodeMiddleware) Get(ctx context.Context, spaceId, envId, collectionId, itemId string, options ...*items.GetOptions) (item *items.Item, err error) {
+	item, err = m.next.Get(ctx, spaceId, envId, collectionId, itemId, options...)
+	if err == nil && item != nil {
+		col, err := m.colls.Get(ctx, spaceId, envId, collectionId)
+		if err != nil {
+			return nil, err
+		}
+		item, err = item.Decode(ctx, col.Schema)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return
+}
+
+func (m *encodeDecodeMiddleware) Publish(ctx context.Context, item *items.Item, opts ...*items.PublishOptions) (err error) {
+	if item != nil && (item.Data != nil || item.Translations != nil) {
+		col, err := m.colls.Get(ctx, item.SpaceID, item.EnvID, item.CollectionID)
+		if err != nil {
+			return err
+		}
+
+		if item, err = item.Encode(ctx, col.Schema); err != nil {
+			return err
+		}
+	}
+
+	return m.next.Publish(ctx, item, opts...)
+}
+
+func (m *encodeDecodeMiddleware) Unpublish(ctx context.Context, item *items.Item, opts ...*items.UnpublishOptions) (err error) {
+	if item != nil && (item.Data != nil || item.Translations != nil) {
+		col, err := m.colls.Get(ctx, item.SpaceID, item.EnvID, item.CollectionID)
+		if err != nil {
+			return err
+		}
+
+		if item, err = item.Encode(ctx, col.Schema); err != nil {
+			return err
+		}
+	}
+
+	return m.next.Unpublish(ctx, item, opts...)
+}
+
+func (m *encodeDecodeMiddleware) GetPublished(ctx context.Context, spaceId, envId, collectionId, itemId string, options ...*items.GetPublishedOptions) (item *items.Item, err error) {
+	item, err = m.next.GetPublished(ctx, spaceId, envId, collectionId, itemId, options...)
+	if err == nil && item != nil {
+		col, err := m.colls.Get(ctx, spaceId, envId, collectionId)
+		if err != nil {
+			return nil, err
+		}
+		item, err = item.Decode(ctx, col.Schema)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return
+}
+
+func (m *encodeDecodeMiddleware) FindPublished(ctx context.Context, spaceId, envId, collectionId string, filter *items.Filter, options ...*items.FindPublishedOptions) (items []*items.Item, total int, err error) {
+	items, total, err = m.next.FindPublished(ctx, spaceId, envId, collectionId, filter, options...)
+	if err == nil && total > 0 {
+		col, err := m.colls.Get(ctx, spaceId, envId, collectionId)
+		if err != nil {
+			return nil, 0, err
+		}
+		for i, itm := range items {
+			itm, err = itm.Decode(ctx, col.Schema)
+			if err != nil {
+				return nil, 0, err
+			}
+
+			items[i] = itm
+		}
+	}
+	return
+}
+
+func (m *encodeDecodeMiddleware) GetRevision(ctx context.Context, spaceId, envId, collectionId, itemId, revisionId string, options ...*items.GetRevisionOptions) (item *items.Item, err error) {
+	item, err = m.next.GetRevision(ctx, spaceId, envId, collectionId, itemId, revisionId, options...)
+	if err == nil && item != nil {
+		col, err := m.colls.Get(ctx, spaceId, envId, collectionId)
+		if err != nil {
+			return nil, err
+		}
+		item, err = item.Decode(ctx, col.Schema)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return
+}
+
+func (m *encodeDecodeMiddleware) ListRevisions(ctx context.Context, spaceId, envId, collectionId, itemId string, options ...*items.ListRevisionsOptions) (items []*items.Item, err error) {
+	items, err = m.next.ListRevisions(ctx, spaceId, envId, collectionId, itemId, options...)
+	if err == nil && len(items) > 0 {
+		col, err := m.colls.Get(ctx, spaceId, envId, collectionId)
+		if err != nil {
+			return nil, err
+		}
+		for i, itm := range items {
+			itm, err = itm.Decode(ctx, col.Schema)
+			if err != nil {
+				return nil, err
+			}
+
+			items[i] = itm
+		}
+	}
+	return
+}
+
+func (m *encodeDecodeMiddleware) FindArchived(ctx context.Context, spaceId, envId, collectionId string, filter *items.Filter, options ...*items.FindArchivedOptions) (items []*items.Item, total int, err error) {
+	items, total, err = m.next.FindArchived(ctx, spaceId, envId, collectionId, filter, options...)
+	if err == nil && total > 0 {
+		col, err := m.colls.Get(ctx, spaceId, envId, collectionId)
+		if err != nil {
+			return nil, 0, err
+		}
+		for i, itm := range items {
+			itm, err = itm.Decode(ctx, col.Schema)
+			if err != nil {
+				return nil, 0, err
+			}
+
+			items[i] = itm
+		}
+	}
+	return
+}
+
+func (m *encodeDecodeMiddleware) Archive(ctx context.Context, item *items.Item, opts ...*items.ArchiveOptions) (err error) {
+	if item != nil && (item.Data != nil || item.Translations != nil) {
+		col, err := m.colls.Get(ctx, item.SpaceID, item.EnvID, item.CollectionID)
+		if err != nil {
+			return err
+		}
+
+		if item, err = item.Encode(ctx, col.Schema); err != nil {
+			return err
+		}
+	}
+
+	return m.next.Archive(ctx, item, opts...)
+}
+
+func (m *encodeDecodeMiddleware) Unarchive(ctx context.Context, item *items.Item, opts ...*items.UnarchiveOptions) (err error) {
+	if item != nil && (item.Data != nil || item.Translations != nil) {
+		col, err := m.colls.Get(ctx, item.SpaceID, item.EnvID, item.CollectionID)
+		if err != nil {
+			return err
+		}
+
+		if item, err = item.Encode(ctx, col.Schema); err != nil {
+			return err
+		}
+	}
+
+	return m.next.Unarchive(ctx, item, opts...)
+}
+
+func (m *encodeDecodeMiddleware) Delete(ctx context.Context, spaceId, envId, collectionId, itemId string, options ...*items.DeleteOptions) (err error) {
+	return m.next.Delete(ctx, spaceId, envId, collectionId, itemId, options...)
+}
+
+func (m *encodeDecodeMiddleware) Undelete(ctx context.Context, spaceId, envId, collectionId, itemId string, options ...*items.UndeleteOptions) (err error) {
+	return m.next.Undelete(ctx, spaceId, envId, collectionId, itemId, options...)
+}
+
+func (m *encodeDecodeMiddleware) Aggregate(ctx context.Context, spaceId, envId, collectionId string, filter *items.Filter, options ...*items.AggregateOptions) (result map[string]interface{}, err error) {
+	res, err := m.next.Aggregate(ctx, spaceId, envId, collectionId, filter, options...)
+	if len(res) > 0 && len(options) > 0 {
+		col, err := m.colls.Get(ctx, spaceId, envId, collectionId)
+		if err != nil {
+			return nil, errors.Wrap(err, "get collection")
+		}
+		o := items.MergeAggregateOptions(options...)
+		res, err = items.DecodeAggregateResult(ctx, o.Fields, res, col.Schema)
+		if err != nil {
+			return nil, errors.Wrap(err, "decode aggregate result")
+		}
+	}
+	return res, err
+}
+
+func (m *encodeDecodeMiddleware) AggregatePublished(ctx context.Context, spaceId, envId, collectionId string, filter *items.Filter, options ...*items.AggregatePublishedOptions) (result map[string]interface{}, err error) {
+	res, err := m.next.AggregatePublished(ctx, spaceId, envId, collectionId, filter, options...)
+	if len(res) > 0 && len(options) > 0 {
+		col, err := m.colls.Get(ctx, spaceId, envId, collectionId)
+		if err != nil {
+			return nil, errors.Wrap(err, "get collection")
+		}
+		o := items.MergeAggregatePublishedOptions(options...)
+		res, err = items.DecodeAggregateResult(ctx, o.Fields, res, col.Schema)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return res, err
+}
diff --git a/pkg/items/middleware/error_logging_middleware.go b/pkg/items/middleware/error_logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..97967808d150cf951f3fb22b16b2836765fc611f
--- /dev/null
+++ b/pkg/items/middleware/error_logging_middleware.go
@@ -0,0 +1,211 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/error_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/items -i Items -t ../../../assets/templates/middleware/error_log -o error_logging_middleware.go -l ""
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	"go.uber.org/zap"
+)
+
+// errorLoggingMiddleware implements items.Items that is instrumented with logging
+type errorLoggingMiddleware struct {
+	logger *zap.Logger
+	next   items.Items
+}
+
+// ErrorLoggingMiddleware instruments an implementation of the items.Items with simple logging
+func ErrorLoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next items.Items) items.Items {
+		return &errorLoggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *errorLoggingMiddleware) Aggregate(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.AggregateOptions) (result map[string]interface{}, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Aggregate(ctx, spaceId, envId, collectionId, filter, options...)
+}
+
+func (m *errorLoggingMiddleware) AggregatePublished(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.AggregatePublishedOptions) (result map[string]interface{}, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.AggregatePublished(ctx, spaceId, envId, collectionId, filter, options...)
+}
+
+func (m *errorLoggingMiddleware) Archive(ctx context.Context, item *items.Item, options ...*items.ArchiveOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Archive(ctx, item, options...)
+}
+
+func (m *errorLoggingMiddleware) Create(ctx context.Context, item *items.Item, opts ...*items.CreateOptions) (created *items.Item, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Create(ctx, item, opts...)
+}
+
+func (m *errorLoggingMiddleware) Delete(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.DeleteOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Delete(ctx, spaceId, envId, collectionId, itemId, options...)
+}
+
+func (m *errorLoggingMiddleware) Find(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.FindOptions) (items []*items.Item, total int, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Find(ctx, spaceId, envId, collectionId, filter, options...)
+}
+
+func (m *errorLoggingMiddleware) FindArchived(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.FindArchivedOptions) (items []*items.Item, total int, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.FindArchived(ctx, spaceId, envId, collectionId, filter, options...)
+}
+
+func (m *errorLoggingMiddleware) FindPublished(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.FindPublishedOptions) (items []*items.Item, total int, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.FindPublished(ctx, spaceId, envId, collectionId, filter, options...)
+}
+
+func (m *errorLoggingMiddleware) Get(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.GetOptions) (item *items.Item, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Get(ctx, spaceId, envId, collectionId, itemId, options...)
+}
+
+func (m *errorLoggingMiddleware) GetPublished(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.GetPublishedOptions) (item *items.Item, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.GetPublished(ctx, spaceId, envId, collectionId, itemId, options...)
+}
+
+func (m *errorLoggingMiddleware) GetRevision(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, revisionId string, options ...*items.GetRevisionOptions) (item *items.Item, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.GetRevision(ctx, spaceId, envId, collectionId, itemId, revisionId, options...)
+}
+
+func (m *errorLoggingMiddleware) Introspect(ctx context.Context, item *items.Item, opts ...*items.IntrospectOptions) (itm *items.Item, sch *schema.Schema, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Introspect(ctx, item, opts...)
+}
+
+func (m *errorLoggingMiddleware) ListRevisions(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.ListRevisionsOptions) (items []*items.Item, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.ListRevisions(ctx, spaceId, envId, collectionId, itemId, options...)
+}
+
+func (m *errorLoggingMiddleware) Publish(ctx context.Context, item *items.Item, options ...*items.PublishOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Publish(ctx, item, options...)
+}
+
+func (m *errorLoggingMiddleware) Unarchive(ctx context.Context, item *items.Item, options ...*items.UnarchiveOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Unarchive(ctx, item, options...)
+}
+
+func (m *errorLoggingMiddleware) Undelete(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.UndeleteOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Undelete(ctx, spaceId, envId, collectionId, itemId, options...)
+}
+
+func (m *errorLoggingMiddleware) Unpublish(ctx context.Context, item *items.Item, options ...*items.UnpublishOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Unpublish(ctx, item, options...)
+}
+
+func (m *errorLoggingMiddleware) Update(ctx context.Context, item *items.Item, options ...*items.UpdateOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Update(ctx, item, options...)
+}
diff --git a/pkg/items/middleware/logging_middleware.go b/pkg/items/middleware/logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..102b91874c63655c169e5c101b10490e375833b9
--- /dev/null
+++ b/pkg/items/middleware/logging_middleware.go
@@ -0,0 +1,732 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/access_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/items -i Items -t ../../../assets/templates/middleware/access_log -o logging_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/auth"
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// loggingMiddleware implements items.Items that is instrumented with logging
+type loggingMiddleware struct {
+	logger *zap.Logger
+	next   items.Items
+}
+
+// LoggingMiddleware instruments an implementation of the items.Items with simple logging
+func LoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next items.Items) items.Items {
+		return &loggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *loggingMiddleware) Aggregate(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.AggregateOptions) (result map[string]interface{}, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId,
+		"filter":       filter,
+		"options":      options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Aggregate.Request", fields...)
+
+	result, err = m.next.Aggregate(ctx, spaceId, envId, collectionId, filter, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"result": result,
+		"err":    err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Aggregate.Response", fields...)
+
+	return result, err
+}
+
+func (m *loggingMiddleware) AggregatePublished(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.AggregatePublishedOptions) (result map[string]interface{}, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId,
+		"filter":       filter,
+		"options":      options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("AggregatePublished.Request", fields...)
+
+	result, err = m.next.AggregatePublished(ctx, spaceId, envId, collectionId, filter, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"result": result,
+		"err":    err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("AggregatePublished.Response", fields...)
+
+	return result, err
+}
+
+func (m *loggingMiddleware) Archive(ctx context.Context, item *items.Item, options ...*items.ArchiveOptions) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"item":    item,
+		"options": options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Archive.Request", fields...)
+
+	err = m.next.Archive(ctx, item, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Archive.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Create(ctx context.Context, item *items.Item, opts ...*items.CreateOptions) (created *items.Item, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":  ctx,
+		"item": item,
+		"opts": opts} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Request", fields...)
+
+	created, err = m.next.Create(ctx, item, opts...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"created": created,
+		"err":     err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Response", fields...)
+
+	return created, err
+}
+
+func (m *loggingMiddleware) Delete(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.DeleteOptions) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId,
+		"itemId":       itemId,
+		"options":      options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Request", fields...)
+
+	err = m.next.Delete(ctx, spaceId, envId, collectionId, itemId, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Find(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.FindOptions) (items []*items.Item, total int, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId,
+		"filter":       filter,
+		"options":      options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Find.Request", fields...)
+
+	items, total, err = m.next.Find(ctx, spaceId, envId, collectionId, filter, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"items": items,
+		"total": total,
+		"err":   err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Find.Response", fields...)
+
+	return items, total, err
+}
+
+func (m *loggingMiddleware) FindArchived(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.FindArchivedOptions) (items []*items.Item, total int, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId,
+		"filter":       filter,
+		"options":      options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("FindArchived.Request", fields...)
+
+	items, total, err = m.next.FindArchived(ctx, spaceId, envId, collectionId, filter, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"items": items,
+		"total": total,
+		"err":   err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("FindArchived.Response", fields...)
+
+	return items, total, err
+}
+
+func (m *loggingMiddleware) FindPublished(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.FindPublishedOptions) (items []*items.Item, total int, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId,
+		"filter":       filter,
+		"options":      options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("FindPublished.Request", fields...)
+
+	items, total, err = m.next.FindPublished(ctx, spaceId, envId, collectionId, filter, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"items": items,
+		"total": total,
+		"err":   err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("FindPublished.Response", fields...)
+
+	return items, total, err
+}
+
+func (m *loggingMiddleware) Get(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.GetOptions) (item *items.Item, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId,
+		"itemId":       itemId,
+		"options":      options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Request", fields...)
+
+	item, err = m.next.Get(ctx, spaceId, envId, collectionId, itemId, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"item": item,
+		"err":  err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Response", fields...)
+
+	return item, err
+}
+
+func (m *loggingMiddleware) GetPublished(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.GetPublishedOptions) (item *items.Item, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId,
+		"itemId":       itemId,
+		"options":      options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("GetPublished.Request", fields...)
+
+	item, err = m.next.GetPublished(ctx, spaceId, envId, collectionId, itemId, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"item": item,
+		"err":  err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("GetPublished.Response", fields...)
+
+	return item, err
+}
+
+func (m *loggingMiddleware) GetRevision(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, revisionId string, options ...*items.GetRevisionOptions) (item *items.Item, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId,
+		"itemId":       itemId,
+		"revisionId":   revisionId,
+		"options":      options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("GetRevision.Request", fields...)
+
+	item, err = m.next.GetRevision(ctx, spaceId, envId, collectionId, itemId, revisionId, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"item": item,
+		"err":  err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("GetRevision.Response", fields...)
+
+	return item, err
+}
+
+func (m *loggingMiddleware) Introspect(ctx context.Context, item *items.Item, opts ...*items.IntrospectOptions) (itm *items.Item, sch *schema.Schema, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":  ctx,
+		"item": item,
+		"opts": opts} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Introspect.Request", fields...)
+
+	itm, sch, err = m.next.Introspect(ctx, item, opts...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"itm": itm,
+		"sch": sch,
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Introspect.Response", fields...)
+
+	return itm, sch, err
+}
+
+func (m *loggingMiddleware) ListRevisions(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.ListRevisionsOptions) (items []*items.Item, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId,
+		"itemId":       itemId,
+		"options":      options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("ListRevisions.Request", fields...)
+
+	items, err = m.next.ListRevisions(ctx, spaceId, envId, collectionId, itemId, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"items": items,
+		"err":   err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("ListRevisions.Response", fields...)
+
+	return items, err
+}
+
+func (m *loggingMiddleware) Publish(ctx context.Context, item *items.Item, options ...*items.PublishOptions) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"item":    item,
+		"options": options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Publish.Request", fields...)
+
+	err = m.next.Publish(ctx, item, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Publish.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Unarchive(ctx context.Context, item *items.Item, options ...*items.UnarchiveOptions) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"item":    item,
+		"options": options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Unarchive.Request", fields...)
+
+	err = m.next.Unarchive(ctx, item, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Unarchive.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Undelete(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.UndeleteOptions) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":          ctx,
+		"spaceId":      spaceId,
+		"envId":        envId,
+		"collectionId": collectionId,
+		"itemId":       itemId,
+		"options":      options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Undelete.Request", fields...)
+
+	err = m.next.Undelete(ctx, spaceId, envId, collectionId, itemId, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Undelete.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Unpublish(ctx context.Context, item *items.Item, options ...*items.UnpublishOptions) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"item":    item,
+		"options": options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Unpublish.Request", fields...)
+
+	err = m.next.Unpublish(ctx, item, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Unpublish.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Update(ctx context.Context, item *items.Item, options ...*items.UpdateOptions) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"item":    item,
+		"options": options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Request", fields...)
+
+	err = m.next.Update(ctx, item, options...)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Response", fields...)
+
+	return err
+}
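
Note: every "<Method>.Request"/"<Method>.Response" entry above is written at Debug level, so the logger handed to this middleware must have Debug enabled for access logs to appear. A minimal sketch using the standard zap configuration API (nothing project-specific is assumed):

```go
// Sketch only: build a zap logger with Debug enabled so the generated
// access-log entries are actually emitted. Assumes the usual
// go.uber.org/zap and go.uber.org/zap/zapcore imports.
cfg := zap.NewDevelopmentConfig()
cfg.Level = zap.NewAtomicLevelAt(zapcore.DebugLevel)
logger, err := cfg.Build()
if err != nil {
	panic(err) // illustration only
}
_ = logger // pass this logger to LoggingMiddleware / WithLog
```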
diff --git a/pkg/items/middleware/middleware.go b/pkg/items/middleware/middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..a1090fe5ad072cd42682c1a4fad8504f22136926
--- /dev/null
+++ b/pkg/items/middleware/middleware.go
@@ -0,0 +1,28 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/middleware
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/items -i Items -t ../../../assets/templates/middleware/middleware -o middleware.go -l ""
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	"go.uber.org/zap"
+)
+
+type Middleware func(items.Items) items.Items
+
+func WithLog(s items.Items, logger *zap.Logger, log_access bool) items.Items {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	logger = logger.Named("Items")
+	s = ErrorLoggingMiddleware(logger)(s)
+	if log_access {
+		s = LoggingMiddleware(logger)(s)
+	}
+	s = RecoveringMiddleware(logger)(s)
+	return s
+}
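
For context: WithLog composes the chain so that RecoveringMiddleware is the outermost wrapper, followed by the access-logging middleware (only when log_access is true), then the error-logging middleware, and finally the wrapped service. A minimal wiring sketch; the caller-side constructor and logger setup are placeholders, not part of this change:

```go
package main

import (
	"go.uber.org/zap"

	"git.perx.ru/perxis/perxis-go/pkg/items"
	// the generated files declare `package service`, so an alias is used here
	itemsmw "git.perx.ru/perxis/perxis-go/pkg/items/middleware"
)

// wrap decorates a concrete Items implementation with the generated middlewares:
// panic recovery -> access logging -> error logging -> svc.
func wrap(svc items.Items, logger *zap.Logger) items.Items {
	return itemsmw.WithLog(svc, logger, true) // true enables the access log
}
```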
diff --git a/pkg/items/middleware/recovering_middleware.go b/pkg/items/middleware/recovering_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..244fc8a0638fa9ad812afeb3f919907171fb9b1e
--- /dev/null
+++ b/pkg/items/middleware/recovering_middleware.go
@@ -0,0 +1,248 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/recovery
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/items -i Items -t ../../../assets/templates/middleware/recovery -o recovering_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	"go.uber.org/zap"
+)
+
+// recoveringMiddleware implements items.Items and recovers from panics in the wrapped implementation
+type recoveringMiddleware struct {
+	logger *zap.Logger
+	next   items.Items
+}
+
+// RecoveringMiddleware wraps an implementation of items.Items, converting panics into errors and logging them
+func RecoveringMiddleware(logger *zap.Logger) Middleware {
+	return func(next items.Items) items.Items {
+		return &recoveringMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *recoveringMiddleware) Aggregate(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.AggregateOptions) (result map[string]interface{}, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Aggregate(ctx, spaceId, envId, collectionId, filter, options...)
+}
+
+func (m *recoveringMiddleware) AggregatePublished(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.AggregatePublishedOptions) (result map[string]interface{}, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.AggregatePublished(ctx, spaceId, envId, collectionId, filter, options...)
+}
+
+func (m *recoveringMiddleware) Archive(ctx context.Context, item *items.Item, options ...*items.ArchiveOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Archive(ctx, item, options...)
+}
+
+func (m *recoveringMiddleware) Create(ctx context.Context, item *items.Item, opts ...*items.CreateOptions) (created *items.Item, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Create(ctx, item, opts...)
+}
+
+func (m *recoveringMiddleware) Delete(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.DeleteOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Delete(ctx, spaceId, envId, collectionId, itemId, options...)
+}
+
+func (m *recoveringMiddleware) Find(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.FindOptions) (items []*items.Item, total int, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Find(ctx, spaceId, envId, collectionId, filter, options...)
+}
+
+func (m *recoveringMiddleware) FindArchived(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.FindArchivedOptions) (items []*items.Item, total int, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.FindArchived(ctx, spaceId, envId, collectionId, filter, options...)
+}
+
+func (m *recoveringMiddleware) FindPublished(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.FindPublishedOptions) (items []*items.Item, total int, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.FindPublished(ctx, spaceId, envId, collectionId, filter, options...)
+}
+
+func (m *recoveringMiddleware) Get(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.GetOptions) (item *items.Item, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Get(ctx, spaceId, envId, collectionId, itemId, options...)
+}
+
+func (m *recoveringMiddleware) GetPublished(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.GetPublishedOptions) (item *items.Item, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.GetPublished(ctx, spaceId, envId, collectionId, itemId, options...)
+}
+
+func (m *recoveringMiddleware) GetRevision(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, revisionId string, options ...*items.GetRevisionOptions) (item *items.Item, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.GetRevision(ctx, spaceId, envId, collectionId, itemId, revisionId, options...)
+}
+
+func (m *recoveringMiddleware) Introspect(ctx context.Context, item *items.Item, opts ...*items.IntrospectOptions) (itm *items.Item, sch *schema.Schema, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Introspect(ctx, item, opts...)
+}
+
+func (m *recoveringMiddleware) ListRevisions(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.ListRevisionsOptions) (items []*items.Item, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.ListRevisions(ctx, spaceId, envId, collectionId, itemId, options...)
+}
+
+func (m *recoveringMiddleware) Publish(ctx context.Context, item *items.Item, options ...*items.PublishOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Publish(ctx, item, options...)
+}
+
+func (m *recoveringMiddleware) Unarchive(ctx context.Context, item *items.Item, options ...*items.UnarchiveOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Unarchive(ctx, item, options...)
+}
+
+func (m *recoveringMiddleware) Undelete(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.UndeleteOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Undelete(ctx, spaceId, envId, collectionId, itemId, options...)
+}
+
+func (m *recoveringMiddleware) Unpublish(ctx context.Context, item *items.Item, options ...*items.UnpublishOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Unpublish(ctx, item, options...)
+}
+
+func (m *recoveringMiddleware) Update(ctx context.Context, item *items.Item, options ...*items.UpdateOptions) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Update(ctx, item, options...)
+}
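
RecoveringMiddleware turns a panic in any wrapped method into a returned error and logs it at Error level, so a misbehaving implementation cannot crash the caller. An illustrative sketch with a hypothetical stub (panickyItems is not part of this change):

```go
// panickyItems is a hypothetical items.Items stub whose Publish panics;
// only the wrapper's behaviour is being illustrated here.
var svc items.Items = &panickyItems{}
svc = RecoveringMiddleware(zap.NewNop())(svc)

err := svc.Publish(context.Background(), &items.Item{})
// err is non-nil and carries the panic value; the panic is also logged
// via logger.Error("panic", ...) instead of propagating to the caller.
```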
diff --git a/pkg/items/mocks/Items.go b/pkg/items/mocks/Items.go
new file mode 100644
index 0000000000000000000000000000000000000000..1d3ea35f22d13e65afc70e6c7cc847c60aa0f8a7
--- /dev/null
+++ b/pkg/items/mocks/Items.go
@@ -0,0 +1,538 @@
+// Code generated by mockery v2.14.0. DO NOT EDIT.
+
+package mocks
+
+import (
+	context "context"
+
+	items "git.perx.ru/perxis/perxis-go/pkg/items"
+	schema "git.perx.ru/perxis/perxis-go/pkg/schema"
+	mock "github.com/stretchr/testify/mock"
+)
+
+// Items is an autogenerated mock type for the Items type
+type Items struct {
+	mock.Mock
+}
+
+// Aggregate provides a mock function with given fields: ctx, spaceId, envId, collectionId, filter, options
+func (_m *Items) Aggregate(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.AggregateOptions) (map[string]interface{}, error) {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, spaceId, envId, collectionId, filter)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 map[string]interface{}
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, *items.Filter, ...*items.AggregateOptions) map[string]interface{}); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId, filter, options...)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(map[string]interface{})
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, string, *items.Filter, ...*items.AggregateOptions) error); ok {
+		r1 = rf(ctx, spaceId, envId, collectionId, filter, options...)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// AggregatePublished provides a mock function with given fields: ctx, spaceId, envId, collectionId, filter, options
+func (_m *Items) AggregatePublished(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.AggregatePublishedOptions) (map[string]interface{}, error) {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, spaceId, envId, collectionId, filter)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 map[string]interface{}
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, *items.Filter, ...*items.AggregatePublishedOptions) map[string]interface{}); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId, filter, options...)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(map[string]interface{})
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, string, *items.Filter, ...*items.AggregatePublishedOptions) error); ok {
+		r1 = rf(ctx, spaceId, envId, collectionId, filter, options...)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// Archive provides a mock function with given fields: ctx, item, options
+func (_m *Items) Archive(ctx context.Context, item *items.Item, options ...*items.ArchiveOptions) error {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, item)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, *items.Item, ...*items.ArchiveOptions) error); ok {
+		r0 = rf(ctx, item, options...)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// Create provides a mock function with given fields: ctx, item, opts
+func (_m *Items) Create(ctx context.Context, item *items.Item, opts ...*items.CreateOptions) (*items.Item, error) {
+	_va := make([]interface{}, len(opts))
+	for _i := range opts {
+		_va[_i] = opts[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, item)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 *items.Item
+	if rf, ok := ret.Get(0).(func(context.Context, *items.Item, ...*items.CreateOptions) *items.Item); ok {
+		r0 = rf(ctx, item, opts...)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*items.Item)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, *items.Item, ...*items.CreateOptions) error); ok {
+		r1 = rf(ctx, item, opts...)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// Delete provides a mock function with given fields: ctx, spaceId, envId, collectionId, itemId, options
+func (_m *Items) Delete(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.DeleteOptions) error {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, spaceId, envId, collectionId, itemId)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, ...*items.DeleteOptions) error); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId, itemId, options...)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// Find provides a mock function with given fields: ctx, spaceId, envId, collectionId, filter, options
+func (_m *Items) Find(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.FindOptions) ([]*items.Item, int, error) {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, spaceId, envId, collectionId, filter)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 []*items.Item
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, *items.Filter, ...*items.FindOptions) []*items.Item); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId, filter, options...)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).([]*items.Item)
+		}
+	}
+
+	var r1 int
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, string, *items.Filter, ...*items.FindOptions) int); ok {
+		r1 = rf(ctx, spaceId, envId, collectionId, filter, options...)
+	} else {
+		r1 = ret.Get(1).(int)
+	}
+
+	var r2 error
+	if rf, ok := ret.Get(2).(func(context.Context, string, string, string, *items.Filter, ...*items.FindOptions) error); ok {
+		r2 = rf(ctx, spaceId, envId, collectionId, filter, options...)
+	} else {
+		r2 = ret.Error(2)
+	}
+
+	return r0, r1, r2
+}
+
+// FindArchived provides a mock function with given fields: ctx, spaceId, envId, collectionId, filter, options
+func (_m *Items) FindArchived(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.FindArchivedOptions) ([]*items.Item, int, error) {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, spaceId, envId, collectionId, filter)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 []*items.Item
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, *items.Filter, ...*items.FindArchivedOptions) []*items.Item); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId, filter, options...)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).([]*items.Item)
+		}
+	}
+
+	var r1 int
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, string, *items.Filter, ...*items.FindArchivedOptions) int); ok {
+		r1 = rf(ctx, spaceId, envId, collectionId, filter, options...)
+	} else {
+		r1 = ret.Get(1).(int)
+	}
+
+	var r2 error
+	if rf, ok := ret.Get(2).(func(context.Context, string, string, string, *items.Filter, ...*items.FindArchivedOptions) error); ok {
+		r2 = rf(ctx, spaceId, envId, collectionId, filter, options...)
+	} else {
+		r2 = ret.Error(2)
+	}
+
+	return r0, r1, r2
+}
+
+// FindPublished provides a mock function with given fields: ctx, spaceId, envId, collectionId, filter, options
+func (_m *Items) FindPublished(ctx context.Context, spaceId string, envId string, collectionId string, filter *items.Filter, options ...*items.FindPublishedOptions) ([]*items.Item, int, error) {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, spaceId, envId, collectionId, filter)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 []*items.Item
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, *items.Filter, ...*items.FindPublishedOptions) []*items.Item); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId, filter, options...)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).([]*items.Item)
+		}
+	}
+
+	var r1 int
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, string, *items.Filter, ...*items.FindPublishedOptions) int); ok {
+		r1 = rf(ctx, spaceId, envId, collectionId, filter, options...)
+	} else {
+		r1 = ret.Get(1).(int)
+	}
+
+	var r2 error
+	if rf, ok := ret.Get(2).(func(context.Context, string, string, string, *items.Filter, ...*items.FindPublishedOptions) error); ok {
+		r2 = rf(ctx, spaceId, envId, collectionId, filter, options...)
+	} else {
+		r2 = ret.Error(2)
+	}
+
+	return r0, r1, r2
+}
+
+// Get provides a mock function with given fields: ctx, spaceId, envId, collectionId, itemId, options
+func (_m *Items) Get(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.GetOptions) (*items.Item, error) {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, spaceId, envId, collectionId, itemId)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 *items.Item
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, ...*items.GetOptions) *items.Item); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId, itemId, options...)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*items.Item)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string, ...*items.GetOptions) error); ok {
+		r1 = rf(ctx, spaceId, envId, collectionId, itemId, options...)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// GetPublished provides a mock function with given fields: ctx, spaceId, envId, collectionId, itemId, options
+func (_m *Items) GetPublished(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.GetPublishedOptions) (*items.Item, error) {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, spaceId, envId, collectionId, itemId)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 *items.Item
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, ...*items.GetPublishedOptions) *items.Item); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId, itemId, options...)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*items.Item)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string, ...*items.GetPublishedOptions) error); ok {
+		r1 = rf(ctx, spaceId, envId, collectionId, itemId, options...)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// GetRevision provides a mock function with given fields: ctx, spaceId, envId, collectionId, itemId, revisionId, options
+func (_m *Items) GetRevision(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, revisionId string, options ...*items.GetRevisionOptions) (*items.Item, error) {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, spaceId, envId, collectionId, itemId, revisionId)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 *items.Item
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, string, ...*items.GetRevisionOptions) *items.Item); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId, itemId, revisionId, options...)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*items.Item)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string, string, ...*items.GetRevisionOptions) error); ok {
+		r1 = rf(ctx, spaceId, envId, collectionId, itemId, revisionId, options...)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// Introspect provides a mock function with given fields: ctx, item, opts
+func (_m *Items) Introspect(ctx context.Context, item *items.Item, opts ...*items.IntrospectOptions) (*items.Item, *schema.Schema, error) {
+	_va := make([]interface{}, len(opts))
+	for _i := range opts {
+		_va[_i] = opts[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, item)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 *items.Item
+	if rf, ok := ret.Get(0).(func(context.Context, *items.Item, ...*items.IntrospectOptions) *items.Item); ok {
+		r0 = rf(ctx, item, opts...)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*items.Item)
+		}
+	}
+
+	var r1 *schema.Schema
+	if rf, ok := ret.Get(1).(func(context.Context, *items.Item, ...*items.IntrospectOptions) *schema.Schema); ok {
+		r1 = rf(ctx, item, opts...)
+	} else {
+		if ret.Get(1) != nil {
+			r1 = ret.Get(1).(*schema.Schema)
+		}
+	}
+
+	var r2 error
+	if rf, ok := ret.Get(2).(func(context.Context, *items.Item, ...*items.IntrospectOptions) error); ok {
+		r2 = rf(ctx, item, opts...)
+	} else {
+		r2 = ret.Error(2)
+	}
+
+	return r0, r1, r2
+}
+
+// ListRevisions provides a mock function with given fields: ctx, spaceId, envId, collectionId, itemId, options
+func (_m *Items) ListRevisions(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.ListRevisionsOptions) ([]*items.Item, error) {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, spaceId, envId, collectionId, itemId)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 []*items.Item
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, ...*items.ListRevisionsOptions) []*items.Item); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId, itemId, options...)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).([]*items.Item)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string, ...*items.ListRevisionsOptions) error); ok {
+		r1 = rf(ctx, spaceId, envId, collectionId, itemId, options...)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// Publish provides a mock function with given fields: ctx, item, options
+func (_m *Items) Publish(ctx context.Context, item *items.Item, options ...*items.PublishOptions) error {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, item)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, *items.Item, ...*items.PublishOptions) error); ok {
+		r0 = rf(ctx, item, options...)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// Unarchive provides a mock function with given fields: ctx, item, options
+func (_m *Items) Unarchive(ctx context.Context, item *items.Item, options ...*items.UnarchiveOptions) error {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, item)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, *items.Item, ...*items.UnarchiveOptions) error); ok {
+		r0 = rf(ctx, item, options...)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// Undelete provides a mock function with given fields: ctx, spaceId, envId, collectionId, itemId, options
+func (_m *Items) Undelete(ctx context.Context, spaceId string, envId string, collectionId string, itemId string, options ...*items.UndeleteOptions) error {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, spaceId, envId, collectionId, itemId)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, ...*items.UndeleteOptions) error); ok {
+		r0 = rf(ctx, spaceId, envId, collectionId, itemId, options...)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// Unpublish provides a mock function with given fields: ctx, item, options
+func (_m *Items) Unpublish(ctx context.Context, item *items.Item, options ...*items.UnpublishOptions) error {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, item)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, *items.Item, ...*items.UnpublishOptions) error); ok {
+		r0 = rf(ctx, item, options...)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// Update provides a mock function with given fields: ctx, item, options
+func (_m *Items) Update(ctx context.Context, item *items.Item, options ...*items.UpdateOptions) error {
+	_va := make([]interface{}, len(options))
+	for _i := range options {
+		_va[_i] = options[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, ctx, item)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, *items.Item, ...*items.UpdateOptions) error); ok {
+		r0 = rf(ctx, item, options...)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+type mockConstructorTestingTNewItems interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewItems creates a new instance of Items. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewItems(t mockConstructorTestingTNewItems) *Items {
+	mock := &Items{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
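
The mock follows the usual mockery/testify pattern: expectations are declared with On(...).Return(...), and NewItems registers automatic expectation assertion via t.Cleanup. A sketch of typical test usage (argument values are illustrative only):

```go
package mocks_test

import (
	"context"
	"testing"

	"git.perx.ru/perxis/perxis-go/pkg/items"
	"git.perx.ru/perxis/perxis-go/pkg/items/mocks"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestGetWithMock(t *testing.T) {
	m := mocks.NewItems(t) // expectations are asserted automatically on cleanup

	want := &items.Item{}
	m.On("Get", mock.Anything, "space", "env", "coll", "item").
		Return(want, nil)

	got, err := m.Get(context.Background(), "space", "env", "coll", "item")
	require.NoError(t, err)
	require.Same(t, want, got)
}
```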
diff --git a/pkg/items/mocks/PreSaver.go b/pkg/items/mocks/PreSaver.go
new file mode 100644
index 0000000000000000000000000000000000000000..6010e17bd14db83518507ac53ab35076a6cbc5bf
--- /dev/null
+++ b/pkg/items/mocks/PreSaver.go
@@ -0,0 +1,62 @@
+// Code generated by mockery v2.14.0. DO NOT EDIT.
+
+package mocks
+
+import (
+	context "context"
+
+	items "git.perx.ru/perxis/perxis-go/pkg/items"
+	field "git.perx.ru/perxis/perxis-go/pkg/schema/field"
+
+	mock "github.com/stretchr/testify/mock"
+)
+
+// PreSaver is an autogenerated mock type for the PreSaver type
+type PreSaver struct {
+	mock.Mock
+}
+
+// PreSave provides a mock function with given fields: ctx, f, v, itemCtx
+func (_m *PreSaver) PreSave(ctx context.Context, f *field.Field, v interface{}, itemCtx *items.Context) (interface{}, bool, error) {
+	ret := _m.Called(ctx, f, v, itemCtx)
+
+	var r0 interface{}
+	if rf, ok := ret.Get(0).(func(context.Context, *field.Field, interface{}, *items.Context) interface{}); ok {
+		r0 = rf(ctx, f, v, itemCtx)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(interface{})
+		}
+	}
+
+	var r1 bool
+	if rf, ok := ret.Get(1).(func(context.Context, *field.Field, interface{}, *items.Context) bool); ok {
+		r1 = rf(ctx, f, v, itemCtx)
+	} else {
+		r1 = ret.Get(1).(bool)
+	}
+
+	var r2 error
+	if rf, ok := ret.Get(2).(func(context.Context, *field.Field, interface{}, *items.Context) error); ok {
+		r2 = rf(ctx, f, v, itemCtx)
+	} else {
+		r2 = ret.Error(2)
+	}
+
+	return r0, r1, r2
+}
+
+type mockConstructorTestingTNewPreSaver interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewPreSaver creates a new instance of PreSaver. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewPreSaver(t mockConstructorTestingTNewPreSaver) *PreSaver {
+	mock := &PreSaver{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/pkg/items/options.go b/pkg/items/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..d48a1cdaacb157e46c1185a37602c1703bb3b59e
--- /dev/null
+++ b/pkg/items/options.go
@@ -0,0 +1,422 @@
+package items
+
+import "git.perx.ru/perxis/perxis-go/pkg/options"
+
+type Options struct {
+	Env               map[string]interface{}
+	Filter            []string
+	PermissionsFilter []string
+}
+
+func MergeOptions(opts ...Options) Options {
+	o := Options{
+		Env:    make(map[string]interface{}),
+		Filter: make([]string, 0),
+	}
+
+	for _, opt := range opts {
+
+		for k, v := range opt.Env {
+			o.Env[k] = v
+		}
+
+		o.Filter = append(o.Filter, opt.Filter...)
+		o.PermissionsFilter = append(o.PermissionsFilter, opt.PermissionsFilter...)
+	}
+
+	return o
+}
+
+type CreateOptions struct {
+	Options
+
+	UpdateAttrs bool
+}
+
+func MergeCreateOptions(opts ...*CreateOptions) *CreateOptions {
+	o := &CreateOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.UpdateAttrs {
+			o.UpdateAttrs = true
+		}
+
+		o.Options = MergeOptions(o.Options, opt.Options)
+	}
+	return o
+}
+
+type IntrospectOptions struct {
+	Options
+	Locale string
+}
+
+func MergeIntrospectOptions(opts ...*IntrospectOptions) *IntrospectOptions {
+	o := &IntrospectOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		o.Options = MergeOptions(o.Options, opt.Options)
+	}
+	return o
+}
+
+type GetOptions struct {
+	Options
+}
+
+func MergeGetOptions(opts ...*GetOptions) *GetOptions {
+	o := &GetOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		o.Options = MergeOptions(o.Options, opt.Options)
+	}
+	return o
+}
+
+type FindOptions struct {
+	Options
+	options.FindOptions
+	Deleted   bool
+	Regular   bool
+	Hidden    bool
+	Templates bool
+}
+
+func NewFindOptions(opts ...interface{}) *FindOptions {
+	fo := &FindOptions{}
+	fo.FindOptions = *options.MergeFindOptions(opts...)
+	return fo
+}
+
+func MergeFindOptions(opts ...*FindOptions) *FindOptions {
+	o := NewFindOptions()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		o.Regular = o.Regular || opt.Regular
+		o.Templates = o.Templates || opt.Templates
+		o.Hidden = o.Hidden || opt.Hidden
+		o.Deleted = o.Deleted || opt.Deleted
+		o.Options = MergeOptions(o.Options, opt.Options)
+		o.FindOptions = *options.MergeFindOptions(&o.FindOptions, &opt.FindOptions)
+	}
+	return o
+}
+
+type UpdateOptions struct {
+	Options
+
+	UpdateAttrs bool
+}
+
+func MergeUpdateOptions(opts ...*UpdateOptions) *UpdateOptions {
+	o := &UpdateOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.UpdateAttrs {
+			o.UpdateAttrs = true
+		}
+
+		o.Options = MergeOptions(o.Options, opt.Options)
+	}
+	return o
+}
+
+type DeleteOptions struct {
+	Options
+
+	Erase bool
+}
+
+func MergeDeleteOptions(opts ...*DeleteOptions) *DeleteOptions {
+	o := &DeleteOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Erase {
+			o.Erase = true
+		}
+
+		o.Options = MergeOptions(o.Options, opt.Options)
+	}
+	return o
+}
+
+type SoftDeleteOptions struct {
+	Options
+}
+
+func MergeSoftDeleteOptions(opts ...*SoftDeleteOptions) *SoftDeleteOptions {
+	o := &SoftDeleteOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		o.Options = MergeOptions(o.Options, opt.Options)
+	}
+	return o
+}
+
+type UndeleteOptions struct {
+	Options
+}
+
+func MergeUndeleteOptions(opts ...*UndeleteOptions) *UndeleteOptions {
+	o := &UndeleteOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		o.Options = MergeOptions(o.Options, opt.Options)
+	}
+	return o
+}
+
+type PublishOptions struct {
+	Options
+
+	UpdateAttrs bool
+}
+
+func MergePublishOptions(opts ...*PublishOptions) *PublishOptions {
+	o := &PublishOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.UpdateAttrs {
+			o.UpdateAttrs = true
+		}
+
+		o.Options = MergeOptions(o.Options, opt.Options)
+	}
+	return o
+}
+
+type UnpublishOptions struct {
+	Options
+}
+
+func MergeUnpublishOptions(opts ...*UnpublishOptions) *UnpublishOptions {
+	o := &UnpublishOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		o.Options = MergeOptions(o.Options, opt.Options)
+	}
+	return o
+}
+
+type GetPublishedOptions struct {
+	Options
+	LocaleID string
+}
+
+func NewGetPublishedOptions(oo ...interface{}) *GetPublishedOptions {
+	fo := &GetPublishedOptions{}
+	for _, o := range oo {
+		switch o := o.(type) {
+		case string:
+			fo.LocaleID = o
+		}
+	}
+	return fo
+}
+
+func MergeGetPublishedOptions(opts ...*GetPublishedOptions) *GetPublishedOptions {
+	o := &GetPublishedOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		o.Options = MergeOptions(o.Options, opt.Options)
+		if opt.LocaleID != "" {
+			o.LocaleID = opt.LocaleID
+		}
+	}
+	return o
+}
+
+type FindPublishedOptions struct {
+	Options
+	options.FindOptions
+	LocaleID  string
+	Regular   bool
+	Hidden    bool
+	Templates bool
+}
+
+func NewFindPublishedOptions(opts ...interface{}) *FindPublishedOptions {
+	fo := &FindPublishedOptions{}
+	for _, o := range opts {
+		switch o := o.(type) {
+		case string:
+			fo.LocaleID = o
+		}
+	}
+
+	fo.FindOptions = *options.MergeFindOptions(opts...)
+	return fo
+}
+
+func MergeFindPublishedOptions(opts ...*FindPublishedOptions) *FindPublishedOptions {
+	o := NewFindPublishedOptions()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		o.Regular = o.Regular || opt.Regular
+		o.Templates = o.Templates || opt.Templates
+		o.Hidden = o.Hidden || opt.Hidden
+		o.Options = MergeOptions(o.Options, opt.Options)
+		o.FindOptions = *options.MergeFindOptions(&o.FindOptions, &opt.FindOptions)
+
+		if opt.LocaleID != "" {
+			o.LocaleID = opt.LocaleID
+		}
+	}
+	return o
+}
+
+type GetRevisionOptions struct {
+	Options
+}
+
+func MergeGetRevisionOptions(opts ...*GetRevisionOptions) *GetRevisionOptions {
+	o := &GetRevisionOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		o.Options = MergeOptions(o.Options, opt.Options)
+	}
+	return o
+}
+
+type ListRevisionsOptions struct {
+	Options
+	options.FindOptions
+}
+
+func MergeListRevisionsOptions(opts ...*ListRevisionsOptions) *ListRevisionsOptions {
+	o := &ListRevisionsOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		o.Options = MergeOptions(o.Options, opt.Options)
+		o.FindOptions = *options.MergeFindOptions(&o.FindOptions, &opt.FindOptions)
+	}
+	return o
+}
+
+type ArchiveOptions struct {
+	Options
+}
+
+func MergeArchiveOptions(opts ...*ArchiveOptions) *ArchiveOptions {
+	o := &ArchiveOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		o.Options = MergeOptions(o.Options, opt.Options)
+	}
+	return o
+}
+
+type FindArchivedOptions struct {
+	Options
+	options.FindOptions
+}
+
+func NewFindArchivedOptions(oo ...interface{}) *FindArchivedOptions {
+	fo := &FindArchivedOptions{}
+	fo.FindOptions = *options.MergeFindOptions(oo...)
+	return fo
+}
+
+func MergeFindArchivedOptions(opts ...*FindArchivedOptions) *FindArchivedOptions {
+	o := NewFindArchivedOptions()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		o.Options = MergeOptions(o.Options, opt.Options)
+		o.FindOptions = *options.MergeFindOptions(&o.FindOptions, &opt.FindOptions)
+	}
+	return o
+}
+
+type UnarchiveOptions struct {
+	Options
+}
+
+func MergeUnarchiveOptions(opts ...*UnarchiveOptions) *UnarchiveOptions {
+	o := &UnarchiveOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		o.Options = MergeOptions(o.Options, opt.Options)
+	}
+	return o
+}
+
+type AggregateOptions struct {
+	Options
+	options.SortOptions
+
+	// Fields defines the fields that must be returned or computed in the result.
+	// Key (string) - the name of the field under which the result is added.
+	// Value (string) - an expression whose evaluation produces the result.
+	// Expression functions (for a field F of type T):
+	// - distinct(F) - all values of the field, result type []T
+	// - min(F) - minimum value of the field, result type T
+	// - max(F) - maximum value of the field, result type T
+	// - avg(F) - average value of the field, result type T
+	// - sum(F) - sum of the field values, result type T
+	// - count() - number of records, result type int
+	Fields map[string]string
+}
+
+func MergeAggregateOptions(opts ...*AggregateOptions) *AggregateOptions {
+	o := &AggregateOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		o.Options = MergeOptions(o.Options, opt.Options)
+
+		if o.Fields == nil {
+			o.Fields = opt.Fields
+			continue
+		}
+		for k, v := range opt.Fields {
+			o.Fields[k] = v
+		}
+	}
+	return o
+}
+
+type AggregatePublishedOptions AggregateOptions
+
+func MergeAggregatePublishedOptions(opts ...*AggregatePublishedOptions) *AggregatePublishedOptions {
+	ao := make([]*AggregateOptions, len(opts))
+	for i, opt := range opts {
+		ao[i] = (*AggregateOptions)(opt)
+	}
+	merged := MergeAggregateOptions(ao...)
+	return (*AggregatePublishedOptions)(merged)
+}
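
A brief sketch of how the merge helpers compose option values; the filter string and environment value below are placeholders:

package items_test

import (
	"fmt"

	"git.perx.ru/perxis/perxis-go/pkg/items"
)

func ExampleMergeFindOptions() {
	a := &items.FindOptions{Regular: true}
	a.Options.Filter = []string{"status = draft"}

	b := &items.FindOptions{Hidden: true}
	b.Options.Env = map[string]interface{}{"stage": "test"}

	// nil entries are skipped; boolean flags OR together, Env and Filter accumulate.
	merged := items.MergeFindOptions(a, nil, b)
	fmt.Println(merged.Regular, merged.Hidden, merged.Options.Filter, merged.Options.Env["stage"])
	// expected: true true [status = draft] test
}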
diff --git a/pkg/items/pagination.go b/pkg/items/pagination.go
new file mode 100644
index 0000000000000000000000000000000000000000..7f990dc6c6e8d0684553e4039b86580236cc2ef0
--- /dev/null
+++ b/pkg/items/pagination.go
@@ -0,0 +1,137 @@
+package items
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/content"
+	"git.perx.ru/perxis/perxis-go/pkg/data"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/options"
+	"google.golang.org/grpc/codes"
+)
+
+type BatchProcessor struct {
+	Content                      *content.Content
+	SpaceID, EnvID, CollectionID string
+	FindOptions                  *FindOptions
+	FindPublishedOptions         *FindPublishedOptions
+	Filter                       *Filter
+
+	pageSize, pageNum int
+	sort              []string
+	processed         int
+}
+
+func (b *BatchProcessor) getBatch(ctx context.Context) ([]*Item, bool, error) {
+	var res []*Item
+	var err error
+	var total int
+
+	if b.FindPublishedOptions != nil {
+		res, total, err = b.Content.Items.FindPublished(
+			ctx,
+			b.SpaceID,
+			b.EnvID,
+			b.CollectionID,
+			b.Filter,
+			&FindPublishedOptions{
+				Regular:     b.FindPublishedOptions.Regular,
+				Hidden:      b.FindPublishedOptions.Hidden,
+				Templates:   b.FindPublishedOptions.Templates,
+				FindOptions: *options.NewFindOptions(b.pageNum, b.pageSize, b.sort...),
+			},
+		)
+	} else {
+		res, total, err = b.Content.Items.Find(
+			ctx,
+			b.SpaceID,
+			b.EnvID,
+			b.CollectionID,
+			b.Filter,
+			&FindOptions{
+				Deleted:     b.FindOptions.Deleted,
+				Regular:     b.FindOptions.Regular,
+				Hidden:      b.FindOptions.Hidden,
+				Templates:   b.FindOptions.Templates,
+				FindOptions: *options.NewFindOptions(b.pageNum, b.pageSize, b.sort...),
+			},
+		)
+	}
+
+	if err == nil {
+		b.processed += len(res)
+		b.pageNum++
+	}
+
+	return res, b.processed != total, err
+}
+
+func (b *BatchProcessor) next(ctx context.Context) (res []*Item, next bool, err error) {
+
+	for {
+		res, next, err = b.getBatch(ctx)
+		if err != nil {
+			if errors.GetStatusCode(err) == codes.ResourceExhausted && b.reducePageSize() {
+				continue
+			}
+
+			return nil, false, err
+		}
+
+		break
+	}
+
+	return res, next, nil
+}
+
+func (b *BatchProcessor) reducePageSize() bool {
+	if b.pageSize == 1 {
+		return false
+	}
+
+	b.pageNum = 2 * b.pageNum
+	b.pageSize = b.pageSize / 2
+
+	return true
+}
+
+func (b *BatchProcessor) Do(ctx context.Context, f func(batch []*Item) error) (int, error) {
+
+	if b.FindOptions == nil && b.FindPublishedOptions == nil {
+		b.FindOptions = new(FindOptions)
+	}
+	if b.FindOptions != nil {
+		b.pageSize = b.FindOptions.PageSize
+		b.sort = b.FindOptions.Sort
+	}
+	if b.FindPublishedOptions != nil {
+		b.pageSize = b.FindPublishedOptions.PageSize
+		b.sort = b.FindPublishedOptions.Sort
+	}
+
+	if b.pageSize == 0 {
+		b.pageSize = 128
+	}
+
+	if b.Filter != nil && (len(b.Filter.ID) > 0 || len(b.Filter.Q) > 0) && !data.Contains("_id", b.sort) {
+		b.sort = append(b.sort, "_id")
+	}
+
+	var err error
+
+	next := true
+	for next {
+
+		var batch []*Item
+
+		batch, next, err = b.next(ctx)
+		if err != nil {
+			return 0, err
+		}
+
+		if err = f(batch); err != nil {
+			return 0, err
+		}
+	}
+	return b.processed, nil
+}
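
A rough sketch of driving BatchProcessor over a collection; the space, environment and collection identifiers and the filter expression are placeholders:

package items_test

import (
	"context"

	"git.perx.ru/perxis/perxis-go/pkg/content"
	"git.perx.ru/perxis/perxis-go/pkg/items"
)

// processAll walks the collection page by page (128 items per page by default)
// and hands each batch to the callback; any error aborts the whole run.
func processAll(ctx context.Context, cnt *content.Content) (int, error) {
	bp := &items.BatchProcessor{
		Content:      cnt,
		SpaceID:      "space",
		EnvID:        "master",
		CollectionID: "articles",
		Filter:       items.NewFilter(`tags = "news"`),
	}

	return bp.Do(ctx, func(batch []*items.Item) error {
		for _, itm := range batch {
			_ = itm // handle each item here
		}
		return nil
	})
}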
diff --git a/pkg/items/service.go b/pkg/items/service.go
new file mode 100644
index 0000000000000000000000000000000000000000..c10a69c55878a1fbfe571dc0fad594c338dccb99
--- /dev/null
+++ b/pkg/items/service.go
@@ -0,0 +1,151 @@
+package items
+
+import (
+	"context"
+	"regexp"
+
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/filter"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	"git.perx.ru/perxis/perxis-go/pkg/schema/field"
+)
+
+// @microgen grpc
+// @protobuf git.perx.ru/perxis/perxis-go/proto/items
+// @grpc-addr content.items.Items
+type Items interface {
+	Create(ctx context.Context, item *Item, opts ...*CreateOptions) (created *Item, err error)
+	Introspect(ctx context.Context, item *Item, opts ...*IntrospectOptions) (itm *Item, sch *schema.Schema, err error)
+	Get(ctx context.Context, spaceId, envId, collectionId, itemId string, options ...*GetOptions) (item *Item, err error)
+	Find(ctx context.Context, spaceId, envId, collectionId string, filter *Filter, options ...*FindOptions) (items []*Item, total int, err error)
+	Update(ctx context.Context, item *Item, options ...*UpdateOptions) (err error)
+
+	// Delete removes an item.
+	// If the DeleteOptions.Erase flag is set, the data is permanently removed from the system.
+	// Otherwise a "soft delete" is performed: the item is marked as deleted, can be restored with Items.Undelete and is still returned by Items.Get/Find.
+	Delete(ctx context.Context, spaceId, envId, collectionId, itemId string, options ...*DeleteOptions) (err error)
+
+	// Undelete restores items after a "soft delete"
+	Undelete(ctx context.Context, spaceId, envId, collectionId, itemId string, options ...*UndeleteOptions) (err error)
+
+	Publish(ctx context.Context, item *Item, options ...*PublishOptions) (err error)
+	Unpublish(ctx context.Context, item *Item, options ...*UnpublishOptions) (err error)
+	GetPublished(ctx context.Context, spaceId, envId, collectionId, itemId string, options ...*GetPublishedOptions) (item *Item, err error)
+	FindPublished(ctx context.Context, spaceId, envId, collectionId string, filter *Filter, options ...*FindPublishedOptions) (items []*Item, total int, err error)
+
+	GetRevision(ctx context.Context, spaceId, envId, collectionId, itemId, revisionId string, options ...*GetRevisionOptions) (item *Item, err error)
+	ListRevisions(ctx context.Context, spaceId, envId, collectionId, itemId string, options ...*ListRevisionsOptions) (items []*Item, err error)
+
+	Archive(ctx context.Context, item *Item, options ...*ArchiveOptions) (err error)
+	FindArchived(ctx context.Context, spaceId, envId, collectionId string, filter *Filter, options ...*FindArchivedOptions) (items []*Item, total int, err error)
+	Unarchive(ctx context.Context, item *Item, options ...*UnarchiveOptions) (err error)
+
+	// Aggregate performs data aggregation
+	Aggregate(ctx context.Context, spaceId, envId, collectionId string, filter *Filter, options ...*AggregateOptions) (result map[string]interface{}, err error)
+	// AggregatePublished performs aggregation over published data
+	AggregatePublished(ctx context.Context, spaceId, envId, collectionId string, filter *Filter, options ...*AggregatePublishedOptions) (result map[string]interface{}, err error)
+}
+
+// PreSaver is an interface that a field can implement to receive the PreSave event before an Item is saved to Storage
+type PreSaver interface {
+	PreSave(ctx context.Context, f *field.Field, v interface{}, itemCtx *Context) (interface{}, bool, error)
+}
+
+type Filter struct {
+	ID     []string
+	Data   []*filter.Filter
+	Search string // Search query; only one query is supported at a time
+	Q      []string
+}
+
+func NewFilter(params ...interface{}) *Filter {
+	f := &Filter{}
+	for _, p := range params {
+		switch v := p.(type) {
+		case *filter.Filter:
+			f.Data = append(f.Data, v)
+		case string:
+			f.Q = append(f.Q, v)
+		}
+	}
+	return f
+}
+
+// AggregateExpRe is the format that an aggregation expression must match
+var AggregateExpRe = regexp.MustCompile(`([a-zA-Z]+)\((.*)\)`)
+
+func ParseAggregateExp(exp string) (string, string, bool) {
+	ss := AggregateExpRe.FindAllStringSubmatch(exp, -1)
+	if len(ss) == 0 || len(ss[0]) < 3 {
+		return "", "", false
+	}
+	return ss[0][1], ss[0][2], true
+}
+
+func DecodeAggregateResult(ctx context.Context, request map[string]string, r map[string]interface{}, s *schema.Schema) (map[string]interface{}, error) {
+	result := make(map[string]interface{}, len(r))
+	for outputField, exp := range request {
+
+		funcName, fldName, ok := ParseAggregateExp(exp)
+		if !ok || fldName == "" {
+			if v, ok := r[outputField]; ok {
+				result[outputField] = v
+			}
+			continue
+		}
+
+		schemaFld := s.GetField(fldName)
+		if schemaFld == nil {
+			if v, ok := r[outputField]; ok {
+				result[outputField] = v
+			}
+			continue
+		}
+
+		if funcName == "distinct" {
+			schemaFld = field.Array(schemaFld)
+		}
+
+		data, err := schema.Decode(ctx, schemaFld, r[outputField])
+		if err != nil {
+			return nil, errors.Wrapf(err, "decode data for field '%s'", outputField)
+		}
+		result[outputField] = data
+	}
+
+	return result, nil
+}
+
+func EncodeAggregateResult(ctx context.Context, request map[string]string, r map[string]interface{}, s *schema.Schema) (map[string]interface{}, error) {
+	result := make(map[string]interface{}, len(r))
+	for outputField, exp := range request {
+
+		funcName, fldName, ok := ParseAggregateExp(exp)
+		if !ok || fldName == "" {
+			if v, ok := r[outputField]; ok {
+				result[outputField] = v
+			}
+			continue
+		}
+
+		schemaFld := s.GetField(fldName)
+		if schemaFld == nil {
+			if v, ok := r[outputField]; ok {
+				result[outputField] = v
+			}
+			continue
+		}
+
+		if funcName == "distinct" {
+			schemaFld = field.Array(schemaFld)
+		}
+
+		data, err := schema.Encode(ctx, schemaFld, r[outputField])
+		if err != nil {
+			return nil, errors.Wrapf(err, "decode data for field '%s'", outputField)
+		}
+		result[outputField] = data
+	}
+
+	return result, nil
+}
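
A short sketch of how an aggregation request is shaped and how its expressions are parsed; the output and field names are placeholders:

package items_test

import (
	"fmt"

	"git.perx.ru/perxis/perxis-go/pkg/items"
)

func ExampleParseAggregateExp() {
	opts := &items.AggregateOptions{
		Fields: map[string]string{
			"total":   "count()",          // number of matched records
			"authors": "distinct(author)", // every distinct author value
		},
	}

	for out, exp := range opts.Fields {
		fn, fld, ok := items.ParseAggregateExp(exp)
		fmt.Println(out, fn, fld, ok)
	}
}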
diff --git a/pkg/items/transport/client.go b/pkg/items/transport/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..3f6bd04ceab90dad415d963c6db3d1a9f4fb4b47
--- /dev/null
+++ b/pkg/items/transport/client.go
@@ -0,0 +1,266 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transport
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	"github.com/hashicorp/go-multierror"
+	"google.golang.org/grpc/status"
+)
+
+func (set EndpointsSet) Create(arg0 context.Context, arg1 *items.Item, arg2 ...*items.CreateOptions) (res0 *items.Item, res1 error) {
+	request := CreateRequest{
+		Item: arg1,
+		Opts: arg2,
+	}
+	response, res1 := set.CreateEndpoint(arg0, &request)
+	if res1 != nil {
+		return
+	}
+	return response.(*CreateResponse).Created, res1
+}
+
+func (set EndpointsSet) Introspect(arg0 context.Context, arg1 *items.Item, arg2 ...*items.IntrospectOptions) (res0 *items.Item, res1 *schema.Schema, res2 error) {
+	request := IntrospectRequest{
+		Item: arg1,
+		Opts: arg2,
+	}
+	response, res2 := set.IntrospectEndpoint(arg0, &request)
+	if res2 != nil {
+		return
+	}
+	resp := response.(*IntrospectResponse)
+
+	if len(resp.ValidationErrors) > 0 {
+		var merr *multierror.Error
+		for _, err := range resp.ValidationErrors {
+			var fieldErr errors.FieldError
+			if errors.As(err, &fieldErr) {
+				merr = multierror.Append(merr, fieldErr)
+			}
+		}
+
+		res2 = errors.Wrap(merr, "validation error")
+
+	}
+	return resp.Item, resp.Schema, res2
+}
+
+func (set EndpointsSet) Get(arg0 context.Context, arg1, arg2, arg3, arg4 string, arg5 ...*items.GetOptions) (res0 *items.Item, res1 error) {
+	request := GetRequest{
+		CollectionId: arg3,
+		EnvId:        arg2,
+		ItemId:       arg4,
+		SpaceId:      arg1,
+		Options:      arg5,
+	}
+	response, res1 := set.GetEndpoint(arg0, &request)
+	if res1 != nil {
+		return
+	}
+	return response.(*GetResponse).Item, res1
+}
+
+func (set EndpointsSet) Find(arg0 context.Context, arg1, arg2, arg3 string, arg4 *items.Filter, arg5 ...*items.FindOptions) (res0 []*items.Item, res1 int, res2 error) {
+	request := FindRequest{
+		CollectionId: arg3,
+		EnvId:        arg2,
+		Filter:       arg4,
+		Options:      arg5,
+		SpaceId:      arg1,
+	}
+	response, res2 := set.FindEndpoint(arg0, &request)
+	if res2 != nil {
+		return
+	}
+	return response.(*FindResponse).Items, response.(*FindResponse).Total, res2
+}
+
+func (set EndpointsSet) Update(arg0 context.Context, arg1 *items.Item, arg2 ...*items.UpdateOptions) (res0 error) {
+	request := UpdateRequest{Item: arg1, Options: arg2}
+	_, res0 = set.UpdateEndpoint(arg0, &request)
+	if res0 != nil {
+		return
+	}
+	return res0
+}
+
+func (set EndpointsSet) Delete(arg0 context.Context, arg1, arg2, arg3, arg4 string, options ...*items.DeleteOptions) (res0 error) {
+	request := DeleteRequest{
+		CollectionId: arg3,
+		EnvId:        arg2,
+		ItemId:       arg4,
+		SpaceId:      arg1,
+		Options:      options,
+	}
+	_, res0 = set.DeleteEndpoint(arg0, &request)
+	if res0 != nil {
+		return
+	}
+	return res0
+}
+
+func (set EndpointsSet) Undelete(arg0 context.Context, arg1, arg2, arg3, arg4 string, options ...*items.UndeleteOptions) (res0 error) {
+	request := UndeleteRequest{
+		CollectionId: arg3,
+		EnvId:        arg2,
+		ItemId:       arg4,
+		SpaceId:      arg1,
+		Options:      options,
+	}
+	_, res0 = set.UndeleteEndpoint(arg0, &request)
+	if res0 != nil {
+		return
+	}
+	return res0
+}
+
+func (set EndpointsSet) Publish(arg0 context.Context, arg1 *items.Item, arg2 ...*items.PublishOptions) (res0 error) {
+	request := PublishRequest{Item: arg1, Options: arg2}
+	_, res0 = set.PublishEndpoint(arg0, &request)
+	if res0 != nil {
+		return
+	}
+	return res0
+}
+
+func (set EndpointsSet) Unpublish(arg0 context.Context, arg1 *items.Item, arg2 ...*items.UnpublishOptions) (res0 error) {
+	request := UnpublishRequest{Item: arg1, Options: arg2}
+	_, res0 = set.UnpublishEndpoint(arg0, &request)
+	if res0 != nil {
+		return
+	}
+	return res0
+}
+
+func (set EndpointsSet) GetPublished(arg0 context.Context, arg1, arg2, arg3, arg4 string, arg5 ...*items.GetPublishedOptions) (res0 *items.Item, res1 error) {
+	request := GetPublishedRequest{
+		CollectionId: arg3,
+		EnvId:        arg2,
+		ItemId:       arg4,
+		SpaceId:      arg1,
+		Options:      arg5,
+	}
+	response, res1 := set.GetPublishedEndpoint(arg0, &request)
+	if res1 != nil {
+		return
+	}
+	return response.(*GetPublishedResponse).Item, res1
+}
+
+func (set EndpointsSet) FindPublished(arg0 context.Context, arg1, arg2, arg3 string, arg4 *items.Filter, arg5 ...*items.FindPublishedOptions) (res0 []*items.Item, res1 int, res2 error) {
+	request := FindPublishedRequest{
+		CollectionId: arg3,
+		EnvId:        arg2,
+		Filter:       arg4,
+		Options:      arg5,
+		SpaceId:      arg1,
+	}
+	response, res2 := set.FindPublishedEndpoint(arg0, &request)
+	if res2 != nil {
+		return
+	}
+	return response.(*FindPublishedResponse).Items, response.(*FindPublishedResponse).Total, res2
+}
+
+func (set EndpointsSet) GetRevision(arg0 context.Context, arg1 string, arg2 string, arg3 string, arg4 string, arg5 string, arg6 ...*items.GetRevisionOptions) (res0 *items.Item, res1 error) {
+	request := GetRevisionRequest{
+		CollectionId: arg3,
+		EnvId:        arg2,
+		ItemId:       arg4,
+		RevisionId:   arg5,
+		SpaceId:      arg1,
+		Options:      arg6,
+	}
+	response, res1 := set.GetRevisionEndpoint(arg0, &request)
+	if res1 != nil {
+		return
+	}
+	return response.(*GetRevisionResponse).Item, res1
+}
+
+func (set EndpointsSet) ListRevisions(arg0 context.Context, arg1, arg2, arg3, arg4 string, arg5 ...*items.ListRevisionsOptions) (res0 []*items.Item, res1 error) {
+	request := ListRevisionsRequest{
+		CollectionId: arg3,
+		EnvId:        arg2,
+		ItemId:       arg4,
+		SpaceId:      arg1,
+		Options:      arg5,
+	}
+	response, res1 := set.ListRevisionsEndpoint(arg0, &request)
+	if res1 != nil {
+		return
+	}
+	return response.(*ListRevisionsResponse).Items, res1
+}
+
+func (set EndpointsSet) Archive(arg0 context.Context, arg1 *items.Item, arg2 ...*items.ArchiveOptions) (res0 error) {
+	request := ArchiveRequest{Item: arg1, Options: arg2}
+	_, res0 = set.ArchiveEndpoint(arg0, &request)
+	if res0 != nil {
+		return
+	}
+	return res0
+}
+
+func (set EndpointsSet) FindArchived(arg0 context.Context, arg1, arg2, arg3 string, arg4 *items.Filter, arg5 ...*items.FindArchivedOptions) (res0 []*items.Item, res1 int, res2 error) {
+	request := FindArchivedRequest{
+		CollectionId: arg3,
+		EnvId:        arg2,
+		Options:      arg5,
+		Filter:       arg4,
+		SpaceId:      arg1,
+	}
+	response, res2 := set.FindArchivedEndpoint(arg0, &request)
+	if res2 != nil {
+		return
+	}
+	return response.(*FindArchivedResponse).Items, response.(*FindArchivedResponse).Total, res2
+}
+
+func (set EndpointsSet) Unarchive(arg0 context.Context, arg1 *items.Item, arg2 ...*items.UnarchiveOptions) (res0 error) {
+	request := UnarchiveRequest{Item: arg1, Options: arg2}
+	_, res0 = set.UnarchiveEndpoint(arg0, &request)
+	if res0 != nil {
+		return
+	}
+	return res0
+}
+
+func (set EndpointsSet) Aggregate(arg0 context.Context, arg1, arg2, arg3 string, arg4 *items.Filter, arg5 ...*items.AggregateOptions) (res0 map[string]interface{}, res1 error) {
+	request := AggregateRequest{
+		CollectionId: arg3,
+		EnvId:        arg2,
+		Filter:       arg4,
+		Options:      arg5,
+		SpaceId:      arg1,
+	}
+	response, res1 := set.AggregateEndpoint(arg0, &request)
+	if res1 != nil {
+		return
+	}
+	return response.(*AggregateResponse).Result, res1
+}
+
+func (set EndpointsSet) AggregatePublished(arg0 context.Context, arg1, arg2, arg3 string, arg4 *items.Filter, arg5 ...*items.AggregatePublishedOptions) (res0 map[string]interface{}, res1 error) {
+	request := AggregatePublishedRequest{
+		CollectionId: arg3,
+		EnvId:        arg2,
+		Filter:       arg4,
+		Options:      arg5,
+		SpaceId:      arg1,
+	}
+	response, res1 := set.AggregatePublishedEndpoint(arg0, &request)
+
+	if res1 != nil {
+		if e, ok := status.FromError(res1); ok {
+			res1 = errors.New(e.Message())
+		}
+		return
+	}
+	return response.(*AggregatePublishedResponse).Result, res1
+}
diff --git a/pkg/items/transport/endpoints.microgen.go b/pkg/items/transport/endpoints.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..5a6e8d5a678cd7180deca17a97f615fe7793ff6e
--- /dev/null
+++ b/pkg/items/transport/endpoints.microgen.go
@@ -0,0 +1,27 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transport
+
+import endpoint "github.com/go-kit/kit/endpoint"
+
+// EndpointsSet implements Items API and used for transport purposes.
+type EndpointsSet struct {
+	CreateEndpoint             endpoint.Endpoint
+	IntrospectEndpoint         endpoint.Endpoint
+	GetEndpoint                endpoint.Endpoint
+	FindEndpoint               endpoint.Endpoint
+	UpdateEndpoint             endpoint.Endpoint
+	DeleteEndpoint             endpoint.Endpoint
+	UndeleteEndpoint           endpoint.Endpoint
+	PublishEndpoint            endpoint.Endpoint
+	UnpublishEndpoint          endpoint.Endpoint
+	GetPublishedEndpoint       endpoint.Endpoint
+	FindPublishedEndpoint      endpoint.Endpoint
+	GetRevisionEndpoint        endpoint.Endpoint
+	ListRevisionsEndpoint      endpoint.Endpoint
+	ArchiveEndpoint            endpoint.Endpoint
+	FindArchivedEndpoint       endpoint.Endpoint
+	UnarchiveEndpoint          endpoint.Endpoint
+	AggregateEndpoint          endpoint.Endpoint
+	AggregatePublishedEndpoint endpoint.Endpoint
+}
diff --git a/pkg/items/transport/exchanges.microgen.go b/pkg/items/transport/exchanges.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..b601946f74837d41df38b07e3c5887ba8698b183
--- /dev/null
+++ b/pkg/items/transport/exchanges.microgen.go
@@ -0,0 +1,186 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transport
+
+import (
+	items "git.perx.ru/perxis/perxis-go/pkg/items"
+	schema "git.perx.ru/perxis/perxis-go/pkg/schema"
+)
+
+type (
+	CreateRequest struct {
+		Item *items.Item            `json:"item"`
+		Opts []*items.CreateOptions `json:"opts"` // This field was defined with ellipsis (...).
+	}
+	CreateResponse struct {
+		Created *items.Item `json:"created"`
+	}
+
+	IntrospectRequest struct {
+		Item *items.Item                `json:"item"`
+		Opts []*items.IntrospectOptions `json:"opts"` // This field was defined with ellipsis (...).
+	}
+	IntrospectResponse struct {
+		Item             *items.Item    `json:"item"`
+		Schema           *schema.Schema `json:"schema"`
+		ValidationErrors []error        `json:"validation_errors"`
+	}
+
+	GetRequest struct {
+		SpaceId      string              `json:"space_id"`
+		EnvId        string              `json:"env_id"`
+		CollectionId string              `json:"collection_id"`
+		ItemId       string              `json:"item_id"`
+		Options      []*items.GetOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	GetResponse struct {
+		Item *items.Item `json:"item"`
+	}
+
+	FindRequest struct {
+		SpaceId      string               `json:"space_id"`
+		EnvId        string               `json:"env_id"`
+		CollectionId string               `json:"collection_id"`
+		Filter       *items.Filter        `json:"filter"`
+		Options      []*items.FindOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	FindResponse struct {
+		Items []*items.Item `json:"items"`
+		Total int           `json:"total"`
+	}
+
+	UpdateRequest struct {
+		Item    *items.Item            `json:"item"`
+		Options []*items.UpdateOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	// Formal exchange type, please do not delete.
+	UpdateResponse struct{}
+
+	DeleteRequest struct {
+		SpaceId      string                 `json:"space_id"`
+		EnvId        string                 `json:"env_id"`
+		CollectionId string                 `json:"collection_id"`
+		ItemId       string                 `json:"item_id"`
+		Options      []*items.DeleteOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	// Formal exchange type, please do not delete.
+	DeleteResponse struct{}
+
+	UndeleteRequest struct {
+		SpaceId      string                   `json:"space_id"`
+		EnvId        string                   `json:"env_id"`
+		CollectionId string                   `json:"collection_id"`
+		ItemId       string                   `json:"item_id"`
+		Options      []*items.UndeleteOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	// Formal exchange type, please do not delete.
+	UndeleteResponse struct{}
+
+	PublishRequest struct {
+		Item    *items.Item             `json:"item"`
+		Options []*items.PublishOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	// Formal exchange type, please do not delete.
+	PublishResponse struct{}
+
+	UnpublishRequest struct {
+		Item    *items.Item               `json:"item"`
+		Options []*items.UnpublishOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	// Formal exchange type, please do not delete.
+	UnpublishResponse struct{}
+
+	GetPublishedRequest struct {
+		SpaceId      string                       `json:"space_id"`
+		EnvId        string                       `json:"env_id"`
+		CollectionId string                       `json:"collection_id"`
+		ItemId       string                       `json:"item_id"`
+		Options      []*items.GetPublishedOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	GetPublishedResponse struct {
+		Item *items.Item `json:"item"`
+	}
+
+	FindPublishedRequest struct {
+		SpaceId      string                        `json:"space_id"`
+		EnvId        string                        `json:"env_id"`
+		CollectionId string                        `json:"collection_id"`
+		Filter       *items.Filter                 `json:"filter"`
+		Options      []*items.FindPublishedOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	FindPublishedResponse struct {
+		Items []*items.Item `json:"items"`
+		Total int           `json:"total"`
+	}
+
+	GetRevisionRequest struct {
+		SpaceId      string                      `json:"space_id"`
+		EnvId        string                      `json:"env_id"`
+		CollectionId string                      `json:"collection_id"`
+		ItemId       string                      `json:"item_id"`
+		RevisionId   string                      `json:"revision_id"`
+		Options      []*items.GetRevisionOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	GetRevisionResponse struct {
+		Item *items.Item `json:"item"`
+	}
+
+	ListRevisionsRequest struct {
+		SpaceId      string                        `json:"space_id"`
+		EnvId        string                        `json:"env_id"`
+		CollectionId string                        `json:"collection_id"`
+		ItemId       string                        `json:"item_id"`
+		Options      []*items.ListRevisionsOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	ListRevisionsResponse struct {
+		Items []*items.Item `json:"items"`
+	}
+
+	ArchiveRequest struct {
+		Item    *items.Item             `json:"item"`
+		Options []*items.ArchiveOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	// Formal exchange type, please do not delete.
+	ArchiveResponse struct{}
+
+	FindArchivedRequest struct {
+		SpaceId      string                       `json:"space_id"`
+		EnvId        string                       `json:"env_id"`
+		CollectionId string                       `json:"collection_id"`
+		Filter       *items.Filter                `json:"filter"`
+		Options      []*items.FindArchivedOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	FindArchivedResponse struct {
+		Items []*items.Item `json:"items"`
+		Total int           `json:"total"`
+	}
+
+	UnarchiveRequest struct {
+		Item    *items.Item               `json:"item"`
+		Options []*items.UnarchiveOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	// Formal exchange type, please do not delete.
+	UnarchiveResponse struct{}
+
+	AggregateRequest struct {
+		SpaceId      string                    `json:"space_id"`
+		EnvId        string                    `json:"env_id"`
+		CollectionId string                    `json:"collection_id"`
+		Filter       *items.Filter             `json:"filter"`
+		Options      []*items.AggregateOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	AggregateResponse struct {
+		Result map[string]interface{} `json:"result"`
+	}
+
+	AggregatePublishedRequest struct {
+		SpaceId      string                             `json:"space_id"`
+		EnvId        string                             `json:"env_id"`
+		CollectionId string                             `json:"collection_id"`
+		Filter       *items.Filter                      `json:"filter"`
+		Options      []*items.AggregatePublishedOptions `json:"options"` // This field was defined with ellipsis (...).
+	}
+	AggregatePublishedResponse struct {
+		Result map[string]interface{} `json:"result"`
+	}
+)
diff --git a/pkg/items/transport/grpc/client.go b/pkg/items/transport/grpc/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..faea7cc6703746ba91b0af0e831431ffd76044fc
--- /dev/null
+++ b/pkg/items/transport/grpc/client.go
@@ -0,0 +1,34 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transportgrpc
+
+import (
+	grpcerr "git.perx.ru/perxis/perxis-go/pkg/errors/grpc"
+	transport "git.perx.ru/perxis/perxis-go/pkg/items/transport"
+	grpckit "github.com/go-kit/kit/transport/grpc"
+	grpc "google.golang.org/grpc"
+)
+
+func NewClient(conn *grpc.ClientConn, opts ...grpckit.ClientOption) transport.EndpointsSet {
+	c := NewGRPCClient(conn, "", opts...)
+	return transport.EndpointsSet{
+		CreateEndpoint:             grpcerr.ClientMiddleware(c.CreateEndpoint),
+		IntrospectEndpoint:         grpcerr.ClientMiddleware(c.IntrospectEndpoint),
+		GetEndpoint:                grpcerr.ClientMiddleware(c.GetEndpoint),
+		FindEndpoint:               grpcerr.ClientMiddleware(c.FindEndpoint),
+		UpdateEndpoint:             grpcerr.ClientMiddleware(c.UpdateEndpoint),
+		DeleteEndpoint:             grpcerr.ClientMiddleware(c.DeleteEndpoint),
+		UndeleteEndpoint:           grpcerr.ClientMiddleware(c.UndeleteEndpoint),
+		PublishEndpoint:            grpcerr.ClientMiddleware(c.PublishEndpoint),
+		UnpublishEndpoint:          grpcerr.ClientMiddleware(c.UnpublishEndpoint),
+		GetPublishedEndpoint:       grpcerr.ClientMiddleware(c.GetPublishedEndpoint),
+		FindPublishedEndpoint:      grpcerr.ClientMiddleware(c.FindPublishedEndpoint),
+		GetRevisionEndpoint:        grpcerr.ClientMiddleware(c.GetRevisionEndpoint),
+		ListRevisionsEndpoint:      grpcerr.ClientMiddleware(c.ListRevisionsEndpoint),
+		ArchiveEndpoint:            grpcerr.ClientMiddleware(c.ArchiveEndpoint),
+		FindArchivedEndpoint:       grpcerr.ClientMiddleware(c.FindArchivedEndpoint),
+		UnarchiveEndpoint:          grpcerr.ClientMiddleware(c.UnarchiveEndpoint),
+		AggregateEndpoint:          grpcerr.ClientMiddleware(c.AggregateEndpoint),
+		AggregatePublishedEndpoint: grpcerr.ClientMiddleware(c.AggregatePublishedEndpoint),
+	}
+}
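
An illustrative sketch of wiring the generated set of endpoints to a dialed connection; the address and credentials are assumptions for a local, insecure setup:

package main

import (
	"context"
	"log"

	transportgrpc "git.perx.ru/perxis/perxis-go/pkg/items/transport/grpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:9000", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The returned EndpointsSet exposes the Items methods over gRPC.
	cli := transportgrpc.NewClient(conn)

	itm, err := cli.Get(context.Background(), "space", "master", "articles", "item-id")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("item: %+v", itm)
}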
diff --git a/pkg/items/transport/grpc/client.microgen.go b/pkg/items/transport/grpc/client.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..a91c8d16495333a356ebeb3e48100c40e0f7bc91
--- /dev/null
+++ b/pkg/items/transport/grpc/client.microgen.go
@@ -0,0 +1,145 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transportgrpc
+
+import (
+	transport "git.perx.ru/perxis/perxis-go/pkg/items/transport"
+	pb "git.perx.ru/perxis/perxis-go/proto/items"
+	grpckit "github.com/go-kit/kit/transport/grpc"
+	empty "github.com/golang/protobuf/ptypes/empty"
+	grpc "google.golang.org/grpc"
+)
+
+func NewGRPCClient(conn *grpc.ClientConn, addr string, opts ...grpckit.ClientOption) transport.EndpointsSet {
+	if addr == "" {
+		addr = "content.items.Items"
+	}
+	return transport.EndpointsSet{
+		ArchiveEndpoint: grpckit.NewClient(
+			conn, addr, "Archive",
+			_Encode_Archive_Request,
+			_Decode_Archive_Response,
+			empty.Empty{},
+			opts...,
+		).Endpoint(),
+		CreateEndpoint: grpckit.NewClient(
+			conn, addr, "Create",
+			_Encode_Create_Request,
+			_Decode_Create_Response,
+			pb.CreateResponse{},
+			opts...,
+		).Endpoint(),
+		DeleteEndpoint: grpckit.NewClient(
+			conn, addr, "Delete",
+			_Encode_Delete_Request,
+			_Decode_Delete_Response,
+			empty.Empty{},
+			opts...,
+		).Endpoint(),
+		UndeleteEndpoint: grpckit.NewClient(
+			conn, addr, "Undelete",
+			_Encode_Undelete_Request,
+			_Decode_Undelete_Response,
+			empty.Empty{},
+			opts...,
+		).Endpoint(),
+		FindArchivedEndpoint: grpckit.NewClient(
+			conn, addr, "FindArchived",
+			_Encode_FindArchived_Request,
+			_Decode_FindArchived_Response,
+			pb.FindArchivedResponse{},
+			opts...,
+		).Endpoint(),
+		FindEndpoint: grpckit.NewClient(
+			conn, addr, "Find",
+			_Encode_Find_Request,
+			_Decode_Find_Response,
+			pb.FindResponse{},
+			opts...,
+		).Endpoint(),
+		FindPublishedEndpoint: grpckit.NewClient(
+			conn, addr, "FindPublished",
+			_Encode_FindPublished_Request,
+			_Decode_FindPublished_Response,
+			pb.FindPublishedResponse{},
+			opts...,
+		).Endpoint(),
+		GetEndpoint: grpckit.NewClient(
+			conn, addr, "Get",
+			_Encode_Get_Request,
+			_Decode_Get_Response,
+			pb.GetResponse{},
+			opts...,
+		).Endpoint(),
+		GetPublishedEndpoint: grpckit.NewClient(
+			conn, addr, "GetPublished",
+			_Encode_GetPublished_Request,
+			_Decode_GetPublished_Response,
+			pb.GetPublishedResponse{},
+			opts...,
+		).Endpoint(),
+		GetRevisionEndpoint: grpckit.NewClient(
+			conn, addr, "GetRevision",
+			_Encode_GetRevision_Request,
+			_Decode_GetRevision_Response,
+			pb.GetRevisionResponse{},
+			opts...,
+		).Endpoint(),
+		IntrospectEndpoint: grpckit.NewClient(
+			conn, addr, "Introspect",
+			_Encode_Introspect_Request,
+			_Decode_Introspect_Response,
+			pb.IntrospectResponse{},
+			opts...,
+		).Endpoint(),
+		ListRevisionsEndpoint: grpckit.NewClient(
+			conn, addr, "ListRevisions",
+			_Encode_ListRevisions_Request,
+			_Decode_ListRevisions_Response,
+			pb.ListRevisionsResponse{},
+			opts...,
+		).Endpoint(),
+		PublishEndpoint: grpckit.NewClient(
+			conn, addr, "Publish",
+			_Encode_Publish_Request,
+			_Decode_Publish_Response,
+			empty.Empty{},
+			opts...,
+		).Endpoint(),
+		UnarchiveEndpoint: grpckit.NewClient(
+			conn, addr, "Unarchive",
+			_Encode_Unarchive_Request,
+			_Decode_Unarchive_Response,
+			empty.Empty{},
+			opts...,
+		).Endpoint(),
+		UnpublishEndpoint: grpckit.NewClient(
+			conn, addr, "Unpublish",
+			_Encode_Unpublish_Request,
+			_Decode_Unpublish_Response,
+			empty.Empty{},
+			opts...,
+		).Endpoint(),
+		UpdateEndpoint: grpckit.NewClient(
+			conn, addr, "Update",
+			_Encode_Update_Request,
+			_Decode_Update_Response,
+			empty.Empty{},
+			opts...,
+		).Endpoint(),
+		AggregateEndpoint: grpckit.NewClient(
+			conn, addr, "Aggregate",
+			_Encode_Aggregate_Request,
+			_Decode_Aggregate_Response,
+			pb.AggregateResponse{},
+			opts...,
+		).Endpoint(),
+		AggregatePublishedEndpoint: grpckit.NewClient(
+			conn, addr, "AggregatePublished",
+			_Encode_AggregatePublished_Request,
+			_Decode_AggregatePublished_Response,
+			pb.AggregatePublishedResponse{},
+			opts...,
+		).Endpoint(),
+	}
+}
diff --git a/pkg/items/transport/grpc/protobuf_endpoint_converters.microgen.go b/pkg/items/transport/grpc/protobuf_endpoint_converters.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..69a696df329a6e28e5912af8815df9852c0c504c
--- /dev/null
+++ b/pkg/items/transport/grpc/protobuf_endpoint_converters.microgen.go
@@ -0,0 +1,1010 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+// Please, do not change functions names!
+package transportgrpc
+
+import (
+	"context"
+	"errors"
+
+	transport "git.perx.ru/perxis/perxis-go/pkg/items/transport"
+	pb "git.perx.ru/perxis/perxis-go/proto/items"
+	empty "github.com/golang/protobuf/ptypes/empty"
+)
+
+func _Encode_Create_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil CreateRequest")
+	}
+	req := request.(*transport.CreateRequest)
+	reqItem, err := PtrItemToProto(req.Item)
+	if err != nil {
+		return nil, err
+	}
+
+	opts, err := CreateOptionsToProto(req.Opts)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.CreateRequest{
+		Item:    reqItem,
+		Options: opts,
+	}, nil
+}
+
+func _Encode_Get_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil GetRequest")
+	}
+	req := request.(*transport.GetRequest)
+	return &pb.GetRequest{
+		CollectionId: req.CollectionId,
+		EnvId:        req.EnvId,
+		ItemId:       req.ItemId,
+		SpaceId:      req.SpaceId,
+	}, nil
+}
+
+func _Encode_Find_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil FindRequest")
+	}
+	req := request.(*transport.FindRequest)
+	reqFilter, err := PtrFilterToProto(req.Filter)
+	if err != nil {
+		return nil, err
+	}
+	reqOptions, err := ElPtrFindOptionsToProto(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.FindRequest{
+		CollectionId: req.CollectionId,
+		EnvId:        req.EnvId,
+		SpaceId:      req.SpaceId,
+		Options:      reqOptions,
+		Filter:       reqFilter,
+	}, nil
+}
+
+func _Encode_Update_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil UpdateRequest")
+	}
+	req := request.(*transport.UpdateRequest)
+	reqItem, err := PtrItemToProto(req.Item)
+	if err != nil {
+		return nil, err
+	}
+
+	opts, err := UpdateOptionsToProto(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.UpdateRequest{
+		Item:    reqItem,
+		Options: opts,
+	}, nil
+}
+
+func _Encode_Delete_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil DeleteRequest")
+	}
+	req := request.(*transport.DeleteRequest)
+
+	opts, err := DeleteOptionsToProto(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.DeleteRequest{
+		CollectionId: req.CollectionId,
+		EnvId:        req.EnvId,
+		ItemId:       req.ItemId,
+		SpaceId:      req.SpaceId,
+		Options:      opts,
+	}, nil
+}
+
+func _Encode_Undelete_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil UndeleteRequest")
+	}
+	req := request.(*transport.UndeleteRequest)
+	return &pb.UndeleteRequest{
+		CollectionId: req.CollectionId,
+		EnvId:        req.EnvId,
+		ItemId:       req.ItemId,
+		SpaceId:      req.SpaceId,
+	}, nil
+}
+
+func _Encode_Publish_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil PublishRequest")
+	}
+	req := request.(*transport.PublishRequest)
+	reqItem, err := PtrItemToProto(req.Item)
+	if err != nil {
+		return nil, err
+	}
+
+	opts, err := PublishOptionsToProto(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.PublishRequest{
+		Item:    reqItem,
+		Options: opts,
+	}, nil
+}
+
+func _Encode_Unpublish_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil UnpublishRequest")
+	}
+	req := request.(*transport.UnpublishRequest)
+	reqItem, err := PtrItemToProto(req.Item)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.UnpublishRequest{Item: reqItem}, nil
+}
+
+func _Encode_GetPublished_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil GetPublishedRequest")
+	}
+	req := request.(*transport.GetPublishedRequest)
+	reqOptions, err := ElPtrGetPublishedOptionsToProto(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.GetPublishedRequest{
+		CollectionId: req.CollectionId,
+		EnvId:        req.EnvId,
+		ItemId:       req.ItemId,
+		SpaceId:      req.SpaceId,
+		Options:      reqOptions,
+	}, nil
+}
+
+func _Encode_FindPublished_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil FindPublishedRequest")
+	}
+	req := request.(*transport.FindPublishedRequest)
+	reqFilter, err := PtrFilterToProto(req.Filter)
+	if err != nil {
+		return nil, err
+	}
+	reqOptions, err := ElPtrFindPublishedOptionsToProto(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.FindPublishedRequest{
+		CollectionId: req.CollectionId,
+		EnvId:        req.EnvId,
+		Options:      reqOptions,
+		Filter:       reqFilter,
+		SpaceId:      req.SpaceId,
+	}, nil
+}
+
+func _Encode_GetRevision_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil GetRevisionRequest")
+	}
+	req := request.(*transport.GetRevisionRequest)
+	return &pb.GetRevisionRequest{
+		CollectionId: req.CollectionId,
+		EnvId:        req.EnvId,
+		ItemId:       req.ItemId,
+		RevisionId:   req.RevisionId,
+		SpaceId:      req.SpaceId,
+	}, nil
+}
+
+func _Encode_ListRevisions_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil ListRevisionsRequest")
+	}
+	req := request.(*transport.ListRevisionsRequest)
+	reqOptions, err := ElPtrListRevisionsOptionsToProto(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.ListRevisionsRequest{
+		CollectionId: req.CollectionId,
+		EnvId:        req.EnvId,
+		ItemId:       req.ItemId,
+		SpaceId:      req.SpaceId,
+		Options:      reqOptions,
+	}, nil
+}
+
+func _Encode_Archive_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil ArchiveRequest")
+	}
+	req := request.(*transport.ArchiveRequest)
+	reqItem, err := PtrItemToProto(req.Item)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.ArchiveRequest{Item: reqItem}, nil
+}
+
+func _Encode_FindArchived_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil FindArchivedRequest")
+	}
+	req := request.(*transport.FindArchivedRequest)
+	reqFilter, err := PtrFilterToProto(req.Filter)
+	if err != nil {
+		return nil, err
+	}
+	reqOptions, err := ElPtrFindArchivedOptionsToProto(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.FindArchivedRequest{
+		CollectionId: req.CollectionId,
+		EnvId:        req.EnvId,
+		Options:      reqOptions,
+		Filter:       reqFilter,
+		SpaceId:      req.SpaceId,
+	}, nil
+}
+
+func _Encode_Unarchive_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil UnarchiveRequest")
+	}
+	req := request.(*transport.UnarchiveRequest)
+	reqItem, err := PtrItemToProto(req.Item)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.UnarchiveRequest{Item: reqItem}, nil
+}
+
+func _Encode_Aggregate_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil AggregateRequest")
+	}
+	req := request.(*transport.AggregateRequest)
+	reqFilter, err := PtrFilterToProto(req.Filter)
+	if err != nil {
+		return nil, err
+	}
+	reqOptions, err := ElPtrAggregateOptionsToProto(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.AggregateRequest{
+		CollectionId: req.CollectionId,
+		EnvId:        req.EnvId,
+		SpaceId:      req.SpaceId,
+		Options:      reqOptions,
+		Filter:       reqFilter,
+	}, nil
+}
+
+func _Encode_AggregatePublished_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil AggregatePublishedRequest")
+	}
+	req := request.(*transport.AggregatePublishedRequest)
+	reqFilter, err := PtrFilterToProto(req.Filter)
+	if err != nil {
+		return nil, err
+	}
+	reqOptions, err := ElPtrAggregatePublishedOptionsToProto(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.AggregatePublishedRequest{
+		CollectionId: req.CollectionId,
+		EnvId:        req.EnvId,
+		SpaceId:      req.SpaceId,
+		Options:      reqOptions,
+		Filter:       reqFilter,
+	}, nil
+}
+
+func _Encode_Create_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil CreateResponse")
+	}
+	resp := response.(*transport.CreateResponse)
+	respCreated, err := PtrItemToProto(resp.Created)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.CreateResponse{Created: respCreated}, nil
+}
+
+func _Encode_Get_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil GetResponse")
+	}
+	resp := response.(*transport.GetResponse)
+	respItem, err := PtrItemToProto(resp.Item)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.GetResponse{Item: respItem}, nil
+}
+
+func _Encode_Find_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil FindResponse")
+	}
+	resp := response.(*transport.FindResponse)
+	respItems, err := ListPtrItemToProto(resp.Items)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.FindResponse{
+		Items: respItems,
+		Total: int32(resp.Total),
+	}, nil
+}
+
+func _Encode_Update_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Encode_Delete_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Encode_Undelete_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Encode_Publish_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Encode_Unpublish_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Encode_GetPublished_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil GetPublishedResponse")
+	}
+	resp := response.(*transport.GetPublishedResponse)
+	respItem, err := PtrItemToProto(resp.Item)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.GetPublishedResponse{Item: respItem}, nil
+}
+
+func _Encode_FindPublished_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil FindPublishedResponse")
+	}
+	resp := response.(*transport.FindPublishedResponse)
+	respItems, err := ListPtrItemToProto(resp.Items)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.FindPublishedResponse{
+		Items: respItems,
+		Total: int32(resp.Total),
+	}, nil
+}
+
+func _Encode_GetRevision_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil GetRevisionResponse")
+	}
+	resp := response.(*transport.GetRevisionResponse)
+	respItem, err := PtrItemToProto(resp.Item)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.GetRevisionResponse{Item: respItem}, nil
+}
+
+func _Encode_ListRevisions_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil ListRevisionsResponse")
+	}
+	resp := response.(*transport.ListRevisionsResponse)
+	respItems, err := ListPtrItemToProto(resp.Items)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.ListRevisionsResponse{Items: respItems}, nil
+}
+
+func _Encode_Archive_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Encode_FindArchived_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil FindArchivedResponse")
+	}
+	resp := response.(*transport.FindArchivedResponse)
+	respItems, err := ListPtrItemToProto(resp.Items)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.FindArchivedResponse{
+		Items: respItems,
+		Total: int32(resp.Total),
+	}, nil
+}
+
+func _Encode_Unarchive_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Encode_Aggregate_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil AggregateResponse")
+	}
+	resp := response.(*transport.AggregateResponse)
+	result, err := MapStringInterfaceToProto(resp.Result)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.AggregateResponse{
+		Result: result,
+	}, nil
+}
+
+func _Encode_AggregatePublished_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil AggregateResponse")
+	}
+	resp := response.(*transport.AggregatePublishedResponse)
+	result, err := MapStringInterfaceToProto(resp.Result)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.AggregatePublishedResponse{
+		Result: result,
+	}, nil
+}
+
+func _Decode_Create_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil CreateRequest")
+	}
+	req := request.(*pb.CreateRequest)
+	reqItem, err := ProtoToPtrItem(req.Item)
+	if err != nil {
+		return nil, err
+	}
+
+	opts, err := ProtoToCreateOptions(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.CreateRequest{
+		Item: reqItem,
+		Opts: opts,
+	}, nil
+}
+
+func _Decode_Get_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil GetRequest")
+	}
+	req := request.(*pb.GetRequest)
+	return &transport.GetRequest{
+		CollectionId: string(req.CollectionId),
+		EnvId:        string(req.EnvId),
+		ItemId:       string(req.ItemId),
+		SpaceId:      string(req.SpaceId),
+	}, nil
+}
+
+func _Decode_Aggregate_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil AggregateRequest")
+	}
+	req := request.(*pb.AggregateRequest)
+	reqFilter, err := ProtoToPtrFilter(req.Filter)
+	if err != nil {
+		return nil, err
+	}
+	reqOptions, err := ProtoToPtrServicesAggregateOptions(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.AggregateRequest{
+		SpaceId:      string(req.SpaceId),
+		EnvId:        string(req.EnvId),
+		CollectionId: string(req.CollectionId),
+		Filter:       reqFilter,
+		Options:      reqOptions,
+	}, nil
+}
+
+func _Decode_AggregatePublished_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil AggregatePublishedRequest")
+	}
+	req := request.(*pb.AggregatePublishedRequest)
+	reqFilter, err := ProtoToPtrFilter(req.Filter)
+	if err != nil {
+		return nil, err
+	}
+	reqOptions, err := ProtoToPtrServicesAggregatePublishedOptions(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.AggregatePublishedRequest{
+		SpaceId:      string(req.SpaceId),
+		EnvId:        string(req.EnvId),
+		CollectionId: string(req.CollectionId),
+		Filter:       reqFilter,
+		Options:      reqOptions,
+	}, nil
+}
+
+func _Decode_Find_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil FindRequest")
+	}
+	req := request.(*pb.FindRequest)
+	reqFilter, err := ProtoToPtrFilter(req.Filter)
+	if err != nil {
+		return nil, err
+	}
+	reqOptions, err := ProtoToElPtrFindOptions(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.FindRequest{
+		CollectionId: string(req.CollectionId),
+		EnvId:        string(req.EnvId),
+		Options:      reqOptions,
+		Filter:       reqFilter,
+		SpaceId:      string(req.SpaceId),
+	}, nil
+}
+
+func _Decode_Update_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil UpdateRequest")
+	}
+	req := request.(*pb.UpdateRequest)
+	reqItem, err := ProtoToPtrItem(req.Item)
+	if err != nil {
+		return nil, err
+	}
+
+	opts, err := ProtoToUpdateOptions(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.UpdateRequest{
+		Item:    reqItem,
+		Options: opts,
+	}, nil
+}
+
+func _Decode_Delete_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil DeleteRequest")
+	}
+	req := request.(*pb.DeleteRequest)
+
+	opts, err := ProtoToDeleteOptions(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.DeleteRequest{
+		CollectionId: string(req.CollectionId),
+		EnvId:        string(req.EnvId),
+		ItemId:       string(req.ItemId),
+		SpaceId:      string(req.SpaceId),
+		Options:      opts,
+	}, nil
+}
+
+func _Decode_Undelete_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil UndeleteRequest")
+	}
+	req := request.(*pb.UndeleteRequest)
+	return &transport.UndeleteRequest{
+		CollectionId: string(req.CollectionId),
+		EnvId:        string(req.EnvId),
+		ItemId:       string(req.ItemId),
+		SpaceId:      string(req.SpaceId),
+	}, nil
+}
+
+func _Decode_Publish_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil PublishRequest")
+	}
+	req := request.(*pb.PublishRequest)
+	reqItem, err := ProtoToPtrItem(req.Item)
+	if err != nil {
+		return nil, err
+	}
+
+	opts, err := ProtoToPublishOptions(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.PublishRequest{
+		Item:    reqItem,
+		Options: opts,
+	}, nil
+}
+
+func _Decode_Unpublish_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil UnpublishRequest")
+	}
+	req := request.(*pb.UnpublishRequest)
+	reqItem, err := ProtoToPtrItem(req.Item)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.UnpublishRequest{Item: reqItem}, nil
+}
+
+func _Decode_GetPublished_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil GetPublishedRequest")
+	}
+	req := request.(*pb.GetPublishedRequest)
+	reqOptions, err := ProtoToElPtrGetPublishedOptions(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.GetPublishedRequest{
+		CollectionId: string(req.CollectionId),
+		EnvId:        string(req.EnvId),
+		ItemId:       string(req.ItemId),
+		SpaceId:      string(req.SpaceId),
+		Options:      reqOptions,
+	}, nil
+}
+
+func _Decode_FindPublished_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil FindPublishedRequest")
+	}
+	req := request.(*pb.FindPublishedRequest)
+	reqFilter, err := ProtoToPtrFilter(req.Filter)
+	if err != nil {
+		return nil, err
+	}
+	reqOptions, err := ProtoToElPtrFindPublishedOptions(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.FindPublishedRequest{
+		CollectionId: string(req.CollectionId),
+		EnvId:        string(req.EnvId),
+		Options:      reqOptions,
+		Filter:       reqFilter,
+		SpaceId:      string(req.SpaceId),
+	}, nil
+}
+
+func _Decode_GetRevision_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil GetRevisionRequest")
+	}
+	req := request.(*pb.GetRevisionRequest)
+	return &transport.GetRevisionRequest{
+		CollectionId: string(req.CollectionId),
+		EnvId:        string(req.EnvId),
+		ItemId:       string(req.ItemId),
+		RevisionId:   string(req.RevisionId),
+		SpaceId:      string(req.SpaceId),
+	}, nil
+}
+
+func _Decode_ListRevisions_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil ListRevisionsRequest")
+	}
+	req := request.(*pb.ListRevisionsRequest)
+	reqOptions, err := ProtoToElPtrListRevisionsOptions(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.ListRevisionsRequest{
+		CollectionId: string(req.CollectionId),
+		EnvId:        string(req.EnvId),
+		ItemId:       string(req.ItemId),
+		SpaceId:      string(req.SpaceId),
+		Options:      reqOptions,
+	}, nil
+}
+
+func _Decode_Archive_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil ArchiveRequest")
+	}
+	req := request.(*pb.ArchiveRequest)
+	reqItem, err := ProtoToPtrItem(req.Item)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.ArchiveRequest{Item: reqItem}, nil
+}
+
+func _Decode_FindArchived_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil FindArchivedRequest")
+	}
+	req := request.(*pb.FindArchivedRequest)
+	reqFilter, err := ProtoToPtrFilter(req.Filter)
+	if err != nil {
+		return nil, err
+	}
+	reqOptions, err := ProtoToElPtrFindArchivedOptions(req.Options)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.FindArchivedRequest{
+		CollectionId: string(req.CollectionId),
+		EnvId:        string(req.EnvId),
+		Options:      reqOptions,
+		Filter:       reqFilter,
+		SpaceId:      string(req.SpaceId),
+	}, nil
+}
+
+func _Decode_Unarchive_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil UnarchiveRequest")
+	}
+	req := request.(*pb.UnarchiveRequest)
+	reqItem, err := ProtoToPtrItem(req.Item)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.UnarchiveRequest{Item: reqItem}, nil
+}
+
+func _Decode_Create_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil CreateResponse")
+	}
+	resp := response.(*pb.CreateResponse)
+	respCreated, err := ProtoToPtrItem(resp.Created)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.CreateResponse{Created: respCreated}, nil
+}
+
+func _Decode_Get_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil GetResponse")
+	}
+	resp := response.(*pb.GetResponse)
+	respItem, err := ProtoToPtrItem(resp.Item)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.GetResponse{Item: respItem}, nil
+}
+
+func _Decode_Find_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil FindResponse")
+	}
+	resp := response.(*pb.FindResponse)
+	respItems, err := ProtoToListPtrItem(resp.Items)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.FindResponse{
+		Items: respItems,
+		Total: int(resp.Total),
+	}, nil
+}
+
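+// The response decoders below correspond to RPCs that return google.protobuf.Empty:
+// the wire payload carries no data, so the decoded value is simply an empty message.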
+func _Decode_Update_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Decode_Delete_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Decode_Undelete_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Decode_Publish_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Decode_Unpublish_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Decode_GetPublished_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil GetPublishedResponse")
+	}
+	resp := response.(*pb.GetPublishedResponse)
+	respItem, err := ProtoToPtrItem(resp.Item)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.GetPublishedResponse{Item: respItem}, nil
+}
+
+func _Decode_FindPublished_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil FindPublishedResponse")
+	}
+	resp := response.(*pb.FindPublishedResponse)
+	respItems, err := ProtoToListPtrItem(resp.Items)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.FindPublishedResponse{
+		Items: respItems,
+		Total: int(resp.Total),
+	}, nil
+}
+
+func _Decode_GetRevision_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil GetRevisionResponse")
+	}
+	resp := response.(*pb.GetRevisionResponse)
+	respItem, err := ProtoToPtrItem(resp.Item)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.GetRevisionResponse{Item: respItem}, nil
+}
+
+func _Decode_ListRevisions_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil ListRevisionsResponse")
+	}
+	resp := response.(*pb.ListRevisionsResponse)
+	respItems, err := ProtoToListPtrItem(resp.Items)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.ListRevisionsResponse{Items: respItems}, nil
+}
+
+func _Decode_Archive_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Decode_FindArchived_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil FindArchivedResponse")
+	}
+	resp := response.(*pb.FindArchivedResponse)
+	respItems, err := ProtoToListPtrItem(resp.Items)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.FindArchivedResponse{
+		Items: respItems,
+		Total: int(resp.Total),
+	}, nil
+}
+
+func _Decode_Unarchive_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	return &empty.Empty{}, nil
+}
+
+func _Encode_Introspect_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil IntrospectRequest")
+	}
+	req := request.(*transport.IntrospectRequest)
+	reqItem, err := PtrItemToProto(req.Item)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.IntrospectRequest{
+		Item: reqItem,
+	}, nil
+}
+
+func _Encode_Introspect_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil IntrospectResponse")
+	}
+	resp := response.(*transport.IntrospectResponse)
+	respItm, err := PtrItemToProto(resp.Item)
+	if err != nil {
+		return nil, err
+	}
+	respSch, err := PtrSchemaSchemaToProto(resp.Schema)
+	if err != nil {
+		return nil, err
+	}
+	respErrors, err := ValidationErrorsToProto(resp.ValidationErrors)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.IntrospectResponse{
+		Item:             respItm,
+		Schema:           respSch,
+		ValidationErrors: respErrors,
+	}, nil
+}
+
+func _Decode_Introspect_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil IntrospectRequest")
+	}
+	req := request.(*pb.IntrospectRequest)
+	reqItem, err := ProtoToPtrItem(req.Item)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.IntrospectRequest{
+		Item: reqItem,
+	}, nil
+}
+
+func _Decode_Introspect_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil IntrospectResponse")
+	}
+	resp := response.(*pb.IntrospectResponse)
+	respItm, err := ProtoToPtrItem(resp.Item)
+	if err != nil {
+		return nil, err
+	}
+	respSch, err := ProtoToPtrSchemaSchema(resp.Schema)
+	if err != nil {
+		return nil, err
+	}
+	respErrs, err := ProtoToValidationErrors(resp.ValidationErrors)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.IntrospectResponse{
+		Item:             respItm,
+		Schema:           respSch,
+		ValidationErrors: respErrs,
+	}, nil
+}
+
+func _Decode_Aggregate_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil AggregateResponse")
+	}
+	resp := response.(*pb.AggregateResponse)
+	result, err := ProtoToMapStringInterface(resp.Result)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.AggregateResponse{
+		Result: result,
+	}, nil
+}
+
+func _Decode_AggregatePublished_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil AggregatePublishedResponse")
+	}
+	resp := response.(*pb.AggregatePublishedResponse)
+	result, err := ProtoToMapStringInterface(resp.Result)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.AggregatePublishedResponse{
+		Result: result,
+	}, nil
+}
diff --git a/pkg/items/transport/grpc/protobuf_type_converters.microgen.go b/pkg/items/transport/grpc/protobuf_type_converters.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..7eae996594e266ddfc712183bd3e1d7c4a39c78a
--- /dev/null
+++ b/pkg/items/transport/grpc/protobuf_type_converters.microgen.go
@@ -0,0 +1,627 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+// It is better for you if you do not change functions names!
+// This file will never be overwritten.
+package transportgrpc
+
+import (
+	"fmt"
+
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/filter"
+	service "git.perx.ru/perxis/perxis-go/pkg/items"
+	"git.perx.ru/perxis/perxis-go/pkg/options"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	pbcommon "git.perx.ru/perxis/perxis-go/proto/common"
+	pb "git.perx.ru/perxis/perxis-go/proto/items"
+	jsoniter "github.com/json-iterator/go"
+	"google.golang.org/protobuf/types/known/structpb"
+)
+
+func MapStringInterfaceToProto(data map[string]interface{}) (*structpb.Struct, error) {
+	if data == nil {
+		return nil, nil
+	}
+	return structpb.NewStruct(data)
+}
+
+func ProtoToMapStringInterface(protoData *structpb.Struct) (map[string]interface{}, error) {
+	if protoData == nil {
+		return nil, nil
+	}
+	return protoData.AsMap(), nil
+}
+
+func MapStringMapStringInterfaceToProto(translations map[string]map[string]interface{}) (map[string]*structpb.Struct, error) {
+	if translations == nil {
+		return nil, nil
+	}
+	res := make(map[string]*structpb.Struct, len(translations))
+	for k, v := range translations {
+		s, err := MapStringInterfaceToProto(v)
+		if err != nil {
+			return nil, err
+		}
+		res[k] = s
+	}
+	return res, nil
+}
+
+func PtrPermissionsToProto(permissions *service.Permissions) (*pb.Permissions, error) {
+	if permissions == nil {
+		return nil, nil
+	}
+
+	return &pb.Permissions{
+			Edit:       permissions.Edit,
+			Archive:    permissions.Archive,
+			Publish:    permissions.Publish,
+			SoftDelete: permissions.SoftDelete,
+			HardDelete: permissions.HardDelete,
+		},
+		nil
+}
+
+func ProtoToPtrPermissions(protoPermissions *pb.Permissions) (*service.Permissions, error) {
+	if protoPermissions == nil {
+		return nil, nil
+	}
+
+	return &service.Permissions{
+			Edit:       protoPermissions.Edit,
+			Archive:    protoPermissions.Archive,
+			Publish:    protoPermissions.Publish,
+			SoftDelete: protoPermissions.SoftDelete,
+			HardDelete: protoPermissions.HardDelete,
+		},
+		nil
+}
+
+func ProtoToMapStringMapStringInterface(protoTranslations map[string]*structpb.Struct) (map[string]map[string]interface{}, error) {
+	if protoTranslations == nil {
+		return nil, nil
+	}
+	res := make(map[string]map[string]interface{}, len(protoTranslations))
+	for k, v := range protoTranslations {
+		res[k], _ = ProtoToMapStringInterface(v)
+	}
+	return res, nil
+}
+
+func PtrItemToProto(item *service.Item) (*pb.Item, error) {
+	return service.ItemToProto(item), nil
+}
+
+func ProtoToPtrItem(protoItem *pb.Item) (*service.Item, error) {
+	return service.ItemFromProto(protoItem), nil
+}
+
+func PtrFilterToProto(filter *service.Filter) (*pb.Filter, error) {
+	if filter == nil {
+		return nil, nil
+	}
+
+	dt := make([]*pbcommon.Filter, 0, len(filter.Data))
+	for _, f := range filter.Data {
+		pf := &pbcommon.Filter{
+			Op:    string(f.Op),
+			Field: f.Field,
+		}
+
+		val, err := structpb.NewValue(f.Value)
+		if err != nil {
+			return nil, err
+		}
+		pf.Value = val
+		dt = append(dt, pf)
+	}
+
+	return &pb.Filter{
+		Id:   filter.ID,
+		Data: dt,
+		Q:    filter.Q,
+	}, nil
+}
+
+func ProtoToPtrFilter(protoFilter *pb.Filter) (*service.Filter, error) {
+	if protoFilter == nil {
+		return nil, nil
+	}
+
+	dt := make([]*filter.Filter, 0, len(protoFilter.Data))
+	for _, pf := range protoFilter.Data {
+
+		f := &filter.Filter{
+			Op:    filter.Op(pf.Op),
+			Field: pf.Field,
+			Value: pf.Value.AsInterface(),
+		}
+
+		dt = append(dt, f)
+	}
+
+	return &service.Filter{
+		ID:   protoFilter.Id,
+		Data: dt,
+		Q:    protoFilter.Q,
+	}, nil
+}
+
+func PtrServicesFindOptionsToProto(opts *options.FindOptions) (*pbcommon.FindOptions, error) {
+	if opts == nil {
+		return nil, nil
+	}
+	return &pbcommon.FindOptions{
+		Sort:          opts.Sort,
+		PageNum:       int32(opts.PageNum),
+		PageSize:      int32(opts.PageSize),
+		Fields:        opts.Fields,
+		ExcludeFields: opts.ExcludeFields,
+	}, nil
+}
+
+func ProtoToPtrServicesFindOptions(protoOpts *pbcommon.FindOptions) (*options.FindOptions, error) {
+	if protoOpts == nil {
+		return nil, nil
+	}
+	return &options.FindOptions{
+		SortOptions: options.SortOptions{
+			Sort: protoOpts.Sort,
+		},
+		PaginationOptions: options.PaginationOptions{
+			PageNum:  int(protoOpts.PageNum),
+			PageSize: int(protoOpts.PageSize),
+		},
+		FieldOptions: options.FieldOptions{
+			Fields:        protoOpts.Fields,
+			ExcludeFields: protoOpts.ExcludeFields,
+		},
+	}, nil
+}
+
+func ListPtrItemToProto(items []*service.Item) ([]*pb.Item, error) {
+	protoItems := make([]*pb.Item, 0, len(items))
+	for _, itm := range items {
+		pi, err := PtrItemToProto(itm)
+		if err != nil {
+			return nil, err
+		}
+		protoItems = append(protoItems, pi)
+	}
+	return protoItems, nil
+}
+
+func ProtoToListPtrItem(protoItems []*pb.Item) ([]*service.Item, error) {
+	items := make([]*service.Item, 0, len(protoItems))
+	for _, itm := range protoItems {
+		pi, err := ProtoToPtrItem(itm)
+		if err != nil {
+			return nil, err
+		}
+		items = append(items, pi)
+	}
+	return items, nil
+}
+
+func ProtoToCreateOptions(protoOptions *pb.CreateOptions) ([]*service.CreateOptions, error) {
+	if protoOptions == nil {
+		return nil, nil
+	}
+	return []*service.CreateOptions{
+		{UpdateAttrs: protoOptions.UpdateAttrs},
+	}, nil
+}
+
+func CreateOptionsToProto(options []*service.CreateOptions) (*pb.CreateOptions, error) {
+	if options == nil {
+		return nil, nil
+	}
+
+	opts := service.MergeCreateOptions(options...)
+
+	return &pb.CreateOptions{
+		UpdateAttrs: opts.UpdateAttrs,
+	}, nil
+}
+
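+// The converters below are placeholders for option types that currently have no
+// protobuf mapping; they panic if called (see the TODO markers).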
+func ElPtrGetOptionsToProto() {
+	panic("function not provided") // TODO: provide converter
+}
+
+func ProtoToElPtrGetOptions() {
+	panic("function not provided") // TODO: provide converter
+}
+
+func ElPtrFindOptionsToProto(options []*service.FindOptions) (*pb.FindOptions, error) {
+	if options == nil {
+		return nil, nil
+	}
+
+	opts := service.MergeFindOptions(options...)
+
+	var err error
+
+	fo := &pb.FindOptions{
+		Deleted:   opts.Deleted,
+		Regular:   opts.Regular,
+		Hidden:    opts.Hidden,
+		Templates: opts.Templates,
+	}
+
+	fo.Options, err = PtrServicesFindOptionsToProto(&opts.FindOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	return fo, nil
+}
+
+func ProtoToElPtrFindOptions(protoOptions *pb.FindOptions) ([]*service.FindOptions, error) {
+	if protoOptions == nil {
+		return nil, nil
+	}
+
+	var err error
+	fo := &service.FindOptions{
+		Deleted:   protoOptions.Deleted,
+		Regular:   protoOptions.Regular,
+		Hidden:    protoOptions.Hidden,
+		Templates: protoOptions.Templates,
+	}
+
+	o, err := ProtoToPtrServicesFindOptions(protoOptions.Options)
+	if err != nil {
+		return nil, err
+	}
+	if o != nil {
+		fo.FindOptions = *o
+	}
+
+	return []*service.FindOptions{fo}, nil
+}
+
+func ProtoToUpdateOptions(protoOptions *pb.UpdateOptions) ([]*service.UpdateOptions, error) {
+	if protoOptions == nil {
+		return nil, nil
+	}
+	return []*service.UpdateOptions{
+		{UpdateAttrs: protoOptions.UpdateAttrs},
+	}, nil
+}
+
+func UpdateOptionsToProto(options []*service.UpdateOptions) (*pb.UpdateOptions, error) {
+	if options == nil {
+		return nil, nil
+	}
+
+	opts := service.MergeUpdateOptions(options...)
+
+	return &pb.UpdateOptions{
+		UpdateAttrs: opts.UpdateAttrs,
+	}, nil
+}
+
+func ProtoToDeleteOptions(protoOptions *pb.DeleteOptions) ([]*service.DeleteOptions, error) {
+	if protoOptions == nil {
+		return nil, nil
+	}
+	return []*service.DeleteOptions{
+		{Erase: protoOptions.Erase},
+	}, nil
+}
+
+func DeleteOptionsToProto(options []*service.DeleteOptions) (*pb.DeleteOptions, error) {
+	if options == nil {
+		return nil, nil
+	}
+
+	opts := service.MergeDeleteOptions(options...)
+
+	return &pb.DeleteOptions{
+		Erase: opts.Erase,
+	}, nil
+}
+
+func ProtoToPublishOptions(protoOptions *pb.PublishOptions) ([]*service.PublishOptions, error) {
+	if protoOptions == nil {
+		return nil, nil
+	}
+	return []*service.PublishOptions{
+		{UpdateAttrs: protoOptions.UpdateAttrs},
+	}, nil
+}
+
+func PublishOptionsToProto(options []*service.PublishOptions) (*pb.PublishOptions, error) {
+	if options == nil {
+		return nil, nil
+	}
+
+	opts := service.MergePublishOptions(options...)
+
+	return &pb.PublishOptions{
+		UpdateAttrs: opts.UpdateAttrs,
+	}, nil
+}
+
+func ElPtrUnpublishOptionsToProto() {
+	panic("function not provided") // TODO: provide converter
+}
+
+func ProtoToElPtrUnpublishOptions() {
+	panic("function not provided") // TODO: provide converter
+}
+
+func ElPtrGetPublishedOptionsToProto(options []*service.GetPublishedOptions) (*pb.GetPublishedOptions, error) {
+	if options == nil {
+		return nil, nil
+	}
+
+	opts := service.MergeGetPublishedOptions(options...)
+
+	return &pb.GetPublishedOptions{LocaleId: opts.LocaleID}, nil
+}
+
+func ProtoToElPtrGetPublishedOptions(protoOptions *pb.GetPublishedOptions) ([]*service.GetPublishedOptions, error) {
+	if protoOptions == nil {
+		return nil, nil
+	}
+
+	return []*service.GetPublishedOptions{{LocaleID: protoOptions.LocaleId}}, nil
+}
+
+func ElPtrFindPublishedOptionsToProto(options []*service.FindPublishedOptions) (*pb.FindPublishedOptions, error) {
+	if options == nil {
+		return nil, nil
+	}
+
+	opts := service.MergeFindPublishedOptions(options...)
+
+	var err error
+
+	fo := &pb.FindPublishedOptions{
+		Regular:   opts.Regular,
+		Hidden:    opts.Hidden,
+		Templates: opts.Templates,
+	}
+	fo.Options, err = PtrServicesFindOptionsToProto(&opts.FindOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	fo.LocaleId = opts.LocaleID
+
+	return fo, nil
+}
+
+func ProtoToElPtrFindPublishedOptions(protoOptions *pb.FindPublishedOptions) ([]*service.FindPublishedOptions, error) {
+	if protoOptions == nil {
+		return nil, nil
+	}
+
+	var err error
+	fo := &service.FindPublishedOptions{
+		Regular:   protoOptions.Regular,
+		Hidden:    protoOptions.Hidden,
+		Templates: protoOptions.Templates,
+	}
+
+	o, err := ProtoToPtrServicesFindOptions(protoOptions.Options)
+	if err != nil {
+		return nil, err
+	}
+	if o != nil {
+		fo.FindOptions = *o
+	}
+
+	fo.LocaleID = protoOptions.LocaleId
+
+	return []*service.FindPublishedOptions{fo}, nil
+}
+
+func ElPtrGetRevisionOptionsToProto() {
+	panic("function not provided") // TODO: provide converter
+}
+
+func ProtoToElPtrGetRevisionOptions() {
+	panic("function not provided") // TODO: provide converter
+}
+
+func ElPtrListRevisionsOptionsToProto(options []*service.ListRevisionsOptions) (*pb.ListRevisionsOptions, error) {
+	if options == nil {
+		return nil, nil
+	}
+
+	opts := service.MergeListRevisionsOptions(options...)
+
+	var err error
+
+	fo := &pb.ListRevisionsOptions{}
+
+	fo.Options, err = PtrServicesFindOptionsToProto(&opts.FindOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	return fo, nil
+}
+
+func ProtoToElPtrListRevisionsOptions(protoOptions *pb.ListRevisionsOptions) ([]*service.ListRevisionsOptions, error) {
+	if protoOptions == nil {
+		return nil, nil
+	}
+
+	var err error
+	fo := &service.ListRevisionsOptions{}
+
+	o, err := ProtoToPtrServicesFindOptions(protoOptions.Options)
+	if err != nil {
+		return nil, err
+	}
+	if o != nil {
+		fo.FindOptions = *o
+	}
+
+	return []*service.ListRevisionsOptions{fo}, nil
+}
+
+func ElPtrArchiveOptionsToProto() {
+	panic("function not provided") // TODO: provide converter
+}
+
+func ProtoToElPtrArchiveOptions() {
+	panic("function not provided") // TODO: provide converter
+}
+
+func ElPtrFindArchivedOptionsToProto(options []*service.FindArchivedOptions) (*pb.FindArchivedOptions, error) {
+	if options == nil {
+		return nil, nil
+	}
+
+	opts := service.MergeFindArchivedOptions(options...)
+
+	var err error
+
+	fo := &pb.FindArchivedOptions{}
+
+	fo.Options, err = PtrServicesFindOptionsToProto(&opts.FindOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	return fo, nil
+}
+
+func ProtoToElPtrFindArchivedOptions(protoOptions *pb.FindArchivedOptions) ([]*service.FindArchivedOptions, error) {
+	if protoOptions == nil {
+		return nil, nil
+	}
+
+	var err error
+	fo := &service.FindArchivedOptions{}
+
+	o, err := ProtoToPtrServicesFindOptions(protoOptions.Options)
+	if err != nil {
+		return nil, err
+	}
+	if o != nil {
+		fo.FindOptions = *o
+	}
+
+	return []*service.FindArchivedOptions{fo}, nil
+}
+
+func ElPtrUnarchiveOptionsToProto() {
+	panic("function not provided") // TODO: provide converter
+}
+
+func ProtoToElPtrUnarchiveOptions() {
+	panic("function not provided") // TODO: provide converter
+}
+
+func ElPtrIntrospectOptionsToProto() {
+	panic("function not provided") // TODO: provide converter
+}
+
+func ProtoToElPtrIntrospectOptions() {
+	panic("function not provided") // TODO: provide converter
+}
+
+func ProtoToPtrServicesAggregateOptions(protoOpts *pb.AggregateOptions) ([]*service.AggregateOptions, error) {
+	if protoOpts == nil {
+		return nil, nil
+	}
+	return []*service.AggregateOptions{&service.AggregateOptions{Fields: protoOpts.Fields}}, nil
+}
+
+func PtrServicesAggregateOptionsToProto(opts *service.AggregateOptions) (*pb.AggregateOptions, error) {
+	if opts == nil {
+		return nil, nil
+	}
+	return &pb.AggregateOptions{
+		Fields: opts.Fields,
+	}, nil
+}
+
+func ElPtrAggregateOptionsToProto(options []*service.AggregateOptions) (*pb.AggregateOptions, error) {
+	if options == nil {
+		return nil, nil
+	}
+	opts := service.MergeAggregateOptions(options...)
+	return PtrServicesAggregateOptionsToProto(opts)
+}
+
+func ProtoToPtrServicesAggregatePublishedOptions(protoOpts *pb.AggregatePublishedOptions) ([]*service.AggregatePublishedOptions, error) {
+	if protoOpts == nil {
+		return nil, nil
+	}
+	return []*service.AggregatePublishedOptions{&service.AggregatePublishedOptions{Fields: protoOpts.Fields}}, nil
+}
+
+func PtrServicesAggregatePublishedOptionsToProto(opts *service.AggregatePublishedOptions) (*pb.AggregatePublishedOptions, error) {
+	if opts == nil {
+		return nil, nil
+	}
+	return &pb.AggregatePublishedOptions{
+		Fields: opts.Fields,
+	}, nil
+}
+
+func ElPtrAggregatePublishedOptionsToProto(options []*service.AggregatePublishedOptions) (*pb.AggregatePublishedOptions, error) {
+	if options == nil {
+		return nil, nil
+	}
+	opts := service.MergeAggregatePublishedOptions(options...)
+	return PtrServicesAggregatePublishedOptionsToProto(opts)
+}
+
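+// The schema is transported as a JSON string: PtrSchemaSchemaToProto serializes it with
+// jsoniter and ProtoToPtrSchemaSchema restores it via schema.UnmarshalJSON.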
+func PtrSchemaSchemaToProto(sch *schema.Schema) (string, error) {
+	if sch == nil {
+		return "", nil
+	}
+	res, err := jsoniter.MarshalToString(sch)
+	if err != nil {
+		return "", err
+	}
+	return res, nil
+}
+
+func ProtoToPtrSchemaSchema(protoSch string) (*schema.Schema, error) {
+	if protoSch == "" {
+		return nil, nil
+	}
+	sch := schema.New()
+	err := sch.UnmarshalJSON([]byte(protoSch))
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode schema. err: %s", err.Error())
+	}
+	return sch, nil
+}
+
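+// ValidationErrorsToProto converts field-level validation errors into FieldViolation
+// messages. Only errors implementing errors.FieldError are converted; other errors in
+// the slice are skipped.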
+func ValidationErrorsToProto(errs []error) ([]*pbcommon.Error_BadRequest_FieldViolation, error) {
+	if errs == nil {
+		return nil, nil
+	}
+
+	var validationErrors []*pbcommon.Error_BadRequest_FieldViolation
+	for _, err := range errs {
+
+		var fieldError errors.FieldError
+		if errors.As(err, &fieldError) {
+			validationErrors = append(validationErrors, &pbcommon.Error_BadRequest_FieldViolation{
+				Description: errors.Unwrap(fieldError).Error(),
+				Field:       fieldError.Field(),
+			})
+		}
+	}
+
+	return validationErrors, nil
+}
+
+func ProtoToValidationErrors(protoErrs []*pbcommon.Error_BadRequest_FieldViolation) ([]error, error) {
+	if protoErrs == nil {
+		return nil, nil
+	}
+
+	var validationErrors []error
+	for _, err := range protoErrs {
+		validationErrors = append(validationErrors, errors.WithField(errors.New(err.Description), err.Field))
+	}
+
+	return validationErrors, nil
+}
diff --git a/pkg/items/transport/grpc/server.go b/pkg/items/transport/grpc/server.go
new file mode 100644
index 0000000000000000000000000000000000000000..4ac8a3b02dd4991518d8b132707bb2dd0ce3c362
--- /dev/null
+++ b/pkg/items/transport/grpc/server.go
@@ -0,0 +1,34 @@
+package transportgrpc
+
+import (
+	grpcerr "git.perx.ru/perxis/perxis-go/pkg/errors/grpc"
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	"git.perx.ru/perxis/perxis-go/pkg/items/transport"
+	pb "git.perx.ru/perxis/perxis-go/proto/items"
+	grpckit "github.com/go-kit/kit/transport/grpc"
+)
+
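+// NewServer builds the gRPC server for the Items service: every endpoint is wrapped
+// with grpcerr.ServerMiddleware (error handling for the gRPC transport) before being
+// passed to the generated NewGRPCServer constructor.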
+func NewServer(svc items.Items, opts ...grpckit.ServerOption) pb.ItemsServer {
+	eps := transport.Endpoints(svc)
+	eps = transport.EndpointsSet{
+		CreateEndpoint:             grpcerr.ServerMiddleware(eps.CreateEndpoint),
+		IntrospectEndpoint:         grpcerr.ServerMiddleware(eps.IntrospectEndpoint),
+		GetEndpoint:                grpcerr.ServerMiddleware(eps.GetEndpoint),
+		FindEndpoint:               grpcerr.ServerMiddleware(eps.FindEndpoint),
+		UpdateEndpoint:             grpcerr.ServerMiddleware(eps.UpdateEndpoint),
+		DeleteEndpoint:             grpcerr.ServerMiddleware(eps.DeleteEndpoint),
+		UndeleteEndpoint:           grpcerr.ServerMiddleware(eps.UndeleteEndpoint),
+		PublishEndpoint:            grpcerr.ServerMiddleware(eps.PublishEndpoint),
+		UnpublishEndpoint:          grpcerr.ServerMiddleware(eps.UnpublishEndpoint),
+		GetPublishedEndpoint:       grpcerr.ServerMiddleware(eps.GetPublishedEndpoint),
+		FindPublishedEndpoint:      grpcerr.ServerMiddleware(eps.FindPublishedEndpoint),
+		GetRevisionEndpoint:        grpcerr.ServerMiddleware(eps.GetRevisionEndpoint),
+		ListRevisionsEndpoint:      grpcerr.ServerMiddleware(eps.ListRevisionsEndpoint),
+		ArchiveEndpoint:            grpcerr.ServerMiddleware(eps.ArchiveEndpoint),
+		FindArchivedEndpoint:       grpcerr.ServerMiddleware(eps.FindArchivedEndpoint),
+		UnarchiveEndpoint:          grpcerr.ServerMiddleware(eps.UnarchiveEndpoint),
+		AggregateEndpoint:          grpcerr.ServerMiddleware(eps.AggregateEndpoint),
+		AggregatePublishedEndpoint: grpcerr.ServerMiddleware(eps.AggregatePublishedEndpoint),
+	}
+	return NewGRPCServer(&eps, opts...)
+}
diff --git a/pkg/items/transport/grpc/server.microgen.go b/pkg/items/transport/grpc/server.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..a904b1e5a610dc12e38768d88b92eee392a6d7af
--- /dev/null
+++ b/pkg/items/transport/grpc/server.microgen.go
@@ -0,0 +1,292 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+// DO NOT EDIT.
+package transportgrpc
+
+import (
+	transport "git.perx.ru/perxis/perxis-go/pkg/items/transport"
+	pb "git.perx.ru/perxis/perxis-go/proto/items"
+	grpc "github.com/go-kit/kit/transport/grpc"
+	empty "github.com/golang/protobuf/ptypes/empty"
+	context "golang.org/x/net/context"
+)
+
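+// itemsServer implements pb.ItemsServer on top of go-kit gRPC handlers: each RPC
+// method delegates to the corresponding grpc.Handler created in NewGRPCServer.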
+type itemsServer struct {
+	create             grpc.Handler
+	introspect         grpc.Handler
+	get                grpc.Handler
+	find               grpc.Handler
+	update             grpc.Handler
+	delete             grpc.Handler
+	undelete           grpc.Handler
+	publish            grpc.Handler
+	unpublish          grpc.Handler
+	getPublished       grpc.Handler
+	findPublished      grpc.Handler
+	getRevision        grpc.Handler
+	listRevisions      grpc.Handler
+	archive            grpc.Handler
+	findArchived       grpc.Handler
+	unarchive          grpc.Handler
+	aggregate          grpc.Handler
+	aggregatePublished grpc.Handler
+
+	pb.UnimplementedItemsServer
+}
+
+func NewGRPCServer(endpoints *transport.EndpointsSet, opts ...grpc.ServerOption) pb.ItemsServer {
+	return &itemsServer{
+		archive: grpc.NewServer(
+			endpoints.ArchiveEndpoint,
+			_Decode_Archive_Request,
+			_Encode_Archive_Response,
+			opts...,
+		),
+		create: grpc.NewServer(
+			endpoints.CreateEndpoint,
+			_Decode_Create_Request,
+			_Encode_Create_Response,
+			opts...,
+		),
+		delete: grpc.NewServer(
+			endpoints.DeleteEndpoint,
+			_Decode_Delete_Request,
+			_Encode_Delete_Response,
+			opts...,
+		),
+		undelete: grpc.NewServer(
+			endpoints.UndeleteEndpoint,
+			_Decode_Undelete_Request,
+			_Encode_Undelete_Response,
+			opts...,
+		),
+		find: grpc.NewServer(
+			endpoints.FindEndpoint,
+			_Decode_Find_Request,
+			_Encode_Find_Response,
+			opts...,
+		),
+		findArchived: grpc.NewServer(
+			endpoints.FindArchivedEndpoint,
+			_Decode_FindArchived_Request,
+			_Encode_FindArchived_Response,
+			opts...,
+		),
+		findPublished: grpc.NewServer(
+			endpoints.FindPublishedEndpoint,
+			_Decode_FindPublished_Request,
+			_Encode_FindPublished_Response,
+			opts...,
+		),
+		get: grpc.NewServer(
+			endpoints.GetEndpoint,
+			_Decode_Get_Request,
+			_Encode_Get_Response,
+			opts...,
+		),
+		getPublished: grpc.NewServer(
+			endpoints.GetPublishedEndpoint,
+			_Decode_GetPublished_Request,
+			_Encode_GetPublished_Response,
+			opts...,
+		),
+		getRevision: grpc.NewServer(
+			endpoints.GetRevisionEndpoint,
+			_Decode_GetRevision_Request,
+			_Encode_GetRevision_Response,
+			opts...,
+		),
+		introspect: grpc.NewServer(
+			endpoints.IntrospectEndpoint,
+			_Decode_Introspect_Request,
+			_Encode_Introspect_Response,
+			opts...,
+		),
+		listRevisions: grpc.NewServer(
+			endpoints.ListRevisionsEndpoint,
+			_Decode_ListRevisions_Request,
+			_Encode_ListRevisions_Response,
+			opts...,
+		),
+		publish: grpc.NewServer(
+			endpoints.PublishEndpoint,
+			_Decode_Publish_Request,
+			_Encode_Publish_Response,
+			opts...,
+		),
+		unarchive: grpc.NewServer(
+			endpoints.UnarchiveEndpoint,
+			_Decode_Unarchive_Request,
+			_Encode_Unarchive_Response,
+			opts...,
+		),
+		unpublish: grpc.NewServer(
+			endpoints.UnpublishEndpoint,
+			_Decode_Unpublish_Request,
+			_Encode_Unpublish_Response,
+			opts...,
+		),
+		update: grpc.NewServer(
+			endpoints.UpdateEndpoint,
+			_Decode_Update_Request,
+			_Encode_Update_Response,
+			opts...,
+		),
+		aggregate: grpc.NewServer(
+			endpoints.AggregateEndpoint,
+			_Decode_Aggregate_Request,
+			_Encode_Aggregate_Response,
+			opts...,
+		),
+		aggregatePublished: grpc.NewServer(
+			endpoints.AggregatePublishedEndpoint,
+			_Decode_AggregatePublished_Request,
+			_Encode_AggregatePublished_Response,
+			opts...,
+		),
+	}
+}
+
+func (S *itemsServer) Create(ctx context.Context, req *pb.CreateRequest) (*pb.CreateResponse, error) {
+	_, resp, err := S.create.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.CreateResponse), nil
+}
+
+func (S *itemsServer) Introspect(ctx context.Context, req *pb.IntrospectRequest) (*pb.IntrospectResponse, error) {
+	_, resp, err := S.introspect.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.IntrospectResponse), nil
+}
+
+func (S *itemsServer) Get(ctx context.Context, req *pb.GetRequest) (*pb.GetResponse, error) {
+	_, resp, err := S.get.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.GetResponse), nil
+}
+
+func (S *itemsServer) Find(ctx context.Context, req *pb.FindRequest) (*pb.FindResponse, error) {
+	_, resp, err := S.find.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.FindResponse), nil
+}
+
+func (S *itemsServer) Update(ctx context.Context, req *pb.UpdateRequest) (*empty.Empty, error) {
+	_, resp, err := S.update.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*empty.Empty), nil
+}
+
+func (S *itemsServer) Delete(ctx context.Context, req *pb.DeleteRequest) (*empty.Empty, error) {
+	_, resp, err := S.delete.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*empty.Empty), nil
+}
+
+func (S *itemsServer) Undelete(ctx context.Context, req *pb.UndeleteRequest) (*empty.Empty, error) {
+	_, resp, err := S.undelete.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*empty.Empty), nil
+}
+
+func (S *itemsServer) Publish(ctx context.Context, req *pb.PublishRequest) (*empty.Empty, error) {
+	_, resp, err := S.publish.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*empty.Empty), nil
+}
+
+func (S *itemsServer) Unpublish(ctx context.Context, req *pb.UnpublishRequest) (*empty.Empty, error) {
+	_, resp, err := S.unpublish.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*empty.Empty), nil
+}
+
+func (S *itemsServer) GetPublished(ctx context.Context, req *pb.GetPublishedRequest) (*pb.GetPublishedResponse, error) {
+	_, resp, err := S.getPublished.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.GetPublishedResponse), nil
+}
+
+func (S *itemsServer) FindPublished(ctx context.Context, req *pb.FindPublishedRequest) (*pb.FindPublishedResponse, error) {
+	_, resp, err := S.findPublished.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.FindPublishedResponse), nil
+}
+
+func (S *itemsServer) GetRevision(ctx context.Context, req *pb.GetRevisionRequest) (*pb.GetRevisionResponse, error) {
+	_, resp, err := S.getRevision.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.GetRevisionResponse), nil
+}
+
+func (S *itemsServer) ListRevisions(ctx context.Context, req *pb.ListRevisionsRequest) (*pb.ListRevisionsResponse, error) {
+	_, resp, err := S.listRevisions.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.ListRevisionsResponse), nil
+}
+
+func (S *itemsServer) Archive(ctx context.Context, req *pb.ArchiveRequest) (*empty.Empty, error) {
+	_, resp, err := S.archive.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*empty.Empty), nil
+}
+
+func (S *itemsServer) FindArchived(ctx context.Context, req *pb.FindArchivedRequest) (*pb.FindArchivedResponse, error) {
+	_, resp, err := S.findArchived.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.FindArchivedResponse), nil
+}
+
+func (S *itemsServer) Unarchive(ctx context.Context, req *pb.UnarchiveRequest) (*empty.Empty, error) {
+	_, resp, err := S.unarchive.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*empty.Empty), nil
+}
+
+func (S *itemsServer) Aggregate(ctx context.Context, req *pb.AggregateRequest) (*pb.AggregateResponse, error) {
+	_, resp, err := S.aggregate.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AggregateResponse), nil
+}
+
+func (S *itemsServer) AggregatePublished(ctx context.Context, req *pb.AggregatePublishedRequest) (*pb.AggregatePublishedResponse, error) {
+	_, resp, err := S.aggregatePublished.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AggregatePublishedResponse), nil
+}
diff --git a/pkg/items/transport/server.microgen.go b/pkg/items/transport/server.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..4ba5f4a265125ea7b7168fab8d9c7c0f747f23bf
--- /dev/null
+++ b/pkg/items/transport/server.microgen.go
@@ -0,0 +1,220 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transport
+
+import (
+	"context"
+	"strings"
+
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	endpoint "github.com/go-kit/kit/endpoint"
+	"github.com/hashicorp/go-multierror"
+)
+
+func Endpoints(svc items.Items) EndpointsSet {
+	return EndpointsSet{
+		ArchiveEndpoint:            ArchiveEndpoint(svc),
+		CreateEndpoint:             CreateEndpoint(svc),
+		DeleteEndpoint:             DeleteEndpoint(svc),
+		UndeleteEndpoint:           UndeleteEndpoint(svc),
+		FindArchivedEndpoint:       FindArchivedEndpoint(svc),
+		FindEndpoint:               FindEndpoint(svc),
+		FindPublishedEndpoint:      FindPublishedEndpoint(svc),
+		GetEndpoint:                GetEndpoint(svc),
+		GetPublishedEndpoint:       GetPublishedEndpoint(svc),
+		GetRevisionEndpoint:        GetRevisionEndpoint(svc),
+		IntrospectEndpoint:         IntrospectEndpoint(svc),
+		ListRevisionsEndpoint:      ListRevisionsEndpoint(svc),
+		PublishEndpoint:            PublishEndpoint(svc),
+		UnarchiveEndpoint:          UnarchiveEndpoint(svc),
+		UnpublishEndpoint:          UnpublishEndpoint(svc),
+		UpdateEndpoint:             UpdateEndpoint(svc),
+		AggregateEndpoint:          AggregateEndpoint(svc),
+		AggregatePublishedEndpoint: AggregatePublishedEndpoint(svc),
+	}
+}
+
+func CreateEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*CreateRequest)
+		res0, res1 := svc.Create(arg0, req.Item, req.Opts...)
+		return &CreateResponse{Created: res0}, res1
+	}
+}
+
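+// IntrospectEndpoint treats field-level validation problems as part of the normal
+// response: if the service returns a multierror whose message indicates a validation,
+// modification, encode or decode error, the wrapped errors.FieldError values are moved
+// into IntrospectResponse.ValidationErrors and the endpoint error is cleared.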
+func IntrospectEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*IntrospectRequest)
+		res0, res1, res2 := svc.Introspect(arg0, req.Item, req.Opts...)
+		resp := &IntrospectResponse{
+			Item:   res0,
+			Schema: res1,
+		}
+		if res2 != nil {
+
+			err := res2
+
+			var merr *multierror.Error
+			if (strings.Contains(err.Error(), "validation error") ||
+				strings.Contains(err.Error(), "modification error") ||
+				strings.Contains(err.Error(), "decode error") ||
+				strings.Contains(err.Error(), "encode error")) && errors.As(err, &merr) {
+
+				errs := make([]error, 0)
+				for _, e := range merr.WrappedErrors() {
+					var errField errors.FieldError
+					if errors.As(e, &errField) {
+						errs = append(errs, e)
+					}
+				}
+
+				if len(errs) > 0 {
+					resp.ValidationErrors = errs
+					res2 = nil
+				}
+			}
+		}
+		return resp, res2
+	}
+}
+
+func GetEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*GetRequest)
+		res0, res1 := svc.Get(arg0, req.SpaceId, req.EnvId, req.CollectionId, req.ItemId, req.Options...)
+		return &GetResponse{Item: res0}, res1
+	}
+}
+
+func FindEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*FindRequest)
+		res0, res1, res2 := svc.Find(arg0, req.SpaceId, req.EnvId, req.CollectionId, req.Filter, req.Options...)
+		return &FindResponse{
+			Items: res0,
+			Total: res1,
+		}, res2
+	}
+}
+
+func UpdateEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*UpdateRequest)
+		res0 := svc.Update(arg0, req.Item, req.Options...)
+		return &UpdateResponse{}, res0
+	}
+}
+
+func DeleteEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*DeleteRequest)
+		res0 := svc.Delete(arg0, req.SpaceId, req.EnvId, req.CollectionId, req.ItemId, req.Options...)
+		return &DeleteResponse{}, res0
+	}
+}
+
+func UndeleteEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*UndeleteRequest)
+		res0 := svc.Undelete(arg0, req.SpaceId, req.EnvId, req.CollectionId, req.ItemId, req.Options...)
+		return &UndeleteResponse{}, res0
+	}
+}
+
+func PublishEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*PublishRequest)
+		res0 := svc.Publish(arg0, req.Item, req.Options...)
+		return &PublishResponse{}, res0
+	}
+}
+
+func UnpublishEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*UnpublishRequest)
+		res0 := svc.Unpublish(arg0, req.Item, req.Options...)
+		return &UnpublishResponse{}, res0
+	}
+}
+
+func GetPublishedEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*GetPublishedRequest)
+		res0, res1 := svc.GetPublished(arg0, req.SpaceId, req.EnvId, req.CollectionId, req.ItemId, req.Options...)
+		return &GetPublishedResponse{Item: res0}, res1
+	}
+}
+
+func FindPublishedEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*FindPublishedRequest)
+		res0, res1, res2 := svc.FindPublished(arg0, req.SpaceId, req.EnvId, req.CollectionId, req.Filter, req.Options...)
+		return &FindPublishedResponse{
+			Items: res0,
+			Total: res1,
+		}, res2
+	}
+}
+
+func GetRevisionEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*GetRevisionRequest)
+		res0, res1 := svc.GetRevision(arg0, req.SpaceId, req.EnvId, req.CollectionId, req.ItemId, req.RevisionId, req.Options...)
+		return &GetRevisionResponse{Item: res0}, res1
+	}
+}
+
+func ListRevisionsEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*ListRevisionsRequest)
+		res0, res1 := svc.ListRevisions(arg0, req.SpaceId, req.EnvId, req.CollectionId, req.ItemId, req.Options...)
+		return &ListRevisionsResponse{Items: res0}, res1
+	}
+}
+
+func ArchiveEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*ArchiveRequest)
+		res0 := svc.Archive(arg0, req.Item, req.Options...)
+		return &ArchiveResponse{}, res0
+	}
+}
+
+func FindArchivedEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*FindArchivedRequest)
+		res0, res1, res2 := svc.FindArchived(arg0, req.SpaceId, req.EnvId, req.CollectionId, req.Filter, req.Options...)
+		return &FindArchivedResponse{
+			Items: res0,
+			Total: res1,
+		}, res2
+	}
+}
+
+func UnarchiveEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*UnarchiveRequest)
+		res0 := svc.Unarchive(arg0, req.Item, req.Options...)
+		return &UnarchiveResponse{}, res0
+	}
+}
+
+func AggregateEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*AggregateRequest)
+		res0, res1 := svc.Aggregate(arg0, req.SpaceId, req.EnvId, req.CollectionId, req.Filter, req.Options...)
+		return &AggregateResponse{
+			Result: res0,
+		}, res1
+	}
+}
+func AggregatePublishedEndpoint(svc items.Items) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*AggregatePublishedRequest)
+		res0, res1 := svc.AggregatePublished(arg0, req.SpaceId, req.EnvId, req.CollectionId, req.Filter, req.Options...)
+		return &AggregatePublishedResponse{
+			Result: res0,
+		}, res1
+	}
+}
diff --git a/pkg/locales/middleware/caching_middleware.go b/pkg/locales/middleware/caching_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..8b4635ab4a0214f99407b67020737724ebf3c841
--- /dev/null
+++ b/pkg/locales/middleware/caching_middleware.go
@@ -0,0 +1,53 @@
+package service
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	service "git.perx.ru/perxis/perxis-go/pkg/locales"
+)
+
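+// CachingMiddleware caches the locale list of a space under its space ID.
+// Create and Delete invalidate the cached list for the affected space, so the next
+// List call goes to the underlying service.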
+func CachingMiddleware(cache *cache.Cache) Middleware {
+	return func(next service.Locales) service.Locales {
+		return &cachingMiddleware{
+			cache: cache,
+			next:  next,
+		}
+	}
+}
+
+type cachingMiddleware struct {
+	cache *cache.Cache
+	next  service.Locales
+}
+
+func (m cachingMiddleware) Create(ctx context.Context, locale *service.Locale) (loc *service.Locale, err error) {
+
+	loc, err = m.next.Create(ctx, locale)
+	if err == nil {
+		m.cache.Remove(loc.SpaceID)
+	}
+	return loc, err
+}
+
+func (m cachingMiddleware) List(ctx context.Context, spaceId string) (locales []*service.Locale, err error) {
+
+	value, e := m.cache.Get(spaceId)
+	if e == nil {
+		return value.([]*service.Locale), nil
+	}
+	locales, err = m.next.List(ctx, spaceId)
+	if err == nil {
+		m.cache.Set(spaceId, locales)
+	}
+	return locales, err
+}
+
+func (m cachingMiddleware) Delete(ctx context.Context, spaceId string, localeId string) (err error) {
+
+	err = m.next.Delete(ctx, spaceId, localeId)
+	if err == nil {
+		m.cache.Remove(spaceId)
+	}
+	return err
+}
diff --git a/pkg/locales/middleware/caching_middleware_test.go b/pkg/locales/middleware/caching_middleware_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..de5e7a9f742b6336ecd5a67d529ed468236838f4
--- /dev/null
+++ b/pkg/locales/middleware/caching_middleware_test.go
@@ -0,0 +1,130 @@
+package service
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	"git.perx.ru/perxis/perxis-go/pkg/locales"
+	locmocks "git.perx.ru/perxis/perxis-go/pkg/locales/mocks"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestLocalesCache(t *testing.T) {
+
+	const (
+		loc1    = "loc1"
+		loc2    = "loc2"
+		spaceID = "spaceID"
+		size    = 5
+		ttl     = 20 * time.Millisecond
+	)
+
+	ctx := context.Background()
+
+	t.Run("List from Cache", func(t *testing.T) {
+		loc := &locmocks.Locales{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(loc)
+
+		loc.On("List", mock.Anything, spaceID).Return([]*locales.Locale{{ID: loc1, Name: "name1", SpaceID: spaceID}}, nil).Once()
+
+		vl1, err := svc.List(ctx, spaceID)
+		require.NoError(t, err)
+
+		vl2, err := svc.List(ctx, spaceID)
+		require.NoError(t, err)
+		assert.Same(t, vl1[0], vl2[0], "Expected the repeated request to return the objects from the cache.")
+
+		loc.AssertExpectations(t)
+	})
+
+	t.Run("Invalidate cache", func(t *testing.T) {
+		t.Run("After Delete", func(t *testing.T) {
+			loc := &locmocks.Locales{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(loc)
+
+			loc.On("List", mock.Anything, spaceID).Return([]*locales.Locale{{ID: loc1, Name: "name1", SpaceID: spaceID}}, nil).Once()
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected the repeated request to return the objects from the cache.")
+
+			loc.On("Delete", mock.Anything, spaceID, loc1).Return(nil).Once()
+
+			err = svc.Delete(ctx, spaceID, loc1)
+			require.NoError(t, err)
+
+			loc.On("List", mock.Anything, spaceID).Return([]*locales.Locale{}, nil).Once()
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Len(t, vl3, 0, "Expected the cached objects to be removed after Delete.")
+
+			loc.AssertExpectations(t)
+		})
+
+		t.Run("After Create", func(t *testing.T) {
+			loc := &locmocks.Locales{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(loc)
+
+			loc.On("List", mock.Anything, spaceID).Return([]*locales.Locale{{ID: loc1, Name: "name1", SpaceID: spaceID}}, nil).Once()
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected the repeated request to return the objects from the cache.")
+
+			loc.On("Create", mock.Anything, mock.Anything).Return(&locales.Locale{ID: loc2, Name: "name2", SpaceID: spaceID}, nil).Once()
+
+			_, err = svc.Create(ctx, &locales.Locale{ID: loc2, Name: "name2", SpaceID: spaceID})
+			require.NoError(t, err)
+
+			loc.On("List", mock.Anything, spaceID).
+				Return([]*locales.Locale{
+					{ID: loc1, Name: "name1", SpaceID: spaceID},
+					{ID: loc2, Name: "name2", SpaceID: spaceID},
+				}, nil).Once()
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Len(t, vl3, 2, "Expected the cache to be invalidated after Create and the data to be fetched from the service.")
+
+			loc.AssertExpectations(t)
+		})
+
+		t.Run("After TTL expired", func(t *testing.T) {
+			loc := &locmocks.Locales{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(loc)
+
+			loc.On("List", mock.Anything, spaceID).Return([]*locales.Locale{{ID: loc1, Name: "name1", SpaceID: spaceID}}, nil).Once()
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected the repeated request to return the objects from the cache.")
+
+			time.Sleep(2 * ttl)
+			loc.On("List", mock.Anything, spaceID).Return([]*locales.Locale{{ID: loc1, Name: "name1", SpaceID: spaceID}}, nil).Once()
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.NotSame(t, vl2[0], vl3[0], "Expected the cache entry to expire after the TTL so the objects are fetched from the service again.")
+
+			loc.AssertExpectations(t)
+		})
+	})
+}
diff --git a/pkg/locales/middleware/error_logging_middleware.go b/pkg/locales/middleware/error_logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..695c91128d6f093d93f022468d464bedbd571e04
--- /dev/null
+++ b/pkg/locales/middleware/error_logging_middleware.go
@@ -0,0 +1,60 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/error_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/locales -i Locales -t ../../../assets/templates/middleware/error_log -o error_logging_middleware.go -l ""
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/locales"
+	"go.uber.org/zap"
+)
+
+// errorLoggingMiddleware implements locales.Locales that is instrumented with logging
+type errorLoggingMiddleware struct {
+	logger *zap.Logger
+	next   locales.Locales
+}
+
+// ErrorLoggingMiddleware instruments an implementation of the locales.Locales with simple logging
+func ErrorLoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next locales.Locales) locales.Locales {
+		return &errorLoggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *errorLoggingMiddleware) Create(ctx context.Context, locale *locales.Locale) (created *locales.Locale, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Create(ctx, locale)
+}
+
+func (m *errorLoggingMiddleware) Delete(ctx context.Context, spaceId string, localeId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Delete(ctx, spaceId, localeId)
+}
+
+func (m *errorLoggingMiddleware) List(ctx context.Context, spaceId string) (locales []*locales.Locale, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.List(ctx, spaceId)
+}
diff --git a/pkg/locales/middleware/logging_middleware.go b/pkg/locales/middleware/logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb98d25759e87d199e8c65a204f30b3acb48c1f4
--- /dev/null
+++ b/pkg/locales/middleware/logging_middleware.go
@@ -0,0 +1,142 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/access_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/locales -i Locales -t ../../../assets/templates/middleware/access_log -o logging_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/auth"
+	"git.perx.ru/perxis/perxis-go/pkg/locales"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// loggingMiddleware implements locales.Locales that is instrumented with logging
+type loggingMiddleware struct {
+	logger *zap.Logger
+	next   locales.Locales
+}
+
+// LoggingMiddleware instruments an implementation of the locales.Locales with simple logging
+func LoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next locales.Locales) locales.Locales {
+		return &loggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *loggingMiddleware) Create(ctx context.Context, locale *locales.Locale) (created *locales.Locale, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":    ctx,
+		"locale": locale} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Request", fields...)
+
+	created, err = m.next.Create(ctx, locale)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"created": created,
+		"err":     err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Response", fields...)
+
+	return created, err
+}
+
+func (m *loggingMiddleware) Delete(ctx context.Context, spaceId string, localeId string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":      ctx,
+		"spaceId":  spaceId,
+		"localeId": localeId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Request", fields...)
+
+	err = m.next.Delete(ctx, spaceId, localeId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) List(ctx context.Context, spaceId string) (locales []*locales.Locale, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("List.Request", fields...)
+
+	locales, err = m.next.List(ctx, spaceId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"locales": locales,
+		"err":     err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("List.Response", fields...)
+
+	return locales, err
+}
diff --git a/pkg/locales/middleware/middleware.go b/pkg/locales/middleware/middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..726b535247256a109f0fc5aa100e0a61cc928555
--- /dev/null
+++ b/pkg/locales/middleware/middleware.go
@@ -0,0 +1,28 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/middleware
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/locales -i Locales -t ../../../assets/templates/middleware/middleware -o middleware.go -l ""
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/locales"
+	"go.uber.org/zap"
+)
+
+type Middleware func(locales.Locales) locales.Locales
+
+func WithLog(s locales.Locales, logger *zap.Logger, log_access bool) locales.Locales {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	logger = logger.Named("Locales")
+	s = ErrorLoggingMiddleware(logger)(s)
+	if log_access {
+		s = LoggingMiddleware(logger)(s)
+	}
+	s = RecoveringMiddleware(logger)(s)
+	return s
+}
diff --git a/pkg/locales/middleware/recovering_middleware.go b/pkg/locales/middleware/recovering_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..44b198550418034e1963fec5ccbd405e0ea12ef4
--- /dev/null
+++ b/pkg/locales/middleware/recovering_middleware.go
@@ -0,0 +1,67 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/recovery
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/locales -i Locales -t ../../../assets/templates/middleware/recovery -o recovering_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+
+	"git.perx.ru/perxis/perxis-go/pkg/locales"
+	"go.uber.org/zap"
+)
+
+// recoveringMiddleware implements locales.Locales that is instrumented with logging
+type recoveringMiddleware struct {
+	logger *zap.Logger
+	next   locales.Locales
+}
+
+// RecoveringMiddleware instruments an implementation of the locales.Locales with simple logging
+func RecoveringMiddleware(logger *zap.Logger) Middleware {
+	return func(next locales.Locales) locales.Locales {
+		return &recoveringMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *recoveringMiddleware) Create(ctx context.Context, locale *locales.Locale) (created *locales.Locale, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Create(ctx, locale)
+}
+
+func (m *recoveringMiddleware) Delete(ctx context.Context, spaceId string, localeId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Delete(ctx, spaceId, localeId)
+}
+
+func (m *recoveringMiddleware) List(ctx context.Context, spaceId string) (locales []*locales.Locale, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.List(ctx, spaceId)
+}
diff --git a/pkg/members/middleware/caching_middleware.go b/pkg/members/middleware/caching_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..2faa5ce433281d8d396bb5c912e4e91cfaff727f
--- /dev/null
+++ b/pkg/members/middleware/caching_middleware.go
@@ -0,0 +1,102 @@
+package service
+
+import (
+	"context"
+	"strings"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	service "git.perx.ru/perxis/perxis-go/pkg/members"
+)
+
+func makeKey(ss ...string) string {
+	return strings.Join(ss, "-")
+}
+
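+// CachingMiddleware caches Members data in the provided cache. A user's role is stored
+// under the "orgId-userId" key, the organization's member list under "orgId" and the list
+// of a user's organizations under "userId"; Set, Remove and RemoveAll invalidate the
+// related keys.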
+func CachingMiddleware(cache *cache.Cache) Middleware {
+	return func(next service.Members) service.Members {
+		return &cachingMiddleware{
+			cache: cache,
+			next:  next,
+		}
+	}
+}
+
+type cachingMiddleware struct {
+	cache *cache.Cache
+	next  service.Members
+}
+
+func (m cachingMiddleware) Set(ctx context.Context, orgId string, userId string, role service.Role) (err error) {
+
+	err = m.next.Set(ctx, orgId, userId, role)
+	if err == nil {
+		m.cache.Remove(makeKey(orgId, userId))
+		m.cache.Remove(makeKey(orgId))
+		m.cache.Remove(makeKey(userId))
+	}
+	return err
+}
+
+func (m cachingMiddleware) Get(ctx context.Context, orgId string, userId string) (role service.Role, err error) {
+
+	key := makeKey(orgId, userId)
+	value, e := m.cache.Get(key)
+	if e == nil {
+		return value.(service.Role), err
+	}
+	role, err = m.next.Get(ctx, orgId, userId)
+	if err == nil {
+		m.cache.Set(key, role)
+	}
+	return role, err
+}
+
+func (m cachingMiddleware) Remove(ctx context.Context, orgId string, userId string) (err error) {
+
+	err = m.next.Remove(ctx, orgId, userId)
+	if err == nil {
+		m.cache.Remove(makeKey(orgId, userId))
+		m.cache.Remove(makeKey(orgId))
+		m.cache.Remove(makeKey(userId))
+	}
+	return err
+}
+
+func (m cachingMiddleware) RemoveAll(ctx context.Context, orgId string) (err error) {
+
+	err = m.next.RemoveAll(ctx, orgId)
+	if err == nil {
+		members, _ := m.ListMembers(ctx, orgId)
+		for _, member := range members {
+			m.cache.Remove(member.UserId)
+			m.cache.Remove(makeKey(orgId, member.UserId))
+		}
+		m.cache.Remove(makeKey(orgId))
+	}
+	return err
+}
+
+func (m cachingMiddleware) ListMembers(ctx context.Context, orgId string) (members []*service.Member, err error) {
+
+	value, e := m.cache.Get(makeKey(orgId))
+	if e == nil {
+		return value.([]*service.Member), err
+	}
+	members, err = m.next.ListMembers(ctx, orgId)
+	if err == nil {
+		m.cache.Set(makeKey(orgId), members)
+	}
+	return members, err
+}
+
+func (m cachingMiddleware) ListOrganizations(ctx context.Context, userId string) (members []*service.Member, err error) {
+
+	value, e := m.cache.Get(makeKey(userId))
+	if e == nil {
+		return value.([]*service.Member), err
+	}
+	members, err = m.next.ListOrganizations(ctx, userId)
+	if err == nil {
+		m.cache.Set(makeKey(userId), members)
+	}
+	return members, err
+}
diff --git a/pkg/members/middleware/caching_middleware_test.go b/pkg/members/middleware/caching_middleware_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..1844dc58ebb97b228800a95f9c9cb8b142407fb4
--- /dev/null
+++ b/pkg/members/middleware/caching_middleware_test.go
@@ -0,0 +1,147 @@
+package service
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	"git.perx.ru/perxis/perxis-go/pkg/members"
+	mocksmembers "git.perx.ru/perxis/perxis-go/pkg/members/mocks"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestMembersCache(t *testing.T) {
+
+	const (
+		orgId  = "orgId"
+		userId = "userId"
+		size   = 5
+		ttl    = 20 * time.Millisecond
+	)
+
+	ctx := context.Background()
+
+	t.Run("Get from cache", func(t *testing.T) {
+		mbrs := &mocksmembers.Members{}
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(mbrs)
+
+		mbrs.On("Get", mock.Anything, orgId, userId).Return(members.RoleOwner, nil).Once()
+
+		v1, err := svc.Get(ctx, orgId, userId)
+		require.NoError(t, err)
+
+		v2, err := svc.Get(ctx, orgId, userId)
+		require.NoError(t, err)
+		assert.Equal(t, v1, v2, "Expected the value to be served from the cache on the repeated Get request.")
+
+		mbrs.AssertExpectations(t)
+	})
+
+	t.Run("Invalidate cache", func(t *testing.T) {
+		t.Run("After Set", func(t *testing.T) {
+			mbrs := &mocksmembers.Members{}
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(mbrs)
+
+			mbrs.On("Get", mock.Anything, orgId, userId).Return(members.RoleOwner, nil).Once()
+
+			v1, err := svc.Get(ctx, orgId, userId)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, orgId, userId)
+			require.NoError(t, err)
+			assert.Equal(t, v1, v2, "Expected the value to be served from the cache.")
+
+			mbrs.On("Set", mock.Anything, orgId, userId, members.RoleMember).Return(nil).Once()
+
+			err = svc.Set(ctx, orgId, userId, members.RoleMember)
+			require.NoError(t, err)
+
+			mbrs.On("Get", mock.Anything, orgId, userId).Return(members.RoleMember, nil).Once()
+
+			v3, err := svc.Get(ctx, orgId, userId)
+			require.NoError(t, err)
+			assert.NotEqual(t, v2, v3, "Expected the cache entry to be invalidated and the value to be fetched from the service again.")
+			mbrs.AssertExpectations(t)
+		})
+
+		t.Run("After Remove", func(t *testing.T) {
+			mbrs := &mocksmembers.Members{}
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(mbrs)
+
+			mbrs.On("Get", mock.Anything, orgId, userId).Return(members.RoleOwner, nil).Once()
+
+			v1, err := svc.Get(ctx, orgId, userId)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, orgId, userId)
+			require.NoError(t, err)
+			assert.Equal(t, v1, v2, "Expected the value to be served from the cache.")
+
+			mbrs.On("Remove", mock.Anything, orgId, userId).Return(nil).Once()
+
+			err = svc.Remove(ctx, orgId, userId)
+			require.NoError(t, err)
+
+			mbrs.On("Get", mock.Anything, orgId, userId).Return(members.NotMember, nil).Once()
+
+			v3, err := svc.Get(ctx, orgId, userId)
+			require.NoError(t, err)
+			assert.NotEqual(t, v2, v3, "Expected the cache entry to be invalidated after the removal from the store and the value to be fetched from the service again.")
+
+			mbrs.AssertExpectations(t)
+		})
+
+		t.Run("After RemoveAll", func(t *testing.T) {
+			mbrs := &mocksmembers.Members{}
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(mbrs)
+
+			mbrs.On("Get", mock.Anything, orgId, userId).Return(members.RoleOwner, nil).Once()
+			mbrs.On("ListMembers", mock.Anything, orgId).Return([]*members.Member{{OrgId: orgId, UserId: userId, Role: members.RoleOwner}}, nil)
+
+			v1, err := svc.Get(ctx, orgId, userId)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, orgId, userId)
+			require.NoError(t, err)
+			assert.Equal(t, v1, v2, "Expected the value to be served from the cache.")
+
+			mbrs.On("RemoveAll", mock.Anything, orgId).Return(nil).Once()
+
+			err = svc.RemoveAll(ctx, orgId)
+			require.NoError(t, err)
+
+			mbrs.On("Get", mock.Anything, orgId, userId).Return(members.NotMember, nil).Once()
+
+			v3, err := svc.Get(ctx, orgId, userId)
+			require.NoError(t, err)
+			assert.NotEqual(t, v2, v3, "Expected the cache entry to be invalidated after the removal from the store and the value to be fetched from the service again.")
+
+			mbrs.AssertExpectations(t)
+		})
+
+		t.Run("After TTL expired", func(t *testing.T) {
+			mbrs := &mocksmembers.Members{}
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(mbrs)
+
+			mbrs.On("Get", mock.Anything, orgId, userId).Return(members.RoleOwner, nil).Once()
+
+			v1, err := svc.Get(ctx, orgId, userId)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, orgId, userId)
+			require.NoError(t, err)
+			assert.Equal(t, v1, v2, "Expected the value to be served from the cache.")
+
+			time.Sleep(2 * ttl)
+
+			mbrs.On("Get", mock.Anything, orgId, userId).Return(members.RoleMember, nil).Once()
+
+			v3, err := svc.Get(ctx, orgId, userId)
+			require.NoError(t, err)
+			assert.NotEqual(t, v2, v3, "Expected the cache entry to be invalidated after the TTL expired and the value to be fetched from the service again.")
+			mbrs.AssertExpectations(t)
+		})
+	})
+}
diff --git a/pkg/members/middleware/error_logging_middleware.go b/pkg/members/middleware/error_logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..08d2814bf4fc8f16f0df57462770342a93e191c8
--- /dev/null
+++ b/pkg/members/middleware/error_logging_middleware.go
@@ -0,0 +1,90 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/error_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/members -i Members -t ../../../assets/templates/middleware/error_log -o error_logging_middleware.go -l ""
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/members"
+	"go.uber.org/zap"
+)
+
+// errorLoggingMiddleware implements members.Members that is instrumented with logging
+type errorLoggingMiddleware struct {
+	logger *zap.Logger
+	next   members.Members
+}
+
+// ErrorLoggingMiddleware instruments an implementation of the members.Members with simple logging
+func ErrorLoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next members.Members) members.Members {
+		return &errorLoggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *errorLoggingMiddleware) Get(ctx context.Context, orgId string, userId string) (role members.Role, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Get(ctx, orgId, userId)
+}
+
+func (m *errorLoggingMiddleware) ListMembers(ctx context.Context, orgId string) (members []*members.Member, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.ListMembers(ctx, orgId)
+}
+
+func (m *errorLoggingMiddleware) ListOrganizations(ctx context.Context, userId string) (organizations []*members.Member, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.ListOrganizations(ctx, userId)
+}
+
+func (m *errorLoggingMiddleware) Remove(ctx context.Context, orgId string, userId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Remove(ctx, orgId, userId)
+}
+
+func (m *errorLoggingMiddleware) RemoveAll(ctx context.Context, orgId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.RemoveAll(ctx, orgId)
+}
+
+func (m *errorLoggingMiddleware) Set(ctx context.Context, orgId string, userId string, role members.Role) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Set(ctx, orgId, userId, role)
+}
diff --git a/pkg/members/middleware/logging_middleware.go b/pkg/members/middleware/logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..1b9ead6d06d694945c89638c52d4bce07e4ee938
--- /dev/null
+++ b/pkg/members/middleware/logging_middleware.go
@@ -0,0 +1,251 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/access_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/members -i Members -t ../../../assets/templates/middleware/access_log -o logging_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/auth"
+	"git.perx.ru/perxis/perxis-go/pkg/members"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// loggingMiddleware implements members.Members that is instrumented with logging
+type loggingMiddleware struct {
+	logger *zap.Logger
+	next   members.Members
+}
+
+// LoggingMiddleware instruments an implementation of the members.Members with simple logging
+func LoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next members.Members) members.Members {
+		return &loggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *loggingMiddleware) Get(ctx context.Context, orgId string, userId string) (role members.Role, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":    ctx,
+		"orgId":  orgId,
+		"userId": userId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Request", fields...)
+
+	role, err = m.next.Get(ctx, orgId, userId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"role": role,
+		"err":  err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Response", fields...)
+
+	return role, err
+}
+
+func (m *loggingMiddleware) ListMembers(ctx context.Context, orgId string) (members []*members.Member, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":   ctx,
+		"orgId": orgId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("ListMembers.Request", fields...)
+
+	members, err = m.next.ListMembers(ctx, orgId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"members": members,
+		"err":     err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("ListMembers.Response", fields...)
+
+	return members, err
+}
+
+func (m *loggingMiddleware) ListOrganizations(ctx context.Context, userId string) (organizations []*members.Member, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":    ctx,
+		"userId": userId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("ListOrganizations.Request", fields...)
+
+	organizations, err = m.next.ListOrganizations(ctx, userId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"organizations": organizations,
+		"err":           err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("ListOrganizations.Response", fields...)
+
+	return organizations, err
+}
+
+func (m *loggingMiddleware) Remove(ctx context.Context, orgId string, userId string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":    ctx,
+		"orgId":  orgId,
+		"userId": userId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Remove.Request", fields...)
+
+	err = m.next.Remove(ctx, orgId, userId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Remove.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) RemoveAll(ctx context.Context, orgId string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":   ctx,
+		"orgId": orgId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("RemoveAll.Request", fields...)
+
+	err = m.next.RemoveAll(ctx, orgId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("RemoveAll.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Set(ctx context.Context, orgId string, userId string, role members.Role) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":    ctx,
+		"orgId":  orgId,
+		"userId": userId,
+		"role":   role} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Set.Request", fields...)
+
+	err = m.next.Set(ctx, orgId, userId, role)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Set.Response", fields...)
+
+	return err
+}
diff --git a/pkg/members/middleware/middleware.go b/pkg/members/middleware/middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..1aa0cfbe798f5688587ab8e4f5d4e166383d8d92
--- /dev/null
+++ b/pkg/members/middleware/middleware.go
@@ -0,0 +1,28 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/middleware
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/members -i Members -t ../../../assets/templates/middleware/middleware -o middleware.go -l ""
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/members"
+	"go.uber.org/zap"
+)
+
+type Middleware func(members.Members) members.Members
+
+func WithLog(s members.Members, logger *zap.Logger, log_access bool) members.Members {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	logger = logger.Named("Members")
+	s = ErrorLoggingMiddleware(logger)(s)
+	if log_access {
+		s = LoggingMiddleware(logger)(s)
+	}
+	s = RecoveringMiddleware(logger)(s)
+	return s
+}
diff --git a/pkg/members/middleware/recovering_middleware.go b/pkg/members/middleware/recovering_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..ec6db9f480d35215c47bff6fceef8c0e01a77447
--- /dev/null
+++ b/pkg/members/middleware/recovering_middleware.go
@@ -0,0 +1,103 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/recovery
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/members -i Members -t ../../../assets/templates/middleware/recovery -o recovering_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+
+	"git.perx.ru/perxis/perxis-go/pkg/members"
+	"go.uber.org/zap"
+)
+
+// recoveringMiddleware implements members.Members that is instrumented with logging
+type recoveringMiddleware struct {
+	logger *zap.Logger
+	next   members.Members
+}
+
+// RecoveringMiddleware instruments an implementation of the members.Members with simple logging
+func RecoveringMiddleware(logger *zap.Logger) Middleware {
+	return func(next members.Members) members.Members {
+		return &recoveringMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *recoveringMiddleware) Get(ctx context.Context, orgId string, userId string) (role members.Role, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Get(ctx, orgId, userId)
+}
+
+func (m *recoveringMiddleware) ListMembers(ctx context.Context, orgId string) (members []*members.Member, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.ListMembers(ctx, orgId)
+}
+
+func (m *recoveringMiddleware) ListOrganizations(ctx context.Context, userId string) (organizations []*members.Member, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.ListOrganizations(ctx, userId)
+}
+
+func (m *recoveringMiddleware) Remove(ctx context.Context, orgId string, userId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Remove(ctx, orgId, userId)
+}
+
+func (m *recoveringMiddleware) RemoveAll(ctx context.Context, orgId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.RemoveAll(ctx, orgId)
+}
+
+func (m *recoveringMiddleware) Set(ctx context.Context, orgId string, userId string, role members.Role) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Set(ctx, orgId, userId, role)
+}
diff --git a/pkg/optional/optional.go b/pkg/optional/optional.go
new file mode 100644
index 0000000000000000000000000000000000000000..94e89bf6a04708abf853f2e8aaf8d7dbd9e99371
--- /dev/null
+++ b/pkg/optional/optional.go
@@ -0,0 +1,10 @@
+package optional
+
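+// True and False are ready-to-use *bool values for APIs that take optional booleans.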
+var (
+	True  *bool = Bool(true)
+	False *bool = Bool(false)
+)
+
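+// Bool returns a pointer to v. It is useful where a *bool is needed to distinguish
+// an unset value from an explicit false.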
+func Bool(v bool) *bool {
+	return &v
+}
diff --git a/pkg/organizations/middleware/caching_middleware.go b/pkg/organizations/middleware/caching_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..2017c9913b41ae8fbeb05e116abdda037fb5667e
--- /dev/null
+++ b/pkg/organizations/middleware/caching_middleware.go
@@ -0,0 +1,62 @@
+package service
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	"git.perx.ru/perxis/perxis-go/pkg/options"
+	service "git.perx.ru/perxis/perxis-go/pkg/organizations"
+)
+
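+// CachingMiddleware caches organizations returned by Get under their ID; Update and
+// Delete invalidate the cached entry, while Create and Find are passed through uncached.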
+func CachingMiddleware(cache *cache.Cache) Middleware {
+	return func(next service.Organizations) service.Organizations {
+		return &cachingMiddleware{
+			cache: cache,
+			next:  next,
+		}
+	}
+}
+
+type cachingMiddleware struct {
+	cache *cache.Cache
+	next  service.Organizations
+}
+
+func (m cachingMiddleware) Create(ctx context.Context, org *service.Organization) (organization *service.Organization, err error) {
+	return m.next.Create(ctx, org)
+}
+
+func (m cachingMiddleware) Get(ctx context.Context, orgId string) (organization *service.Organization, err error) {
+
+	value, e := m.cache.Get(orgId)
+	if e == nil {
+		return value.(*service.Organization), err
+	}
+	organization, err = m.next.Get(ctx, orgId)
+	if err == nil {
+		m.cache.Set(orgId, organization)
+	}
+	return organization, err
+}
+
+func (m cachingMiddleware) Update(ctx context.Context, org *service.Organization) (err error) {
+
+	err = m.next.Update(ctx, org)
+	if err == nil {
+		m.cache.Remove(org.ID)
+	}
+	return err
+}
+
+func (m cachingMiddleware) Delete(ctx context.Context, orgId string) (err error) {
+
+	err = m.next.Delete(ctx, orgId)
+	if err == nil {
+		m.cache.Remove(orgId)
+	}
+	return err
+}
+
+func (m cachingMiddleware) Find(ctx context.Context, filter *service.Filter, opts *options.FindOptions) (organizations []*service.Organization, total int, err error) {
+	return m.next.Find(ctx, filter, opts)
+}
diff --git a/pkg/organizations/middleware/caching_middleware_test.go b/pkg/organizations/middleware/caching_middleware_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..59248ded4d85f904bf67e21956ad2df355706cdf
--- /dev/null
+++ b/pkg/organizations/middleware/caching_middleware_test.go
@@ -0,0 +1,119 @@
+package service
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/organizations"
+	mocksorgs "git.perx.ru/perxis/perxis-go/pkg/organizations/mocks"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestOrganizationsCache(t *testing.T) {
+
+	const (
+		orgId = "orgId"
+		size  = 5
+		ttl   = 20 * time.Millisecond
+	)
+
+	errNotFound := errors.NotFound(errors.New("not found"))
+
+	ctx := context.Background()
+
+	t.Run("Get from cache", func(t *testing.T) {
+		orgs := &mocksorgs.Organizations{}
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(orgs)
+
+		orgs.On("Get", mock.Anything, orgId).Return(&organizations.Organization{ID: orgId, Name: "Organization"}, nil).Once()
+
+		v1, err := svc.Get(ctx, orgId)
+		require.NoError(t, err)
+
+		v2, err := svc.Get(ctx, orgId)
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Expected the object to be served from the cache.")
+
+		orgs.AssertExpectations(t)
+	})
+
+	t.Run("Invalidate cache", func(t *testing.T) {
+		t.Run("After Update", func(t *testing.T) {
+			orgs := &mocksorgs.Organizations{}
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(orgs)
+
+			orgs.On("Get", mock.Anything, orgId).Return(&organizations.Organization{ID: orgId, Name: "Organization"}, nil).Once()
+
+			v1, err := svc.Get(ctx, orgId)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, orgId)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be served from the cache.")
+
+			orgs.On("Update", mock.Anything, mock.Anything).Return(nil).Once()
+			err = svc.Update(ctx, &organizations.Organization{ID: orgId, Name: "OrganizationUPD"})
+			require.NoError(t, err)
+
+			orgs.On("Get", mock.Anything, orgId).Return(&organizations.Organization{ID: orgId, Name: "OrganizationUPD"}, nil).Once()
+
+			v3, err := svc.Get(ctx, orgId)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v3, "Expected the cache entry to be invalidated after the update and the object to be fetched from the service again.")
+
+			orgs.AssertExpectations(t)
+		})
+
+		t.Run("After Delete", func(t *testing.T) {
+			orgs := &mocksorgs.Organizations{}
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(orgs)
+
+			orgs.On("Get", mock.Anything, orgId).Return(&organizations.Organization{ID: orgId, Name: "Organization"}, nil).Once()
+
+			v1, err := svc.Get(ctx, orgId)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, orgId)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be served from the cache.")
+
+			orgs.On("Delete", mock.Anything, mock.Anything).Return(nil).Once()
+			err = svc.Delete(ctx, orgId)
+			require.NoError(t, err)
+
+			orgs.On("Get", mock.Anything, orgId).Return(nil, errNotFound).Once()
+
+			_, err = svc.Get(ctx, orgId)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the cache entry to be invalidated after the delete and the not-found error to come from the service.")
+
+			orgs.AssertExpectations(t)
+		})
+
+		t.Run("After TTL expired", func(t *testing.T) {
+			orgs := &mocksorgs.Organizations{}
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(orgs)
+
+			orgs.On("Get", mock.Anything, orgId).Return(&organizations.Organization{ID: orgId, Name: "Organization"}, nil).Once()
+
+			v1, err := svc.Get(ctx, orgId)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, orgId)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be served from the cache.")
+
+			time.Sleep(2 * ttl)
+
+			orgs.On("Get", mock.Anything, orgId).Return(&organizations.Organization{ID: orgId, Name: "Organization"}, nil).Once()
+
+			v3, err := svc.Get(ctx, orgId)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v3, "Expected the cache entry to be invalidated and the object to be fetched from the service again.")
+
+			orgs.AssertExpectations(t)
+		})
+	})
+}
diff --git a/pkg/organizations/middleware/error_logging_middleware.go b/pkg/organizations/middleware/error_logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..c9631f9144db6244044379219250ca334893d0d6
--- /dev/null
+++ b/pkg/organizations/middleware/error_logging_middleware.go
@@ -0,0 +1,81 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/error_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/organizations -i Organizations -t ../../../assets/templates/middleware/error_log -o error_logging_middleware.go -l ""
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/options"
+	"git.perx.ru/perxis/perxis-go/pkg/organizations"
+	"go.uber.org/zap"
+)
+
+// errorLoggingMiddleware implements organizations.Organizations that is instrumented with logging
+type errorLoggingMiddleware struct {
+	logger *zap.Logger
+	next   organizations.Organizations
+}
+
+// ErrorLoggingMiddleware instruments an implementation of the organizations.Organizations with simple logging
+func ErrorLoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next organizations.Organizations) organizations.Organizations {
+		return &errorLoggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *errorLoggingMiddleware) Create(ctx context.Context, org *organizations.Organization) (created *organizations.Organization, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Create(ctx, org)
+}
+
+func (m *errorLoggingMiddleware) Delete(ctx context.Context, orgId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Delete(ctx, orgId)
+}
+
+func (m *errorLoggingMiddleware) Find(ctx context.Context, filter *organizations.Filter, opts *options.FindOptions) (orgs []*organizations.Organization, total int, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Find(ctx, filter, opts)
+}
+
+func (m *errorLoggingMiddleware) Get(ctx context.Context, orgId string) (org *organizations.Organization, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Get(ctx, orgId)
+}
+
+func (m *errorLoggingMiddleware) Update(ctx context.Context, org *organizations.Organization) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Update(ctx, org)
+}
diff --git a/pkg/organizations/middleware/logging_middleware.go b/pkg/organizations/middleware/logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..6f33296d5cecbee19be4bff65da4280486cd1958
--- /dev/null
+++ b/pkg/organizations/middleware/logging_middleware.go
@@ -0,0 +1,215 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/access_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/organizations -i Organizations -t ../../../assets/templates/middleware/access_log -o logging_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/auth"
+	"git.perx.ru/perxis/perxis-go/pkg/options"
+	"git.perx.ru/perxis/perxis-go/pkg/organizations"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// loggingMiddleware implements organizations.Organizations that is instrumented with logging
+type loggingMiddleware struct {
+	logger *zap.Logger
+	next   organizations.Organizations
+}
+
+// LoggingMiddleware instruments an implementation of the organizations.Organizations with simple logging
+func LoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next organizations.Organizations) organizations.Organizations {
+		return &loggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *loggingMiddleware) Create(ctx context.Context, org *organizations.Organization) (created *organizations.Organization, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx": ctx,
+		"org": org} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Request", fields...)
+
+	created, err = m.next.Create(ctx, org)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"created": created,
+		"err":     err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Response", fields...)
+
+	return created, err
+}
+
+func (m *loggingMiddleware) Delete(ctx context.Context, orgId string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":   ctx,
+		"orgId": orgId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Request", fields...)
+
+	err = m.next.Delete(ctx, orgId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Find(ctx context.Context, filter *organizations.Filter, opts *options.FindOptions) (orgs []*organizations.Organization, total int, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":    ctx,
+		"filter": filter,
+		"opts":   opts} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Find.Request", fields...)
+
+	orgs, total, err = m.next.Find(ctx, filter, opts)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"orgs":  orgs,
+		"total": total,
+		"err":   err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Find.Response", fields...)
+
+	return orgs, total, err
+}
+
+func (m *loggingMiddleware) Get(ctx context.Context, orgId string) (org *organizations.Organization, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":   ctx,
+		"orgId": orgId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Request", fields...)
+
+	org, err = m.next.Get(ctx, orgId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"org": org,
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Response", fields...)
+
+	return org, err
+}
+
+func (m *loggingMiddleware) Update(ctx context.Context, org *organizations.Organization) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx": ctx,
+		"org": org} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Request", fields...)
+
+	err = m.next.Update(ctx, org)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Response", fields...)
+
+	return err
+}
diff --git a/pkg/organizations/middleware/middleware.go b/pkg/organizations/middleware/middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..fe3c3d645e19cc9a0c46742be61edec6bda3882e
--- /dev/null
+++ b/pkg/organizations/middleware/middleware.go
@@ -0,0 +1,28 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/middleware
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/organizations -i Organizations -t ../../../assets/templates/middleware/middleware -o middleware.go -l ""
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/organizations"
+	"go.uber.org/zap"
+)
+
+type Middleware func(organizations.Organizations) organizations.Organizations
+
+func WithLog(s organizations.Organizations, logger *zap.Logger, log_access bool) organizations.Organizations {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	logger = logger.Named("Organizations")
+	s = ErrorLoggingMiddleware(logger)(s)
+	if log_access {
+		s = LoggingMiddleware(logger)(s)
+	}
+	s = RecoveringMiddleware(logger)(s)
+	return s
+}
diff --git a/pkg/organizations/middleware/recovering_middleware.go b/pkg/organizations/middleware/recovering_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..35f3a6c5f7333f7fa3664c7101d31b72be270d33
--- /dev/null
+++ b/pkg/organizations/middleware/recovering_middleware.go
@@ -0,0 +1,92 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/recovery
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/organizations -i Organizations -t ../../../assets/templates/middleware/recovery -o recovering_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+
+	"git.perx.ru/perxis/perxis-go/pkg/options"
+	"git.perx.ru/perxis/perxis-go/pkg/organizations"
+	"go.uber.org/zap"
+)
+
+// recoveringMiddleware implements organizations.Organizations that is instrumented with logging
+type recoveringMiddleware struct {
+	logger *zap.Logger
+	next   organizations.Organizations
+}
+
+// RecoveringMiddleware instruments an implementation of the organizations.Organizations with simple logging
+func RecoveringMiddleware(logger *zap.Logger) Middleware {
+	return func(next organizations.Organizations) organizations.Organizations {
+		return &recoveringMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *recoveringMiddleware) Create(ctx context.Context, org *organizations.Organization) (created *organizations.Organization, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Create(ctx, org)
+}
+
+func (m *recoveringMiddleware) Delete(ctx context.Context, orgId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Delete(ctx, orgId)
+}
+
+func (m *recoveringMiddleware) Find(ctx context.Context, filter *organizations.Filter, opts *options.FindOptions) (orgs []*organizations.Organization, total int, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Find(ctx, filter, opts)
+}
+
+func (m *recoveringMiddleware) Get(ctx context.Context, orgId string) (org *organizations.Organization, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Get(ctx, orgId)
+}
+
+func (m *recoveringMiddleware) Update(ctx context.Context, org *organizations.Organization) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Update(ctx, org)
+}
diff --git a/pkg/references/field.go b/pkg/references/field.go
new file mode 100644
index 0000000000000000000000000000000000000000..c98ed2298150e012545d55d5364f62c4f44eac1b
--- /dev/null
+++ b/pkg/references/field.go
@@ -0,0 +1,143 @@
+package references
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+
+	"git.perx.ru/perxis/perxis-go/pkg/data"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	"git.perx.ru/perxis/perxis-go/pkg/schema/field"
+)
+
+const ReferenceTypeName = "reference"
+
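+// ReferenceParameters are the per-field settings of the reference type.
+// AllowedCollections limits which collections a reference may point to; entries are
+// matched against the reference collection ID with data.GlobMatch, so glob patterns are allowed.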
+type ReferenceParameters struct {
+	AllowedCollections []string `json:"allowedCollections"`
+}
+
+func (p ReferenceParameters) Type() field.Type { return &ReferenceType{} }
+
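+// Clone returns a copy of the parameters with its own AllowedCollections slice.
+// The reset flag is not used by this type.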
+func (p ReferenceParameters) Clone(reset bool) field.Parameters {
+	if p.AllowedCollections != nil {
+		cols := make([]string, 0, len(p.AllowedCollections))
+		for _, c := range p.AllowedCollections {
+			cols = append(cols, c)
+		}
+		p.AllowedCollections = cols
+	}
+	return &p
+}
+
+type ReferenceType struct{}
+
+func NewReferenceType() *ReferenceType {
+	return &ReferenceType{}
+}
+
+func (t ReferenceType) Name() string { return ReferenceTypeName }
+
+func (t *ReferenceType) NewParameters() field.Parameters {
+	return &ReferenceParameters{}
+}
+
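+// Decode converts the stored map representation ("id", "collection_id", "disabled")
+// into a *Reference. Both "id" and "collection_id" are required non-empty strings.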
+func (t ReferenceType) Decode(_ context.Context, fld *field.Field, v interface{}) (interface{}, error) {
+	if v == nil {
+		return nil, nil
+	}
+
+	r, ok := v.(map[string]interface{})
+	if !ok {
+		return nil, errors.Errorf("ReferenceField decode error: incorrect type: \"%s\", expected \"map[string]interface{}\"", reflect.ValueOf(v).Kind())
+	}
+	id, ok1 := r["id"].(string)
+	if !ok1 || id == "" {
+		return nil, errors.Errorf("ReferenceField decode error: field \"id\" required")
+	}
+	collID, ok2 := r["collection_id"].(string)
+	if !ok2 || collID == "" {
+		return nil, errors.Errorf("ReferenceField decode error: field \"collection_id\" required")
+	}
+	disabled, _ := r["disabled"].(bool)
+
+	return &Reference{ID: id, CollectionID: collID, Disabled: disabled}, nil
+}
+
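+// Encode converts a *Reference back into its map representation for storage.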
+func (t ReferenceType) Encode(_ context.Context, fld *field.Field, v interface{}) (interface{}, error) {
+	if v == nil {
+		return nil, nil
+	}
+
+	val, ok := v.(*Reference)
+
+	if !ok {
+		return nil, errors.Errorf("ReferenceField encode error: incorrect type: \"%s\", expected \"*Reference\"", reflect.ValueOf(v).Kind())
+	}
+	if val == nil {
+		return nil, nil
+	}
+	ref := map[string]interface{}{
+		"id":            val.ID,
+		"collection_id": val.CollectionID,
+		"disabled":      val.Disabled,
+	}
+	return ref, nil
+}
+
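+// PreSave validates the reference before the item is saved: if AllowedCollections is
+// set, the reference must point to one of the allowed collections.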
+func (t *ReferenceType) PreSave(ctx context.Context, f *field.Field, v interface{}, itemCtx *items.Context) (interface{}, bool, error) {
+	params, ok := f.Params.(*ReferenceParameters)
+	if !ok {
+		return nil, false, errors.New("field parameters required")
+	}
+
+	if v == nil {
+		return nil, false, nil
+	}
+	ref, ok := v.(*Reference)
+	if !ok {
+		return nil, false, fmt.Errorf("ReferenceField presave error: incorrect type: \"%s\", expected \"*Reference\"", reflect.ValueOf(v).Kind())
+	}
+
+	// Check that the reference points to one of the allowed collections
+	if len(params.AllowedCollections) > 0 {
+		ok := false
+		for _, allowed := range params.AllowedCollections {
+
+			if data.GlobMatch(ref.CollectionID, allowed) {
+				ok = true
+				break
+			}
+		}
+		if !ok {
+			return nil, false, errors.Errorf("usage of collection \"%s\" not allowed", ref.CollectionID)
+		}
+	}
+
+	return ref, false, nil
+}
+
+// Field creates a new field.Field of the reference type.
+// The ReferenceType must be created via `NewReferenceType` and registered with `field.Register` beforehand (done in init below).
+func Field(allowedColls []string, o ...interface{}) *field.Field {
+	_, ok := field.GetType(ReferenceTypeName)
+	if !ok {
+		panic("field reference type not registered")
+	}
+
+	return field.NewField(&ReferenceParameters{AllowedCollections: allowedColls}, o...)
+}
+
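+// IsEmpty reports whether the value is missing or a *Reference with neither ID nor collection set.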
+func (t *ReferenceType) IsEmpty(v interface{}) bool {
+	if v == nil {
+		return true
+	}
+
+	ref, ok := v.(*Reference)
+
+	return !ok || ref.ID == "" && ref.CollectionID == ""
+}
+
+func init() {
+	field.Register(NewReferenceType())
+}
diff --git a/pkg/references/field_test.go b/pkg/references/field_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..a51cb2ac65a3483cbe58169421734f17b6dce20c
--- /dev/null
+++ b/pkg/references/field_test.go
@@ -0,0 +1,300 @@
+package references
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"testing"
+
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	"git.perx.ru/perxis/perxis-go/pkg/items/mocks"
+	"git.perx.ru/perxis/perxis-go/pkg/schema"
+	"git.perx.ru/perxis/perxis-go/pkg/schema/field"
+	"git.perx.ru/perxis/perxis-go/pkg/schema/validate"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestReferenceField_Decode(t *testing.T) {
+
+	tests := []struct {
+		name    string
+		field   *field.Field
+		data    interface{}
+		want    interface{}
+		wantErr bool
+	}{
+		{
+			"Correct",
+			Field(nil),
+			map[string]interface{}{"collection_id": "media", "id": "11111111"},
+			&Reference{ID: "11111111", CollectionID: "media"},
+			false,
+		},
+		{
+			"Invalid ID",
+			Field(nil),
+			map[string]interface{}{"collection_id": "media", "id": 11111111},
+			"decode error: ReferenceField decode error: field \"id\" required",
+			true,
+		},
+		{
+			"Extra Field",
+			Field(nil),
+			map[string]interface{}{"collection_id": "media", "id": "11111111", "extra": true},
+			&Reference{ID: "11111111", CollectionID: "media"},
+			false,
+		},
+		{
+			"Enabled",
+			Field(nil),
+			map[string]interface{}{"collection_id": "media", "id": "11111111", "disabled": true},
+			&Reference{ID: "11111111", CollectionID: "media", Disabled: true},
+			false,
+		},
+		{
+			"Disabled",
+			Field(nil),
+			map[string]interface{}{"collection_id": "media", "id": "11111111", "disabled": false},
+			&Reference{ID: "11111111", CollectionID: "media", Disabled: false},
+			false,
+		},
+		{
+			"Disabled wrong type",
+			Field(nil),
+			map[string]interface{}{"collection_id": "media", "id": "11111111", "disabled": 3},
+			&Reference{ID: "11111111", CollectionID: "media", Disabled: false},
+			false,
+		},
+		{
+			"Missing Field",
+			Field(nil),
+			map[string]interface{}{"id": "11111111"},
+			"decode error: ReferenceField decode error: field \"collection_id\" required",
+			true,
+		},
+		{
+			"Invalid type",
+			Field(nil),
+			"string",
+			"decode error: ReferenceField decode error: incorrect type: \"string\", expected \"map[string]interface{}\"",
+			true,
+		},
+		{
+			"Invalid element type",
+			Field(nil),
+			[]interface{}{"string"},
+			"decode error: ReferenceField decode error: incorrect type: \"slice\", expected \"map[string]interface{}\"",
+			true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := schema.Decode(nil, tt.field, tt.data)
+			if tt.wantErr {
+				require.Error(t, err)
+				assert.EqualError(t, err, tt.want.(string), fmt.Sprintf("Decode() error = %v, want %v", err, tt.want.(string)))
+			}
+			if !tt.wantErr {
+				require.NoError(t, err)
+				assert.Equal(t, tt.want, got, fmt.Sprintf("Decode() got = %v, want %v", got, tt.want))
+			}
+		})
+	}
+}
+
+func TestReferenceField_Encode(t *testing.T) {
+
+	tests := []struct {
+		name    string
+		field   *field.Field
+		data    interface{}
+		want    interface{}
+		wantErr bool
+	}{
+		{
+			"Correct",
+			Field(nil),
+			&Reference{ID: "11111111", CollectionID: "media"},
+			map[string]interface{}{"collection_id": "media", "id": "11111111", "disabled": false},
+			false,
+		},
+		{
+			"Enabled",
+			Field(nil),
+			&Reference{ID: "11111111", CollectionID: "media", Disabled: true},
+			map[string]interface{}{"collection_id": "media", "id": "11111111", "disabled": true},
+			false,
+		},
+		{
+			"Disabled",
+			Field(nil),
+			&Reference{ID: "11111111", CollectionID: "media", Disabled: false},
+			map[string]interface{}{"collection_id": "media", "id": "11111111", "disabled": false},
+			false,
+		},
+		{
+			"From Map",
+			Field(nil),
+			map[string]interface{}{"id": "11111111", "collection_id": "media"},
+			"encode error: ReferenceField encode error: incorrect type: \"map\", expected \"*Reference\"",
+			true,
+		},
+		{
+			"Invalid type",
+			Field(nil),
+			"string",
+			"encode error: ReferenceField encode error: incorrect type: \"string\", expected \"*Reference\"",
+			true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := schema.Encode(nil, tt.field, tt.data)
+			if tt.wantErr {
+				require.Error(t, err)
+				assert.EqualError(t, err, tt.want.(string), fmt.Sprintf("Encode() error = %v, want %v", err, tt.want.(string)))
+			}
+			if !tt.wantErr {
+				require.NoError(t, err)
+				assert.Equal(t, tt.want, got, fmt.Sprintf("Encode() got = %v, want %v", got, tt.want))
+			}
+		})
+	}
+}
+
+func TestReferenceField_PreSave(t *testing.T) {
+	ctx := context.Background()
+	rt := NewReferenceType()
+
+	t.Run("Nil Value", func(t *testing.T) {
+		svc := &mocks.Items{}
+		itemCtx := &items.Context{Items: svc, SpaceID: "sp", EnvID: "env"}
+		f := Field(nil)
+		_, _, err := rt.PreSave(ctx, f, nil, itemCtx)
+		require.NoError(t, err)
+		svc.AssertExpectations(t)
+	})
+	t.Run("Nil Context", func(t *testing.T) {
+		svc := &mocks.Items{}
+		f := Field(nil)
+		ref := &Reference{
+			ID: "111", CollectionID: "media",
+		}
+		_, _, err := rt.PreSave(ctx, f, ref, nil)
+		require.NoError(t, err)
+		svc.AssertExpectations(t)
+	})
+	t.Run("Referenced Items Exist", func(t *testing.T) {
+		t.Run("Correct", func(t *testing.T) {
+			svc := &mocks.Items{}
+			itemCtx := &items.Context{Items: svc, SpaceID: "sp", EnvID: "env"}
+
+			f := Field(nil)
+			ref := &Reference{
+				ID: "111", CollectionID: "media",
+			}
+			_, _, err := rt.PreSave(ctx, f, ref, itemCtx)
+			require.NoError(t, err)
+			svc.AssertExpectations(t)
+		})
+		t.Run("Item Not Found", func(t *testing.T) {
+			svc := &mocks.Items{}
+
+			itemCtx := &items.Context{Items: svc, SpaceID: "sp", EnvID: "env"}
+
+			f := Field(nil)
+			ref := &Reference{
+				ID: "111", CollectionID: "media",
+			}
+			_, _, err := rt.PreSave(ctx, f, ref, itemCtx)
+			require.NoError(t, err)
+			svc.AssertExpectations(t)
+		})
+	})
+	t.Run("Allowed Types", func(t *testing.T) {
+		t.Run("Correct", func(t *testing.T) {
+			svc := &mocks.Items{}
+
+			itemCtx := &items.Context{Items: svc, SpaceID: "sp", EnvID: "env"}
+
+			f := Field([]string{"media"})
+
+			ref := &Reference{
+				ID: "111", CollectionID: "media",
+			}
+
+			_, _, err := rt.PreSave(ctx, f, ref, itemCtx)
+			require.NoError(t, err)
+			svc.AssertExpectations(t)
+		})
+		t.Run("Not Allowed", func(t *testing.T) {
+			svc := &mocks.Items{}
+			f := Field([]string{"cars", "motorbikes"})
+			itemCtx := &items.Context{Items: svc, SpaceID: "sp", EnvID: "env"}
+			ref := &Reference{
+				ID: "111", CollectionID: "media",
+			}
+			_, _, err := rt.PreSave(ctx, f, ref, itemCtx)
+			require.Error(t, err)
+			assert.Equal(t, "usage of collection \"media\" not allowed", err.Error())
+			svc.AssertExpectations(t)
+		})
+	})
+}
+
+func TestReferenceField_Validate(t *testing.T) {
+	rt := NewReferenceType()
+	field.Register(rt)
+
+	t.Run("Max Elements", func(t *testing.T) {
+		t.Run("Correct", func(t *testing.T) {
+			f := Field(nil, validate.MaxItems(1))
+			refs := []*Reference{
+				{ID: "111", CollectionID: "media"},
+			}
+			err := validate.Validate(nil, f, refs)
+			require.NoError(t, err)
+		})
+		t.Run("Limit exceeded", func(t *testing.T) {
+			f := Field(nil, validate.MaxItems(1))
+			refs := []*Reference{
+				{ID: "111", CollectionID: "media"},
+				{ID: "222", CollectionID: "media"},
+			}
+			err := validate.Validate(nil, f, refs)
+			require.Error(t, err)
+			require.Contains(t, err.Error(), "validation error: maximum elements number is 1")
+		})
+	})
+	t.Run("Required", func(t *testing.T) {
+		t.Run("Correct", func(t *testing.T) {
+			f := Field(nil, validate.Required())
+			ref := &Reference{ID: "111", CollectionID: "media"}
+			err := validate.Validate(nil, f, ref)
+			require.NoError(t, err)
+		})
+		t.Run("Empty", func(t *testing.T) {
+			f := Field(nil, validate.Required())
+			ref := &Reference{}
+			err := validate.Validate(nil, f, ref)
+			require.Error(t, err)
+			require.Contains(t, err.Error(), "validation error: value is required")
+		})
+	})
+}
+
+func TestReference_JSON(t *testing.T) {
+	fld := Field([]string{"col1"}).AddOptions(validate.MaxItems(2))
+
+	b, err := json.MarshalIndent(fld, "", "  ")
+	require.NoError(t, err)
+
+	res := field.NewField(nil)
+	err = json.Unmarshal(b, res)
+	require.NoError(t, err)
+
+	assert.Equal(t, fld, res)
+}
diff --git a/pkg/references/middleware/client_encode_middleware.go b/pkg/references/middleware/client_encode_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..1cd9cb2f22340a5d90feb95bd00accb1bae39e0f
--- /dev/null
+++ b/pkg/references/middleware/client_encode_middleware.go
@@ -0,0 +1,42 @@
+package service
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/collections"
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	"git.perx.ru/perxis/perxis-go/pkg/references"
+)
+
+// ClientEncodeMiddleware performs encode/decode operations on the data passed through the service
+func ClientEncodeMiddleware(colls collections.Collections) Middleware {
+	return func(refs references.References) references.References {
+		return &encodeDecodeMiddleware{
+			next:  refs,
+			colls: colls,
+		}
+	}
+}
+
+type encodeDecodeMiddleware struct {
+	next  references.References
+	colls collections.Collections
+}
+
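+// Get proxies the request to the next service and, for every returned item,
+// fetches its collection via the Collections service and decodes the item
+// against that collection's schema before handing it back to the caller.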
+func (m *encodeDecodeMiddleware) Get(ctx context.Context, spaceId, envId string, refs []*references.Reference) (items []*items.Item, notfound []*references.Reference, err error) {
+	items, notfound, err = m.next.Get(ctx, spaceId, envId, refs)
+	if err == nil && len(items) > 0 {
+		for i, item := range items {
+			col, err := m.colls.Get(ctx, item.SpaceID, item.EnvID, item.CollectionID)
+			if err != nil {
+				return nil, nil, err
+			}
+
+			if item, err = item.Decode(ctx, col.Schema); err != nil {
+				return nil, nil, err
+			}
+			items[i] = item
+		}
+	}
+	return
+}
diff --git a/pkg/references/middleware/error_logging_middleware.go b/pkg/references/middleware/error_logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..9a62947f240842c6afb099a9b03cc966d9bd99b5
--- /dev/null
+++ b/pkg/references/middleware/error_logging_middleware.go
@@ -0,0 +1,41 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/error_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/references -i References -t ../../../assets/templates/middleware/error_log -o error_logging_middleware.go -l ""
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	"git.perx.ru/perxis/perxis-go/pkg/references"
+	"go.uber.org/zap"
+)
+
+// errorLoggingMiddleware implements references.References that is instrumented with logging
+type errorLoggingMiddleware struct {
+	logger *zap.Logger
+	next   references.References
+}
+
+// ErrorLoggingMiddleware instruments an implementation of the references.References with simple logging
+func ErrorLoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next references.References) references.References {
+		return &errorLoggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *errorLoggingMiddleware) Get(ctx context.Context, spaceId string, envId string, references []*references.Reference) (items []*items.Item, notfound []*references.Reference, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Get(ctx, spaceId, envId, references)
+}
diff --git a/pkg/references/middleware/logging_middleware.go b/pkg/references/middleware/logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..62a0e9d5cf26c6ab0d131fff2fd443cffd8fb8f3
--- /dev/null
+++ b/pkg/references/middleware/logging_middleware.go
@@ -0,0 +1,74 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/access_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/references -i References -t ../../../assets/templates/middleware/access_log -o logging_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/auth"
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	"git.perx.ru/perxis/perxis-go/pkg/references"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// loggingMiddleware implements references.References that is instrumented with logging
+type loggingMiddleware struct {
+	logger *zap.Logger
+	next   references.References
+}
+
+// LoggingMiddleware instruments an implementation of the references.References with simple logging
+func LoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next references.References) references.References {
+		return &loggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *loggingMiddleware) Get(ctx context.Context, spaceId string, envId string, references []*references.Reference) (items []*items.Item, notfound []*references.Reference, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":        ctx,
+		"spaceId":    spaceId,
+		"envId":      envId,
+		"references": references} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Request", fields...)
+
+	items, notfound, err = m.next.Get(ctx, spaceId, envId, references)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"items":    items,
+		"notfound": notfound,
+		"err":      err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Response", fields...)
+
+	return items, notfound, err
+}
diff --git a/pkg/references/middleware/middleware.go b/pkg/references/middleware/middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..dfed8dc0c821998df97d414876b6839a9961f6b0
--- /dev/null
+++ b/pkg/references/middleware/middleware.go
@@ -0,0 +1,28 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/middleware
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/references -i References -t ../../../assets/templates/middleware/middleware -o middleware.go -l ""
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/references"
+	"go.uber.org/zap"
+)
+
+type Middleware func(references.References) references.References
+
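+// WithLog wraps the References service with error logging, optional access
+// logging and panic recovery. Recovery is the outermost layer, so panics in
+// any inner middleware or in the service itself are converted into errors.
+// Typical usage: refs = WithLog(refs, logger, true)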
+func WithLog(s references.References, logger *zap.Logger, log_access bool) references.References {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	logger = logger.Named("References")
+	s = ErrorLoggingMiddleware(logger)(s)
+	if log_access {
+		s = LoggingMiddleware(logger)(s)
+	}
+	s = RecoveringMiddleware(logger)(s)
+	return s
+}
diff --git a/pkg/references/middleware/recovering_middleware.go b/pkg/references/middleware/recovering_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..b1a4eb6b629e5a48f7508edbe34f5450d5f52b10
--- /dev/null
+++ b/pkg/references/middleware/recovering_middleware.go
@@ -0,0 +1,44 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/recovery
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/references -i References -t ../../../assets/templates/middleware/recovery -o recovering_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	"git.perx.ru/perxis/perxis-go/pkg/references"
+	"go.uber.org/zap"
+)
+
+// recoveringMiddleware implements references.References that is instrumented with logging
+type recoveringMiddleware struct {
+	logger *zap.Logger
+	next   references.References
+}
+
+// RecoveringMiddleware instruments an implementation of the references.References with simple logging
+func RecoveringMiddleware(logger *zap.Logger) Middleware {
+	return func(next references.References) references.References {
+		return &recoveringMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *recoveringMiddleware) Get(ctx context.Context, spaceId string, envId string, references []*references.Reference) (items []*items.Item, notfound []*references.Reference, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Get(ctx, spaceId, envId, references)
+}
diff --git a/pkg/references/mocks/References.go b/pkg/references/mocks/References.go
new file mode 100644
index 0000000000000000000000000000000000000000..c1ead2b49f11fd6492e3ce8f9eb5319cf3b208bc
--- /dev/null
+++ b/pkg/references/mocks/References.go
@@ -0,0 +1,63 @@
+// Code generated by mockery v2.14.0. DO NOT EDIT.
+
+package mocks
+
+import (
+	context "context"
+
+	items "git.perx.ru/perxis/perxis-go/pkg/items"
+	references "git.perx.ru/perxis/perxis-go/pkg/references"
+	mock "github.com/stretchr/testify/mock"
+)
+
+// References is an autogenerated mock type for the References type
+type References struct {
+	mock.Mock
+}
+
+// Get provides a mock function with given fields: ctx, spaceId, envId, _a3
+func (_m *References) Get(ctx context.Context, spaceId string, envId string, _a3 []*references.Reference) ([]*items.Item, []*references.Reference, error) {
+	ret := _m.Called(ctx, spaceId, envId, _a3)
+
+	var r0 []*items.Item
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, []*references.Reference) []*items.Item); ok {
+		r0 = rf(ctx, spaceId, envId, _a3)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).([]*items.Item)
+		}
+	}
+
+	var r1 []*references.Reference
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, []*references.Reference) []*references.Reference); ok {
+		r1 = rf(ctx, spaceId, envId, _a3)
+	} else {
+		if ret.Get(1) != nil {
+			r1 = ret.Get(1).([]*references.Reference)
+		}
+	}
+
+	var r2 error
+	if rf, ok := ret.Get(2).(func(context.Context, string, string, []*references.Reference) error); ok {
+		r2 = rf(ctx, spaceId, envId, _a3)
+	} else {
+		r2 = ret.Error(2)
+	}
+
+	return r0, r1, r2
+}
+
+type mockConstructorTestingTNewReferences interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewReferences creates a new instance of References. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewReferences(t mockConstructorTestingTNewReferences) *References {
+	mock := &References{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/pkg/references/reference.go b/pkg/references/reference.go
new file mode 100644
index 0000000000000000000000000000000000000000..12dac1865a17a39ac6876aa4390f89d408e4f76c
--- /dev/null
+++ b/pkg/references/reference.go
@@ -0,0 +1,102 @@
+package references
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	pb "git.perx.ru/perxis/perxis-go/proto/references"
+	"go.mongodb.org/mongo-driver/bson"
+)
+
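+// Reference points at an item stored in another collection: CollectionID names
+// the target collection and ID the item within it. Disabled marks the link as
+// inactive; such references are treated as not valid (see IsValid).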
+type Reference struct {
+	ID           string `json:"id" bson:"id" mapstructure:"id"`
+	CollectionID string `json:"collection_id" bson:"collection_id" mapstructure:"collection_id"`
+	Disabled     bool   `json:"disabled,omitempty" bson:"disabled,omitempty" mapstructure:"disabled"`
+}
+
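+// MarshalBSON serializes the reference as an ordered document and omits the
+// "disabled" field entirely when it is false, keeping stored documents compact.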
+func (r *Reference) MarshalBSON() ([]byte, error) {
+	d := bson.D{
+		{Key: "id", Value: r.ID},
+		{Key: "collection_id", Value: r.CollectionID},
+	}
+
+	if r.Disabled {
+		d = append(d, bson.E{Key: "disabled", Value: true})
+	}
+
+	return bson.Marshal(d)
+}
+
+func ReferenceFromPB(refPB *pb.Reference) *Reference {
+	return &Reference{
+		ID:           refPB.Id,
+		CollectionID: refPB.CollectionId,
+		Disabled:     refPB.Disabled,
+	}
+}
+
+func ReferenceFromItem(item *items.Item) *Reference {
+	if item == nil {
+		return nil
+	}
+
+	return &Reference{
+		ID:           item.ID,
+		CollectionID: item.CollectionID,
+	}
+}
+
+func ReferenceToPB(ref *Reference) *pb.Reference {
+	return &pb.Reference{
+		Id:           ref.ID,
+		CollectionId: ref.CollectionID,
+		Disabled:     ref.Disabled,
+	}
+}
+
+func ReferenceListFromPB(listPB []*pb.Reference) []*Reference {
+	list := make([]*Reference, 0, len(listPB))
+	for _, refPB := range listPB {
+		list = append(list, ReferenceFromPB(refPB))
+	}
+	return list
+}
+
+func (r *Reference) String() string {
+	if r == nil {
+		return ""
+	}
+	return r.CollectionID + "." + r.ID
+}
+
+func (r *Reference) Equal(r1 *Reference) bool {
+	return r == r1 || r != nil && r1 != nil && r.CollectionID == r1.CollectionID && r.ID == r1.ID && r.Disabled == r1.Disabled
+}
+
+func EqualArrays(sr1, sr2 []*Reference) bool {
+	if len(sr1) != len(sr2) {
+		return false
+	}
+	for i, r := range sr1 {
+		if !r.Equal(sr2[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+func (r *Reference) IsValid() bool {
+	return r != nil && r.ID != "" && r.CollectionID != "" && !r.Disabled
+}
+
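+// Fetch returns the value of the attribute named by i ("id", "collection_id"
+// or "disabled") and panics on any other name.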
+func (r *Reference) Fetch(i interface{}) interface{} {
+	p, _ := i.(string)
+	switch p {
+	case "id":
+		return r.ID
+	case "collection_id":
+		return r.CollectionID
+	case "disabled":
+		return r.Disabled
+	default:
+		panic("unknown parameter")
+	}
+}
diff --git a/pkg/references/service.go b/pkg/references/service.go
new file mode 100644
index 0000000000000000000000000000000000000000..14e2e8eb6f6eb0558033046840a5e9c2cda7f387
--- /dev/null
+++ b/pkg/references/service.go
@@ -0,0 +1,14 @@
+package references
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+)
+
+// @microgen grpc
+// @protobuf git.perx.ru/perxis/perxis-go/proto/references
+// @grpc-addr content.references.References
+type References interface {
+	Get(ctx context.Context, spaceId, envId string, references []*Reference) (items []*items.Item, notfound []*Reference, err error)
+}
diff --git a/pkg/references/transport/client.microgen.go b/pkg/references/transport/client.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..a79d9192b857e3360087ddae86298e880df93be9
--- /dev/null
+++ b/pkg/references/transport/client.microgen.go
@@ -0,0 +1,29 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transport
+
+import (
+	"context"
+	"errors"
+
+	items "git.perx.ru/perxis/perxis-go/pkg/items"
+	references "git.perx.ru/perxis/perxis-go/pkg/references"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+func (set EndpointsSet) Get(arg0 context.Context, arg1 string, arg2 string, arg3 []*references.Reference) (res0 []*items.Item, res1 []*references.Reference, res2 error) {
+	request := GetRequest{
+		EnvId:      arg2,
+		References: arg3,
+		SpaceId:    arg1,
+	}
+	response, res2 := set.GetEndpoint(arg0, &request)
+	if res2 != nil {
+		if e, ok := status.FromError(res2); ok || e.Code() == codes.Internal || e.Code() == codes.Unknown {
+			res2 = errors.New(e.Message())
+		}
+		return
+	}
+	return response.(*GetResponse).Items, response.(*GetResponse).Notfound, res2
+}
diff --git a/pkg/references/transport/endpoints.microgen.go b/pkg/references/transport/endpoints.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..737752a45beef781518c38424e8fe8c31ac47d6a
--- /dev/null
+++ b/pkg/references/transport/endpoints.microgen.go
@@ -0,0 +1,10 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transport
+
+import endpoint "github.com/go-kit/kit/endpoint"
+
+// EndpointsSet implements References API and used for transport purposes.
+type EndpointsSet struct {
+	GetEndpoint endpoint.Endpoint
+}
diff --git a/pkg/references/transport/exchanges.microgen.go b/pkg/references/transport/exchanges.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..bd31aa2d3f78b6e0d321eb7ce602501056842d16
--- /dev/null
+++ b/pkg/references/transport/exchanges.microgen.go
@@ -0,0 +1,20 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transport
+
+import (
+	items "git.perx.ru/perxis/perxis-go/pkg/items"
+	references "git.perx.ru/perxis/perxis-go/pkg/references"
+)
+
+type (
+	GetRequest struct {
+		SpaceId    string                  `json:"space_id"`
+		EnvId      string                  `json:"env_id"`
+		References []*references.Reference `json:"references"`
+	}
+	GetResponse struct {
+		Items    []*items.Item           `json:"items"`
+		Notfound []*references.Reference `json:"notfound"`
+	}
+)
diff --git a/pkg/references/transport/grpc/client.microgen.go b/pkg/references/transport/grpc/client.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..f475ea9cb27f040fbcdb32a06b8893f7975c46ec
--- /dev/null
+++ b/pkg/references/transport/grpc/client.microgen.go
@@ -0,0 +1,23 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transportgrpc
+
+import (
+	transport "git.perx.ru/perxis/perxis-go/pkg/references/transport"
+	pb "git.perx.ru/perxis/perxis-go/proto/references"
+	grpckit "github.com/go-kit/kit/transport/grpc"
+	grpc "google.golang.org/grpc"
+)
+
+func NewGRPCClient(conn *grpc.ClientConn, addr string, opts ...grpckit.ClientOption) transport.EndpointsSet {
+	if addr == "" {
+		addr = "content.references.References"
+	}
+	return transport.EndpointsSet{GetEndpoint: grpckit.NewClient(
+		conn, addr, "Get",
+		_Encode_Get_Request,
+		_Decode_Get_Response,
+		pb.GetResponse{},
+		opts...,
+	).Endpoint()}
+}
diff --git a/pkg/references/transport/grpc/protobuf_endpoint_converters.microgen.go b/pkg/references/transport/grpc/protobuf_endpoint_converters.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..d47c0574bb55f654778bafac1035fbb6ba018d0d
--- /dev/null
+++ b/pkg/references/transport/grpc/protobuf_endpoint_converters.microgen.go
@@ -0,0 +1,82 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+// Please, do not change functions names!
+package transportgrpc
+
+import (
+	"context"
+	"errors"
+
+	transport "git.perx.ru/perxis/perxis-go/pkg/references/transport"
+	pb "git.perx.ru/perxis/perxis-go/proto/references"
+)
+
+func _Encode_Get_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil GetRequest")
+	}
+	req := request.(*transport.GetRequest)
+	reqReferences, err := ListPtrReferenceToProto(req.References)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.GetRequest{
+		EnvId:      req.EnvId,
+		References: reqReferences,
+		SpaceId:    req.SpaceId,
+	}, nil
+}
+
+func _Encode_Get_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil GetResponse")
+	}
+	resp := response.(*transport.GetResponse)
+	respItems, err := ListPtrItemsItemToProto(resp.Items)
+	if err != nil {
+		return nil, err
+	}
+	respNotfound, err := ListPtrReferenceToProto(resp.Notfound)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.GetResponse{
+		Items:    respItems,
+		Notfound: respNotfound,
+	}, nil
+}
+
+func _Decode_Get_Request(ctx context.Context, request interface{}) (interface{}, error) {
+	if request == nil {
+		return nil, errors.New("nil GetRequest")
+	}
+	req := request.(*pb.GetRequest)
+	reqReferences, err := ProtoToListPtrReference(req.References)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.GetRequest{
+		EnvId:      string(req.EnvId),
+		References: reqReferences,
+		SpaceId:    string(req.SpaceId),
+	}, nil
+}
+
+func _Decode_Get_Response(ctx context.Context, response interface{}) (interface{}, error) {
+	if response == nil {
+		return nil, errors.New("nil GetResponse")
+	}
+	resp := response.(*pb.GetResponse)
+	respItems, err := ProtoToListPtrItemsItem(resp.Items)
+	if err != nil {
+		return nil, err
+	}
+	respNotfound, err := ProtoToListPtrReference(resp.Notfound)
+	if err != nil {
+		return nil, err
+	}
+	return &transport.GetResponse{
+		Items:    respItems,
+		Notfound: respNotfound,
+	}, nil
+}
diff --git a/pkg/references/transport/grpc/protobuf_type_converters.microgen.go b/pkg/references/transport/grpc/protobuf_type_converters.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..549d11725e085c4a35aaa047a2a4b1e9c43355df
--- /dev/null
+++ b/pkg/references/transport/grpc/protobuf_type_converters.microgen.go
@@ -0,0 +1,71 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+// It is better for you if you do not change functions names!
+// This file will never be overwritten.
+package transportgrpc
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	itemstransportgrpc "git.perx.ru/perxis/perxis-go/pkg/items/transport/grpc"
+	service "git.perx.ru/perxis/perxis-go/pkg/references"
+	itemspb "git.perx.ru/perxis/perxis-go/proto/items"
+	pb "git.perx.ru/perxis/perxis-go/proto/references"
+)
+
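+// PtrItemToReference converts a service Reference into its protobuf form.
+// Note that only ID and CollectionID are copied here; the Disabled flag is not
+// propagated by this converter.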
+func PtrItemToReference(ref *service.Reference) (*pb.Reference, error) {
+	if ref == nil {
+		return nil, nil
+	}
+
+	protoRef := &pb.Reference{
+		Id:           ref.ID,
+		CollectionId: ref.CollectionID,
+	}
+
+	return protoRef, nil
+}
+
+func ListPtrReferenceToProto(refs []*service.Reference) ([]*pb.Reference, error) {
+	protoRefs := make([]*pb.Reference, 0, len(refs))
+	for _, ref := range refs {
+		pr, err := PtrItemToReference(ref)
+		if err != nil {
+			return nil, err
+		}
+		protoRefs = append(protoRefs, pr)
+	}
+	return protoRefs, nil
+}
+
+func ProtoToPtrReference(protoRef *pb.Reference) (*service.Reference, error) {
+	if protoRef == nil {
+		return nil, nil
+	}
+
+	ref := &service.Reference{
+		ID:           protoRef.Id,
+		CollectionID: protoRef.CollectionId,
+	}
+
+	return ref, nil
+}
+
+func ProtoToListPtrReference(protoRefs []*pb.Reference) ([]*service.Reference, error) {
+	refs := make([]*service.Reference, 0, len(protoRefs))
+	for _, ref := range protoRefs {
+		r, err := ProtoToPtrReference(ref)
+		if err != nil {
+			return nil, err
+		}
+		refs = append(refs, r)
+	}
+	return refs, nil
+}
+
+func ListPtrItemsItemToProto(items []*items.Item) ([]*itemspb.Item, error) {
+	return itemstransportgrpc.ListPtrItemToProto(items)
+}
+
+func ProtoToListPtrItemsItem(protoItems []*itemspb.Item) ([]*items.Item, error) {
+	return itemstransportgrpc.ProtoToListPtrItem(protoItems)
+}
diff --git a/pkg/references/transport/grpc/server.microgen.go b/pkg/references/transport/grpc/server.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..960cf20350072d36498b2bb57edb8c594d193d28
--- /dev/null
+++ b/pkg/references/transport/grpc/server.microgen.go
@@ -0,0 +1,34 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+// DO NOT EDIT.
+package transportgrpc
+
+import (
+	transport "git.perx.ru/perxis/perxis-go/pkg/references/transport"
+	pb "git.perx.ru/perxis/perxis-go/proto/references"
+	grpc "github.com/go-kit/kit/transport/grpc"
+	context "golang.org/x/net/context"
+)
+
+type referencesServer struct {
+	get grpc.Handler
+
+	pb.UnsafeReferencesServer
+}
+
+func NewGRPCServer(endpoints *transport.EndpointsSet, opts ...grpc.ServerOption) pb.ReferencesServer {
+	return &referencesServer{get: grpc.NewServer(
+		endpoints.GetEndpoint,
+		_Decode_Get_Request,
+		_Encode_Get_Response,
+		opts...,
+	)}
+}
+
+func (S *referencesServer) Get(ctx context.Context, req *pb.GetRequest) (*pb.GetResponse, error) {
+	_, resp, err := S.get.ServeGRPC(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.GetResponse), nil
+}
diff --git a/pkg/references/transport/server.microgen.go b/pkg/references/transport/server.microgen.go
new file mode 100644
index 0000000000000000000000000000000000000000..f66b4c6b4b8ff529bb498f11cec2bfce8f79c2bf
--- /dev/null
+++ b/pkg/references/transport/server.microgen.go
@@ -0,0 +1,25 @@
+// Code generated by microgen 0.9.1. DO NOT EDIT.
+
+package transport
+
+import (
+	"context"
+
+	references "git.perx.ru/perxis/perxis-go/pkg/references"
+	endpoint "github.com/go-kit/kit/endpoint"
+)
+
+func Endpoints(svc references.References) EndpointsSet {
+	return EndpointsSet{GetEndpoint: GetEndpoint(svc)}
+}
+
+func GetEndpoint(svc references.References) endpoint.Endpoint {
+	return func(arg0 context.Context, request interface{}) (interface{}, error) {
+		req := request.(*GetRequest)
+		res0, res1, res2 := svc.Get(arg0, req.SpaceId, req.EnvId, req.References)
+		return &GetResponse{
+			Items:    res0,
+			Notfound: res1,
+		}, res2
+	}
+}
diff --git a/pkg/roles/middleware/caching_middleware.go b/pkg/roles/middleware/caching_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..19bdfe6b890806a98ac4aed81485f2f5314020b6
--- /dev/null
+++ b/pkg/roles/middleware/caching_middleware.go
@@ -0,0 +1,80 @@
+package service
+
+import (
+	"context"
+	"strings"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	service "git.perx.ru/perxis/perxis-go/pkg/roles"
+)
+
+func makeKey(ss ...string) string {
+	return strings.Join(ss, "-")
+}
+
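+// CachingMiddleware caches individual roles under the "<spaceId>-<roleId>" key
+// and role lists under the space ID. Create, Update and Delete invalidate the
+// affected entries so subsequent reads go back to the underlying service.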
+func CachingMiddleware(cache *cache.Cache) Middleware {
+	return func(next service.Roles) service.Roles {
+		return &cachingMiddleware{
+			cache: cache,
+			next:  next,
+		}
+	}
+}
+
+type cachingMiddleware struct {
+	cache *cache.Cache
+	next  service.Roles
+}
+
+func (m cachingMiddleware) Create(ctx context.Context, role *service.Role) (rl *service.Role, err error) {
+	rl, err = m.next.Create(ctx, role)
+	if err == nil {
+		m.cache.Remove(rl.SpaceID)
+	}
+	return rl, err
+}
+
+func (m cachingMiddleware) Get(ctx context.Context, spaceId string, roleId string) (rl *service.Role, err error) {
+	key := makeKey(spaceId, roleId)
+	value, e := m.cache.Get(key)
+	if e == nil {
+		return value.(*service.Role), err
+	}
+	rl, err = m.next.Get(ctx, spaceId, roleId)
+	if err == nil {
+		m.cache.Set(key, rl)
+	}
+	return rl, err
+}
+
+func (m cachingMiddleware) List(ctx context.Context, spaceId string) (roles []*service.Role, err error) {
+	value, e := m.cache.Get(spaceId)
+	if e == nil {
+		return value.([]*service.Role), err
+	}
+	roles, err = m.next.List(ctx, spaceId)
+	if err == nil {
+		m.cache.Set(spaceId, roles)
+	}
+	return roles, err
+}
+
+func (m cachingMiddleware) Update(ctx context.Context, role *service.Role) (err error) {
+	err = m.next.Update(ctx, role)
+	if err == nil {
+		key := makeKey(role.SpaceID, role.ID)
+		m.cache.Remove(key)
+		m.cache.Remove(role.SpaceID)
+	}
+	return err
+}
+
+func (m cachingMiddleware) Delete(ctx context.Context, spaceId string, roleId string) (err error) {
+	err = m.next.Delete(ctx, spaceId, roleId)
+	if err == nil {
+		key := makeKey(spaceId, roleId)
+		m.cache.Remove(key)
+		m.cache.Remove(spaceId)
+	}
+	return err
+}
diff --git a/pkg/roles/middleware/caching_middleware_test.go b/pkg/roles/middleware/caching_middleware_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..bb5496b55fae164ea3681e644c7625ab9a2b9407
--- /dev/null
+++ b/pkg/roles/middleware/caching_middleware_test.go
@@ -0,0 +1,201 @@
+package service
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/roles"
+	rsmocks "git.perx.ru/perxis/perxis-go/pkg/roles/mocks"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestRolesCache(t *testing.T) {
+
+	const (
+		roleID  = "roleID"
+		spaceID = "spaceID"
+		size    = 5
+		ttl     = 20 * time.Millisecond
+	)
+
+	errNotFound := errors.NotFound(errors.New("not found"))
+
+	ctx := context.Background()
+
+	t.Run("Get from cache", func(t *testing.T) {
+		rl := &rsmocks.Roles{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(rl)
+
+		rl.On("Get", mock.Anything, spaceID, roleID).Return(&roles.Role{ID: roleID, SpaceID: spaceID, Description: "Role"}, nil).Once()
+
+		v1, err := svc.Get(ctx, spaceID, roleID)
+		require.NoError(t, err)
+
+		v2, err := svc.Get(ctx, spaceID, roleID)
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Expected the object to be served from the cache on the repeated request.")
+
+		rl.AssertExpectations(t)
+	})
+
+	t.Run("List from cache", func(t *testing.T) {
+		rl := &rsmocks.Roles{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(rl)
+
+		rl.On("List", mock.Anything, spaceID).Return([]*roles.Role{{ID: roleID, SpaceID: spaceID, Description: "Role"}}, nil).Once()
+
+		vl1, err := svc.List(ctx, spaceID)
+		require.NoError(t, err)
+
+		vl2, err := svc.List(ctx, spaceID)
+		require.NoError(t, err)
+		assert.Same(t, vl1[0], vl2[0], "Expected the objects to be served from the cache on the repeated request.")
+
+		rl.AssertExpectations(t)
+	})
+
+	t.Run("Invalidate cache", func(t *testing.T) {
+		t.Run("After Update", func(t *testing.T) {
+			rl := &rsmocks.Roles{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(rl)
+
+			rl.On("Get", mock.Anything, spaceID, roleID).Return(&roles.Role{ID: roleID, SpaceID: spaceID, Description: "Role"}, nil).Once()
+			rl.On("List", mock.Anything, spaceID).Return([]*roles.Role{{ID: roleID, SpaceID: spaceID, Description: "Role"}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, roleID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, roleID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be served from the cache on the repeated request.")
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected the objects to be served from the cache on the repeated request.")
+
+			rl.On("Update", mock.Anything, mock.Anything).Return(nil).Once()
+
+			err = svc.Update(ctx, &roles.Role{ID: roleID, SpaceID: spaceID, Description: "RoleUPD"})
+			require.NoError(t, err)
+
+			rl.On("Get", mock.Anything, spaceID, roleID).Return(&roles.Role{ID: roleID, SpaceID: spaceID, Description: "RoleUPD"}, nil).Once()
+			rl.On("List", mock.Anything, spaceID).Return([]*roles.Role{{ID: roleID, SpaceID: spaceID, Description: "RoleUPD"}}, nil).Once()
+
+			v3, err := svc.Get(ctx, spaceID, roleID)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v3, "Expected the cached object to be invalidated after the update and fetched from the service again.")
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.NotSame(t, vl2[0], vl3[0], "Expected the cached list to be invalidated after the object was updated.")
+
+			rl.AssertExpectations(t)
+		})
+
+		t.Run("After Delete", func(t *testing.T) {
+			rl := &rsmocks.Roles{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(rl)
+
+			rl.On("Get", mock.Anything, spaceID, roleID).Return(&roles.Role{ID: roleID, SpaceID: spaceID, Description: "Role"}, nil).Once()
+			rl.On("List", mock.Anything, spaceID).Return([]*roles.Role{{ID: roleID, SpaceID: spaceID, Description: "Role"}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, roleID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, roleID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be served from the cache on the repeated request.")
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected the objects to be served from the cache on the repeated request.")
+
+			rl.On("Delete", mock.Anything, spaceID, roleID).Return(nil).Once()
+
+			err = svc.Delete(ctx, spaceID, roleID)
+			require.NoError(t, err)
+
+			rl.On("Get", mock.Anything, spaceID, roleID).Return(nil, errNotFound).Once()
+			rl.On("List", mock.Anything, spaceID).Return(nil, errNotFound).Once()
+
+			v3, err := svc.Get(ctx, spaceID, roleID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the cache entry to be invalidated after deletion and the service error to be returned.")
+			assert.Nil(t, v3)
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the cache entry to be invalidated after deletion and the service error to be returned.")
+			assert.Nil(t, vl3)
+
+			rl.AssertExpectations(t)
+		})
+
+		t.Run("After Create", func(t *testing.T) {
+			rl := &rsmocks.Roles{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(rl)
+
+			rl.On("List", mock.Anything, spaceID).Return([]*roles.Role{{ID: roleID, SpaceID: spaceID, Description: "Role"}}, nil).Once()
+
+			vl1, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected the objects to be served from the cache on the repeated request.")
+
+			rl.On("Create", mock.Anything, mock.Anything).Return(&roles.Role{ID: "roleID2", SpaceID: spaceID, Description: "Role2"}, nil).Once()
+
+			_, err = svc.Create(ctx, &roles.Role{ID: "roleID2", SpaceID: spaceID, Description: "Role2"})
+			require.NoError(t, err)
+
+			rl.On("List", mock.Anything, spaceID).Return([]*roles.Role{{ID: roleID, SpaceID: spaceID, Description: "Role"}, {ID: "roleID2", SpaceID: spaceID, Description: "Role2"}}, nil).Once()
+
+			vl3, err := svc.List(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Len(t, vl3, 2, "Expected the list cache to be cleared after creating a new object and the objects to be fetched from the service again.")
+
+			rl.AssertExpectations(t)
+		})
+
+		t.Run("After TTL expired", func(t *testing.T) {
+			rl := &rsmocks.Roles{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(rl)
+
+			rl.On("Get", mock.Anything, spaceID, roleID).Return(&roles.Role{ID: roleID, SpaceID: spaceID, Description: "Role"}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID, roleID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID, roleID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be served from the cache.")
+
+			time.Sleep(2 * ttl)
+			rl.On("Get", mock.Anything, spaceID, roleID).Return(&roles.Role{ID: roleID, SpaceID: spaceID, Description: "Role"}, nil).Once()
+
+			v3, err := svc.Get(ctx, spaceID, roleID)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v3, "Expected the object to have been evicted from the cache and fetched from the service again.")
+
+			rl.AssertExpectations(t)
+		})
+	})
+}
diff --git a/pkg/roles/middleware/error_logging_middleware.go b/pkg/roles/middleware/error_logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..7afe8f1f1003031ddedb437d0422dfb4123d4526
--- /dev/null
+++ b/pkg/roles/middleware/error_logging_middleware.go
@@ -0,0 +1,80 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/error_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/roles -i Roles -t ../../../assets/templates/middleware/error_log -o error_logging_middleware.go -l ""
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/roles"
+	"go.uber.org/zap"
+)
+
+// errorLoggingMiddleware implements roles.Roles that is instrumented with logging
+type errorLoggingMiddleware struct {
+	logger *zap.Logger
+	next   roles.Roles
+}
+
+// ErrorLoggingMiddleware instruments an implementation of the roles.Roles with simple logging
+func ErrorLoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next roles.Roles) roles.Roles {
+		return &errorLoggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *errorLoggingMiddleware) Create(ctx context.Context, role *roles.Role) (created *roles.Role, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Create(ctx, role)
+}
+
+func (m *errorLoggingMiddleware) Delete(ctx context.Context, spaceId string, roleId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Delete(ctx, spaceId, roleId)
+}
+
+func (m *errorLoggingMiddleware) Get(ctx context.Context, spaceId string, roleId string) (role *roles.Role, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Get(ctx, spaceId, roleId)
+}
+
+func (m *errorLoggingMiddleware) List(ctx context.Context, spaceId string) (roles []*roles.Role, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.List(ctx, spaceId)
+}
+
+func (m *errorLoggingMiddleware) Update(ctx context.Context, role *roles.Role) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Update(ctx, role)
+}
diff --git a/pkg/roles/middleware/logging_middleware.go b/pkg/roles/middleware/logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..ab536b6ad9e168adc79691a3127be8a47f9e5ba7
--- /dev/null
+++ b/pkg/roles/middleware/logging_middleware.go
@@ -0,0 +1,214 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/access_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/roles -i Roles -t ../../../assets/templates/middleware/access_log -o logging_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/auth"
+	"git.perx.ru/perxis/perxis-go/pkg/roles"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// loggingMiddleware implements roles.Roles that is instrumented with logging
+type loggingMiddleware struct {
+	logger *zap.Logger
+	next   roles.Roles
+}
+
+// LoggingMiddleware instruments an implementation of the roles.Roles with simple logging
+func LoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next roles.Roles) roles.Roles {
+		return &loggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *loggingMiddleware) Create(ctx context.Context, role *roles.Role) (created *roles.Role, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":  ctx,
+		"role": role} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Request", fields...)
+
+	created, err = m.next.Create(ctx, role)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"created": created,
+		"err":     err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Response", fields...)
+
+	return created, err
+}
+
+func (m *loggingMiddleware) Delete(ctx context.Context, spaceId string, roleId string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"roleId":  roleId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Request", fields...)
+
+	err = m.next.Delete(ctx, spaceId, roleId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Get(ctx context.Context, spaceId string, roleId string) (role *roles.Role, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"roleId":  roleId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Request", fields...)
+
+	role, err = m.next.Get(ctx, spaceId, roleId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"role": role,
+		"err":  err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Response", fields...)
+
+	return role, err
+}
+
+func (m *loggingMiddleware) List(ctx context.Context, spaceId string) (roles []*roles.Role, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("List.Request", fields...)
+
+	roles, err = m.next.List(ctx, spaceId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"roles": roles,
+		"err":   err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("List.Response", fields...)
+
+	return roles, err
+}
+
+func (m *loggingMiddleware) Update(ctx context.Context, role *roles.Role) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":  ctx,
+		"role": role} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Request", fields...)
+
+	err = m.next.Update(ctx, role)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Response", fields...)
+
+	return err
+}
diff --git a/pkg/roles/middleware/middleware.go b/pkg/roles/middleware/middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..0a5198110dfc463b80274230a7d1cbc65283debd
--- /dev/null
+++ b/pkg/roles/middleware/middleware.go
@@ -0,0 +1,28 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/middleware
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/roles -i Roles -t ../../../assets/templates/middleware/middleware -o middleware.go -l ""
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/roles"
+	"go.uber.org/zap"
+)
+
+type Middleware func(roles.Roles) roles.Roles
+
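+// WithLog decorates the Roles service with error logging, optional access
+// logging and panic recovery; the recovery middleware is applied last so it
+// wraps the whole chain.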
+func WithLog(s roles.Roles, logger *zap.Logger, log_access bool) roles.Roles {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	logger = logger.Named("Roles")
+	s = ErrorLoggingMiddleware(logger)(s)
+	if log_access {
+		s = LoggingMiddleware(logger)(s)
+	}
+	s = RecoveringMiddleware(logger)(s)
+	return s
+}
diff --git a/pkg/roles/middleware/recovering_middleware.go b/pkg/roles/middleware/recovering_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..0c0f023b6fa4ccb6f8dc4dadbf59a58bf024a6cb
--- /dev/null
+++ b/pkg/roles/middleware/recovering_middleware.go
@@ -0,0 +1,91 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/recovery
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/roles -i Roles -t ../../../assets/templates/middleware/recovery -o recovering_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+
+	"git.perx.ru/perxis/perxis-go/pkg/roles"
+	"go.uber.org/zap"
+)
+
+// recoveringMiddleware implements roles.Roles that is instrumented with logging
+type recoveringMiddleware struct {
+	logger *zap.Logger
+	next   roles.Roles
+}
+
+// RecoveringMiddleware instruments an implementation of the roles.Roles with simple logging
+func RecoveringMiddleware(logger *zap.Logger) Middleware {
+	return func(next roles.Roles) roles.Roles {
+		return &recoveringMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *recoveringMiddleware) Create(ctx context.Context, role *roles.Role) (created *roles.Role, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Create(ctx, role)
+}
+
+func (m *recoveringMiddleware) Delete(ctx context.Context, spaceId string, roleId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Delete(ctx, spaceId, roleId)
+}
+
+func (m *recoveringMiddleware) Get(ctx context.Context, spaceId string, roleId string) (role *roles.Role, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Get(ctx, spaceId, roleId)
+}
+
+func (m *recoveringMiddleware) List(ctx context.Context, spaceId string) (roles []*roles.Role, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.List(ctx, spaceId)
+}
+
+func (m *recoveringMiddleware) Update(ctx context.Context, role *roles.Role) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Update(ctx, role)
+}
diff --git a/pkg/schemaloader/context.go b/pkg/schemaloader/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..7407b6b30939981f917c50d734ac770ec69b9e4e
--- /dev/null
+++ b/pkg/schemaloader/context.go
@@ -0,0 +1,30 @@
+package schemaloader
+
+import "context"
+
+type LoaderContext struct {
+	SpaceID string
+	EnvID   string
+}
+
+type loaderCtxKey struct{}
+
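+// WithContext stores the loader context in ctx. If a LoaderContext is already
+// present it is updated in place, so values set earlier in the call chain are
+// overwritten rather than shadowed.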
+func WithContext(ctx context.Context, loaderContext *LoaderContext) context.Context {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	p, _ := ctx.Value(loaderCtxKey{}).(*LoaderContext)
+	if p != nil {
+		*p = *loaderContext
+		return ctx
+	}
+	return context.WithValue(ctx, loaderCtxKey{}, loaderContext)
+}
+
+func GetContext(ctx context.Context) *LoaderContext {
+	p, _ := ctx.Value(loaderCtxKey{}).(*LoaderContext)
+	if p == nil {
+		return new(LoaderContext)
+	}
+	return p
+}
diff --git a/pkg/schemaloader/loader.go b/pkg/schemaloader/loader.go
new file mode 100644
index 0000000000000000000000000000000000000000..e27baf4ea8b509adff88f7b9c5e99a48f15960b7
--- /dev/null
+++ b/pkg/schemaloader/loader.go
@@ -0,0 +1,92 @@
+package schemaloader
+
+import (
+	"context"
+	"net/url"
+	"strings"
+
+	"git.perx.ru/perxis/perxis-go/pkg/collections"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/schema/field"
+)
+
+// NewLoader returns a new schema loader backed by the Collections service.
+// It is intended for server-side use only: on the client, use the methods that
+// return fully loaded schemas, for which reference resolution happens on the server.
+func NewLoader(svc collections.Collections) field.Loader {
+	return &loader{svc: svc}
+}
+
+type loader struct {
+	svc collections.Collections
+}
+
+// Load returns the schema fields referenced by ref from the matching collections (not fully loaded).
+func (l *loader) Load(ctx context.Context, ref string) ([]*field.Field, error) {
+	spaceID, envID, colID, err := parseRef(ctx, ref)
+	if err != nil {
+		return nil, err
+	}
+
+	filter := &collections.Filter{ID: []string{colID}}
+
+	collections, err := l.svc.List(ctx, spaceID, envID, filter)
+	if err != nil {
+		return nil, errors.Wrapf(err, "schemaloader: failed to get collections for \"%s\"", ref)
+	}
+
+	var schemas []*field.Field
+	for _, s := range collections {
+		if s.Schema != nil {
+			schemas = append(schemas, &s.Schema.Field)
+		}
+	}
+
+	if len(schemas) == 0 {
+		return nil, errors.Errorf("schema not found \"%s\"", ref)
+	}
+
+	return schemas, nil
+}
+
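+// parseRef splits a schema reference of the form "collection",
+// "space/collection" (environment defaults to "master") or
+// "space/environment/collection". Missing space and environment values are
+// taken from the LoaderContext stored in ctx; a reference without a resolvable
+// space or collection is an error.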
+func parseRef(ctx context.Context, ref string) (spaceID, envID, colID string, err error) {
+	var u *url.URL
+	if u, err = url.Parse(ref); err != nil {
+		return
+	}
+
+	parts := strings.SplitN(u.Path, "/", 3)
+
+	switch len(parts) {
+	case 1:
+		colID = parts[0]
+	case 2:
+		spaceID = parts[0]
+		envID = "master"
+		colID = parts[1]
+	case 3:
+		spaceID = parts[0]
+		envID = parts[1]
+		colID = parts[2]
+	}
+
+	if colID == "" {
+		err = errors.Errorf("invalid schema reference \"%s\"", ref)
+	}
+
+	if loaderCtx := GetContext(ctx); loaderCtx != nil {
+		if spaceID == "" {
+			spaceID = loaderCtx.SpaceID
+		}
+
+		if envID == "" {
+			envID = loaderCtx.EnvID
+		}
+	}
+
+	if spaceID == "" {
+		err = errors.Errorf("can't identify space for reference \"%s\"", ref)
+	}
+
+	return
+}
diff --git a/pkg/schemaloader/loader_test.go b/pkg/schemaloader/loader_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f796fd8f669e31140e4171955c4d7db8723cec26
--- /dev/null
+++ b/pkg/schemaloader/loader_test.go
@@ -0,0 +1,141 @@
+package schemaloader
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+//func Test_Load(t *testing.T) {
+//
+//	const (
+//		spaceID = "SpaceID"
+//		envID   = "envID"
+//		colID   = "colID"
+//		uri     = "/colID#fieldID"
+//	)
+//
+//	t.Run("Load schema (success)", func(t *testing.T) {
+//		collSvs := &mocks.Collections{}
+//
+//		sch := schema.New(
+//			"first_name", field.String(),
+//			"last_name", field.String(),
+//		)
+//
+//		cl := &collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "Collection", Schema: sch}
+//		collSvs.On("List", mock.Anything, spaceID, envID, mock.AnythingOfType("*collections.Filter")).Run(func(args mock.Arguments) {
+//			filter := args[3].(*collections.Filter)
+//
+//			assert.Equal(t, &collections.Filter{ID: []string{"colID"}}, filter, "Фильтр должен содержать идентификатор коллекции")
+//		}).Return([]*collections.Collection{cl}, nil).Once()
+//
+//		loader := NewLoader(collSvs, spaceID, envID)
+//		schemas, err := loader.Load(nil, uri)
+//
+//		require.NoError(t, err, "Ожидается успешное завершение")
+//		require.Equal(t, []*field.Field{&sch.Field}, schemas, "Метод должен возвращать срез схем")
+//		collSvs.AssertExpectations(t)
+//	})
+//
+//	t.Run("Collection doesn't have schema", func(t *testing.T) {
+//		collSvs := &mocks.Collections{}
+//
+//		cl := &collections.Collection{ID: colID, SpaceID: spaceID, EnvID: envID, Name: "Collection"}
+//		collSvs.On("List", mock.Anything, spaceID, envID, mock.AnythingOfType("*collections.Filter")).Run(func(args mock.Arguments) {
+//			filter := args[3].(*collections.Filter)
+//
+//			assert.Equal(t, &collections.Filter{ID: []string{"colID"}}, filter, "Фильтр должен содержать идентификатор коллекции")
+//		}).Return([]*collections.Collection{cl}, nil).Once()
+//
+//		loader := NewLoader(collSvs, spaceID, envID)
+//		schemas, err := loader.Load(nil, uri)
+//
+//		require.Error(t, err, "Ожидается ошибка")
+//		require.Contains(t, err.Error(), "schema not found")
+//		require.Nil(t, schemas, "Метод должен вернуть nil")
+//		//assert.Nil(t, schemas, "Метод должен вернуть nil")
+//		collSvs.AssertExpectations(t)
+//	})
+//
+//	t.Run("Loader not found collection", func(t *testing.T) {
+//		collSvs := &mocks.Collections{}
+//
+//		collSvs.On("List", mock.Anything, spaceID, envID, mock.AnythingOfType("*collections.Filter")).Run(func(args mock.Arguments) {
+//			filter := args[3].(*collections.Filter)
+//
+//			assert.Equal(t, &collections.Filter{ID: []string{"colID"}}, filter, "Фильтр должен содержать идентификатор коллекции")
+//		}).Return([]*collections.Collection{}, nil).Once()
+//
+//		loader := NewLoader(collSvs, spaceID, envID)
+//		schemas, err := loader.Load(nil, uri)
+//
+//		require.Error(t, err, "Ожидается ошибка")
+//		require.Contains(t, err.Error(), "schema not found")
+//		require.Nil(t, schemas, "Метод должен вернуть nil")
+//		collSvs.AssertExpectations(t)
+//	})
+//
+//	t.Run("Collection service return error", func(t *testing.T) {
+//		collSvs := &mocks.Collections{}
+//
+//		collSvs.On("List", mock.Anything, spaceID, envID, mock.AnythingOfType("*collections.Filter")).Run(func(args mock.Arguments) {
+//			filter := args[3].(*collections.Filter)
+//
+//			assert.Equal(t, &collections.Filter{ID: []string{"colID"}}, filter, "Фильтр должен содержать идентификатор коллекции")
+//		}).Return(nil, errors.New("storage error")).Once()
+//
+//		loader := NewLoader(collSvs, spaceID, envID)
+//		schemas, err := loader.Load(nil, uri)
+//
+//		require.Error(t, err, "Ожидается ошибка")
+//		require.Contains(t, err.Error(), "failed to get schema")
+//		require.Nil(t, schemas, "Метод должен вернуть nil")
+//		collSvs.AssertExpectations(t)
+//	})
+//
+//	t.Run("ParseMask return error", func(t *testing.T) {
+//		collSvs := &mocks.Collections{}
+//
+//		loader := NewLoader(collSvs, spaceID, envID)
+//		schemas, err := loader.Load(nil, "")
+//
+//		require.Error(t, err, "Ожидается ошибка")
+//		require.Contains(t, err.Error(), "invalid schema reference")
+//		require.Nil(t, schemas, "Метод должен вернуть nil")
+//		collSvs.AssertExpectations(t)
+//	})
+//}
+
+func Test_parseRef(t *testing.T) {
+	ctx := WithContext(nil, &LoaderContext{SpaceID: "spc", EnvID: "env"})
+	tests := []struct {
+		ref            string
+		ctx            context.Context
+		wantSpaceID    string
+		wantEnvId      string
+		wantCollection string
+		wantErr        assert.ErrorAssertionFunc
+	}{
+		{"col", ctx, "spc", "env", "col", assert.NoError},
+		{"/col", ctx, "spc", "master", "col", assert.NoError},
+		{"spc1/env1/col", ctx, "spc1", "env1", "col", assert.NoError},
+		{"spc1/env1/col#fld", ctx, "spc1", "env1", "col", assert.NoError},
+		{"col%3f*", ctx, "spc", "env", "col?*", assert.NoError},
+		{"#fld", ctx, "spc", "env", "", assert.Error},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.ref, func(t *testing.T) {
+			gotSpaceID, gotEnvId, gotCollection, err := parseRef(tt.ctx, tt.ref)
+			if !tt.wantErr(t, err, fmt.Sprintf("parseRef(%v)", tt.ref)) {
+				return
+			}
+			assert.Equalf(t, tt.wantSpaceID, gotSpaceID, "parseRef(%v)", tt.ref)
+			assert.Equalf(t, tt.wantEnvId, gotEnvId, "parseRef(%v)", tt.ref)
+			assert.Equalf(t, tt.wantCollection, gotCollection, "parseRef(%v)", tt.ref)
+		})
+	}
+}
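+
+// The table above covers the supported reference forms: "col" (space and
+// environment taken from the loader context), "/col" (environment falls back
+// to "master"), "spc/env/col", an optional "#field" fragment and
+// percent-encoded characters in the collection name; a reference consisting
+// only of a fragment is rejected.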
diff --git a/pkg/spaces/middleware/caching_middleware.go b/pkg/spaces/middleware/caching_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..62396fc8f1e9101f92885330a50797e26dbdebe2
--- /dev/null
+++ b/pkg/spaces/middleware/caching_middleware.go
@@ -0,0 +1,106 @@
+package service
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	service "git.perx.ru/perxis/perxis-go/pkg/spaces"
+)
+
+func orgKey(orgID string) string { return "org-" + orgID }
+
+func CachingMiddleware(cache *cache.Cache) Middleware {
+	return func(next service.Spaces) service.Spaces {
+		m := &cachingMiddleware{
+			cache: cache,
+			next:  next,
+		}
+
+		return m
+	}
+}
+
+type cachingMiddleware struct {
+	cache *cache.Cache
+	next  service.Spaces
+}
+
+func (m cachingMiddleware) Create(ctx context.Context, space *service.Space) (sp *service.Space, err error) {
+
+	sp, err = m.next.Create(ctx, space)
+	if err == nil {
+		m.cache.Remove(orgKey(sp.OrgID))
+	}
+	return sp, err
+}
+
+func (m cachingMiddleware) Get(ctx context.Context, spaceId string) (sp *service.Space, err error) {
+
+	value, e := m.cache.Get(spaceId)
+	if e == nil {
+		return value.(*service.Space), err
+	}
+	sp, err = m.next.Get(ctx, spaceId)
+	if err == nil {
+		m.cache.Set(spaceId, sp)
+	}
+	return sp, err
+}
+
+func (m cachingMiddleware) List(ctx context.Context, orgId string) (spaces []*service.Space, err error) {
+
+	value, e := m.cache.Get(orgKey(orgId))
+	if e == nil {
+		return value.([]*service.Space), err
+	}
+	spaces, err = m.next.List(ctx, orgId)
+	if err == nil {
+		m.cache.Set(orgKey(orgId), spaces)
+		for _, s := range spaces {
+			m.cache.Set(s.ID, s)
+		}
+	}
+	return spaces, err
+}
+
+func (m cachingMiddleware) Update(ctx context.Context, space *service.Space) (err error) {
+
+	err = m.next.Update(ctx, space)
+	if err == nil {
+		value, e := m.cache.Get(space.ID)
+		if e == nil {
+			space := value.(*service.Space)
+			m.cache.Remove(orgKey(space.OrgID))
+		}
+		m.cache.Remove(space.ID)
+	}
+	return err
+}
+
+func (m cachingMiddleware) UpdateConfig(ctx context.Context, spaceId string, config *service.Config) (err error) {
+
+	err = m.next.UpdateConfig(ctx, spaceId, config)
+	if err == nil {
+		value, e := m.cache.Get(spaceId)
+		if e == nil {
+			space := value.(*service.Space)
+			m.cache.Remove(orgKey(space.OrgID))
+		}
+		m.cache.Remove(spaceId)
+	}
+	return err
+}
+
+func (m cachingMiddleware) Delete(ctx context.Context, spaceId string) (err error) {
+
+	err = m.next.Delete(ctx, spaceId)
+	if err == nil {
+		value, e := m.cache.Get(spaceId)
+		if e == nil {
+			space := value.(*service.Space)
+			m.cache.Remove(orgKey(space.OrgID))
+		}
+		m.cache.Remove(spaceId)
+	}
+	return err
+}
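+
+// Usage sketch (illustrative, not part of the package API; the cache size and
+// TTL are example values and spacesImpl stands for any concrete spaces.Spaces):
+//
+//	svc := CachingMiddleware(cache.NewCache(128, time.Minute))(spacesImpl)
+//	sp, err := svc.Get(ctx, "spaceID") // repeated calls are served from the cache until invalidated by Update/UpdateConfig/Delete or TTL expiry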
diff --git a/pkg/spaces/middleware/caching_middleware_test.go b/pkg/spaces/middleware/caching_middleware_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..2cfaf98db8713081225b3f01f39782f487292161
--- /dev/null
+++ b/pkg/spaces/middleware/caching_middleware_test.go
@@ -0,0 +1,241 @@
+package service
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+
+	"git.perx.ru/perxis/perxis-go/pkg/spaces"
+	spmocks "git.perx.ru/perxis/perxis-go/pkg/spaces/mocks"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestSpacesCache(t *testing.T) {
+
+	const (
+		spaceID = "spaceID"
+		orgID   = "orgID"
+		size    = 5
+		ttl     = 20 * time.Millisecond
+	)
+
+	errNotFound := errors.NotFound(errors.New("not found"))
+
+	ctx := context.Background()
+
+	t.Run("Get from cache", func(t *testing.T) {
+		sp := &spmocks.Spaces{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(sp)
+
+		sp.On("Get", mock.Anything, spaceID).Return(&spaces.Space{ID: spaceID, OrgID: orgID, Name: "Space"}, nil).Once()
+
+		v1, err := svc.Get(ctx, spaceID)
+		require.NoError(t, err)
+
+		v2, err := svc.Get(ctx, spaceID)
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Expected the object to be returned from the cache on a repeated request.")
+
+		sp.AssertExpectations(t)
+	})
+
+	t.Run("List from cache", func(t *testing.T) {
+		sp := &spmocks.Spaces{}
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(sp)
+
+		sp.On("List", mock.Anything, orgID).Return([]*spaces.Space{{ID: spaceID, OrgID: orgID, Name: "Space"}}, nil).Once()
+
+		vl1, err := svc.List(ctx, orgID)
+		require.NoError(t, err)
+
+		vl2, err := svc.List(ctx, orgID)
+		require.NoError(t, err)
+		assert.Same(t, vl1[0], vl2[0], "Expected the objects to be returned from the cache on a repeated request.")
+
+		sp.AssertExpectations(t)
+	})
+
+	t.Run("Invalidate cache", func(t *testing.T) {
+		t.Run("After Update", func(t *testing.T) {
+			sp := &spmocks.Spaces{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(sp)
+
+			sp.On("Get", mock.Anything, spaceID).Return(&spaces.Space{ID: spaceID, OrgID: orgID, Name: "Space"}, nil).Once()
+			sp.On("List", mock.Anything, orgID).Return([]*spaces.Space{{ID: spaceID, OrgID: orgID, Name: "Space"}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be returned from the cache on a repeated request.")
+
+			vl1, err := svc.List(ctx, orgID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, orgID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected the objects to be returned from the cache on a repeated request.")
+
+			sp.On("Update", mock.Anything, mock.Anything).Return(nil).Once()
+
+			err = svc.Update(ctx, &spaces.Space{ID: spaceID, OrgID: orgID, Name: "SpaceUPD"})
+			require.NoError(t, err)
+
+			sp.On("Get", mock.Anything, spaceID).Return(&spaces.Space{ID: spaceID, OrgID: orgID, Name: "SpaceUPD"}, nil).Once()
+			sp.On("List", mock.Anything, orgID).Return([]*spaces.Space{{ID: spaceID, OrgID: orgID, Name: "SpaceUPD"}}, nil).Once()
+
+			v3, err := svc.Get(ctx, spaceID)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v3, "Expected the cached object to be invalidated after the object was updated.")
+
+			vl3, err := svc.List(ctx, orgID)
+			require.NoError(t, err)
+			assert.NotSame(t, vl2[0], vl3[0], "Expected the cached list to be invalidated after the object was updated.")
+
+			sp.AssertExpectations(t)
+		})
+
+		t.Run("After UpdateConfig", func(t *testing.T) {
+			sp := &spmocks.Spaces{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(sp)
+
+			sp.On("Get", mock.Anything, spaceID).Return(&spaces.Space{ID: spaceID, OrgID: orgID, Name: "Space"}, nil).Once()
+			sp.On("List", mock.Anything, orgID).Return([]*spaces.Space{{ID: spaceID, OrgID: orgID, Name: "Space"}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be returned from the cache on a repeated request.")
+
+			vl1, err := svc.List(ctx, orgID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, orgID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected the objects to be returned from the cache on a repeated request.")
+
+			sp.On("UpdateConfig", mock.Anything, spaceID, mock.Anything).Return(nil).Once()
+
+			err = svc.UpdateConfig(ctx, spaceID, &spaces.Config{Features: []string{"feature"}})
+			require.NoError(t, err)
+
+			sp.On("Get", mock.Anything, spaceID).Return(&spaces.Space{ID: spaceID, OrgID: orgID, Name: "SpaceUPD", Config: &spaces.Config{Features: []string{"feature"}}}, nil).Once()
+			sp.On("List", mock.Anything, orgID).Return([]*spaces.Space{{ID: spaceID, OrgID: orgID, Name: "SpaceUPD", Config: &spaces.Config{Features: []string{"feature"}}}}, nil).Once()
+
+			v3, err := svc.Get(ctx, spaceID)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v3, "Expected the cached object to be invalidated after the object was updated.")
+
+			vl3, err := svc.List(ctx, orgID)
+			require.NoError(t, err)
+			assert.NotSame(t, vl2[0], vl3[0], "Expected the cached list to be invalidated after the object was updated.")
+
+			sp.AssertExpectations(t)
+		})
+
+		t.Run("After Delete", func(t *testing.T) {
+			sp := &spmocks.Spaces{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(sp)
+
+			sp.On("Get", mock.Anything, spaceID).Return(&spaces.Space{ID: spaceID, OrgID: orgID, Name: "Space"}, nil).Once()
+			sp.On("List", mock.Anything, orgID).Return([]*spaces.Space{{ID: spaceID, OrgID: orgID, Name: "Space"}}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be returned from the cache on a repeated request.")
+
+			vl1, err := svc.List(ctx, orgID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, orgID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected the objects to be returned from the cache on a repeated request.")
+
+			sp.On("Delete", mock.Anything, spaceID).Return(nil).Once()
+
+			err = svc.Delete(ctx, spaceID)
+			require.NoError(t, err)
+
+			sp.On("Get", mock.Anything, spaceID).Return(nil, errNotFound).Once()
+			sp.On("List", mock.Anything, orgID).Return([]*spaces.Space{}, nil).Once()
+
+			_, err = svc.Get(ctx, spaceID)
+			require.Error(t, err)
+			assert.EqualError(t, err, "not found", "Expected the cache entry to be removed after the object was deleted and the service error to be returned.")
+
+			vl3, err := svc.List(ctx, orgID)
+			require.NoError(t, err)
+			assert.Len(t, vl3, 0, "Expected the cached list to be invalidated after the deletion.")
+
+			sp.AssertExpectations(t)
+		})
+
+		t.Run("After Create", func(t *testing.T) {
+			sp := &spmocks.Spaces{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(sp)
+
+			sp.On("List", mock.Anything, orgID).Return([]*spaces.Space{{ID: spaceID, OrgID: orgID, Name: "Space"}}, nil).Once()
+
+			vl1, err := svc.List(ctx, orgID)
+			require.NoError(t, err)
+
+			vl2, err := svc.List(ctx, orgID)
+			require.NoError(t, err)
+			assert.Same(t, vl1[0], vl2[0], "Expected the objects to be returned from the cache on a repeated request.")
+
+			sp.On("Create", mock.Anything, mock.Anything).Return(&spaces.Space{ID: "spaceID2", OrgID: orgID, Name: "Space2"}, nil).Once()
+
+			_, err = svc.Create(ctx, &spaces.Space{ID: "spaceID2", OrgID: orgID, Name: "Space2"})
+			require.NoError(t, err)
+
+			sp.On("List", mock.Anything, orgID).Return([]*spaces.Space{{ID: spaceID, OrgID: orgID, Name: "Space"}, {ID: "spaceID2", OrgID: orgID, Name: "Space2"}}, nil).Once()
+
+			vl3, err := svc.List(ctx, orgID)
+			require.NoError(t, err)
+			assert.NotSame(t, vl2[0], vl3[0], "Expected the cached list to be invalidated after a new object was created.")
+
+			sp.AssertExpectations(t)
+		})
+
+		t.Run("After TTL expired", func(t *testing.T) {
+			sp := &spmocks.Spaces{}
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(sp)
+
+			sp.On("Get", mock.Anything, spaceID).Return(&spaces.Space{ID: spaceID, OrgID: orgID, Name: "Space"}, nil).Once()
+
+			v1, err := svc.Get(ctx, spaceID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, spaceID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be returned from the cache on a repeated request.")
+
+			time.Sleep(2 * ttl)
+			sp.On("Get", mock.Anything, spaceID).Return(&spaces.Space{ID: spaceID, OrgID: orgID, Name: "Space"}, nil).Once()
+
+			v3, err := svc.Get(ctx, spaceID)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v3, "Expected the object to be evicted from the cache after the TTL expired.")
+
+			sp.AssertExpectations(t)
+		})
+	})
+}
diff --git a/pkg/spaces/middleware/error_logging_middleware.go b/pkg/spaces/middleware/error_logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..85b629ba32abe38e5030077e8fe81b04206650b0
--- /dev/null
+++ b/pkg/spaces/middleware/error_logging_middleware.go
@@ -0,0 +1,90 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/error_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/spaces -i Spaces -t ../../../assets/templates/middleware/error_log -o error_logging_middleware.go -l ""
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/spaces"
+	"go.uber.org/zap"
+)
+
+// errorLoggingMiddleware implements spaces.Spaces that is instrumented with logging
+type errorLoggingMiddleware struct {
+	logger *zap.Logger
+	next   spaces.Spaces
+}
+
+// ErrorLoggingMiddleware instruments an implementation of the spaces.Spaces with simple logging
+func ErrorLoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next spaces.Spaces) spaces.Spaces {
+		return &errorLoggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *errorLoggingMiddleware) Create(ctx context.Context, space *spaces.Space) (created *spaces.Space, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Create(ctx, space)
+}
+
+func (m *errorLoggingMiddleware) Delete(ctx context.Context, spaceId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Delete(ctx, spaceId)
+}
+
+func (m *errorLoggingMiddleware) Get(ctx context.Context, spaceId string) (space *spaces.Space, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Get(ctx, spaceId)
+}
+
+func (m *errorLoggingMiddleware) List(ctx context.Context, orgId string) (spaces []*spaces.Space, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.List(ctx, orgId)
+}
+
+func (m *errorLoggingMiddleware) Update(ctx context.Context, space *spaces.Space) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Update(ctx, space)
+}
+
+func (m *errorLoggingMiddleware) UpdateConfig(ctx context.Context, spaceId string, config *spaces.Config) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.UpdateConfig(ctx, spaceId, config)
+}
diff --git a/pkg/spaces/middleware/logging_middleware.go b/pkg/spaces/middleware/logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..1471ea19535139210e79968e5a4522001a0b2ae3
--- /dev/null
+++ b/pkg/spaces/middleware/logging_middleware.go
@@ -0,0 +1,248 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/access_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/spaces -i Spaces -t ../../../assets/templates/middleware/access_log -o logging_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/auth"
+	"git.perx.ru/perxis/perxis-go/pkg/spaces"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// loggingMiddleware implements spaces.Spaces that is instrumented with logging
+type loggingMiddleware struct {
+	logger *zap.Logger
+	next   spaces.Spaces
+}
+
+// LoggingMiddleware instruments an implementation of the spaces.Spaces with simple logging
+func LoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next spaces.Spaces) spaces.Spaces {
+		return &loggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *loggingMiddleware) Create(ctx context.Context, space *spaces.Space) (created *spaces.Space, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":   ctx,
+		"space": space} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Request", fields...)
+
+	created, err = m.next.Create(ctx, space)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"created": created,
+		"err":     err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Response", fields...)
+
+	return created, err
+}
+
+func (m *loggingMiddleware) Delete(ctx context.Context, spaceId string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Request", fields...)
+
+	err = m.next.Delete(ctx, spaceId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Get(ctx context.Context, spaceId string) (space *spaces.Space, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Request", fields...)
+
+	space, err = m.next.Get(ctx, spaceId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"space": space,
+		"err":   err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Response", fields...)
+
+	return space, err
+}
+
+func (m *loggingMiddleware) List(ctx context.Context, orgId string) (spaces []*spaces.Space, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":   ctx,
+		"orgId": orgId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("List.Request", fields...)
+
+	spaces, err = m.next.List(ctx, orgId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"spaces": spaces,
+		"err":    err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("List.Response", fields...)
+
+	return spaces, err
+}
+
+func (m *loggingMiddleware) Update(ctx context.Context, space *spaces.Space) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":   ctx,
+		"space": space} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Request", fields...)
+
+	err = m.next.Update(ctx, space)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) UpdateConfig(ctx context.Context, spaceId string, config *spaces.Config) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"spaceId": spaceId,
+		"config":  config} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("UpdateConfig.Request", fields...)
+
+	err = m.next.UpdateConfig(ctx, spaceId, config)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("UpdateConfig.Response", fields...)
+
+	return err
+}
diff --git a/pkg/spaces/middleware/middleware.go b/pkg/spaces/middleware/middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..9d9d2243026f6c1152f7625e4c15bd26b5b7b5fc
--- /dev/null
+++ b/pkg/spaces/middleware/middleware.go
@@ -0,0 +1,28 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/middleware
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/spaces -i Spaces -t ../../../assets/templates/middleware/middleware -o middleware.go -l ""
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/spaces"
+	"go.uber.org/zap"
+)
+
+type Middleware func(spaces.Spaces) spaces.Spaces
+
+func WithLog(s spaces.Spaces, logger *zap.Logger, log_access bool) spaces.Spaces {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	logger = logger.Named("Spaces")
+	s = ErrorLoggingMiddleware(logger)(s)
+	if log_access {
+		s = LoggingMiddleware(logger)(s)
+	}
+	s = RecoveringMiddleware(logger)(s)
+	return s
+}
diff --git a/pkg/spaces/middleware/recovering_middleware.go b/pkg/spaces/middleware/recovering_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..5ca795bf5c188603794603ec11fc0c8a79479524
--- /dev/null
+++ b/pkg/spaces/middleware/recovering_middleware.go
@@ -0,0 +1,103 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/recovery
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/spaces -i Spaces -t ../../../assets/templates/middleware/recovery -o recovering_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+
+	"git.perx.ru/perxis/perxis-go/pkg/spaces"
+	"go.uber.org/zap"
+)
+
+// recoveringMiddleware implements spaces.Spaces that is instrumented with logging
+type recoveringMiddleware struct {
+	logger *zap.Logger
+	next   spaces.Spaces
+}
+
+// RecoveringMiddleware instruments an implementation of the spaces.Spaces with simple logging
+func RecoveringMiddleware(logger *zap.Logger) Middleware {
+	return func(next spaces.Spaces) spaces.Spaces {
+		return &recoveringMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *recoveringMiddleware) Create(ctx context.Context, space *spaces.Space) (created *spaces.Space, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Create(ctx, space)
+}
+
+func (m *recoveringMiddleware) Delete(ctx context.Context, spaceId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Delete(ctx, spaceId)
+}
+
+func (m *recoveringMiddleware) Get(ctx context.Context, spaceId string) (space *spaces.Space, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Get(ctx, spaceId)
+}
+
+func (m *recoveringMiddleware) List(ctx context.Context, orgId string) (spaces []*spaces.Space, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.List(ctx, orgId)
+}
+
+func (m *recoveringMiddleware) Update(ctx context.Context, space *spaces.Space) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Update(ctx, space)
+}
+
+func (m *recoveringMiddleware) UpdateConfig(ctx context.Context, spaceId string, config *spaces.Config) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.UpdateConfig(ctx, spaceId, config)
+}
diff --git a/pkg/template/builder.go b/pkg/template/builder.go
new file mode 100644
index 0000000000000000000000000000000000000000..9a4ddcdb73e6b5c585d77088630096351d19b2bd
--- /dev/null
+++ b/pkg/template/builder.go
@@ -0,0 +1,171 @@
+package template
+
+import (
+	"bytes"
+	"context"
+	"text/template"
+
+	"git.perx.ru/perxis/perxis-go/pkg/content"
+)
+
+type Builder struct {
+	ctx     context.Context
+	cnt     *content.Content
+	SpaceID string
+	EnvID   string
+	funcs   template.FuncMap
+	data    map[string]interface{}
+}
+
+func NewBuilder(cnt *content.Content, space, env string) *Builder {
+	return &Builder{
+		ctx:     context.Background(),
+		cnt:     cnt,
+		SpaceID: space,
+		EnvID:   env,
+		funcs:   make(template.FuncMap),
+	}
+}
+
+func (b *Builder) getFuncs() template.FuncMap {
+	return template.FuncMap{
+		"lookup": getLookup(b),
+		"system": getSystem(b),
+	}
+}
+
+func (b *Builder) WithData(data map[string]interface{}) *Builder {
+	bld := *b
+	bld.data = data
+	return &bld
+}
+
+func (b *Builder) WithKV(kv ...any) *Builder {
+	bld := *b
+	if bld.data == nil {
+		bld.data = make(map[string]interface{}, 10)
+	}
+	for i := 0; i < len(kv)-1; i += 2 {
+		k, _ := kv[i].(string)
+		v := kv[i+1]
+		if k != "" && v != nil {
+			bld.data[k] = v
+		}
+	}
+	return &bld
+}
+
+func (b *Builder) GetData() map[string]interface{} {
+	return b.data
+}
+
+func (b *Builder) WithSpace(space, env string) *Builder {
+	bld := *b
+	bld.SpaceID = space
+	bld.EnvID = env
+	return &bld
+}
+
+func (b *Builder) WithContext(ctx context.Context) *Builder {
+	bld := *b
+	bld.ctx = ctx
+	return &bld
+}
+
+func (b *Builder) Context() context.Context {
+	return b.ctx
+}
+
+func (b *Builder) Template() *template.Template {
+	return template.New("main").Funcs(b.getFuncs())
+}
+
+func (b *Builder) Execute(str string, data ...any) (string, error) {
+	t := b.Template()
+	buf := new(bytes.Buffer)
+	t, err := t.Parse(str)
+	if err != nil {
+		return "", err
+	}
+	if err = t.Execute(buf, b.getData(data...)); err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
+func (b *Builder) ExecuteList(str []string, data ...any) ([]string, error) {
+	t := b.Template()
+	result := make([]string, len(str))
+	buffer := new(bytes.Buffer)
+	for i, tmpl := range str {
+		if tmpl == "" {
+			continue
+		}
+		t, err := t.Parse(tmpl)
+		if err != nil {
+			return []string{}, err
+		}
+		if err = t.Execute(buffer, b.getData(data...)); err != nil {
+			return []string{}, err
+		}
+		result[i] = buffer.String()
+		buffer.Reset()
+	}
+	return result, nil
+}
+
+func (b *Builder) ExecuteMap(str map[string]interface{}, data ...any) (map[string]interface{}, error) {
+	result := make(map[string]interface{}, len(str))
+	for k, v := range str {
+		switch t := v.(type) {
+		case string:
+			value, err := b.Execute(t, data...)
+			if err != nil {
+				return nil, err
+			}
+			v = value
+		case []string:
+			values, err := b.ExecuteList(append([]string{k}, t...), data...)
+			if err != nil {
+				return nil, err
+			}
+			k = values[0]
+			vv := make([]interface{}, 0, len(t))
+			for _, val := range values[1:] {
+				vv = append(vv, val)
+			}
+			v = vv
+		}
+
+		result[k] = v
+	}
+	return result, nil
+}
+
+func (b *Builder) getData(data ...any) any {
+	if len(data) == 0 {
+		return b.data
+	}
+
+	var res map[string]interface{}
+	for _, v := range data {
+		if m, ok := v.(map[string]interface{}); ok && b.data != nil {
+			res = mergeMaps(b.data, m)
+		}
+	}
+	if res != nil {
+		return res
+	}
+
+	return data[0]
+}
+
+func mergeMaps(in ...map[string]interface{}) map[string]interface{} {
+	out := make(map[string]interface{})
+	for _, i := range in {
+		for k, v := range i {
+			out[k] = v
+		}
+	}
+	return out
+}
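+
+// Usage sketch (values are illustrative): data can be supplied via WithData or
+// WithKV, or passed directly to Execute/ExecuteList/ExecuteMap.
+//
+//	b := NewBuilder(cnt, "space", "env")
+//	out, err := b.WithKV("name", "world").Execute("hello {{ .name }}")
+//	// out == "hello world"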
diff --git a/pkg/template/builder_test.go b/pkg/template/builder_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f8e2b34440dd73259a6ccd3ce9202a556caa5298
--- /dev/null
+++ b/pkg/template/builder_test.go
@@ -0,0 +1,272 @@
+package template
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"text/template"
+
+	"git.perx.ru/perxis/perxis-go/pkg/content"
+	"git.perx.ru/perxis/perxis-go/pkg/items"
+	mocksitems "git.perx.ru/perxis/perxis-go/pkg/items/mocks"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestBuilder_Execute(t *testing.T) {
+	tests := []struct {
+		name    string
+		ctx     context.Context
+		cnt     *content.Content
+		SpaceID string
+		EnvID   string
+		funcs   template.FuncMap
+		str     string
+		data    any
+		want    any
+		wantErr bool
+
+		itemsCall func(itemsSvc *mocksitems.Items)
+	}{
+		{name: "error", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: "hello {{ .a }}", data: "world", want: "", wantErr: true},
+		{name: "empty", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: "", data: "", want: "", wantErr: false},
+		{name: "#1", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: "hello {{ . }}", data: "world", want: "hello world", wantErr: false},
+		{name: "#2", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: "{{ . }}", data: "world", want: "world", wantErr: false},
+		{name: "#3 ", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: "", data: "world", want: "", wantErr: false},
+		{name: "#4 ", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: "hello", data: "world", want: "hello", wantErr: false},
+		{name: "lookup", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: "hello, {{ lookup \"secrets.dev.key\" }}", data: "", want: "hello, Luk", wantErr: false, itemsCall: func(itemsSvc *mocksitems.Items) {
+			itemsSvc.On("Get", context.Background(), "space", "env", "secrets", "dev").Return(&items.Item{
+				ID:           "dev",
+				SpaceID:      "space",
+				EnvID:        "env",
+				CollectionID: "secrets",
+				Data: map[string]interface{}{
+					"id":  "dev",
+					"key": "Luk",
+				},
+			}, nil).Once()
+		}},
+		{name: "lookup with slice", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: "numbers {{ lookup \"secrets.dev.slice\" }}", data: "", want: "numbers [1 2 3]", wantErr: false, itemsCall: func(itemsSvc *mocksitems.Items) {
+			itemsSvc.On("Get", context.Background(), "space", "env", "secrets", "dev").Return(&items.Item{
+				ID:           "dev",
+				SpaceID:      "space",
+				EnvID:        "env",
+				CollectionID: "secrets",
+				Data: map[string]interface{}{
+					"id":    "dev",
+					"slice": []int{1, 2, 3},
+				},
+			}, nil).Once()
+		}},
+		{name: "lookup with empty Data", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: "numbers {{ lookup \"secrets.dev.slice\" }}", data: "", want: "numbers <no value>", wantErr: false, itemsCall: func(itemsSvc *mocksitems.Items) {
+			itemsSvc.On("Get", context.Background(), "space", "env", "secrets", "dev").Return(&items.Item{
+				ID:           "dev",
+				SpaceID:      "space",
+				EnvID:        "env",
+				CollectionID: "secrets",
+				Data:         map[string]interface{}{},
+			}, nil).Once()
+		}},
+		{name: "lookup with incorrect field", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: "hello {{ lookup \"secrets.dev.incorrect\" }}", data: "", want: "hello <no value>", wantErr: false, itemsCall: func(itemsSvc *mocksitems.Items) {
+			itemsSvc.On("Get", context.Background(), "space", "env", "secrets", "dev").Return(&items.Item{
+				ID:           "dev",
+				SpaceID:      "space",
+				EnvID:        "env",
+				CollectionID: "secrets",
+				Data: map[string]interface{}{
+					"id":  "dev",
+					"key": "1234",
+				},
+			}, nil).Once()
+		}},
+		{name: "lookup not found", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: "hello {{ lookup \"secrets.prod.pass\" }}", data: "", want: "", wantErr: true, itemsCall: func(itemsSvc *mocksitems.Items) {
+			itemsSvc.On("Get", context.Background(), "space", "env", "secrets", "prod").Return(nil, errors.New("not found")).Once()
+		}},
+		{name: "lookup without itemID", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: "hello {{ lookup \"secrets.pass\" }}", data: "", want: "", wantErr: true},
+		{name: "system ", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: "hello {{ system.SpaceID }}", data: "", want: "hello space", wantErr: false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			itemsSvc := &mocksitems.Items{}
+			if tt.itemsCall != nil {
+				tt.itemsCall(itemsSvc)
+			}
+			tt.cnt = &content.Content{
+				Items: itemsSvc,
+			}
+			b := &Builder{
+				ctx:     tt.ctx,
+				cnt:     tt.cnt,
+				SpaceID: tt.SpaceID,
+				EnvID:   tt.EnvID,
+				funcs:   tt.funcs,
+			}
+
+			got, err := b.Execute(tt.str, tt.data)
+			if tt.wantErr == true {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+			assert.Equal(t, tt.want, got)
+			if tt.itemsCall != nil {
+				itemsSvc.AssertExpectations(t)
+			}
+		})
+	}
+}
+
+func TestBuilder_ExecuteList(t *testing.T) {
+	tests := []struct {
+		name    string
+		ctx     context.Context
+		cnt     *content.Content
+		SpaceID string
+		EnvID   string
+		funcs   template.FuncMap
+		str     []string
+		data    any
+		want    []string
+		wantErr bool
+
+		itemsCall func(itemsSvc *mocksitems.Items)
+	}{
+		{name: "error", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: []string{"hello { . }}", "go {{ . }"}, data: "world", want: []string{}, wantErr: true},
+		{name: "empty", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: []string{""}, data: "world", want: []string{""}, wantErr: false},
+		{name: "#1", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: []string{"hello {{ . }}", "go {{ . }}"}, data: "world", want: []string{"hello world", "go world"}, wantErr: false},
+		{name: "#2", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: []string{"{{ . }}"}, data: "world", want: []string{"world"}, wantErr: false},
+		{name: "#3 ", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: []string{""}, data: "world", want: []string{""}, wantErr: false},
+		{name: "#4 ", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: []string{"hello"}, data: "world", want: []string{"hello"}, wantErr: false},
+		{name: "lookup", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: []string{"hello {{ lookup \"secrets.dev.key\" }}"}, data: "", want: []string{"hello 1234"}, wantErr: false, itemsCall: func(itemsSvc *mocksitems.Items) {
+			itemsSvc.On("Get", context.Background(), "space", "env", "secrets", "dev").Return(&items.Item{
+				ID:           "dev",
+				SpaceID:      "space",
+				EnvID:        "env",
+				CollectionID: "secrets",
+				Data: map[string]interface{}{
+					"id":  "dev",
+					"key": "1234",
+				},
+			}, nil).Once()
+		}},
+		{name: "lookup with incorrect field", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: []string{"hello {{ lookup \"secrets.dev.incorrect\" }}"}, data: "", want: []string{"hello <no value>"}, wantErr: false, itemsCall: func(itemsSvc *mocksitems.Items) {
+			itemsSvc.On("Get", context.Background(), "space", "env", "secrets", "dev").Return(&items.Item{
+				ID:           "dev",
+				SpaceID:      "space",
+				EnvID:        "env",
+				CollectionID: "secrets",
+				Data: map[string]interface{}{
+					"id":  "dev",
+					"key": "1234",
+				},
+			}, nil).Once()
+		}},
+		{name: "system ", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: []string{"hello {{ system.SpaceID }}"}, data: "", want: []string{"hello space"}, wantErr: false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			itemsSvc := &mocksitems.Items{}
+			if tt.itemsCall != nil {
+				tt.itemsCall(itemsSvc)
+			}
+			tt.cnt = &content.Content{
+				Items: itemsSvc,
+			}
+			b := &Builder{
+				ctx:     tt.ctx,
+				cnt:     tt.cnt,
+				SpaceID: tt.SpaceID,
+				EnvID:   tt.EnvID,
+				funcs:   tt.funcs,
+			}
+
+			got, err := b.ExecuteList(tt.str, tt.data)
+			if tt.wantErr == true {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+			assert.Equal(t, tt.want, got)
+			if tt.itemsCall != nil {
+				itemsSvc.AssertExpectations(t)
+			}
+		})
+	}
+}
+
+func TestBuilder_ExecuteMap(t *testing.T) {
+	tests := []struct {
+		name    string
+		ctx     context.Context
+		cnt     *content.Content
+		SpaceID string
+		EnvID   string
+		funcs   template.FuncMap
+		str     map[string]interface{}
+		data    any
+		want    map[string]interface{}
+		wantErr bool
+
+		itemsCall func(itemsSvc *mocksitems.Items)
+	}{
+		{name: "error", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: map[string]interface{}{"hello": "{{ . }"}, data: "world", want: nil, wantErr: true},
+		{name: "empty", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: map[string]interface{}{}, data: "", want: map[string]interface{}{}, wantErr: false},
+		{name: "#1", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: map[string]interface{}{"hello": "{{ . }}"}, data: "world", want: map[string]interface{}{"hello": "world"}, wantErr: false},
+		{name: "#2", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: map[string]interface{}{"hello": "{{ . }}", "go": "{{ . }}"}, data: "world", want: map[string]interface{}{"hello": "world", "go": "world"}, wantErr: false},
+		{name: "#3 ", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: map[string]interface{}{}, data: "world", want: map[string]interface{}{}, wantErr: false},
+		{name: "#4 ", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: map[string]interface{}{"a": "b"}, data: "world", want: map[string]interface{}{"a": "b"}, wantErr: false},
+		{name: "lookup ", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: map[string]interface{}{"hello": "{{ lookup \"secrets.dev.key\" }}"}, data: "", want: map[string]interface{}{"hello": "1234"}, wantErr: false, itemsCall: func(itemsSvc *mocksitems.Items) {
+			itemsSvc.On("Get", context.Background(), "space", "env", "secrets", "dev").Return(&items.Item{
+				ID:           "dev",
+				SpaceID:      "space",
+				EnvID:        "env",
+				CollectionID: "secrets",
+				Data: map[string]interface{}{
+					"id":  "dev",
+					"key": "1234",
+				},
+			}, nil).Once()
+		}},
+		{name: "lookup with incorrect field", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: map[string]interface{}{"hello": "{{ lookup \"secrets.dev.incorrect\" }}"}, data: "", want: map[string]interface{}{"hello": "<no value>"}, wantErr: false, itemsCall: func(itemsSvc *mocksitems.Items) {
+			itemsSvc.On("Get", context.Background(), "space", "env", "secrets", "dev").Return(&items.Item{
+				ID:           "dev",
+				SpaceID:      "space",
+				EnvID:        "env",
+				CollectionID: "secrets",
+				Data: map[string]interface{}{
+					"id":  "dev",
+					"key": "1234",
+				},
+			}, nil).Once()
+		}},
+		{name: "system ", ctx: context.Background(), cnt: &content.Content{}, SpaceID: "space", EnvID: "env", funcs: template.FuncMap{}, str: map[string]interface{}{"hello": "{{ system.SpaceID }}"}, data: "", want: map[string]interface{}{"hello": "space"}, wantErr: false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			itemsSvc := &mocksitems.Items{}
+			if tt.itemsCall != nil {
+				tt.itemsCall(itemsSvc)
+			}
+			tt.cnt = &content.Content{
+				Items: itemsSvc,
+			}
+			b := &Builder{
+				ctx:     tt.ctx,
+				cnt:     tt.cnt,
+				SpaceID: tt.SpaceID,
+				EnvID:   tt.EnvID,
+				funcs:   tt.funcs,
+			}
+
+			got, err := b.ExecuteMap(tt.str, tt.data)
+			if tt.wantErr == true {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+			assert.Equal(t, tt.want, got)
+			if tt.itemsCall != nil {
+				itemsSvc.AssertExpectations(t)
+			}
+		})
+	}
+}
diff --git a/pkg/template/funcs.go b/pkg/template/funcs.go
new file mode 100644
index 0000000000000000000000000000000000000000..0c320ad139e964f002b691ce097b24e70e6cfaf3
--- /dev/null
+++ b/pkg/template/funcs.go
@@ -0,0 +1,43 @@
+package template
+
+import (
+	"strings"
+
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+)
+
+// getLookup returns a template helper func that reads a value from a collection item.
+// name has the form "<collection id>.<item id>.<field>".
+// Template usage: {{ lookup "secrets.key.value" }}
+func getLookup(b *Builder) any {
+	return func(name string) (any, error) {
+		parsedName := strings.Split(name, ".")
+		if len(parsedName) < 3 {
+			return "", errors.Errorf("incorrect parameter \"%s\"", name)
+		}
+
+		collectionID := parsedName[0]
+		itemID := parsedName[1]
+		field := parsedName[2]
+		item, err := b.cnt.Items.Get(b.Context(), b.SpaceID, b.EnvID, collectionID, itemID)
+		if err != nil {
+			return "", errors.Wrapf(err, "failed to get \"%s\"", name)
+		}
+
+		if len(item.Data) > 0 {
+			if v, ok := item.Data[field]; ok {
+				return v, nil
+			}
+		}
+
+		return nil, nil
+	}
+}
+
+// getSystem returns a template helper func that exposes the System object.
+// Template usage: {{ system.SpaceID }}
+func getSystem(b *Builder) any {
+	return func() *System {
+		return &System{builder: b}
+	}
+}
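+
+// Example template using both helpers (collection, item and field names are illustrative):
+//
+//	key {{ lookup "secrets.dev.key" }} of space {{ system.SpaceID }}
+//
+// lookup resolves Items.Get(ctx, SpaceID, EnvID, "secrets", "dev") and returns Data["key"],
+// while system exposes the builder's SpaceID and EnvID.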
diff --git a/pkg/template/system.go b/pkg/template/system.go
new file mode 100644
index 0000000000000000000000000000000000000000..8f8548eb11444e72ad4f003b9225f4d4a38e4e2a
--- /dev/null
+++ b/pkg/template/system.go
@@ -0,0 +1,13 @@
+package template
+
+type System struct {
+	builder *Builder
+}
+
+func (s *System) SpaceID() string {
+	return s.builder.SpaceID
+}
+
+func (s *System) EnvID() string {
+	return s.builder.EnvID
+}
diff --git a/pkg/users/middleware/caching_middleware.go b/pkg/users/middleware/caching_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..6dc04868c75edcd77ba9bb2b3adda3f1ff3022f0
--- /dev/null
+++ b/pkg/users/middleware/caching_middleware.go
@@ -0,0 +1,91 @@
+package service
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	services "git.perx.ru/perxis/perxis-go/pkg/options"
+	service "git.perx.ru/perxis/perxis-go/pkg/users"
+)
+
+func CachingMiddleware(cache *cache.Cache) Middleware {
+	return func(next service.Users) service.Users {
+		return &cachingMiddleware{
+			cache: cache,
+			next:  next,
+		}
+	}
+}
+
+type cachingMiddleware struct {
+	cache *cache.Cache
+	next  service.Users
+}
+
+func (m cachingMiddleware) Create(ctx context.Context, create *service.User) (user *service.User, err error) {
+	return m.next.Create(ctx, create)
+}
+
+func (m cachingMiddleware) Get(ctx context.Context, userId string) (user *service.User, err error) {
+
+	value, e := m.cache.Get(userId)
+	if e == nil {
+		return value.(*service.User), err
+	}
+	user, err = m.next.Get(ctx, userId)
+	if err == nil {
+		m.cache.Set(user.ID, user)
+		for _, i := range user.Identities {
+			m.cache.Set(i, user)
+		}
+	}
+	return user, err
+}
+
+func (m cachingMiddleware) Find(ctx context.Context, filter *service.Filter, options *services.FindOptions) (users []*service.User, total int, err error) {
+	return m.next.Find(ctx, filter, options)
+}
+
+func (m cachingMiddleware) Update(ctx context.Context, update *service.User) (err error) {
+
+	err = m.next.Update(ctx, update)
+	value, e := m.cache.Get(update.ID)
+	if err == nil && e == nil {
+		usr := value.(*service.User)
+		m.cache.Remove(usr.ID)
+		for _, i := range usr.Identities {
+			m.cache.Remove(i)
+		}
+	}
+	return err
+}
+
+func (m cachingMiddleware) Delete(ctx context.Context, userId string) (err error) {
+
+	err = m.next.Delete(ctx, userId)
+	value, e := m.cache.Get(userId)
+	if err == nil && e == nil {
+		usr := value.(*service.User)
+		m.cache.Remove(usr.ID)
+		for _, i := range usr.Identities {
+			m.cache.Remove(i)
+		}
+	}
+	return err
+}
+
+func (m cachingMiddleware) GetByIdentity(ctx context.Context, identity string) (user *service.User, err error) {
+
+	value, e := m.cache.Get(identity)
+	if e == nil {
+		return value.(*service.User), err
+	}
+	user, err = m.next.GetByIdentity(ctx, identity)
+	if err == nil {
+		m.cache.Set(user.ID, user)
+		for _, i := range user.Identities {
+			m.cache.Set(i, user)
+		}
+	}
+	return user, err
+}
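+
+// Note on invalidation: Get and GetByIdentity cache the user both under its ID
+// and under every identity, so both lookup paths share one cached object;
+// Update and Delete remove all of those keys together.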
diff --git a/pkg/users/middleware/caching_middleware_test.go b/pkg/users/middleware/caching_middleware_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..7ad09b52fdc003eac069005a8cdb956dd58949a7
--- /dev/null
+++ b/pkg/users/middleware/caching_middleware_test.go
@@ -0,0 +1,165 @@
+package service
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/cache"
+	"git.perx.ru/perxis/perxis-go/pkg/errors"
+	"git.perx.ru/perxis/perxis-go/pkg/users"
+	"git.perx.ru/perxis/perxis-go/pkg/users/mocks"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestUsersCache(t *testing.T) {
+
+	const (
+		userID   = "user_id"
+		identity = "user identity"
+		size     = 5
+		ttl      = 20 * time.Millisecond
+	)
+
+	errNotFound := errors.NotFound(errors.New("not found"))
+
+	t.Run("Get from cache", func(t *testing.T) {
+		usrs := &mocks.Users{}
+		ctx := context.Background()
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(usrs)
+
+		usrs.On("Get", mock.Anything, userID).Return(&users.User{ID: userID, Name: "User", Identities: []string{identity}}, nil).Once()
+
+		v1, err := svc.Get(ctx, userID)
+		require.NoError(t, err)
+
+		v2, err := svc.Get(ctx, userID)
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Expected the object to be returned from the cache.")
+
+		v3, err := svc.GetByIdentity(ctx, identity)
+		require.NoError(t, err)
+		assert.Same(t, v2, v3, "Expected the object to be returned from the cache when requested by identity.")
+
+		usrs.AssertExpectations(t)
+	})
+
+	t.Run("GetByIdentity from cache", func(t *testing.T) {
+		usrs := &mocks.Users{}
+		ctx := context.Background()
+
+		svc := CachingMiddleware(cache.NewCache(size, ttl))(usrs)
+
+		usrs.On("GetByIdentity", mock.Anything, identity).Return(&users.User{ID: userID, Name: "User", Identities: []string{identity}}, nil).Once()
+
+		v1, err := svc.GetByIdentity(ctx, identity)
+		require.NoError(t, err)
+
+		v2, err := svc.GetByIdentity(ctx, identity)
+		require.NoError(t, err)
+		assert.Same(t, v1, v2, "Expected the object to be returned from the cache.")
+
+		v3, err := svc.Get(ctx, userID)
+		require.NoError(t, err)
+		assert.Same(t, v2, v3, "Expected the object to be returned from the cache when requested by userID.")
+
+		usrs.AssertExpectations(t)
+	})
+
+	t.Run("Invalidate Cache", func(t *testing.T) {
+		t.Run("After Update", func(t *testing.T) {
+			usrs := &mocks.Users{}
+			ctx := context.Background()
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(usrs)
+
+			usrs.On("Get", mock.Anything, userID).Return(&users.User{ID: userID, Name: "User", Identities: []string{identity}}, nil).Once()
+			usrs.On("Update", mock.Anything, mock.Anything).Return(nil).Once()
+
+			v1, err := svc.Get(ctx, userID)
+			require.NoError(t, err)
+
+			v2, err := svc.GetByIdentity(ctx, identity)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be returned from the cache.")
+
+			err = svc.Update(ctx, &users.User{ID: userID, Name: "New User", Identities: []string{identity}})
+			require.NoError(t, err)
+
+			usrs.On("GetByIdentity", mock.Anything, identity).Return(&users.User{ID: userID, Name: "New User", Identities: []string{identity}}, nil).Once()
+
+			v3, err := svc.GetByIdentity(ctx, identity)
+			require.NoError(t, err)
+			assert.NotSame(t, v3, v2, "Expected the cache entry to be invalidated after the update and the object to be fetched from the service again.")
+
+			v4, err := svc.Get(ctx, userID)
+			require.NoError(t, err)
+			assert.NotSame(t, v4, v2)
+			assert.Same(t, v4, v3, "Expected the new object to be returned from the cache.")
+
+			usrs.AssertExpectations(t)
+		})
+
+		t.Run("After Delete", func(t *testing.T) {
+			usrs := &mocks.Users{}
+			ctx := context.Background()
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(usrs)
+
+			usrs.On("Get", mock.Anything, userID).Return(&users.User{ID: userID, Name: "User", Identities: []string{identity}}, nil).Once()
+			usrs.On("Delete", mock.Anything, mock.Anything).Return(nil).Once()
+
+			v1, err := svc.Get(ctx, userID)
+			require.NoError(t, err)
+
+			v2, err := svc.GetByIdentity(ctx, identity)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be returned from the cache.")
+
+			err = svc.Delete(ctx, userID)
+			require.NoError(t, err)
+
+			usrs.On("GetByIdentity", mock.Anything, identity).Return(nil, errNotFound).Once()
+			usrs.On("Get", mock.Anything, userID).Return(nil, errNotFound).Once()
+
+			_, err = svc.GetByIdentity(ctx, identity)
+			require.Error(t, err)
+			assert.EqualErrorf(t, err, "not found", "Expected the cache entry to be removed after deletion from storage and the service error to be returned.")
+
+			_, err = svc.Get(ctx, userID)
+			require.Error(t, err)
+			assert.EqualErrorf(t, err, "not found", "Expected the cache entry to be removed after deletion from storage and the service error to be returned.")
+
+			usrs.AssertExpectations(t)
+		})
+
+		t.Run("After TTL expired", func(t *testing.T) {
+			usrs := &mocks.Users{}
+			ctx := context.Background()
+
+			svc := CachingMiddleware(cache.NewCache(size, ttl))(usrs)
+
+			usrs.On("Get", mock.Anything, userID).Return(&users.User{ID: userID, Name: "User", Identities: []string{identity}}, nil).Once()
+
+			v1, err := svc.Get(ctx, userID)
+			require.NoError(t, err)
+
+			v2, err := svc.Get(ctx, userID)
+			require.NoError(t, err)
+			assert.Same(t, v1, v2, "Expected the object to be returned from the cache.")
+
+			time.Sleep(2 * ttl)
+
+			usrs.On("Get", mock.Anything, userID).Return(&users.User{ID: userID, Name: "User", Identities: []string{identity}}, nil).Once()
+
+			v3, err := svc.Get(ctx, userID)
+			require.NoError(t, err)
+			assert.NotSame(t, v2, v3, "Expected the object to be evicted from the cache after the TTL expired.")
+
+			usrs.AssertExpectations(t)
+		})
+	})
+}
diff --git a/pkg/users/middleware/error_logging_middleware.go b/pkg/users/middleware/error_logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..a9084fa7a05608e45f7c5436934b5738c685a0e9
--- /dev/null
+++ b/pkg/users/middleware/error_logging_middleware.go
@@ -0,0 +1,91 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/error_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/users -i Users -t ../../../assets/templates/middleware/error_log -o error_logging_middleware.go -l ""
+
+import (
+	"context"
+
+	"git.perx.ru/perxis/perxis-go/pkg/options"
+	"git.perx.ru/perxis/perxis-go/pkg/users"
+	"go.uber.org/zap"
+)
+
+// errorLoggingMiddleware implements users.Users that is instrumented with logging
+type errorLoggingMiddleware struct {
+	logger *zap.Logger
+	next   users.Users
+}
+
+// ErrorLoggingMiddleware instruments an implementation of the users.Users with simple logging
+func ErrorLoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next users.Users) users.Users {
+		return &errorLoggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *errorLoggingMiddleware) Create(ctx context.Context, create *users.User) (user *users.User, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Create(ctx, create)
+}
+
+func (m *errorLoggingMiddleware) Delete(ctx context.Context, userId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Delete(ctx, userId)
+}
+
+func (m *errorLoggingMiddleware) Find(ctx context.Context, filter *users.Filter, options *options.FindOptions) (users []*users.User, total int, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Find(ctx, filter, options)
+}
+
+func (m *errorLoggingMiddleware) Get(ctx context.Context, userId string) (user *users.User, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Get(ctx, userId)
+}
+
+func (m *errorLoggingMiddleware) GetByIdentity(ctx context.Context, identity string) (user *users.User, err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.GetByIdentity(ctx, identity)
+}
+
+func (m *errorLoggingMiddleware) Update(ctx context.Context, update *users.User) (err error) {
+	logger := m.logger
+	defer func() {
+		if err != nil {
+			logger.Warn("response error", zap.Error(err))
+		}
+	}()
+	return m.next.Update(ctx, update)
+}
diff --git a/pkg/users/middleware/logging_middleware.go b/pkg/users/middleware/logging_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..1fcae0626b75992ae563ff87adfd4e33edd49af6
--- /dev/null
+++ b/pkg/users/middleware/logging_middleware.go
@@ -0,0 +1,251 @@
+package service
+
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/access_log
+// gowrap: http://github.com/hexdigest/gowrap
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/users -i Users -t ../../../assets/templates/middleware/access_log -o logging_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"git.perx.ru/perxis/perxis-go/pkg/auth"
+	"git.perx.ru/perxis/perxis-go/pkg/options"
+	"git.perx.ru/perxis/perxis-go/pkg/users"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// loggingMiddleware implements users.Users that is instrumented with logging
+type loggingMiddleware struct {
+	logger *zap.Logger
+	next   users.Users
+}
+
+// LoggingMiddleware instruments an implementation of the users.Users with simple logging
+func LoggingMiddleware(logger *zap.Logger) Middleware {
+	return func(next users.Users) users.Users {
+		return &loggingMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *loggingMiddleware) Create(ctx context.Context, create *users.User) (user *users.User, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":    ctx,
+		"create": create} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Request", fields...)
+
+	user, err = m.next.Create(ctx, create)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"user": user,
+		"err":  err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Create.Response", fields...)
+
+	return user, err
+}
+
+func (m *loggingMiddleware) Delete(ctx context.Context, userId string) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":    ctx,
+		"userId": userId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Request", fields...)
+
+	err = m.next.Delete(ctx, userId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Delete.Response", fields...)
+
+	return err
+}
+
+func (m *loggingMiddleware) Find(ctx context.Context, filter *users.Filter, options *options.FindOptions) (users []*users.User, total int, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":     ctx,
+		"filter":  filter,
+		"options": options} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Find.Request", fields...)
+
+	users, total, err = m.next.Find(ctx, filter, options)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"users": users,
+		"total": total,
+		"err":   err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Find.Response", fields...)
+
+	return users, total, err
+}
+
+func (m *loggingMiddleware) Get(ctx context.Context, userId string) (user *users.User, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":    ctx,
+		"userId": userId} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Request", fields...)
+
+	user, err = m.next.Get(ctx, userId)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"user": user,
+		"err":  err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Get.Response", fields...)
+
+	return user, err
+}
+
+func (m *loggingMiddleware) GetByIdentity(ctx context.Context, identity string) (user *users.User, err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":      ctx,
+		"identity": identity} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("GetByIdentity.Request", fields...)
+
+	user, err = m.next.GetByIdentity(ctx, identity)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"user": user,
+		"err":  err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("GetByIdentity.Response", fields...)
+
+	return user, err
+}
+
+func (m *loggingMiddleware) Update(ctx context.Context, update *users.User) (err error) {
+	begin := time.Now()
+	var fields []zapcore.Field
+	for k, v := range map[string]interface{}{
+		"ctx":    ctx,
+		"update": update} {
+		if k == "ctx" {
+			fields = append(fields, zap.String("principal", fmt.Sprint(auth.GetPrincipal(ctx))))
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Request", fields...)
+
+	err = m.next.Update(ctx, update)
+
+	fields = []zapcore.Field{
+		zap.Duration("time", time.Since(begin)),
+		zap.Error(err),
+	}
+
+	for k, v := range map[string]interface{}{
+		"err": err} {
+		if k == "err" {
+			continue
+		}
+		fields = append(fields, zap.Reflect(k, v))
+	}
+
+	m.logger.Debug("Update.Response", fields...)
+
+	return err
+}
diff --git a/pkg/users/middleware/middleware.go b/pkg/users/middleware/middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..2888f263ca2083bdea91e3c3e62cde40f85e974f
--- /dev/null
+++ b/pkg/users/middleware/middleware.go
@@ -0,0 +1,28 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/middleware
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/users -i Users -t ../../../assets/templates/middleware/middleware -o middleware.go -l ""
+
+import (
+	"git.perx.ru/perxis/perxis-go/pkg/users"
+	"go.uber.org/zap"
+)
+
+type Middleware func(users.Users) users.Users
+
+func WithLog(s users.Users, logger *zap.Logger, log_access bool) users.Users {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	logger = logger.Named("Users")
+	s = ErrorLoggingMiddleware(logger)(s)
+	if log_access {
+		s = LoggingMiddleware(logger)(s)
+	}
+	s = RecoveringMiddleware(logger)(s)
+	return s
+}
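
Not part of the diff: a usage sketch for WithLog, the intended entry point that stacks the generated decorators. Given the wrapping order above, a call passes through recovery first, then (when `log_access` is true) access logging, then error logging, and finally reaches the wrapped implementation. The helper name below is hypothetical.

package wiring

import (
	"git.perx.ru/perxis/perxis-go/pkg/users"
	service "git.perx.ru/perxis/perxis-go/pkg/users/middleware" // declared package name is `service`
	"go.uber.org/zap"
)

// NewInstrumentedUsers returns impl wrapped as
// recovery -> access log -> error log -> impl (outermost to innermost).
func NewInstrumentedUsers(impl users.Users, logger *zap.Logger) users.Users {
	return service.WithLog(impl, logger, true)
}
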
diff --git a/pkg/users/middleware/recovering_middleware.go b/pkg/users/middleware/recovering_middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..57c401b12c0b56e9f40109570c6efe59bac8d902
--- /dev/null
+++ b/pkg/users/middleware/recovering_middleware.go
@@ -0,0 +1,104 @@
+// Code generated by gowrap. DO NOT EDIT.
+// template: ../../../assets/templates/middleware/recovery
+// gowrap: http://github.com/hexdigest/gowrap
+
+package service
+
+//go:generate gowrap gen -p git.perx.ru/perxis/perxis-go/pkg/users -i Users -t ../../../assets/templates/middleware/recovery -o recovering_middleware.go -l ""
+
+import (
+	"context"
+	"fmt"
+
+	"git.perx.ru/perxis/perxis-go/pkg/options"
+	"git.perx.ru/perxis/perxis-go/pkg/users"
+	"go.uber.org/zap"
+)
+
+// recoveringMiddleware implements users.Users that is instrumented with logging
+type recoveringMiddleware struct {
+	logger *zap.Logger
+	next   users.Users
+}
+
+// RecoveringMiddleware instruments an implementation of the users.Users with simple logging
+func RecoveringMiddleware(logger *zap.Logger) Middleware {
+	return func(next users.Users) users.Users {
+		return &recoveringMiddleware{
+			next:   next,
+			logger: logger,
+		}
+	}
+}
+
+func (m *recoveringMiddleware) Create(ctx context.Context, create *users.User) (user *users.User, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Create(ctx, create)
+}
+
+func (m *recoveringMiddleware) Delete(ctx context.Context, userId string) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Delete(ctx, userId)
+}
+
+func (m *recoveringMiddleware) Find(ctx context.Context, filter *users.Filter, options *options.FindOptions) (users []*users.User, total int, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Find(ctx, filter, options)
+}
+
+func (m *recoveringMiddleware) Get(ctx context.Context, userId string) (user *users.User, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Get(ctx, userId)
+}
+
+func (m *recoveringMiddleware) GetByIdentity(ctx context.Context, identity string) (user *users.User, err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.GetByIdentity(ctx, identity)
+}
+
+func (m *recoveringMiddleware) Update(ctx context.Context, update *users.User) (err error) {
+	logger := m.logger
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic", zap.Error(fmt.Errorf("%v", r)))
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	return m.next.Update(ctx, update)
+}
diff --git a/pkg/version/version.go b/pkg/version/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..e7f3817890c2cd3ed84a1e5c70ad42dfdf7239ad
--- /dev/null
+++ b/pkg/version/version.go
@@ -0,0 +1,17 @@
+package version
+
+import (
+	"fmt"
+)
+
+type Version struct {
+	ServerVersion string
+	APIVersion    string
+	Commit        string
+	BuildTime     string
+	BuildNumber   int
+}
+
+func (v *Version) String() string {
+	return fmt.Sprintf("%s-api%s-%s build: %d (%s)", v.ServerVersion, v.APIVersion, v.Commit, v.BuildNumber, v.BuildTime)
+}
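
Not part of the diff: a small usage sketch for pkg/version. The field values below are hypothetical; in practice they would come from build metadata injected at build time.

package main

import (
	"fmt"

	"git.perx.ru/perxis/perxis-go/pkg/version"
)

func main() {
	// Hypothetical build metadata for illustration only.
	v := version.Version{
		ServerVersion: "1.4.0",
		APIVersion:    "2",
		Commit:        "9a64367",
		BuildTime:     "2023-05-12T10:00:00Z",
		BuildNumber:   128,
	}
	fmt.Println(v.String()) // 1.4.0-api2-9a64367 build: 128 (2023-05-12T10:00:00Z)
}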