Compare commits
25 commits: main...058aff4019

- 058aff4019
- 6845219e94
- 99764eb91f
- 644a94656a
- cd98d1f4a2
- f34539c06b
- 5192658607
- 7b670947c3
- aaf227f25a
- efa2bec38b
- 8fe593bb6f
- 223a63ee6d
- 19c02d3573
- 1f837afadc
- 3489703ba7
- 0a9285c05e
- b14a04c163
- 047e8d2df0
- 65b1daa52c
- fb2ae39b47
- 3efa753394
- f7c1e461e6
- b05058b5cd
- 9d04f43104
- 13744f0500
.gitignore (vendored, 2 changes)

@@ -2,3 +2,5 @@ grpc/google
 grpc/grafeas
 
 .idea
+
+coverage.*
Makefile (33 changes)

@@ -6,7 +6,7 @@ download-third-party:
 	mv ./grpc/third_party/googleapis/grafeas ./grpc
 	rm -rf ./grpc/third_party
 
-gen-proto-geolocation:
+gen-proto:
 	mkdir -p ./grpc
 
 	@protoc -I ./grpc \
@@ -14,3 +14,34 @@ gen-proto-geolocation:
 	--go-grpc_out=grpc --go-grpc_opt paths=source_relative \
 	--grpc-gateway_out=grpc --grpc-gateway_opt paths=source_relative \
 	./grpc/snapshot.proto
+
+# Run all tests
+test:
+	go test -v ./...
+
+# Run unit tests
+test-unit:
+	go test -v ./store/... ./hash/... ./archive/...
+
+# Run integration tests
+test-integration:
+	go test -v -tags=integration ./...
+
+# Run functional tests
+test-functional:
+	go test -v -run TestFull ./...
+
+# Run performance tests
+test-performance:
+	go test -v -run TestPerformanceMetrics ./...
+	go test -v -bench=. ./...
+
+# Run tests with code coverage
+test-coverage:
+	go test -v -coverprofile=coverage.out ./...
+	go tool cover -html=coverage.out -o coverage.html
+
+# Run all checks (tests + linter)
+check: test lint
+
+.PHONY: download-third-party gen-proto test test-unit test-integration test-functional test-performance test-coverage lint check
README.md (new file, 343 lines)

@@ -0,0 +1,343 @@

# Agate

Agate is a Go library for creating, managing, and sharing snapshots of directories. It provides functionality for creating incremental snapshots, storing them efficiently, and sharing them over a network using gRPC.

## Installation

```bash
go get gitea.unprism.ru/KRBL/Agate
```

## Features

- Create snapshots of directories
- Incremental snapshots (only store changes)
- Restore snapshots
- List and manage snapshots
- Share snapshots over a network using gRPC
- Connect to remote snapshot repositories

## Basic Usage

### Creating a Snapshot Repository

To create a snapshot repository, you need to initialize the Agate library with the appropriate options:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"path/filepath"

	"gitea.unprism.ru/KRBL/Agate"
	"gitea.unprism.ru/KRBL/Agate/stores"
)

func main() {
	// Create directories for your repository
	workDir := "/path/to/your/repository"
	if err := os.MkdirAll(workDir, 0755); err != nil {
		log.Fatalf("Failed to create work directory: %v", err)
	}

	// Initialize the default stores
	metadataStore, blobStore, err := stores.InitDefaultStores(workDir)
	if err != nil {
		log.Fatalf("Failed to initialize stores: %v", err)
	}
	defer metadataStore.Close()

	// Initialize Agate
	agateOptions := agate.AgateOptions{
		WorkDir:       workDir,
		MetadataStore: metadataStore,
		BlobStore:     blobStore,
	}

	ag, err := agate.New(agateOptions)
	if err != nil {
		log.Fatalf("Failed to initialize Agate: %v", err)
	}
	defer ag.Close()

	// Create a snapshot
	ctx := context.Background()
	snapshotID, err := ag.SaveSnapshot(ctx, "My First Snapshot", "")
	if err != nil {
		log.Fatalf("Failed to create snapshot: %v", err)
	}
	fmt.Printf("Created snapshot with ID: %s\n", snapshotID)

	// List snapshots
	snapshots, err := ag.ListSnapshots(ctx)
	if err != nil {
		log.Fatalf("Failed to list snapshots: %v", err)
	}
	fmt.Printf("Found %d snapshots:\n", len(snapshots))
	for _, s := range snapshots {
		fmt.Printf(" - %s: %s (created at %s)\n", s.ID, s.Name, s.CreationTime.Format("2006-01-02 15:04:05"))
	}
}
```
### Hosting a Snapshot Repository

To host a snapshot repository and make it available over the network, you can use the `StartServer` method:

```go
package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"syscall"

	"gitea.unprism.ru/KRBL/Agate"
	"gitea.unprism.ru/KRBL/Agate/stores"
)

func main() {
	// Create directories for your repository
	workDir := "/path/to/your/repository"
	if err := os.MkdirAll(workDir, 0755); err != nil {
		log.Fatalf("Failed to create work directory: %v", err)
	}

	// Initialize the default stores
	metadataStore, blobStore, err := stores.InitDefaultStores(workDir)
	if err != nil {
		log.Fatalf("Failed to initialize stores: %v", err)
	}
	defer metadataStore.Close()

	// Initialize Agate
	agateOptions := agate.AgateOptions{
		WorkDir:       workDir,
		MetadataStore: metadataStore,
		BlobStore:     blobStore,
	}

	ag, err := agate.New(agateOptions)
	if err != nil {
		log.Fatalf("Failed to initialize Agate: %v", err)
	}
	defer ag.Close()

	// Start the gRPC server
	ctx := context.Background()
	address := "0.0.0.0:50051" // Listen on all interfaces, port 50051
	if err := ag.StartServer(ctx, address); err != nil {
		log.Fatalf("Failed to start server: %v", err)
	}

	log.Printf("Server started on %s", address)

	// Wait for termination signal
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
	<-sigCh

	log.Println("Shutting down...")
}
```

### Connecting to a Hosted Snapshot Repository

To connect to a hosted snapshot repository and retrieve snapshots:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"gitea.unprism.ru/KRBL/Agate"
	"gitea.unprism.ru/KRBL/Agate/stores"
)

func main() {
	// Create directories for your local repository
	workDir := "/path/to/your/local/repository"
	if err := os.MkdirAll(workDir, 0755); err != nil {
		log.Fatalf("Failed to create work directory: %v", err)
	}

	// Initialize the default stores
	metadataStore, blobStore, err := stores.InitDefaultStores(workDir)
	if err != nil {
		log.Fatalf("Failed to initialize stores: %v", err)
	}
	defer metadataStore.Close()

	// Initialize Agate
	agateOptions := agate.AgateOptions{
		WorkDir:       workDir,
		MetadataStore: metadataStore,
		BlobStore:     blobStore,
	}

	ag, err := agate.New(agateOptions)
	if err != nil {
		log.Fatalf("Failed to initialize Agate: %v", err)
	}
	defer ag.Close()

	// Connect to a remote server
	ctx := context.Background()
	remoteAddress := "remote-server:50051"

	// List snapshots from the remote server
	snapshots, err := ag.GetRemoteSnapshotList(ctx, remoteAddress)
	if err != nil {
		log.Fatalf("Failed to list remote snapshots: %v", err)
	}

	fmt.Printf("Found %d remote snapshots:\n", len(snapshots))
	for _, s := range snapshots {
		fmt.Printf(" - %s: %s (created at %s)\n", s.ID, s.Name, s.CreationTime.Format("2006-01-02 15:04:05"))
	}

	// Download a specific snapshot
	if len(snapshots) > 0 {
		snapshotID := snapshots[0].ID
		fmt.Printf("Downloading snapshot %s...\n", snapshotID)

		// Download the snapshot (pass empty string as localParentID if this is the first download)
		if err := ag.GetRemoteSnapshot(ctx, remoteAddress, snapshotID, ""); err != nil {
			log.Fatalf("Failed to download snapshot: %v", err)
		}

		fmt.Printf("Successfully downloaded snapshot %s\n", snapshotID)
	}
}
```
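When a compatible snapshot has already been downloaded, its ID can be passed as `localParentID` so that only the difference is transferred. A brief sketch of that incremental case; `previousSnapshotID` is a hypothetical placeholder for the ID of a snapshot you already hold in the local repository:

```go
// Differential download: reuse a locally available parent snapshot so that
// only changed files need to be fetched from the remote server.
// previousSnapshotID is a hypothetical placeholder for an existing local ID.
if err := ag.GetRemoteSnapshot(ctx, remoteAddress, snapshotID, previousSnapshotID); err != nil {
	log.Fatalf("Failed to download snapshot diff: %v", err)
}
```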
## Advanced Usage

### Registering a Local Snapshot

You can register a local snapshot from an existing archive file with a specified UUID:

```go
// Register a local snapshot from an archive file
archivePath := "/path/to/your/archive.zip"
snapshotID := "custom-uuid-for-snapshot"
snapshotName := "My Local Snapshot"

if err := ag.RegisterLocalSnapshot(ctx, archivePath, snapshotID, snapshotName); err != nil {
	log.Fatalf("Failed to register local snapshot: %v", err)
}
```

### Downloading Only Snapshot Metadata

You can download only the metadata of a snapshot from a remote server without downloading the actual files:

```go
// Download only the metadata of a snapshot from a remote server
remoteAddress := "remote-server:50051"
snapshotID := "snapshot-id-to-download"

if err := ag.GetRemoteSnapshotMetadata(ctx, remoteAddress, snapshotID); err != nil {
	log.Fatalf("Failed to download snapshot metadata: %v", err)
}

// If you have a local blob but missing metadata, you can restore the metadata
// by passing an empty address
if err := ag.GetRemoteSnapshotMetadata(ctx, "", snapshotID); err != nil {
	log.Fatalf("Failed to restore snapshot metadata: %v", err)
}
```

### Creating Incremental Snapshots

You can create incremental snapshots by specifying a parent snapshot ID:

```go
// Create a first snapshot
snapshotID1, err := ag.SaveSnapshot(ctx, "First Snapshot", "")
if err != nil {
	log.Fatalf("Failed to create first snapshot: %v", err)
}

// Make some changes to your files...

// Create a second snapshot with the first one as parent
snapshotID2, err := ag.SaveSnapshot(ctx, "Second Snapshot", snapshotID1)
if err != nil {
	log.Fatalf("Failed to create second snapshot: %v", err)
}
```

### Restoring a Snapshot

To restore a snapshot:

```go
if err := ag.RestoreSnapshot(ctx, snapshotID); err != nil {
	log.Fatalf("Failed to restore snapshot: %v", err)
}
```

### Getting Snapshot Details

To get detailed information about a snapshot:

```go
snapshot, err := ag.GetSnapshotDetails(ctx, snapshotID)
if err != nil {
	log.Fatalf("Failed to get snapshot details: %v", err)
}

fmt.Printf("Snapshot: %s\n", snapshot.Name)
fmt.Printf("Created: %s\n", snapshot.CreationTime.Format("2006-01-02 15:04:05"))
fmt.Printf("Files: %d\n", len(snapshot.Files))
```

### Deleting a Snapshot

To delete a snapshot:

```go
if err := ag.DeleteSnapshot(ctx, snapshotID); err != nil {
	log.Fatalf("Failed to delete snapshot: %v", err)
}
```

## API Reference

### Agate

The main entry point for the library.

- `New(options AgateOptions) (*Agate, error)` - Create a new Agate instance
- `SaveSnapshot(ctx context.Context, name string, parentID string) (string, error)` - Create a new snapshot
- `RestoreSnapshot(ctx context.Context, snapshotID string) error` - Restore a snapshot
- `ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error)` - List all snapshots
- `GetSnapshotDetails(ctx context.Context, snapshotID string) (*store.Snapshot, error)` - Get details of a snapshot
- `DeleteSnapshot(ctx context.Context, snapshotID string) error` - Delete a snapshot
- `StartServer(ctx context.Context, address string) error` - Start a gRPC server to share snapshots
- `ConnectRemote(address string) (*grpc.SnapshotClient, error)` - Connect to a remote server
- `GetRemoteSnapshotList(ctx context.Context, address string) ([]store.SnapshotInfo, error)` - List snapshots from a remote server
- `GetRemoteSnapshot(ctx context.Context, address string, snapshotID string, localParentID string) error` - Download a snapshot from a remote server
- `RegisterLocalSnapshot(ctx context.Context, archivePath string, snapshotID string, name string) error` - Register a local snapshot from an archive path with a specified UUID
- `GetRemoteSnapshotMetadata(ctx context.Context, address string, snapshotID string) error` - Download only the metadata of a snapshot from a remote server

### AgateOptions

Configuration options for the Agate library.

- `WorkDir string` - Directory where snapshots will be stored and managed
- `OpenFunc func(dir string) error` - Called after a snapshot is restored
- `CloseFunc func() error` - Called before a snapshot is created or restored
- `MetadataStore store.MetadataStore` - Implementation of the metadata store
- `BlobStore store.BlobStore` - Implementation of the blob store
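The `OpenFunc` and `CloseFunc` hooks are the intended integration point for applications that keep files open inside the managed directory (for example, a database). A minimal sketch of wiring them up, in the same style as the examples above; `openDB` and `closeDB` are hypothetical placeholders for the caller's own resource management:

```go
// Sketch: let Agate close application resources before a snapshot or restore
// and reopen them afterwards. openDB and closeDB are hypothetical helpers.
agateOptions := agate.AgateOptions{
	WorkDir: workDir,
	// Called before a snapshot is created or restored, so files are flushed
	// and closed while Agate copies or replaces them.
	CloseFunc: func() error {
		return closeDB()
	},
	// Called after a snapshot is restored, so the application can reopen its
	// files from the restored directory.
	OpenFunc: func(dir string) error {
		return openDB(dir)
	},
	MetadataStore: metadataStore,
	BlobStore:     blobStore,
}
```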
api.go (488 changes)

@@ -4,10 +4,20 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"io"
+	"log"
 	"os"
 	"path/filepath"
+	"sync"
+	"time"
 
-	"unprism.ru/KRBL/agate/store"
+	"gitea.unprism.ru/KRBL/Agate/archive"
+	"gitea.unprism.ru/KRBL/Agate/interfaces"
+	"gitea.unprism.ru/KRBL/Agate/models"
+	"gitea.unprism.ru/KRBL/Agate/remote"
+
+	"gitea.unprism.ru/KRBL/Agate/store"
+	"gitea.unprism.ru/KRBL/Agate/stores"
 )
 
 // AgateOptions defines configuration options for the Agate library.
@@ -23,22 +33,33 @@ type AgateOptions struct {
 	CloseFunc func() error
 
 	// MetadataStore is the implementation of the metadata store to use.
-	// Use the stores package to initialize the default implementation:
+	// If nil, a default SQLite-based metadata store will be created automatically.
+	// Use the stores package to initialize a custom implementation:
 	// metadataStore, err := stores.NewDefaultMetadataStore(metadataDir)
 	MetadataStore store.MetadataStore
 
 	// BlobStore is the implementation of the blob store to use.
-	// Use the stores package to initialize the default implementation:
+	// If nil, a default filesystem-based blob store will be created automatically.
+	// Use the stores package to initialize a custom implementation:
 	// blobStore, err := stores.NewDefaultBlobStore(blobsDir)
 	BlobStore store.BlobStore
+
+	// CleanOnRestore specifies whether the target directory should be cleaned before restoring a snapshot.
+	CleanOnRestore bool
+
+	// Logger is the logger to use for output. If nil, logging is disabled.
+	Logger *log.Logger
 }
 
 // Agate is the main entry point for the snapshot library.
 type Agate struct {
-	manager     SnapshotManager
+	mutex       sync.Mutex
+	manager     interfaces.SnapshotManager
 	options     AgateOptions
 	metadataDir string
 	blobsDir    string
+	currentSnapshotID string
+	currentIDFile     string
 }
 
 // New initializes a new instance of the Agate library with the given options.
@@ -47,6 +68,11 @@ func New(options AgateOptions) (*Agate, error) {
 		return nil, errors.New("work directory cannot be empty")
 	}
 
+	// Initialize logger if not provided
+	if options.Logger == nil {
+		options.Logger = log.New(io.Discard, "", 0)
+	}
+
 	// Create the work directory if it doesn't exist
 	if err := os.MkdirAll(options.WorkDir, 0755); err != nil {
 		return nil, fmt.Errorf("failed to create work directory: %w", err)
@@ -69,38 +95,96 @@ func New(options AgateOptions) (*Agate, error) {
 	var err error
 
 	// Use provided stores or initialize default ones
-	if options.MetadataStore != nil {
+	if options.MetadataStore != nil && options.BlobStore != nil {
+		// Use the provided stores
 		metadataStore = options.MetadataStore
-	} else {
-		// For default implementation, the user needs to initialize and provide the stores
-		return nil, errors.New("metadata store must be provided")
-	}
-
-	if options.BlobStore != nil {
 		blobStore = options.BlobStore
+	} else if options.MetadataStore == nil && options.BlobStore == nil {
+		// Initialize both stores with default implementations
+		metadataStore, blobStore, err = stores.InitDefaultStores(options.WorkDir)
+		if err != nil {
+			return nil, fmt.Errorf("failed to initialize default stores: %w", err)
+		}
+		// Update options with the created stores
+		options.MetadataStore = metadataStore
+		options.BlobStore = blobStore
+	} else if options.MetadataStore == nil {
+		// Initialize only the metadata store
+		metadataStore, err = stores.NewDefaultMetadataStore(metadataDir)
+		if err != nil {
+			return nil, fmt.Errorf("failed to initialize default metadata store: %w", err)
+		}
+		blobStore = options.BlobStore
+		// Update options with the created metadata store
+		options.MetadataStore = metadataStore
 	} else {
-		// For default implementation, the user needs to initialize and provide the stores
-		return nil, errors.New("blob store must be provided")
+		// Initialize only the blob store
+		blobStore, err = stores.NewDefaultBlobStore(blobsDir)
+		if err != nil {
+			return nil, fmt.Errorf("failed to initialize default blob store: %w", err)
+		}
+		metadataStore = options.MetadataStore
+		// Update options with the created blob store
+		options.BlobStore = blobStore
 	}
 
 	// Create the snapshot manager
-	manager, err := CreateSnapshotManager(metadataStore, blobStore)
+	manager, err := CreateSnapshotManager(metadataStore, blobStore, options.Logger)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create snapshot manager: %w", err)
 	}
 
-	return &Agate{
+	// Create a file path for storing the current snapshot ID
+	currentIDFile := filepath.Join(options.WorkDir, "current_snapshot_id")
+
+	agate := &Agate{
 		manager:     manager,
 		options:     options,
 		metadataDir: metadataDir,
 		blobsDir:    blobsDir,
-	}, nil
+		currentIDFile: currentIDFile,
+	}
+
+	// Load the current snapshot ID if it exists
+	if _, err := os.Stat(currentIDFile); err == nil {
+		data, err := os.ReadFile(currentIDFile)
+		if err == nil && len(data) > 0 {
+			agate.currentSnapshotID = string(data)
+		}
+	}
+
+	// Call OpenFunc if provided to initialize resources in the active directory
+	if options.OpenFunc != nil {
+		if err := options.OpenFunc(blobStore.GetActiveDir()); err != nil {
+			return nil, fmt.Errorf("failed to open resources during initialization: %w", err)
+		}
+	}
+
+	return agate, nil
 }
 
-// SaveSnapshot creates a new snapshot from the current state of the work directory.
+func (a *Agate) GetActiveDir() string {
+	return a.options.BlobStore.GetActiveDir()
+}
+
+func (a *Agate) GetMetadataDir() string {
+	return a.metadataDir
+}
+
+func (a *Agate) GetBlobsDir() string {
+	return a.blobsDir
+}
+
+// SaveSnapshot creates a new snapshot from the current state of the active directory.
 // If parentID is provided, it will be set as the parent of the new snapshot.
+// If parentID is empty, it will use the ID of the snapshot currently loaded in the active directory.
 // Returns the ID of the created snapshot.
 func (a *Agate) SaveSnapshot(ctx context.Context, name string, parentID string) (string, error) {
+	a.mutex.Lock()
+	defer a.mutex.Unlock()
+
+	a.options.Logger.Printf("Creating new snapshot with name: %s", name)
+
 	// Call CloseFunc if provided
 	if a.options.CloseFunc != nil {
 		if err := a.options.CloseFunc(); err != nil {
@@ -108,24 +192,98 @@ func (a *Agate) SaveSnapshot(ctx context.Context, name string, parentID string)
 		}
 	}
 
+	defer func() {
+		if a.options.OpenFunc != nil {
+			if err := a.options.OpenFunc(a.options.BlobStore.GetActiveDir()); err != nil {
+				a.options.Logger.Printf("ERROR: failed to open resources after snapshot creation: %v", err)
+			}
+		}
+	}()
+
+	// If parentID is not provided, use the current snapshot ID
+	if parentID == "" {
+		parentID = a.currentSnapshotID
+	}
+
+	effectiveParentID := parentID
+
 	// Create the snapshot
-	snapshot, err := a.manager.CreateSnapshot(ctx, a.options.WorkDir, name, parentID)
+	snapshot, err := a.manager.CreateSnapshot(ctx, a.options.BlobStore.GetActiveDir(), name, effectiveParentID)
 	if err != nil {
+		a.options.Logger.Printf("ERROR: failed to create snapshot: %v", err)
 		return "", fmt.Errorf("failed to create snapshot: %w", err)
 	}
 
-	// Call OpenFunc if provided
-	if a.options.OpenFunc != nil {
-		if err := a.options.OpenFunc(a.options.WorkDir); err != nil {
-			return "", fmt.Errorf("failed to open resources after snapshot: %w", err)
-		}
-	}
+	a.options.Logger.Printf("Successfully created snapshot with ID: %s", snapshot.ID)
+
+	// Update the current snapshot ID to the newly created snapshot
+	a.currentSnapshotID = snapshot.ID
+
+	// Save the current snapshot ID to a file
+	if err := a.saveCurrentSnapshotID(); err != nil {
+		return "", fmt.Errorf("failed to save current snapshot ID: %w", err)
+	}
 
 	return snapshot.ID, nil
 }
 
-// RestoreSnapshot extracts a snapshot to the work directory.
+// SnapshotAsync creates a new snapshot asynchronously.
+// It returns the job ID (which is also the snapshot ID) immediately.
+// The actual snapshot creation happens in a background goroutine.
+// Use GetSnapshotStatus to check the progress.
+func (a *Agate) SnapshotAsync(ctx context.Context, name string, parentID string) (string, error) {
+	a.options.Logger.Printf("Starting async snapshot creation with name: %s", name)
+
+	// If parentID is not provided, use the current snapshot ID
+	if parentID == "" {
+		parentID = a.currentSnapshotID
+	}
+
+	return a.manager.CreateSnapshotAsync(ctx, a.options.BlobStore.GetActiveDir(), name, parentID,
+		func() {
+			// onStart: Lock mutex and close resources
+			a.mutex.Lock()
+			if a.options.CloseFunc != nil {
+				if err := a.options.CloseFunc(); err != nil {
+					a.options.Logger.Printf("ERROR: failed to close resources before async snapshot: %v", err)
+				}
+			}
+		},
+		func(id string, err error) {
+			// onFinish: Open resources, update state, and unlock mutex
+			defer a.mutex.Unlock()
+
+			if a.options.OpenFunc != nil {
+				if err := a.options.OpenFunc(a.options.BlobStore.GetActiveDir()); err != nil {
+					a.options.Logger.Printf("ERROR: failed to open resources after async snapshot: %v", err)
+				}
+			}
+
+			if err == nil {
+				a.currentSnapshotID = id
+				if err := a.saveCurrentSnapshotID(); err != nil {
+					a.options.Logger.Printf("ERROR: failed to save current snapshot ID: %v", err)
+				}
+				a.options.Logger.Printf("Async snapshot %s created successfully", id)
+			} else {
+				a.options.Logger.Printf("Async snapshot creation failed: %v", err)
+			}
+		},
+	)
+}
+
+// GetSnapshotStatus returns the status of an asynchronous snapshot creation job.
+func (a *Agate) GetSnapshotStatus(ctx context.Context, jobID string) (*store.SnapshotStatus, error) {
+	return a.manager.GetSnapshotStatus(ctx, jobID)
+}
+
+// RestoreSnapshot extracts a snapshot to the active directory.
 func (a *Agate) RestoreSnapshot(ctx context.Context, snapshotID string) error {
+	a.mutex.Lock()
+	defer a.mutex.Unlock()
+
+	a.options.Logger.Printf("Restoring snapshot with ID: %s", snapshotID)
+
 	// Call CloseFunc if provided
 	if a.options.CloseFunc != nil {
 		if err := a.options.CloseFunc(); err != nil {
@@ -134,13 +292,25 @@ func (a *Agate) RestoreSnapshot(ctx context.Context, snapshotID string) error {
 	}
 
 	// Extract the snapshot
-	if err := a.manager.ExtractSnapshot(ctx, snapshotID, a.options.WorkDir); err != nil {
+	if err := a.manager.ExtractSnapshot(ctx, snapshotID, a.options.BlobStore.GetActiveDir(), a.options.CleanOnRestore); err != nil {
+		a.options.Logger.Printf("ERROR: failed to extract snapshot: %v", err)
 		return fmt.Errorf("failed to extract snapshot: %w", err)
 	}
 
+	a.options.Logger.Printf("Successfully restored snapshot with ID: %s", snapshotID)
+
+	// Save the ID of the snapshot that was restored
+	a.currentSnapshotID = snapshotID
+
+	// Save the current snapshot ID to a file
+	if err := a.saveCurrentSnapshotID(); err != nil {
+		return fmt.Errorf("failed to save current snapshot ID: %w", err)
+	}
+
 	// Call OpenFunc if provided
 	if a.options.OpenFunc != nil {
-		if err := a.options.OpenFunc(a.options.WorkDir); err != nil {
+		if err := a.options.OpenFunc(a.options.BlobStore.GetActiveDir()); err != nil {
+			a.options.Logger.Printf("ERROR: failed to open resources after restore: %v", err)
 			return fmt.Errorf("failed to open resources after restore: %w", err)
 		}
 	}
@@ -148,9 +318,49 @@ func (a *Agate) RestoreSnapshot(ctx context.Context, snapshotID string) error {
 	return nil
 }
 
+// RestoreSnapshot extracts a snapshot to the directory.
+func (a *Agate) RestoreSnapshotToDir(ctx context.Context, snapshotID string, dir string) error {
+	a.mutex.Lock()
+	defer a.mutex.Unlock()
+
+	// Call CloseFunc if provided
+	if a.options.CloseFunc != nil {
+		if err := a.options.CloseFunc(); err != nil {
+			return fmt.Errorf("failed to close resources before restore: %w", err)
+		}
+	}
+
+	defer func() {
+		if a.options.OpenFunc != nil {
+			if err := a.options.OpenFunc(dir); err != nil {
+				a.options.Logger.Printf("ERROR: failed to open resources after snapshot restore: %v", err)
+			}
+		}
+	}()
+
+	// Extract the snapshot
+	if err := a.manager.ExtractSnapshot(ctx, snapshotID, dir, a.options.CleanOnRestore); err != nil {
+		return fmt.Errorf("failed to extract snapshot: %w", err)
+	}
+
+	// If restoring to the active directory, save the snapshot ID
+	if dir == a.options.BlobStore.GetActiveDir() {
+		a.currentSnapshotID = snapshotID
+
+		// Save the current snapshot ID to a file
+		if err := a.saveCurrentSnapshotID(); err != nil {
+			return fmt.Errorf("failed to save current snapshot ID: %w", err)
+		}
+	}
+
+	return nil
+}
+
 // ListSnapshots returns a list of all available snapshots.
 func (a *Agate) ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error) {
-	return a.manager.ListSnapshots(ctx)
+	// Create empty ListOptions since we don't have filtering/pagination in this API yet
+	opts := store.ListOptions{}
+	return a.manager.ListSnapshots(ctx, opts)
 }
 
 // GetSnapshotDetails returns detailed information about a specific snapshot.
@@ -163,15 +373,231 @@ func (a *Agate) DeleteSnapshot(ctx context.Context, snapshotID string) error {
 	return a.manager.DeleteSnapshot(ctx, snapshotID)
 }
 
+// saveCurrentSnapshotID saves the current snapshot ID to a file in the WorkDir
+func (a *Agate) saveCurrentSnapshotID() error {
+	if a.currentSnapshotID == "" {
+		// If there's no current snapshot ID, remove the file if it exists
+		if _, err := os.Stat(a.currentIDFile); err == nil {
+			if err := os.Remove(a.currentIDFile); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	// Write the current snapshot ID to the file
+	return os.WriteFile(a.currentIDFile, []byte(a.currentSnapshotID), 0644)
+}
+
+func (a *Agate) Open() error {
+	return a.options.OpenFunc(a.GetActiveDir())
+}
+
 // Close releases all resources used by the Agate instance.
 func (a *Agate) Close() error {
-	// Currently, we don't have a way to close the manager directly
-	// This would be a good addition in the future
+	if a.options.CloseFunc != nil {
+		return a.options.CloseFunc()
+	}
 	return nil
 }
 
 // StartServer starts a gRPC server to share snapshots.
-// This is a placeholder for future implementation.
 func (a *Agate) StartServer(ctx context.Context, address string) error {
-	return errors.New("server functionality not implemented yet")
+	server := remote.NewServer(a.manager)
+	return server.Start(ctx, address)
+}
+
+// GetRemoteSnapshot downloads a snapshot from a remote server, using an efficient differential update.
+func (a *Agate) GetRemoteSnapshot(ctx context.Context, address string, snapshotID string, localParentID string) error {
+	client, err := remote.NewClient(address)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err := client.Close(); err != nil {
+			a.options.Logger.Printf("ERROR: failed to close client: %v", err)
+		}
+	}()
+
+	remoteSnapshot, err := client.FetchSnapshotDetails(ctx, snapshotID)
+	if err != nil {
+		return fmt.Errorf("failed to get remote snapshot details: %w", err)
+	}
+
+	tempDownloadDir := filepath.Join(a.options.WorkDir, "temp_download")
+	if err := os.MkdirAll(tempDownloadDir, 0755); err != nil {
+		return fmt.Errorf("failed to create temp download dir: %w", err)
+	}
+	newSnapshotDir := filepath.Join(tempDownloadDir, "new_content_"+snapshotID)
+	if err := os.MkdirAll(newSnapshotDir, 0755); err != nil {
+		return fmt.Errorf("failed to create new snapshot directory: %w", err)
+	}
+	defer func() {
+		if err := os.RemoveAll(newSnapshotDir); err != nil {
+			a.options.Logger.Printf("ERROR: failed to remove temp dir: %v", err)
+		}
+	}()
+
+	if localParentID != "" {
+		if err := a.manager.ExtractSnapshot(ctx, localParentID, newSnapshotDir, false); err != nil {
+			a.options.Logger.Printf("Warning: failed to extract local parent snapshot %s: %v", localParentID, err)
+		} else {
+			localParentSnap, err := a.GetSnapshotDetails(ctx, localParentID)
+			if err != nil {
+				a.options.Logger.Printf("Warning: failed to get local parent details %s: %v", localParentID, err)
+			} else {
+				remoteFilesMap := make(map[string]struct{})
+				for _, f := range remoteSnapshot.Files {
+					remoteFilesMap[f.Path] = struct{}{}
+				}
+
+				for _, localFile := range localParentSnap.Files {
+					if _, exists := remoteFilesMap[localFile.Path]; !exists {
+						pathToDelete := filepath.Join(newSnapshotDir, localFile.Path)
+						if err := os.RemoveAll(pathToDelete); err != nil {
+							a.options.Logger.Printf("Warning: failed to delete file %s during diff apply: %v", pathToDelete, err)
+						}
+					}
+				}
+			}
+		}
+	}
+
+	diffArchivePath := filepath.Join(tempDownloadDir, snapshotID+"_diff.zip")
+	diffPartPath := diffArchivePath + ".part"
+	a.options.Logger.Printf("Downloading diff for snapshot %s from parent %s", snapshotID, localParentID)
+	if err := client.DownloadSnapshotDiff(ctx, snapshotID, localParentID, diffPartPath); err != nil {
+		return fmt.Errorf("failed to download snapshot diff: %w", err)
+	}
+	if err := os.Rename(diffPartPath, diffArchivePath); err != nil {
+		return fmt.Errorf("failed to finalize downloaded diff: %w", err)
+	}
+	defer func() {
+		if err := os.Remove(diffArchivePath); err != nil {
+			a.options.Logger.Printf("ERROR: failed to remove temp file: %v", err)
+		}
+	}()
+
+	if err := extractArchive(diffArchivePath, newSnapshotDir); err != nil {
+		return fmt.Errorf("failed to extract diff archive: %w", err)
+	}
+
+	finalArchivePath := filepath.Join(tempDownloadDir, snapshotID+".zip")
+	if err := archive.CreateArchive(newSnapshotDir, finalArchivePath); err != nil {
+		return fmt.Errorf("failed to create final snapshot archive: %w", err)
+	}
+	defer func() {
+		if err := os.Remove(finalArchivePath); err != nil {
+			a.options.Logger.Printf("ERROR: failed to remove temp file: %v", err)
+		}
+	}()
+
+	finalArchiveFile, err := os.Open(finalArchivePath)
+	if err != nil {
+		return fmt.Errorf("failed to open final archive: %w", err)
+	}
+	defer finalArchiveFile.Close()
+
+	if _, err := a.options.BlobStore.StoreBlob(ctx, snapshotID, finalArchiveFile); err != nil {
+		return fmt.Errorf("failed to store final blob: %w", err)
+	}
+
+	if err := a.options.MetadataStore.SaveSnapshotMetadata(ctx, *remoteSnapshot); err != nil {
+		a.options.BlobStore.DeleteBlob(ctx, snapshotID)
+		return fmt.Errorf("failed to save snapshot metadata: %w", err)
+	}
+
+	a.options.Logger.Printf("Successfully imported remote snapshot %s", snapshotID)
+	return nil
+}
+
+func (a *Agate) GetCurrentSnapshotID() string {
+	return a.currentSnapshotID
+}
+
+// RegisterLocalSnapshot registers a local file as a snapshot blob and creates
+// the corresponding metadata record. If a snapshot with this ID already exists,
+// the method does nothing and returns nil.
+//
+// - ctx: Context for the operation.
+// - snapshotID: ID of the snapshot being registered.
+// - parentID: ID of the parent snapshot. May be empty for full snapshots.
+// - name: Descriptive name for the snapshot.
+// - localPath: Absolute path to the local snapshot file (a full or diff archive).
+func (ag *Agate) RegisterLocalSnapshot(ctx context.Context, snapshotID, parentID, name, localPath string) error {
+	// 1. Check if snapshot already exists
+	_, err := ag.manager.GetSnapshotDetails(ctx, snapshotID)
+	if err == nil {
+		ag.options.Logger.Printf("snapshot %s already exists, skipping registration", snapshotID)
+		return nil // Snapshot already exists
+	}
+	// We expect ErrNotFound, anything else is a real error.
+	if !errors.Is(err, models.ErrNotFound) {
+		return fmt.Errorf("failed to check for existing snapshot: %w", err)
+	}
+
+	// 2. Add the file to the blob store
+	// Check if blob already exists. If so, we assume it's the correct one and skip overwriting.
+	// This is to prevent issues when registering a file that is already in the blob store.
+	_, err = ag.options.BlobStore.GetBlobPath(ctx, snapshotID)
+	if err == nil {
+		ag.options.Logger.Printf("blob for snapshot %s already exists, skipping storing it", snapshotID)
+	} else if errors.Is(err, models.ErrNotFound) {
+		// Blob does not exist, so we store it.
+		localFile, err := os.Open(localPath)
+		if err != nil {
+			return fmt.Errorf("failed to open local snapshot file: %w", err)
+		}
+		defer func() {
+			if err := localFile.Close(); err != nil {
+				ag.options.Logger.Printf("ERROR: failed to close local file: %v", err)
+			}
+		}()
+
+		if _, err = ag.options.BlobStore.StoreBlob(ctx, snapshotID, localFile); err != nil {
+			return fmt.Errorf("failed to store blob from local file: %w", err)
+		}
+	} else {
+		// Another error occurred when checking for the blob.
+		return fmt.Errorf("failed to check for existing blob: %w", err)
+	}
+
+	// 3. Create and save snapshot metadata
+	// We get the file list from the archive to create the metadata.
+	// Note: This method does not calculate file hashes, so the metadata will be incomplete.
+	// This is a limitation of the current implementation.
+	var files []store.FileInfo
+	archiveFiles, err := archive.ListArchiveContents(localPath)
+	if err != nil {
+		// If we can't list the contents, we can't create the metadata.
+		// We should clean up the blob we just stored.
+		_ = ag.options.BlobStore.DeleteBlob(ctx, snapshotID)
+		return fmt.Errorf("failed to list archive contents for metadata creation: %w", err)
+	}
+
+	for _, f := range archiveFiles {
+		files = append(files, store.FileInfo{
+			Path:  f.Path,
+			Size:  int64(f.Size),
+			IsDir: f.IsDir,
+			// SHA256 is intentionally left empty as we don't have it.
+		})
+	}
+
+	snapshot := store.Snapshot{
+		ID:           snapshotID,
+		Name:         name,
+		ParentID:     parentID,
+		CreationTime: time.Now(),
+		Files:        files,
+	}
+
+	if err := ag.options.MetadataStore.SaveSnapshotMetadata(ctx, snapshot); err != nil {
+		// Clean up the blob
+		_ = ag.options.BlobStore.DeleteBlob(ctx, snapshotID)
+		return fmt.Errorf("failed to save snapshot metadata: %w", err)
+	}
+
+	ag.options.Logger.Printf("Successfully registered local snapshot %s", snapshotID)
+	return nil
 }
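The api.go changes above also add an asynchronous path (`SnapshotAsync` plus `GetSnapshotStatus`) alongside the blocking `SaveSnapshot`. A minimal usage sketch, assuming an initialized `ag` instance and `ctx` as in the README examples; the polling loop is illustrative only, since the fields of `store.SnapshotStatus` are not shown in this diff:

```go
// Sketch: start a snapshot in the background and poll its status.
// The returned job ID is also the snapshot ID.
jobID, err := ag.SnapshotAsync(ctx, "Nightly Snapshot", "")
if err != nil {
	log.Fatalf("Failed to start async snapshot: %v", err)
}

// Poll a few times for illustration; a real caller would stop once the
// status reports completion.
for i := 0; i < 5; i++ {
	status, err := ag.GetSnapshotStatus(ctx, jobID)
	if err != nil {
		log.Fatalf("Failed to query snapshot status: %v", err)
	}
	log.Printf("Snapshot %s status: %+v", jobID, status)
	time.Sleep(time.Second)
}
```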
api_test.go (new file, 374 lines)

@@ -0,0 +1,374 @@

package agate

import (
	"bytes"
	"context"
	"log"
	"os"
	"path/filepath"
	"strings"
	"testing"
)

// setupTestAPI creates a temporary directory and initializes an Agate instance
func setupTestAPI(t *testing.T) (*Agate, string, func()) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}

	// Create a data directory
	dataDir := filepath.Join(tempDir)
	if err := os.MkdirAll(dataDir, 0755); err != nil {
		os.RemoveAll(tempDir)
		t.Fatalf("Failed to create data directory: %v", err)
	}

	// Create test files
	createAPITestFiles(t, filepath.Join(dataDir, "blobs", "active"))

	// Create Agate options
	options := AgateOptions{
		WorkDir: dataDir,
		OpenFunc: func(dir string) error {
			return nil
		},
		CloseFunc: func() error {
			return nil
		},
	}

	// Create Agate instance
	ag, err := New(options)
	if err != nil {
		os.RemoveAll(tempDir)
		t.Fatalf("Failed to create Agate instance: %v", err)
	}

	// Return a cleanup function
	cleanup := func() {
		ag.Close()
		os.RemoveAll(tempDir)
	}

	return ag, tempDir, cleanup
}

// createAPITestFiles creates test files in the specified directory
func createAPITestFiles(t *testing.T, dir string) {
	// Create a subdirectory
	subDir := filepath.Join(dir, "subdir")
	if err := os.MkdirAll(subDir, 0755); err != nil {
		t.Fatalf("Failed to create subdirectory: %v", err)
	}

	// Create some test files
	testFiles := map[string]string{
		filepath.Join(dir, "file1.txt"):       "This is file 1",
		filepath.Join(dir, "file2.txt"):       "This is file 2",
		filepath.Join(subDir, "subfile1.txt"): "This is subfile 1",
		filepath.Join(subDir, "subfile2.txt"): "This is subfile 2",
	}

	for path, content := range testFiles {
		if err := os.WriteFile(path, []byte(content), 0644); err != nil {
			t.Fatalf("Failed to create test file %s: %v", path, err)
		}
	}
}

func TestNewAgate(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir)

	// Create a data directory
	dataDir := filepath.Join(tempDir, "data")
	if err := os.MkdirAll(dataDir, 0755); err != nil {
		t.Fatalf("Failed to create data directory: %v", err)
	}

	// Create Agate options
	options := AgateOptions{
		WorkDir: dataDir,
		OpenFunc: func(dir string) error {
			return nil
		},
		CloseFunc: func() error {
			return nil
		},
	}

	// Create Agate instance
	ag, err := New(options)
	if err != nil {
		t.Fatalf("Failed to create Agate instance: %v", err)
	}
	defer ag.Close()

	// Check that the Agate instance was created successfully
	if ag == nil {
		t.Fatalf("Agate instance is nil")
	}
}

func TestSaveAndRestoreSnapshot(t *testing.T) {
	ag, _, cleanup := setupTestAPI(t)
	defer cleanup()

	// Create a snapshot
	ctx := context.Background()
	snapshotID, err := ag.SaveSnapshot(ctx, "Test Snapshot", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Check that the snapshot was created with the correct name
	snapshot, err := ag.GetSnapshotDetails(ctx, snapshotID)
	if err != nil {
		t.Fatalf("Failed to get snapshot details: %v", err)
	}
	if snapshot.Name != "Test Snapshot" {
		t.Errorf("Snapshot has wrong name: got %s, want %s", snapshot.Name, "Test Snapshot")
	}

	// Modify a file
	dataDir := ag.options.BlobStore.GetActiveDir()
	if err := os.WriteFile(filepath.Join(dataDir, "file1.txt"), []byte("Modified file 1"), 0644); err != nil {
		t.Fatalf("Failed to modify test file: %v", err)
	}

	// Restore the snapshot
	err = ag.RestoreSnapshot(ctx, snapshotID)
	if err != nil {
		t.Fatalf("Failed to restore snapshot: %v", err)
	}

	// Check that the file was restored
	content, err := os.ReadFile(filepath.Join(dataDir, "file1.txt"))
	if err != nil {
		t.Fatalf("Failed to read restored file: %v", err)
	}
	if string(content) != "This is file 1" {
		t.Errorf("File content was not restored: got %s, want %s", string(content), "This is file 1")
	}
}

func TestRestoreSnapshotToDir(t *testing.T) {
	ag, tempDir, cleanup := setupTestAPI(t)
	defer cleanup()

	// Create a snapshot
	ctx := context.Background()
	snapshotID, err := ag.SaveSnapshot(ctx, "Test Snapshot", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Create a target directory
	targetDir := filepath.Join(tempDir, "target")
	if err := os.MkdirAll(targetDir, 0755); err != nil {
		t.Fatalf("Failed to create target directory: %v", err)
	}

	// Restore the snapshot to the target directory
	err = ag.RestoreSnapshotToDir(ctx, snapshotID, targetDir)
	if err != nil {
		t.Fatalf("Failed to restore snapshot to directory: %v", err)
	}

	// Check that the files were restored
	testFiles := map[string]string{
		filepath.Join(targetDir, "file1.txt"):           "This is file 1",
		filepath.Join(targetDir, "file2.txt"):           "This is file 2",
		filepath.Join(targetDir, "subdir/subfile1.txt"): "This is subfile 1",
		filepath.Join(targetDir, "subdir/subfile2.txt"): "This is subfile 2",
	}

	for path, expectedContent := range testFiles {
		content, err := os.ReadFile(path)
		if err != nil {
			t.Fatalf("Failed to read restored file %s: %v", path, err)
		}
		if string(content) != expectedContent {
			t.Errorf("Restored file %s has wrong content: got %s, want %s", path, string(content), expectedContent)
		}
	}
}
|
|
||||||
|
func TestAPIListSnapshots(t *testing.T) {
|
||||||
|
ag, _, cleanup := setupTestAPI(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
// Create multiple snapshots
|
||||||
|
ctx := context.Background()
|
||||||
|
snapshotID1, err := ag.SaveSnapshot(ctx, "Snapshot 1", "")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create snapshot: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Modify a file
|
||||||
|
dataDir := ag.options.WorkDir
|
    if err := os.WriteFile(filepath.Join(dataDir, "file1.txt"), []byte("Modified file 1"), 0644); err != nil {
        t.Fatalf("Failed to modify test file: %v", err)
    }

    snapshotID2, err := ag.SaveSnapshot(ctx, "Snapshot 2", snapshotID1)
    if err != nil {
        t.Fatalf("Failed to create snapshot: %v", err)
    }

    // List the snapshots
    snapshots, err := ag.ListSnapshots(ctx)
    if err != nil {
        t.Fatalf("Failed to list snapshots: %v", err)
    }

    // Check that both snapshots are listed
    if len(snapshots) != 2 {
        t.Errorf("Wrong number of snapshots listed: got %d, want %d", len(snapshots), 2)
    }

    // Check that the snapshots have the correct information
    for _, snap := range snapshots {
        if snap.ID == snapshotID1 {
            if snap.Name != "Snapshot 1" {
                t.Errorf("Snapshot 1 has wrong name: got %s, want %s", snap.Name, "Snapshot 1")
            }
            if snap.ParentID != "" {
                t.Errorf("Snapshot 1 has wrong parent ID: got %s, want %s", snap.ParentID, "")
            }
        } else if snap.ID == snapshotID2 {
            if snap.Name != "Snapshot 2" {
                t.Errorf("Snapshot 2 has wrong name: got %s, want %s", snap.Name, "Snapshot 2")
            }
            if snap.ParentID != snapshotID1 {
                t.Errorf("Snapshot 2 has wrong parent ID: got %s, want %s", snap.ParentID, snapshotID1)
            }
        } else {
            t.Errorf("Unexpected snapshot ID: %s", snap.ID)
        }
    }
}

func TestAgate_Logging(t *testing.T) {
    // Create a temporary directory for tests
    tempDir, err := os.MkdirTemp("", "agate-test-*")
    if err != nil {
        t.Fatalf("Failed to create temp directory: %v", err)
    }
    defer os.RemoveAll(tempDir)

    // Create a data directory
    dataDir := filepath.Join(tempDir, "data")
    if err := os.MkdirAll(dataDir, 0755); err != nil {
        t.Fatalf("Failed to create data directory: %v", err)
    }

    // Create test files in the active directory
    activeDir := filepath.Join(dataDir, "blobs", "active")
    if err := os.MkdirAll(activeDir, 0755); err != nil {
        t.Fatalf("Failed to create active directory: %v", err)
    }
    createAPITestFiles(t, activeDir)

    // Create a buffer to capture log output
    var logBuffer bytes.Buffer
    logger := log.New(&logBuffer, "", 0)

    // Create Agate options with the logger
    options := AgateOptions{
        WorkDir: dataDir,
        OpenFunc: func(dir string) error {
            return nil
        },
        CloseFunc: func() error {
            return nil
        },
        Logger: logger,
    }

    // Create Agate instance
    ag, err := New(options)
    if err != nil {
        t.Fatalf("Failed to create Agate instance: %v", err)
    }
    defer ag.Close()

    // Perform operations that should generate logs
    ctx := context.Background()

    // Save a snapshot
    snapshotID, err := ag.SaveSnapshot(ctx, "Test Snapshot", "")
    if err != nil {
        t.Fatalf("Failed to create snapshot: %v", err)
    }

    // Restore the snapshot
    err = ag.RestoreSnapshot(ctx, snapshotID)
    if err != nil {
        t.Fatalf("Failed to restore snapshot: %v", err)
    }

    // Check that logs were generated
    logs := logBuffer.String()
    if logs == "" {
        t.Errorf("No logs were generated")
    }

    // Check for expected log messages
    expectedLogMessages := []string{
        "Creating new snapshot",
        "Restoring snapshot",
    }

    for _, msg := range expectedLogMessages {
        if !strings.Contains(logs, msg) {
            t.Errorf("Expected log message '%s' not found in logs", msg)
        }
    }
}

// Note: This test is a placeholder for when the ListSnapshots method is updated to accept ListOptions.
// Currently, the ListSnapshots method in api.go doesn't accept ListOptions, so we can't test that functionality directly.
// The test for ListOptions functionality is covered in TestListSnapshotsMetadata_WithOptions in store/sqlite/sqlite_test.go.
func TestAgate_ListSnapshotsWithOptions(t *testing.T) {
    t.Skip("Skipping test as ListSnapshots in api.go doesn't yet support ListOptions")
}

func TestAPIDeleteSnapshot(t *testing.T) {
    ag, _, cleanup := setupTestAPI(t)
    defer cleanup()

    // Create a snapshot
    ctx := context.Background()
    snapshotID, err := ag.SaveSnapshot(ctx, "Test Snapshot", "")
    if err != nil {
        t.Fatalf("Failed to create snapshot: %v", err)
    }

    // Delete the snapshot
    err = ag.DeleteSnapshot(ctx, snapshotID)
    if err != nil {
        t.Fatalf("Failed to delete snapshot: %v", err)
    }

    // Try to get the deleted snapshot
    _, err = ag.GetSnapshotDetails(ctx, snapshotID)
    if err == nil {
        t.Fatalf("Expected error when getting deleted snapshot, got nil")
    }

    // List snapshots to confirm it's gone
    snapshots, err := ag.ListSnapshots(ctx)
    if err != nil {
        t.Fatalf("Failed to list snapshots: %v", err)
    }
    if len(snapshots) != 0 {
        t.Errorf("Expected 0 snapshots after deletion, got %d", len(snapshots))
    }
}
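TestAgate_Logging captures Agate's log output by passing a *log.Logger in AgateOptions. The same field works with any destination; the sketch below, built only from the options and calls exercised in these tests, routes the messages to stderr. It is a minimal illustration, not taken from the repository, and the work directory and snapshot name are placeholders:

```go
package main

import (
    "context"
    "log"
    "os"

    agate "gitea.unprism.ru/KRBL/Agate"
)

func main() {
    // Send Agate's log output to stderr instead of a test buffer.
    logger := log.New(os.Stderr, "agate: ", log.LstdFlags)

    ag, err := agate.New(agate.AgateOptions{
        WorkDir: "/var/lib/myapp/agate", // placeholder path
        Logger:  logger,
    })
    if err != nil {
        log.Fatalf("failed to initialize agate: %v", err)
    }
    defer ag.Close()

    // SaveSnapshot and RestoreSnapshot now log messages such as
    // "Creating new snapshot" through the provided logger.
    if _, err := ag.SaveSnapshot(context.Background(), "nightly", ""); err != nil {
        log.Fatalf("snapshot failed: %v", err)
    }
}
```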
@@ -95,6 +95,104 @@ func CreateArchive(sourceDir, targetPath string) error {
 	return nil
 }
 
+// CreateArchiveWithProgress creates a ZIP archive with progress reporting.
+// onProgress is called with the current number of bytes written and the total size.
+func CreateArchiveWithProgress(sourceDir, targetPath string, onProgress func(current, total int64)) error {
+    info, err := os.Stat(sourceDir)
+    if err != nil {
+        return fmt.Errorf("failed to stat source directory %s: %w", sourceDir, err)
+    }
+    if !info.IsDir() {
+        return fmt.Errorf("source %s is not a directory", sourceDir)
+    }
+
+    // Calculate total size
+    var totalSize int64
+    err = filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error {
+        if err != nil {
+            return err
+        }
+        if !info.IsDir() {
+            totalSize += info.Size()
+        }
+        return nil
+    })
+    if err != nil {
+        return fmt.Errorf("failed to calculate total size of %s: %w", sourceDir, err)
+    }
+
+    // Create file for ZIP archive
+    outFile, err := os.Create(targetPath)
+    if err != nil {
+        return fmt.Errorf("failed to create target archive file %s: %w", targetPath, err)
+    }
+    defer outFile.Close()
+
+    // Create zip.Writer
+    zipWriter := zip.NewWriter(outFile)
+    defer zipWriter.Close()
+
+    var currentSize int64
+
+    // Recursively walk sourceDir
+    err = filepath.Walk(sourceDir, func(filePath string, fileInfo os.FileInfo, walkErr error) error {
+        if walkErr != nil {
+            return fmt.Errorf("error walking path %s: %w", filePath, walkErr)
+        }
+
+        // Skip sourceDir itself
+        if filePath == sourceDir {
+            return nil
+        }
+
+        // Create relative path
+        relativePath := strings.TrimPrefix(filePath, sourceDir+string(filepath.Separator))
+        relativePath = filepath.ToSlash(relativePath)
+
+        // Check if directory
+        if fileInfo.IsDir() {
+            _, err = zipWriter.Create(relativePath + "/")
+            if err != nil {
+                return fmt.Errorf("failed to create directory entry %s in archive: %w", relativePath, err)
+            }
+            return nil
+        }
+
+        // Open file for reading
+        fileToArchive, err := os.Open(filePath)
+        if err != nil {
+            return fmt.Errorf("failed to open file %s for archiving: %w", filePath, err)
+        }
+        defer fileToArchive.Close()
+
+        // Create archive entry
+        zipEntryWriter, err := zipWriter.Create(relativePath)
+        if err != nil {
+            return fmt.Errorf("failed to create entry %s in archive: %w", relativePath, err)
+        }
+
+        // Copy content
+        n, err := io.Copy(zipEntryWriter, fileToArchive)
+        if err != nil {
+            return fmt.Errorf("failed to copy file content %s to archive: %w", filePath, err)
+        }
+
+        currentSize += n
+        if onProgress != nil {
+            onProgress(currentSize, totalSize)
+        }
+
+        return nil
+    })
+
+    if err != nil {
+        os.Remove(targetPath)
+        return fmt.Errorf("failed during directory walk for archiving %s: %w", sourceDir, err)
+    }
+
+    return nil
+}
+
 // ListArchiveContents reads a ZIP archive and returns information about its contents.
 func ListArchiveContents(archivePath string) ([]ArchiveEntryInfo, error) {
 	// Open the ZIP archive
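CreateArchiveWithProgress is a thin variant of CreateArchive that invokes the onProgress callback after each file is copied, against a total size computed before archiving starts. A minimal usage sketch, assuming the package is imported from the archive directory of this module; the paths are placeholders:

```go
package main

import (
    "fmt"
    "log"

    "gitea.unprism.ru/KRBL/Agate/archive"
)

func main() {
    // Print a percentage; total is the byte count the function computes
    // up front, current grows after each archived file.
    onProgress := func(current, total int64) {
        if total > 0 {
            fmt.Printf("\rarchiving: %3.0f%%", float64(current)/float64(total)*100)
        }
    }

    // Placeholder paths for the sketch.
    if err := archive.CreateArchiveWithProgress("/tmp/data", "/tmp/data.zip", onProgress); err != nil {
        log.Fatalf("archive failed: %v", err)
    }
    fmt.Println("\ndone")
}
```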
archive/archive_test.go (new file, 236 lines)
@@ -0,0 +1,236 @@
package archive

import (
    "bytes"
    "os"
    "path/filepath"
    "testing"
)

func TestCreateArchive(t *testing.T) {
    // Create a temporary directory for tests
    tempDir, err := os.MkdirTemp("", "agate-test-*")
    if err != nil {
        t.Fatalf("Failed to create temp directory: %v", err)
    }
    defer os.RemoveAll(tempDir) // Clean up after test

    // Create a source directory with some files
    sourceDir := filepath.Join(tempDir, "source")
    if err := os.MkdirAll(sourceDir, 0755); err != nil {
        t.Fatalf("Failed to create source directory: %v", err)
    }

    // Create a subdirectory
    subDir := filepath.Join(sourceDir, "subdir")
    if err := os.MkdirAll(subDir, 0755); err != nil {
        t.Fatalf("Failed to create subdirectory: %v", err)
    }

    // Create some test files
    testFiles := map[string]string{
        filepath.Join(sourceDir, "file1.txt"): "This is file 1",
        filepath.Join(sourceDir, "file2.txt"): "This is file 2",
        filepath.Join(subDir, "subfile1.txt"): "This is subfile 1",
        filepath.Join(subDir, "subfile2.txt"): "This is subfile 2",
    }

    for path, content := range testFiles {
        if err := os.WriteFile(path, []byte(content), 0644); err != nil {
            t.Fatalf("Failed to create test file %s: %v", path, err)
        }
    }

    // Create the archive
    archivePath := filepath.Join(tempDir, "archive.zip")
    err = CreateArchive(sourceDir, archivePath)
    if err != nil {
        t.Fatalf("Failed to create archive: %v", err)
    }

    // Check that the archive file was created
    if _, err := os.Stat(archivePath); os.IsNotExist(err) {
        t.Fatalf("Archive file was not created")
    }

    // Test creating archive with non-existent source directory
    err = CreateArchive(filepath.Join(tempDir, "nonexistent"), archivePath)
    if err == nil {
        t.Fatalf("Expected error when creating archive from non-existent directory, got nil")
    }

    // Test creating archive with a file as source
    fileSourcePath := filepath.Join(tempDir, "file_source.txt")
    if err := os.WriteFile(fileSourcePath, []byte("This is a file"), 0644); err != nil {
        t.Fatalf("Failed to create test file: %v", err)
    }
    err = CreateArchive(fileSourcePath, archivePath)
    if err == nil {
        t.Fatalf("Expected error when creating archive from a file, got nil")
    }
}

func TestListArchiveContents(t *testing.T) {
    // Create a temporary directory for tests
    tempDir, err := os.MkdirTemp("", "agate-test-*")
    if err != nil {
        t.Fatalf("Failed to create temp directory: %v", err)
    }
    defer os.RemoveAll(tempDir) // Clean up after test

    // Create a source directory with some files
    sourceDir := filepath.Join(tempDir, "source")
    if err := os.MkdirAll(sourceDir, 0755); err != nil {
        t.Fatalf("Failed to create source directory: %v", err)
    }

    // Create a subdirectory
    subDir := filepath.Join(sourceDir, "subdir")
    if err := os.MkdirAll(subDir, 0755); err != nil {
        t.Fatalf("Failed to create subdirectory: %v", err)
    }

    // Create some test files
    testFiles := map[string]string{
        filepath.Join(sourceDir, "file1.txt"): "This is file 1",
        filepath.Join(sourceDir, "file2.txt"): "This is file 2",
        filepath.Join(subDir, "subfile1.txt"): "This is subfile 1",
        filepath.Join(subDir, "subfile2.txt"): "This is subfile 2",
    }

    for path, content := range testFiles {
        if err := os.WriteFile(path, []byte(content), 0644); err != nil {
            t.Fatalf("Failed to create test file %s: %v", path, err)
        }
    }

    // Create the archive
    archivePath := filepath.Join(tempDir, "archive.zip")
    err = CreateArchive(sourceDir, archivePath)
    if err != nil {
        t.Fatalf("Failed to create archive: %v", err)
    }

    // List the archive contents
    entries, err := ListArchiveContents(archivePath)
    if err != nil {
        t.Fatalf("Failed to list archive contents: %v", err)
    }

    // Check that all files and directories are listed
    expectedEntries := map[string]bool{
        "file1.txt":           false,
        "file2.txt":           false,
        "subdir/":             true,
        "subdir/subfile1.txt": false,
        "subdir/subfile2.txt": false,
    }

    if len(entries) != len(expectedEntries) {
        t.Errorf("Wrong number of entries: got %d, want %d", len(entries), len(expectedEntries))
    }

    for _, entry := range entries {
        isDir, exists := expectedEntries[entry.Path]
        if !exists {
            t.Errorf("Unexpected entry in archive: %s", entry.Path)
            continue
        }
        if entry.IsDir != isDir {
            t.Errorf("Entry %s has wrong IsDir value: got %v, want %v", entry.Path, entry.IsDir, isDir)
        }
    }

    // Test listing contents of non-existent archive
    _, err = ListArchiveContents(filepath.Join(tempDir, "nonexistent.zip"))
    if err == nil {
        t.Fatalf("Expected error when listing contents of non-existent archive, got nil")
    }
}

func TestExtractFileFromArchive(t *testing.T) {
    // Create a temporary directory for tests
    tempDir, err := os.MkdirTemp("", "agate-test-*")
    if err != nil {
        t.Fatalf("Failed to create temp directory: %v", err)
    }
    defer os.RemoveAll(tempDir) // Clean up after test

    // Create a source directory with some files
    sourceDir := filepath.Join(tempDir, "source")
    if err := os.MkdirAll(sourceDir, 0755); err != nil {
        t.Fatalf("Failed to create source directory: %v", err)
    }

    // Create a subdirectory
    subDir := filepath.Join(sourceDir, "subdir")
    if err := os.MkdirAll(subDir, 0755); err != nil {
        t.Fatalf("Failed to create subdirectory: %v", err)
    }

    // Create some test files
    testFiles := map[string]string{
        filepath.Join(sourceDir, "file1.txt"): "This is file 1",
        filepath.Join(sourceDir, "file2.txt"): "This is file 2",
        filepath.Join(subDir, "subfile1.txt"): "This is subfile 1",
        filepath.Join(subDir, "subfile2.txt"): "This is subfile 2",
    }

    for path, content := range testFiles {
        if err := os.WriteFile(path, []byte(content), 0644); err != nil {
            t.Fatalf("Failed to create test file %s: %v", path, err)
        }
    }

    // Create the archive
    archivePath := filepath.Join(tempDir, "archive.zip")
    err = CreateArchive(sourceDir, archivePath)
    if err != nil {
        t.Fatalf("Failed to create archive: %v", err)
    }

    // Extract a file from the archive
    var buf bytes.Buffer
    err = ExtractFileFromArchive(archivePath, "file1.txt", &buf)
    if err != nil {
        t.Fatalf("Failed to extract file from archive: %v", err)
    }

    // Check that the extracted content matches the original
    if buf.String() != "This is file 1" {
        t.Errorf("Extracted content does not match: got %s, want %s", buf.String(), "This is file 1")
    }

    // Extract a file from a subdirectory
    buf.Reset()
    err = ExtractFileFromArchive(archivePath, "subdir/subfile1.txt", &buf)
    if err != nil {
        t.Fatalf("Failed to extract file from archive: %v", err)
    }

    // Check that the extracted content matches the original
    if buf.String() != "This is subfile 1" {
        t.Errorf("Extracted content does not match: got %s, want %s", buf.String(), "This is subfile 1")
    }

    // Try to extract a non-existent file
    buf.Reset()
    err = ExtractFileFromArchive(archivePath, "nonexistent.txt", &buf)
    if err != ErrFileNotFoundInArchive {
        t.Fatalf("Expected ErrFileNotFoundInArchive when extracting non-existent file, got: %v", err)
    }

    // Try to extract a directory
    buf.Reset()
    err = ExtractFileFromArchive(archivePath, "subdir/", &buf)
    if err == nil {
        t.Fatalf("Expected error when extracting a directory, got nil")
    }

    // Try to extract from a non-existent archive
    buf.Reset()
    err = ExtractFileFromArchive(filepath.Join(tempDir, "nonexistent.zip"), "file1.txt", &buf)
    if err == nil {
        t.Fatalf("Expected error when extracting from non-existent archive, got nil")
    }
}
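Together these tests pin down the read side of the archive package: ListArchiveContents returns entries whose Path uses forward slashes and whose IsDir marks directory entries, and ExtractFileFromArchive streams a single entry into any io.Writer, returning the ErrFileNotFoundInArchive sentinel when the path is missing. A short sketch of that contract from a caller's point of view, with a placeholder archive path and the same assumed import path as above:

```go
package main

import (
    "errors"
    "fmt"
    "log"
    "os"

    "gitea.unprism.ru/KRBL/Agate/archive"
)

func main() {
    const archivePath = "/tmp/data.zip" // placeholder

    // Walk the listing; directories carry a trailing slash and IsDir=true.
    entries, err := archive.ListArchiveContents(archivePath)
    if err != nil {
        log.Fatalf("list failed: %v", err)
    }
    for _, e := range entries {
        fmt.Printf("%v\t%s\n", e.IsDir, e.Path)
    }

    // Extract a single entry straight to stdout.
    err = archive.ExtractFileFromArchive(archivePath, "file1.txt", os.Stdout)
    if errors.Is(err, archive.ErrFileNotFoundInArchive) {
        log.Printf("file1.txt is not in the archive")
    } else if err != nil {
        log.Fatalf("extract failed: %v", err)
    }
}
```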
async_test.go (new file, 115 lines)
@@ -0,0 +1,115 @@
package agate

import (
    "context"
    "os"
    "path/filepath"
    "testing"
    "time"
)

func TestSnapshotAsync(t *testing.T) {
    // Setup temporary work directory
    workDir, err := os.MkdirTemp("", "agate_async_test")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(workDir)

    // Initialize Agate
    opts := AgateOptions{
        WorkDir: workDir,
        Logger:  nil, // Disable logging for test
    }

    ag, err := New(opts)
    if err != nil {
        t.Fatalf("Failed to initialize Agate: %v", err)
    }
    defer ag.Close()

    // Get active directory and create some dummy files
    activeDir := ag.GetActiveDir()
    if err := os.MkdirAll(activeDir, 0755); err != nil {
        t.Fatalf("Failed to create active dir: %v", err)
    }

    // Create a large-ish file to ensure it takes some time (though still fast)
    dummyFile := filepath.Join(activeDir, "data.bin")
    data := make([]byte, 1024*1024) // 1MB
    if err := os.WriteFile(dummyFile, data, 0644); err != nil {
        t.Fatalf("Failed to create dummy file: %v", err)
    }

    // Start async snapshot
    ctx := context.Background()
    snapshotID, err := ag.SnapshotAsync(ctx, "async-snap", "")
    if err != nil {
        t.Fatalf("SnapshotAsync failed: %v", err)
    }

    if snapshotID == "" {
        t.Fatal("SnapshotAsync returned empty ID")
    }

    // Check status immediately
    status, err := ag.GetSnapshotStatus(ctx, snapshotID)
    if err != nil {
        t.Fatalf("GetSnapshotStatus failed: %v", err)
    }

    // Status should be pending or running
    if status.Status != "pending" && status.Status != "running" {
        t.Errorf("Initial status should be pending or running, got: %s", status.Status)
    }

    // Poll for completion
    timeout := time.After(5 * time.Second)
    ticker := time.NewTicker(10 * time.Millisecond)
    defer ticker.Stop()

    done := false
    for !done {
        select {
        case <-timeout:
            t.Fatal("Timeout waiting for snapshot completion")
        case <-ticker.C:
            status, err := ag.GetSnapshotStatus(ctx, snapshotID)
            if err != nil {
                t.Fatalf("GetSnapshotStatus failed during polling: %v", err)
            }

            if status.Status == "done" {
                done = true
                if status.Progress != 1.0 {
                    t.Errorf("Expected progress 1.0, got %f", status.Progress)
                }
            } else if status.Status == "failed" {
                t.Fatalf("Snapshot creation failed: %s", status.Error)
            }
        }
    }

    // Verify snapshot exists
    snaps, err := ag.ListSnapshots(ctx)
    if err != nil {
        t.Fatalf("ListSnapshots failed: %v", err)
    }

    found := false
    for _, s := range snaps {
        if s.ID == snapshotID {
            found = true
            break
        }
    }

    if !found {
        t.Errorf("Snapshot %s not found in list", snapshotID)
    }

    // Verify current snapshot ID is updated
    if ag.GetCurrentSnapshotID() != snapshotID {
        t.Errorf("Current snapshot ID not updated. Expected %s, got %s", snapshotID, ag.GetCurrentSnapshotID())
    }
}
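TestSnapshotAsync exercises the asynchronous API: SnapshotAsync returns an ID immediately, and GetSnapshotStatus reports "pending", "running", "done" or "failed" together with a progress fraction and an error string. Outside of a test, the same polling pattern might look like the sketch below; the *agate.Agate handle type and the poll interval are assumptions, not taken from this diff:

```go
package main

import (
    "context"
    "fmt"
    "log"
    "time"

    agate "gitea.unprism.ru/KRBL/Agate"
)

// waitForSnapshot polls GetSnapshotStatus until the snapshot reaches a
// terminal state or the context is cancelled. The handle type *agate.Agate
// is assumed; use whatever New returns in your version of the library.
func waitForSnapshot(ctx context.Context, ag *agate.Agate, id string) error {
    ticker := time.NewTicker(200 * time.Millisecond) // arbitrary poll interval
    defer ticker.Stop()

    for {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-ticker.C:
            status, err := ag.GetSnapshotStatus(ctx, id)
            if err != nil {
                return err
            }
            switch status.Status {
            case "done":
                return nil
            case "failed":
                return fmt.Errorf("snapshot %s failed: %s", id, status.Error)
            default: // "pending" or "running"
                log.Printf("snapshot %s: %s (%.0f%%)", id, status.Status, status.Progress*100)
            }
        }
    }
}
```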
basic_usage (binary file, not shown)

@@ -7,8 +7,8 @@ import (
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
||||||
"unprism.ru/KRBL/agate"
|
"gitea.unprism.ru/KRBL/Agate"
|
||||||
"unprism.ru/KRBL/agate/stores"
|
"gitea.unprism.ru/KRBL/Agate/stores"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
|||||||
functional_test.go (new file, 330 lines)
@@ -0,0 +1,330 @@
package agate

import (
    "context"
    "os"
    "path/filepath"
    "testing"
    "time"
)

// TestFullWorkflow tests a complete workflow of creating snapshots, modifying files,
// creating more snapshots, and restoring snapshots.
func TestFullWorkflow(t *testing.T) {
    // Create a temporary directory for tests
    tempDir, err := os.MkdirTemp("", "agate-test-*")
    if err != nil {
        t.Fatalf("Failed to create temp directory: %v", err)
    }
    defer os.RemoveAll(tempDir)

    // Create Agate options
    options := AgateOptions{
        WorkDir:        tempDir,
        CleanOnRestore: true,
    }

    // Create Agate instance
    ag, err := New(options)
    if err != nil {
        t.Fatalf("Failed to create Agate instance: %v", err)
    }
    defer ag.Close()

    // Create a data directory
    dataDir := ag.options.BlobStore.GetActiveDir()
    if err := os.MkdirAll(dataDir, 0755); err != nil {
        t.Fatalf("Failed to create data directory: %v", err)
    }

    // Create initial test files
    initialFiles := map[string]string{
        filepath.Join(dataDir, "file1.txt"):           "Initial content of file 1",
        filepath.Join(dataDir, "file2.txt"):           "Initial content of file 2",
        filepath.Join(dataDir, "subdir", "file3.txt"): "Initial content of file 3",
    }

    // Create subdirectory
    if err := os.MkdirAll(filepath.Join(dataDir, "subdir"), 0755); err != nil {
        t.Fatalf("Failed to create subdirectory: %v", err)
    }

    // Create the files
    for path, content := range initialFiles {
        if err := os.WriteFile(path, []byte(content), 0644); err != nil {
            t.Fatalf("Failed to create test file %s: %v", path, err)
        }
    }

    // Step 1: Create the first snapshot
    ctx := context.Background()
    snapshot1ID, err := ag.SaveSnapshot(ctx, "Snapshot 1", "")
    if err != nil {
        t.Fatalf("Failed to create first snapshot: %v", err)
    }
    t.Logf("Created first snapshot with ID: %s", snapshot1ID)

    // Step 2: Modify some files and add a new file
    modifiedFiles := map[string]string{
        filepath.Join(dataDir, "file1.txt"): "Modified content of file 1",
        filepath.Join(dataDir, "file4.txt"): "Content of new file 4",
    }

    for path, content := range modifiedFiles {
        if err := os.WriteFile(path, []byte(content), 0644); err != nil {
            t.Fatalf("Failed to modify/create test file %s: %v", path, err)
        }
    }

    // Step 3: Create the second snapshot
    snapshot2ID, err := ag.SaveSnapshot(ctx, "Snapshot 2", snapshot1ID)
    if err != nil {
        t.Fatalf("Failed to create second snapshot: %v", err)
    }
    t.Logf("Created second snapshot with ID: %s", snapshot2ID)

    // Step 4: Delete a file and modify another
    if err := os.Remove(filepath.Join(dataDir, "file2.txt")); err != nil {
        t.Fatalf("Failed to delete test file: %v", err)
    }

    if err := os.WriteFile(filepath.Join(dataDir, "subdir/file3.txt"), []byte("Modified content of file 3"), 0644); err != nil {
        t.Fatalf("Failed to modify test file: %v", err)
    }

    // Step 5: Create the third snapshot
    snapshot3ID, err := ag.SaveSnapshot(ctx, "Snapshot 3", snapshot2ID)
    if err != nil {
        t.Fatalf("Failed to create third snapshot: %v", err)
    }
    t.Logf("Created third snapshot with ID: %s", snapshot3ID)

    // Step 6: List all snapshots
    snapshots, err := ag.ListSnapshots(ctx)
    if err != nil {
        t.Fatalf("Failed to list snapshots: %v", err)
    }

    if len(snapshots) != 3 {
        t.Errorf("Expected 3 snapshots, got %d", len(snapshots))
    }

    // Step 7: Restore the first snapshot
    err = ag.RestoreSnapshot(ctx, snapshot1ID)
    if err != nil {
        t.Fatalf("Failed to restore first snapshot: %v", err)
    }
    t.Logf("Restored first snapshot")

    // Step 8: Verify the restored files match the initial state
    for path, expectedContent := range initialFiles {
        content, err := os.ReadFile(path)
        if err != nil {
            t.Fatalf("Failed to read restored file %s: %v", path, err)
        }
        if string(content) != expectedContent {
            t.Errorf("Restored file %s has wrong content: got %s, want %s", path, string(content), expectedContent)
        } else {
            t.Logf("SUCCESS: Restored file %s has correct content after restoring first snapshot", path)
        }
    }

    // Check that file4.txt doesn't exist
    file4Path := filepath.Join(dataDir, "file4.txt")
    _, err = os.Stat(file4Path)
    if err == nil {
        t.Errorf("File4.txt should not exist after restoring first snapshot")
    } else if !os.IsNotExist(err) {
        t.Errorf("Unexpected error checking if File4.txt exists: %v", err)
    } else {
        t.Logf("SUCCESS: File4.txt correctly does not exist after restoring first snapshot")
    }

    // Step 9: Restore the third snapshot
    err = ag.RestoreSnapshot(ctx, snapshot3ID)
    if err != nil {
        t.Fatalf("Failed to restore third snapshot: %v", err)
    }
    t.Logf("Restored third snapshot")

    // Step 10: Verify the restored files match the final state
    expectedFiles := map[string]string{
        filepath.Join(dataDir, "file1.txt"):        "Modified content of file 1",
        filepath.Join(dataDir, "file4.txt"):        "Content of new file 4",
        filepath.Join(dataDir, "subdir/file3.txt"): "Modified content of file 3",
    }

    for path, expectedContent := range expectedFiles {
        content, err := os.ReadFile(path)
        if err != nil {
            t.Fatalf("Failed to read restored file %s: %v", path, err)
        }
        if string(content) != expectedContent {
            t.Errorf("Restored file %s has wrong content: got %s, want %s", path, string(content), expectedContent)
        } else {
            t.Logf("SUCCESS: Restored file %s has correct content after restoring third snapshot", path)
        }
    }

    // Check that file2.txt doesn't exist
    file2Path := filepath.Join(dataDir, "file2.txt")
    _, err = os.Stat(file2Path)
    if err == nil {
        t.Errorf("File2.txt should not exist after restoring third snapshot")
    } else if !os.IsNotExist(err) {
        t.Errorf("Unexpected error checking if File2.txt exists: %v", err)
    } else {
        t.Logf("SUCCESS: File2.txt correctly does not exist after restoring third snapshot")
    }

    // Step 11: Delete a snapshot
    err = ag.DeleteSnapshot(ctx, snapshot2ID)
    if err != nil {
        t.Fatalf("Failed to delete snapshot: %v", err)
    }
    t.Logf("Deleted second snapshot")

    // Step 12: Verify the snapshot was deleted
    snapshots, err = ag.ListSnapshots(ctx)
    if err != nil {
        t.Fatalf("Failed to list snapshots: %v", err)
    }

    // Debug output
    t.Logf("After deletion, found %d snapshots:", len(snapshots))
    for i, snap := range snapshots {
        t.Logf("  Snapshot %d: ID=%s, Name=%s, ParentID=%s", i+1, snap.ID, snap.Name, snap.ParentID)
    }

    // Get detailed information about snapshot 3
    snapshot3, err := ag.GetSnapshotDetails(ctx, snapshot3ID)
    if err != nil {
        t.Logf("Failed to get snapshot 3 details: %v", err)
    } else {
        t.Logf("Snapshot 3 details: ID=%s, Name=%s, ParentID=%s", snapshot3.ID, snapshot3.Name, snapshot3.ParentID)
    }

    // Verify that snapshot 3's parent ID has been updated to point to snapshot 1
    if snapshot3 != nil && snapshot3.ParentID != snapshot1ID {
        t.Errorf("Snapshot 3's parent ID should be updated to point to Snapshot 1 after Snapshot 2 is deleted. Got ParentID=%s, want ParentID=%s", snapshot3.ParentID, snapshot1ID)
    } else {
        t.Logf("SUCCESS: Snapshot 3's parent ID has been correctly updated to point to Snapshot 1: %s", snapshot3.ParentID)
    }

    if len(snapshots) != 2 {
        t.Errorf("Expected 2 snapshots after deletion, got %d", len(snapshots))
    } else {
        t.Logf("SUCCESS: Found correct number of snapshots after deletion: %d", len(snapshots))
    }

    foundDeletedSnapshot := false
    for _, snap := range snapshots {
        if snap.ID == snapshot2ID {
            foundDeletedSnapshot = true
            t.Errorf("Snapshot 2 (ID=%s) should have been deleted", snapshot2ID)
        }
    }
    if !foundDeletedSnapshot {
        t.Logf("SUCCESS: Snapshot 2 (ID=%s) was correctly deleted", snapshot2ID)
    }
}

// TestLargeFiles tests creating and restoring snapshots with large files
func TestLargeFiles(t *testing.T) {
    // Skip this test in short mode
    if testing.Short() {
        t.Skip("Skipping large file test in short mode")
    }

    // Create a temporary directory for tests
    tempDir, err := os.MkdirTemp("", "agate-test-*")
    if err != nil {
        t.Fatalf("Failed to create temp directory: %v", err)
    }
    defer os.RemoveAll(tempDir)

    // Create Agate options
    options := AgateOptions{
        WorkDir:        tempDir,
        CleanOnRestore: true,
        OpenFunc: func(dir string) error {
            return nil
        },
        CloseFunc: func() error {
            return nil
        },
    }

    // Create Agate instance
    ag, err := New(options)
    if err != nil {
        t.Fatalf("Failed to create Agate instance: %v", err)
    }
    defer ag.Close()

    // Create a data directory
    dataDir := ag.options.BlobStore.GetActiveDir()
    if err := os.MkdirAll(dataDir, 0755); err != nil {
        t.Fatalf("Failed to create data directory: %v", err)
    }

    // Create a large file (10 MB)
    largeFilePath := filepath.Join(dataDir, "large_file.bin")
    largeFileSize := 10 * 1024 * 1024 // 10 MB
    largeFile, err := os.Create(largeFilePath)
    if err != nil {
        t.Fatalf("Failed to create large test file: %v", err)
    }

    // Fill the file with a repeating pattern
    pattern := []byte("0123456789ABCDEF")
    buffer := make([]byte, 8192) // 8 KB buffer
    for i := 0; i < len(buffer); i += len(pattern) {
        copy(buffer[i:], pattern)
    }

    // Write the buffer multiple times to reach the desired size
    bytesWritten := 0
    for bytesWritten < largeFileSize {
        n, err := largeFile.Write(buffer)
        if err != nil {
            largeFile.Close()
            t.Fatalf("Failed to write to large test file: %v", err)
        }
        bytesWritten += n
    }
    largeFile.Close()

    // Create a snapshot
    ctx := context.Background()
    startTime := time.Now()
    snapshotID, err := ag.SaveSnapshot(ctx, "Large File Snapshot", "")
    if err != nil {
        t.Fatalf("Failed to create snapshot: %v", err)
    }
    duration := time.Since(startTime)
    t.Logf("Created snapshot with large file in %v", duration)

    // Modify the large file
    if err := os.WriteFile(largeFilePath, []byte("Modified content"), 0644); err != nil {
        t.Fatalf("Failed to modify large file: %v", err)
    }

    // Restore the snapshot
    startTime = time.Now()
    err = ag.RestoreSnapshot(ctx, snapshotID)
    if err != nil {
        t.Fatalf("Failed to restore snapshot: %v", err)
    }
    duration = time.Since(startTime)
    t.Logf("Restored snapshot with large file in %v", duration)

    // Verify the file size is correct
    fileInfo, err := os.Stat(largeFilePath)
    if err != nil {
        t.Fatalf("Failed to stat restored large file: %v", err)
    }
    if fileInfo.Size() != int64(largeFileSize) {
        t.Errorf("Restored large file has wrong size: got %d, want %d", fileInfo.Size(), largeFileSize)
    }
}
go.mod (22 changes)
@@ -1,19 +1,19 @@
-module unprism.ru/KRBL/agate
+module gitea.unprism.ru/KRBL/Agate
 
-go 1.24.0
+go 1.24.3
 
 require (
 	github.com/google/uuid v1.6.0
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3
-	github.com/mattn/go-sqlite3 v1.14.28
+	github.com/mattn/go-sqlite3 v1.14.32
-	google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f
+	google.golang.org/genproto/googleapis/api v0.0.0-20251111163417-95abcf5c77ba
-	google.golang.org/grpc v1.72.0
+	google.golang.org/grpc v1.77.0
-	google.golang.org/protobuf v1.36.6
+	google.golang.org/protobuf v1.36.10
 )
 
 require (
-	golang.org/x/net v0.39.0 // indirect
+	golang.org/x/net v0.47.0 // indirect
-	golang.org/x/sys v0.32.0 // indirect
+	golang.org/x/sys v0.38.0 // indirect
-	golang.org/x/text v0.24.0 // indirect
+	golang.org/x/text v0.31.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba // indirect
 )
go.sum (66 changes)
@@ -1,5 +1,5 @@
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
@@ -8,33 +8,35 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
-github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A=
+github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
-github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
-go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
-go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
+go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
+go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
-go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
+go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
-go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
+go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
-go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
+go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
-go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
+go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
-go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
-golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
-golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
-golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
-golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
-golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
-google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f h1:tjZsroqekhC63+WMqzmWyW5Twj/ZfR5HAlpd5YQ1Vs0=
+gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
-google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:Cd8IzgPo5Akum2c9R6FsXNaZbH3Jpa2gpHlW89FqlyQ=
+gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4=
+google.golang.org/genproto/googleapis/api v0.0.0-20251111163417-95abcf5c77ba h1:B14OtaXuMaCQsl2deSvNkyPKIzq3BjfxQp8d00QyWx4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/genproto/googleapis/api v0.0.0-20251111163417-95abcf5c77ba/go.mod h1:G5IanEx8/PgI9w6CFcYQf7jMtHQhZruvfM1i3qOqk5U=
-google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba h1:UKgtfRM7Yh93Sya0Fo8ZzhDP4qBckrrxEr2oF5UIVb8=
-google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
-google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
-google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
+google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.36.6
+// 	protoc-gen-go v1.36.8
-// 	protoc        v4.25.3
+// 	protoc        v6.32.0
 // source: snapshot.proto
 
 package grpc
@@ -26,10 +26,10 @@ const (
 // File metadata inside a snapshot
 type FileInfo struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
-	Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Relative path of the file inside the snapshot
+	Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
-	SizeBytes int64 `protobuf:"varint,2,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` // File size in bytes
+	SizeBytes int64 `protobuf:"varint,2,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"`
-	Sha256Hash string `protobuf:"bytes,3,opt,name=sha256_hash,json=sha256Hash,proto3" json:"sha256_hash,omitempty"` // SHA256 checksum of the file
+	Sha256Hash string `protobuf:"bytes,3,opt,name=sha256_hash,json=sha256Hash,proto3" json:"sha256_hash,omitempty"`
-	IsDir bool `protobuf:"varint,4,opt,name=is_dir,json=isDir,proto3" json:"is_dir,omitempty"` // Whether the entry is a directory
+	IsDir bool `protobuf:"varint,4,opt,name=is_dir,json=isDir,proto3" json:"is_dir,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache protoimpl.SizeCache
 }
@@ -95,10 +95,10 @@ func (x *FileInfo) GetIsDir() bool {
 // Brief information about a snapshot
 type SnapshotInfo struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
-	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // Unique snapshot ID (UUID)
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // Snapshot name
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
-	ParentId string `protobuf:"bytes,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` // ID of the parent snapshot (may be empty)
+	ParentId string `protobuf:"bytes,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
-	CreationTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` // Creation time
+	CreationTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache protoimpl.SizeCache
 }
@@ -164,8 +164,8 @@ func (x *SnapshotInfo) GetCreationTime() *timestamppb.Timestamp {
 // Detailed information about a snapshot
 type SnapshotDetails struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
-	Info *SnapshotInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` // Brief information
+	Info *SnapshotInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"`
-	Files []*FileInfo `protobuf:"bytes,2,rep,name=files,proto3" json:"files,omitempty"` // List of files in the snapshot
+	Files []*FileInfo `protobuf:"bytes,2,rep,name=files,proto3" json:"files,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache protoimpl.SizeCache
 }
@@ -214,7 +214,7 @@ func (x *SnapshotDetails) GetFiles() []*FileInfo {
 	return nil
 }
 
-// Request to list snapshots (filters/pagination could be added)
+// Request to list snapshots
 type ListSnapshotsRequest struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	unknownFields protoimpl.UnknownFields
@@ -254,7 +254,7 @@ func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) {
 // Response with the list of snapshots
 type ListSnapshotsResponse struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
-	Snapshots []*SnapshotInfo `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` // string next_page_token = 2;
+	Snapshots []*SnapshotInfo `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache protoimpl.SizeCache
 }
@@ -299,7 +299,7 @@ func (x *ListSnapshotsResponse) GetSnapshots() []*SnapshotInfo {
 // Request for snapshot details
 type GetSnapshotDetailsRequest struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
-	SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` // ID of the requested snapshot
+	SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache protoimpl.SizeCache
 }
@@ -344,8 +344,8 @@ func (x *GetSnapshotDetailsRequest) GetSnapshotId() string {
 // Request to download a file
 type DownloadFileRequest struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
-	SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` // Snapshot ID
+	SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"`
-	FilePath string `protobuf:"bytes,2,opt,name=file_path,json=filePath,proto3" json:"file_path,omitempty"` // Path of the file inside the snapshot
+	FilePath string `protobuf:"bytes,2,opt,name=file_path,json=filePath,proto3" json:"file_path,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache protoimpl.SizeCache
 }
@@ -397,7 +397,7 @@ func (x *DownloadFileRequest) GetFilePath() string {
 // Response (file chunk) when downloading
 type DownloadFileResponse struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
-	ChunkData []byte `protobuf:"bytes,1,opt,name=chunk_data,json=chunkData,proto3" json:"chunk_data,omitempty"` // A chunk of the file data
+	ChunkData []byte `protobuf:"bytes,1,opt,name=chunk_data,json=chunkData,proto3" json:"chunk_data,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache protoimpl.SizeCache
 }
@@ -439,6 +439,173 @@ func (x *DownloadFileResponse) GetChunkData() []byte {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Запрос на скачивание разницы между снапшотами
|
||||||
|
type DownloadSnapshotDiffRequest struct {
|
||||||
|
state protoimpl.MessageState `protogen:"open.v1"`
|
||||||
|
SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` // ID целевого снапшота
|
||||||
|
LocalParentId string `protobuf:"bytes,2,opt,name=local_parent_id,json=localParentId,proto3" json:"local_parent_id,omitempty"` // ID снапшота, который уже есть у клиента
|
||||||
|
Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` // Смещение в байтах для докачки
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *DownloadSnapshotDiffRequest) Reset() {
|
||||||
|
*x = DownloadSnapshotDiffRequest{}
|
||||||
|
mi := &file_snapshot_proto_msgTypes[8]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *DownloadSnapshotDiffRequest) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*DownloadSnapshotDiffRequest) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *DownloadSnapshotDiffRequest) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_snapshot_proto_msgTypes[8]
|
||||||
|
if x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use DownloadSnapshotDiffRequest.ProtoReflect.Descriptor instead.
|
||||||
|
func (*DownloadSnapshotDiffRequest) Descriptor() ([]byte, []int) {
	return file_snapshot_proto_rawDescGZIP(), []int{8}
}

func (x *DownloadSnapshotDiffRequest) GetSnapshotId() string {
	if x != nil {
		return x.SnapshotId
	}
	return ""
}

func (x *DownloadSnapshotDiffRequest) GetLocalParentId() string {
	if x != nil {
		return x.LocalParentId
	}
	return ""
}

func (x *DownloadSnapshotDiffRequest) GetOffset() int64 {
	if x != nil {
		return x.Offset
	}
	return 0
}

// Request for information about a diff
type GetDiffInfoRequest struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	SnapshotId    string                 `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"`
	LocalParentId string                 `protobuf:"bytes,2,opt,name=local_parent_id,json=localParentId,proto3" json:"local_parent_id,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *GetDiffInfoRequest) Reset() {
	*x = GetDiffInfoRequest{}
	mi := &file_snapshot_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *GetDiffInfoRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetDiffInfoRequest) ProtoMessage() {}

func (x *GetDiffInfoRequest) ProtoReflect() protoreflect.Message {
	mi := &file_snapshot_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetDiffInfoRequest.ProtoReflect.Descriptor instead.
func (*GetDiffInfoRequest) Descriptor() ([]byte, []int) {
	return file_snapshot_proto_rawDescGZIP(), []int{9}
}

func (x *GetDiffInfoRequest) GetSnapshotId() string {
	if x != nil {
		return x.SnapshotId
	}
	return ""
}

func (x *GetDiffInfoRequest) GetLocalParentId() string {
	if x != nil {
		return x.LocalParentId
	}
	return ""
}

// Information about a diff
type DiffInfo struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Sha256Hash    string                 `protobuf:"bytes,1,opt,name=sha256_hash,json=sha256Hash,proto3" json:"sha256_hash,omitempty"`
	SizeBytes     int64                  `protobuf:"varint,2,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *DiffInfo) Reset() {
	*x = DiffInfo{}
	mi := &file_snapshot_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DiffInfo) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DiffInfo) ProtoMessage() {}

func (x *DiffInfo) ProtoReflect() protoreflect.Message {
	mi := &file_snapshot_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DiffInfo.ProtoReflect.Descriptor instead.
func (*DiffInfo) Descriptor() ([]byte, []int) {
	return file_snapshot_proto_rawDescGZIP(), []int{10}
}

func (x *DiffInfo) GetSha256Hash() string {
	if x != nil {
		return x.Sha256Hash
	}
	return ""
}

func (x *DiffInfo) GetSizeBytes() int64 {
	if x != nil {
		return x.SizeBytes
	}
	return 0
}

var File_snapshot_proto protoreflect.FileDescriptor

const file_snapshot_proto_rawDesc = "" +
@@ -472,11 +639,27 @@ const file_snapshot_proto_rawDesc = "" +
	"\tfile_path\x18\x02 \x01(\tR\bfilePath\"5\n" +
	"\x14DownloadFileResponse\x12\x1d\n" +
	"\n" +
	"chunk_data\x18\x01 \x01(\fR\tchunkData2\x8a\x03\n" +
	"chunk_data\x18\x01 \x01(\fR\tchunkData\"~\n" +
	"\x1bDownloadSnapshotDiffRequest\x12\x1f\n" +
	"\vsnapshot_id\x18\x01 \x01(\tR\n" +
	"snapshotId\x12&\n" +
	"\x0flocal_parent_id\x18\x02 \x01(\tR\rlocalParentId\x12\x16\n" +
	"\x06offset\x18\x03 \x01(\x03R\x06offset\"]\n" +
	"\x12GetDiffInfoRequest\x12\x1f\n" +
	"\vsnapshot_id\x18\x01 \x01(\tR\n" +
	"snapshotId\x12&\n" +
	"\x0flocal_parent_id\x18\x02 \x01(\tR\rlocalParentId\"J\n" +
	"\bDiffInfo\x12\x1f\n" +
	"\vsha256_hash\x18\x01 \x01(\tR\n" +
	"sha256Hash\x12\x1d\n" +
	"\n" +
	"size_bytes\x18\x02 \x01(\x03R\tsizeBytes2\xb8\x04\n" +
	"\x0fSnapshotService\x12k\n" +
	"\rListSnapshots\x12 .agate.grpc.ListSnapshotsRequest\x1a!.agate.grpc.ListSnapshotsResponse\"\x15\x82\xd3\xe4\x93\x02\x0f\x12\r/v1/snapshots\x12}\n" +
	"\x12GetSnapshotDetails\x12%.agate.grpc.GetSnapshotDetailsRequest\x1a\x1b.agate.grpc.SnapshotDetails\"#\x82\xd3\xe4\x93\x02\x1d\x12\x1b/v1/snapshots/{snapshot_id}\x12\x8a\x01\n" +
	"\fDownloadFile\x12\x1f.agate.grpc.DownloadFileRequest\x1a .agate.grpc.DownloadFileResponse\"5\x82\xd3\xe4\x93\x02/\x12-/v1/snapshots/{snapshot_id}/files/{file_path}0\x01B\x1cZ\x1aunprism.ru/KRBL/agate/grpcb\x06proto3"
	"\fDownloadFile\x12\x1f.agate.grpc.DownloadFileRequest\x1a .agate.grpc.DownloadFileResponse\"5\x82\xd3\xe4\x93\x02/\x12-/v1/snapshots/{snapshot_id}/files/{file_path}0\x01\x12e\n" +
	"\x14DownloadSnapshotDiff\x12'.agate.grpc.DownloadSnapshotDiffRequest\x1a .agate.grpc.DownloadFileResponse\"\x000\x01\x12E\n" +
	"\vGetDiffInfo\x12\x1e.agate.grpc.GetDiffInfoRequest\x1a\x14.agate.grpc.DiffInfo\"\x00B\"Z gitea.unprism.ru/KRBL/Agate/grpcb\x06proto3"

var (
	file_snapshot_proto_rawDescOnce sync.Once
@@ -490,7 +673,7 @@ func file_snapshot_proto_rawDescGZIP() []byte {
	return file_snapshot_proto_rawDescData
}

var file_snapshot_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_snapshot_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
var file_snapshot_proto_goTypes = []any{
	(*FileInfo)(nil),     // 0: agate.grpc.FileInfo
	(*SnapshotInfo)(nil), // 1: agate.grpc.SnapshotInfo
@@ -500,21 +683,28 @@ var file_snapshot_proto_goTypes = []any{
	(*GetSnapshotDetailsRequest)(nil),   // 5: agate.grpc.GetSnapshotDetailsRequest
	(*DownloadFileRequest)(nil),         // 6: agate.grpc.DownloadFileRequest
	(*DownloadFileResponse)(nil),        // 7: agate.grpc.DownloadFileResponse
	(*timestamppb.Timestamp)(nil),       // 8: google.protobuf.Timestamp
	(*DownloadSnapshotDiffRequest)(nil), // 8: agate.grpc.DownloadSnapshotDiffRequest
	(*GetDiffInfoRequest)(nil),          // 9: agate.grpc.GetDiffInfoRequest
	(*DiffInfo)(nil),                    // 10: agate.grpc.DiffInfo
	(*timestamppb.Timestamp)(nil),       // 11: google.protobuf.Timestamp
}
var file_snapshot_proto_depIdxs = []int32{
	8,  // 0: agate.grpc.SnapshotInfo.creation_time:type_name -> google.protobuf.Timestamp
	11, // 0: agate.grpc.SnapshotInfo.creation_time:type_name -> google.protobuf.Timestamp
	1,  // 1: agate.grpc.SnapshotDetails.info:type_name -> agate.grpc.SnapshotInfo
	0,  // 2: agate.grpc.SnapshotDetails.files:type_name -> agate.grpc.FileInfo
	1,  // 3: agate.grpc.ListSnapshotsResponse.snapshots:type_name -> agate.grpc.SnapshotInfo
	3,  // 4: agate.grpc.SnapshotService.ListSnapshots:input_type -> agate.grpc.ListSnapshotsRequest
	5,  // 5: agate.grpc.SnapshotService.GetSnapshotDetails:input_type -> agate.grpc.GetSnapshotDetailsRequest
	6,  // 6: agate.grpc.SnapshotService.DownloadFile:input_type -> agate.grpc.DownloadFileRequest
	4,  // 7: agate.grpc.SnapshotService.ListSnapshots:output_type -> agate.grpc.ListSnapshotsResponse
	8,  // 7: agate.grpc.SnapshotService.DownloadSnapshotDiff:input_type -> agate.grpc.DownloadSnapshotDiffRequest
	2,  // 8: agate.grpc.SnapshotService.GetSnapshotDetails:output_type -> agate.grpc.SnapshotDetails
	9,  // 8: agate.grpc.SnapshotService.GetDiffInfo:input_type -> agate.grpc.GetDiffInfoRequest
	7,  // 9: agate.grpc.SnapshotService.DownloadFile:output_type -> agate.grpc.DownloadFileResponse
	4,  // 9: agate.grpc.SnapshotService.ListSnapshots:output_type -> agate.grpc.ListSnapshotsResponse
	7,  // [7:10] is the sub-list for method output_type
	2,  // 10: agate.grpc.SnapshotService.GetSnapshotDetails:output_type -> agate.grpc.SnapshotDetails
	4,  // [4:7] is the sub-list for method input_type
	7,  // 11: agate.grpc.SnapshotService.DownloadFile:output_type -> agate.grpc.DownloadFileResponse
	7,  // 12: agate.grpc.SnapshotService.DownloadSnapshotDiff:output_type -> agate.grpc.DownloadFileResponse
	10, // 13: agate.grpc.SnapshotService.GetDiffInfo:output_type -> agate.grpc.DiffInfo
	9,  // [9:14] is the sub-list for method output_type
	4,  // [4:9] is the sub-list for method input_type
	4,  // [4:4] is the sub-list for extension type_name
	4,  // [4:4] is the sub-list for extension extendee
	0,  // [0:4] is the sub-list for field type_name
@@ -531,7 +721,7 @@ func file_snapshot_proto_init() {
		GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
		RawDescriptor: unsafe.Slice(unsafe.StringData(file_snapshot_proto_rawDesc), len(file_snapshot_proto_rawDesc)),
		NumEnums:      0,
		NumMessages:   8,
		NumMessages:   11,
		NumExtensions: 0,
		NumServices:   1,
	},
@@ -40,7 +40,9 @@ func request_SnapshotService_ListSnapshots_0(ctx context.Context, marshaler runt
		protoReq ListSnapshotsRequest
		metadata runtime.ServerMetadata
	)
	io.Copy(io.Discard, req.Body)
	if req.Body != nil {
		_, _ = io.Copy(io.Discard, req.Body)
	}
	msg, err := client.ListSnapshots(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err
}
@@ -60,7 +62,9 @@ func request_SnapshotService_GetSnapshotDetails_0(ctx context.Context, marshaler
		metadata runtime.ServerMetadata
		err      error
	)
	io.Copy(io.Discard, req.Body)
	if req.Body != nil {
		_, _ = io.Copy(io.Discard, req.Body)
	}
	val, ok := pathParams["snapshot_id"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "snapshot_id")
@@ -97,7 +101,9 @@ func request_SnapshotService_DownloadFile_0(ctx context.Context, marshaler runti
		metadata runtime.ServerMetadata
		err      error
	)
	io.Copy(io.Discard, req.Body)
	if req.Body != nil {
		_, _ = io.Copy(io.Discard, req.Body)
	}
	val, ok := pathParams["snapshot_id"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "snapshot_id")
@@ -3,9 +3,9 @@ syntax = "proto3";
package agate.grpc;

import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto"; // Added for HTTP mapping
import "google/api/annotations.proto";

option go_package = "unprism.ru/KRBL/agate/grpc";
option go_package = "gitea.unprism.ru/KRBL/Agate/grpc";

// Service for managing snapshots
service SnapshotService {
@@ -30,77 +30,74 @@ service SnapshotService {
  };
}

// --- Management methods (optional; may be left out of the public client API) ---
// Download an archive containing only the difference between two snapshots
// Create a new snapshot from a directory (if the server is allowed to initiate it)
rpc DownloadSnapshotDiff(DownloadSnapshotDiffRequest) returns (stream DownloadFileResponse) {}
// rpc CreateSnapshot(CreateSnapshotRequest) returns (Snapshot);

// Delete a snapshot (if required)
// Get information about a diff (hash and size)
// rpc DeleteSnapshot(DeleteSnapshotRequest) returns (DeleteSnapshotResponse);
rpc GetDiffInfo(GetDiffInfoRequest) returns (DiffInfo) {}
}

// File metadata inside a snapshot
message FileInfo {
  string path = 1; // Relative file path inside the snapshot
  string path = 1;
  int64 size_bytes = 2; // File size in bytes
  int64 size_bytes = 2;
  string sha256_hash = 3; // File checksum (SHA256)
  string sha256_hash = 3;
  bool is_dir = 4; // Whether the entry is a directory
  bool is_dir = 4;
}

// Brief information about a snapshot
message SnapshotInfo {
  string id = 1; // Unique snapshot ID (UUID)
  string id = 1;
  string name = 2; // Snapshot name
  string name = 2;
  string parent_id = 3; // Parent snapshot ID (may be empty)
  string parent_id = 3;
  google.protobuf.Timestamp creation_time = 4; // Creation time
  google.protobuf.Timestamp creation_time = 4;
}

// Detailed information about a snapshot
message SnapshotDetails {
  SnapshotInfo info = 1; // Brief information
  SnapshotInfo info = 1;
  repeated FileInfo files = 2; // List of files in the snapshot
  repeated FileInfo files = 2;
}

// Request for the list of snapshots (filters/pagination could be added)
// Request for the list of snapshots
message ListSnapshotsRequest {
message ListSnapshotsRequest {}
  // string filter_by_name = 1;
  // int32 page_size = 2;
  // string page_token = 3;
}

// Response with the list of snapshots
message ListSnapshotsResponse {
  repeated SnapshotInfo snapshots = 1;
  // string next_page_token = 2;
}

// Request for snapshot details
message GetSnapshotDetailsRequest {
  string snapshot_id = 1; // ID of the requested snapshot
  string snapshot_id = 1;
}

// Request to download a file
message DownloadFileRequest {
  string snapshot_id = 1; // Snapshot ID
  string snapshot_id = 1;
  string file_path = 2; // Path to the file inside the snapshot
  string file_path = 2;
}

// Response (a file chunk) when downloading
message DownloadFileResponse {
  bytes chunk_data = 1; // A chunk of the file data
  bytes chunk_data = 1;
}

// --- Messages for the optional management methods ---
// Request to download the difference between snapshots
/*
message DownloadSnapshotDiffRequest {
message CreateSnapshotRequest {
  string snapshot_id = 1; // Target snapshot ID
  string source_path = 1; // Path to the directory on the server
  string local_parent_id = 2; // ID of a snapshot the client already has
  string name = 2;
  int64 offset = 3; // Byte offset for resuming the download
  string parent_id = 3; // Optional
}

message DeleteSnapshotRequest {
// Request for information about a diff
message GetDiffInfoRequest {
  string snapshot_id = 1;
  string local_parent_id = 2;
}

message DeleteSnapshotResponse {
// Information about a diff
  bool success = 1;
message DiffInfo {
  string sha256_hash = 1;
  int64 size_bytes = 2;
}
*/
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v4.25.3
// - protoc v6.32.0
// source: snapshot.proto

package grpc
@@ -22,6 +22,8 @@ const (
	SnapshotService_ListSnapshots_FullMethodName        = "/agate.grpc.SnapshotService/ListSnapshots"
	SnapshotService_GetSnapshotDetails_FullMethodName   = "/agate.grpc.SnapshotService/GetSnapshotDetails"
	SnapshotService_DownloadFile_FullMethodName         = "/agate.grpc.SnapshotService/DownloadFile"
	SnapshotService_DownloadSnapshotDiff_FullMethodName = "/agate.grpc.SnapshotService/DownloadSnapshotDiff"
	SnapshotService_GetDiffInfo_FullMethodName          = "/agate.grpc.SnapshotService/GetDiffInfo"
)

// SnapshotServiceClient is the client API for SnapshotService service.
@@ -36,6 +38,10 @@ type SnapshotServiceClient interface {
	GetSnapshotDetails(ctx context.Context, in *GetSnapshotDetailsRequest, opts ...grpc.CallOption) (*SnapshotDetails, error)
	// Download a specific file from a snapshot (server streaming)
	DownloadFile(ctx context.Context, in *DownloadFileRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[DownloadFileResponse], error)
	// Download an archive containing only the difference between two snapshots
	DownloadSnapshotDiff(ctx context.Context, in *DownloadSnapshotDiffRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[DownloadFileResponse], error)
	// Get information about a diff (hash and size)
	GetDiffInfo(ctx context.Context, in *GetDiffInfoRequest, opts ...grpc.CallOption) (*DiffInfo, error)
}

type snapshotServiceClient struct {
@@ -85,6 +91,35 @@ func (c *snapshotServiceClient) DownloadFile(ctx context.Context, in *DownloadFi
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SnapshotService_DownloadFileClient = grpc.ServerStreamingClient[DownloadFileResponse]

func (c *snapshotServiceClient) DownloadSnapshotDiff(ctx context.Context, in *DownloadSnapshotDiffRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[DownloadFileResponse], error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	stream, err := c.cc.NewStream(ctx, &SnapshotService_ServiceDesc.Streams[1], SnapshotService_DownloadSnapshotDiff_FullMethodName, cOpts...)
	if err != nil {
		return nil, err
	}
	x := &grpc.GenericClientStream[DownloadSnapshotDiffRequest, DownloadFileResponse]{ClientStream: stream}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SnapshotService_DownloadSnapshotDiffClient = grpc.ServerStreamingClient[DownloadFileResponse]

func (c *snapshotServiceClient) GetDiffInfo(ctx context.Context, in *GetDiffInfoRequest, opts ...grpc.CallOption) (*DiffInfo, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(DiffInfo)
	err := c.cc.Invoke(ctx, SnapshotService_GetDiffInfo_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// SnapshotServiceServer is the server API for SnapshotService service.
// All implementations must embed UnimplementedSnapshotServiceServer
// for forward compatibility.
@@ -97,6 +132,10 @@ type SnapshotServiceServer interface {
	GetSnapshotDetails(context.Context, *GetSnapshotDetailsRequest) (*SnapshotDetails, error)
	// Download a specific file from a snapshot (server streaming)
	DownloadFile(*DownloadFileRequest, grpc.ServerStreamingServer[DownloadFileResponse]) error
	// Download an archive containing only the difference between two snapshots
	DownloadSnapshotDiff(*DownloadSnapshotDiffRequest, grpc.ServerStreamingServer[DownloadFileResponse]) error
	// Get information about a diff (hash and size)
	GetDiffInfo(context.Context, *GetDiffInfoRequest) (*DiffInfo, error)
	mustEmbedUnimplementedSnapshotServiceServer()
}

@@ -116,6 +155,12 @@ func (UnimplementedSnapshotServiceServer) GetSnapshotDetails(context.Context, *G
func (UnimplementedSnapshotServiceServer) DownloadFile(*DownloadFileRequest, grpc.ServerStreamingServer[DownloadFileResponse]) error {
	return status.Errorf(codes.Unimplemented, "method DownloadFile not implemented")
}
func (UnimplementedSnapshotServiceServer) DownloadSnapshotDiff(*DownloadSnapshotDiffRequest, grpc.ServerStreamingServer[DownloadFileResponse]) error {
	return status.Errorf(codes.Unimplemented, "method DownloadSnapshotDiff not implemented")
}
func (UnimplementedSnapshotServiceServer) GetDiffInfo(context.Context, *GetDiffInfoRequest) (*DiffInfo, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetDiffInfo not implemented")
}
func (UnimplementedSnapshotServiceServer) mustEmbedUnimplementedSnapshotServiceServer() {}
func (UnimplementedSnapshotServiceServer) testEmbeddedByValue() {}

@@ -184,6 +229,35 @@ func _SnapshotService_DownloadFile_Handler(srv interface{}, stream grpc.ServerSt
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SnapshotService_DownloadFileServer = grpc.ServerStreamingServer[DownloadFileResponse]

func _SnapshotService_DownloadSnapshotDiff_Handler(srv interface{}, stream grpc.ServerStream) error {
	m := new(DownloadSnapshotDiffRequest)
	if err := stream.RecvMsg(m); err != nil {
		return err
	}
	return srv.(SnapshotServiceServer).DownloadSnapshotDiff(m, &grpc.GenericServerStream[DownloadSnapshotDiffRequest, DownloadFileResponse]{ServerStream: stream})
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SnapshotService_DownloadSnapshotDiffServer = grpc.ServerStreamingServer[DownloadFileResponse]

func _SnapshotService_GetDiffInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetDiffInfoRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SnapshotServiceServer).GetDiffInfo(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: SnapshotService_GetDiffInfo_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SnapshotServiceServer).GetDiffInfo(ctx, req.(*GetDiffInfoRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// SnapshotService_ServiceDesc is the grpc.ServiceDesc for SnapshotService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -199,6 +273,10 @@ var SnapshotService_ServiceDesc = grpc.ServiceDesc{
			MethodName: "GetSnapshotDetails",
			Handler:    _SnapshotService_GetSnapshotDetails_Handler,
		},
		{
			MethodName: "GetDiffInfo",
			Handler:    _SnapshotService_GetDiffInfo_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
@@ -206,6 +284,11 @@ var SnapshotService_ServiceDesc = grpc.ServiceDesc{
			Handler:       _SnapshotService_DownloadFile_Handler,
			ServerStreams: true,
		},
		{
			StreamName:    "DownloadSnapshotDiff",
			Handler:       _SnapshotService_DownloadSnapshotDiff_Handler,
			ServerStreams: true,
		},
	},
	Metadata: "snapshot.proto",
}
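As a rough, hypothetical sketch of how the new RPCs could be exercised from Go with the generated client (the server address, snapshot IDs, and output file name are placeholders; error handling is abbreviated):

```go
// Hypothetical usage sketch for the generated SnapshotService client.
package main

import (
	"context"
	"io"
	"log"
	"os"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	pb "gitea.unprism.ru/KRBL/Agate/grpc"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := pb.NewSnapshotServiceClient(conn)
	ctx := context.Background()

	// Ask the server for the expected hash and size of the diff archive.
	info, err := client.GetDiffInfo(ctx, &pb.GetDiffInfoRequest{
		SnapshotId:    "snapshot-b-id", // placeholder IDs
		LocalParentId: "snapshot-a-id",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("diff: %d bytes, sha256 %s", info.GetSizeBytes(), info.GetSha256Hash())

	// Stream the diff archive to a local file; Offset allows resuming a partial download.
	stream, err := client.DownloadSnapshotDiff(ctx, &pb.DownloadSnapshotDiffRequest{
		SnapshotId:    "snapshot-b-id",
		LocalParentId: "snapshot-a-id",
		Offset:        0,
	})
	if err != nil {
		log.Fatal(err)
	}
	out, err := os.Create("diff.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if _, err := out.Write(resp.GetChunkData()); err != nil {
			log.Fatal(err)
		}
	}
}
```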
142 grpc_test.go Normal file
@@ -0,0 +1,142 @@
package agate

import (
	"context"
	"log"
	"os"
	"path/filepath"
	"testing"
	"time"

	"gitea.unprism.ru/KRBL/Agate/remote"
)

// TestFullUpdateCycle tests a complete workflow: full download, then incremental update.
func TestFullUpdateCycle(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping full gRPC update cycle test in short mode")
	}

	// --- 1. Setup Server ---
	serverDir, err := os.MkdirTemp("", "agate-server-*")
	if err != nil {
		t.Fatalf("Failed to create server temp directory: %v", err)
	}
	defer os.RemoveAll(serverDir)

	serverAgate, err := New(AgateOptions{WorkDir: serverDir})
	if err != nil {
		t.Fatalf("Failed to create server Agate instance: %v", err)
	}
	defer serverAgate.Close()

	// --- 2. Create Initial Snapshot (A) ---
	dataDir := serverAgate.GetActiveDir()
	if err := os.WriteFile(filepath.Join(dataDir, "file1.txt"), []byte("Version 1"), 0644); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(dataDir, "file2.txt"), []byte("Original Content"), 0644); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	snapshotAID, err := serverAgate.SaveSnapshot(ctx, "Snapshot A", "")
	if err != nil {
		t.Fatalf("Failed to create Snapshot A: %v", err)
	}
	t.Logf("Created Snapshot A with ID: %s", snapshotAID)

	// --- 3. Start Server ---
	serverAddress := "localhost:50051"
	server := remote.NewServer(serverAgate.manager)
	go func() {
		if err := server.Start(ctx, serverAddress); err != nil && err != context.Canceled {
			log.Printf("Server start error: %v", err)
		}
	}()
	defer server.Stop()
	time.Sleep(100 * time.Millisecond)

	// --- 4. Setup Client ---
	clientDir, err := os.MkdirTemp("", "agate-client-*")
	if err != nil {
		t.Fatalf("Failed to create client temp directory: %v", err)
	}
	defer os.RemoveAll(clientDir)

	clientAgate, err := New(AgateOptions{WorkDir: clientDir, CleanOnRestore: true})
	if err != nil {
		t.Fatalf("Failed to create client Agate instance: %v", err)
	}
	defer clientAgate.Close()

	// --- 5. Client Performs Full Download of Snapshot A ---
	t.Log("Client performing full download of Snapshot A...")
	if err := clientAgate.GetRemoteSnapshot(ctx, serverAddress, snapshotAID, ""); err != nil {
		t.Fatalf("Client failed to get Snapshot A: %v", err)
	}

	// Verify content of Snapshot A on client
	if err := clientAgate.RestoreSnapshot(ctx, snapshotAID); err != nil {
		t.Fatalf("Failed to restore Snapshot A: %v", err)
	}
	verifyFileContent(t, clientAgate.GetActiveDir(), "file1.txt", "Version 1")
	verifyFileContent(t, clientAgate.GetActiveDir(), "file2.txt", "Original Content")
	t.Log("Snapshot A verified on client.")

	// --- 6. Server Creates Incremental Snapshot (B) ---
	if err := os.WriteFile(filepath.Join(dataDir, "file1.txt"), []byte("Version 2"), 0644); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(dataDir, "file3.txt"), []byte("New File"), 0644); err != nil {
		t.Fatal(err)
	}
	if err := os.Remove(filepath.Join(dataDir, "file2.txt")); err != nil {
		t.Fatal(err)
	}

	snapshotBID, err := serverAgate.SaveSnapshot(ctx, "Snapshot B", snapshotAID)
	if err != nil {
		t.Fatalf("Failed to create Snapshot B: %v", err)
	}
	t.Logf("Created Snapshot B with ID: %s", snapshotBID)

	// --- 7. Client Performs Incremental Download of Snapshot B ---
	t.Log("Client performing incremental download of Snapshot B...")
	parentIDOnClient := clientAgate.GetCurrentSnapshotID()
	if parentIDOnClient != snapshotAID {
		t.Fatalf("Client has incorrect current snapshot ID. Got %s, want %s", parentIDOnClient, snapshotAID)
	}

	if err := clientAgate.GetRemoteSnapshot(ctx, serverAddress, snapshotBID, parentIDOnClient); err != nil {
		t.Fatalf("Client failed to get Snapshot B: %v", err)
	}

	// --- 8. Verify Final State on Client ---
	if err := clientAgate.RestoreSnapshot(ctx, snapshotBID); err != nil {
		t.Fatalf("Failed to restore Snapshot B: %v", err)
	}
	t.Log("Snapshot B restored on client. Verifying content...")

	verifyFileContent(t, clientAgate.GetActiveDir(), "file1.txt", "Version 2")
	verifyFileContent(t, clientAgate.GetActiveDir(), "file3.txt", "New File")

	// Verify that file2.txt was removed because CleanOnRestore is true
	if _, err := os.Stat(filepath.Join(clientAgate.GetActiveDir(), "file2.txt")); !os.IsNotExist(err) {
		t.Errorf("file2.txt should have been removed after restoring Snapshot B, but it still exists.")
	}
	t.Log("Final state verified successfully!")
}

func verifyFileContent(t *testing.T, dir, filename, expectedContent string) {
	t.Helper()
	content, err := os.ReadFile(filepath.Join(dir, filename))
	if err != nil {
		t.Fatalf("Failed to read file %s: %v", filename, err)
	}
	if string(content) != expectedContent {
		t.Errorf("File %s has wrong content: got '%s', want '%s'", filename, string(content), expectedContent)
	}
}
95 hash/hash_test.go Normal file
@@ -0,0 +1,95 @@
package hash

import (
	"crypto/sha256"
	"encoding/hex"
	"os"
	"path/filepath"
	"testing"
)

func TestCalculateFileHash(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a test file with known content
	testContent := "This is a test file for hashing"
	testFilePath := filepath.Join(tempDir, "test_file.txt")
	if err := os.WriteFile(testFilePath, []byte(testContent), 0644); err != nil {
		t.Fatalf("Failed to create test file: %v", err)
	}

	// Calculate the expected hash manually
	hasher := sha256.New()
	hasher.Write([]byte(testContent))
	expectedHash := hex.EncodeToString(hasher.Sum(nil))

	// Calculate the hash using the function
	hash, err := CalculateFileHash(testFilePath)
	if err != nil {
		t.Fatalf("Failed to calculate file hash: %v", err)
	}

	// Check that the hash matches the expected value
	if hash != expectedHash {
		t.Errorf("Hash does not match: got %s, want %s", hash, expectedHash)
	}

	// Test with a non-existent file
	_, err = CalculateFileHash(filepath.Join(tempDir, "nonexistent.txt"))
	if err == nil {
		t.Fatalf("Expected error when calculating hash of non-existent file, got nil")
	}

	// Test with a directory
	dirPath := filepath.Join(tempDir, "test_dir")
	if err := os.MkdirAll(dirPath, 0755); err != nil {
		t.Fatalf("Failed to create test directory: %v", err)
	}
	_, err = CalculateFileHash(dirPath)
	if err == nil {
		t.Fatalf("Expected error when calculating hash of a directory, got nil")
	}

	// Test with an empty file
	emptyFilePath := filepath.Join(tempDir, "empty_file.txt")
	if err := os.WriteFile(emptyFilePath, []byte{}, 0644); err != nil {
		t.Fatalf("Failed to create empty test file: %v", err)
	}
	emptyHash, err := CalculateFileHash(emptyFilePath)
	if err != nil {
		t.Fatalf("Failed to calculate hash of empty file: %v", err)
	}
	// The SHA-256 hash of an empty string is e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
	expectedEmptyHash := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	if emptyHash != expectedEmptyHash {
		t.Errorf("Empty file hash does not match: got %s, want %s", emptyHash, expectedEmptyHash)
	}

	// Test with a large file
	largeFilePath := filepath.Join(tempDir, "large_file.bin")
	largeFileSize := 1024 * 1024 // 1 MB
	largeFile, err := os.Create(largeFilePath)
	if err != nil {
		t.Fatalf("Failed to create large test file: %v", err)
	}
	// Fill the file with a repeating pattern
	pattern := []byte("0123456789")
	for i := 0; i < largeFileSize/len(pattern); i++ {
		if _, err := largeFile.Write(pattern); err != nil {
			largeFile.Close()
			t.Fatalf("Failed to write to large test file: %v", err)
		}
	}
	largeFile.Close()

	// Calculate the hash of the large file
	_, err = CalculateFileHash(largeFilePath)
	if err != nil {
		t.Fatalf("Failed to calculate hash of large file: %v", err)
	}
}
77 interfaces/snapshot.go Normal file
@@ -0,0 +1,77 @@
package interfaces

import (
	"context"
	"io"

	"gitea.unprism.ru/KRBL/Agate/store"
)

// SnapshotManager is an interface that defines operations for managing and interacting with snapshots.
type SnapshotManager interface {
	// CreateSnapshot creates a new snapshot from the specified source directory, associating it with a given name and parent ID.
	// Returns the created Snapshot with its metadata or an error if the process fails.
	CreateSnapshot(ctx context.Context, sourceDir string, name string, parentID string) (*store.Snapshot, error)

	// CreateSnapshotAsync initiates a background process to create a snapshot.
	// Returns the job ID (which is also the snapshot ID) or an error if the process couldn't start.
	// onStart is called in the background goroutine before the snapshot creation starts.
	// onFinish is called in the background goroutine after the snapshot creation finishes (successfully or with error).
	CreateSnapshotAsync(ctx context.Context, sourceDir string, name string, parentID string, onStart func(), onFinish func(string, error)) (string, error)

	// GetSnapshotStatus retrieves the status of an asynchronous snapshot creation job.
	GetSnapshotStatus(ctx context.Context, jobID string) (*store.SnapshotStatus, error)

	// GetSnapshotDetails retrieves detailed metadata for a specific snapshot identified by its unique snapshotID.
	// Returns a Snapshot object containing metadata.
	GetSnapshotDetails(ctx context.Context, snapshotID string) (*store.Snapshot, error)

	// ListSnapshots retrieves a list of available snapshots with filtering and pagination options.
	ListSnapshots(ctx context.Context, opts store.ListOptions) ([]store.SnapshotInfo, error)

	// DeleteSnapshot removes a snapshot identified by snapshotID. Returns an error if the snapshot does not exist or cannot be deleted.
	DeleteSnapshot(ctx context.Context, snapshotID string) error

	// OpenFile retrieves and opens a file from the specified snapshot, returning a readable stream and an error, if any.
	OpenFile(ctx context.Context, snapshotID string, filePath string) (io.ReadCloser, error)

	// ExtractSnapshot extracts the contents of a specified snapshot to a target directory at the given path.
	// If cleanTarget is true, the target directory will be cleaned before extraction.
	// Returns an error if the snapshot ID is invalid or the extraction fails.
	ExtractSnapshot(ctx context.Context, snapshotID string, path string, cleanTarget bool) error

	// UpdateSnapshotMetadata updates the metadata of an existing snapshot, allowing changes to its name.
	UpdateSnapshotMetadata(ctx context.Context, snapshotID string, newName string) error

	// StreamSnapshotDiff creates and streams a differential archive between two snapshots.
	// It returns an io.ReadCloser for the archive stream and an error.
	// The caller is responsible for closing the reader, which will also handle cleanup of temporary resources.
	StreamSnapshotDiff(ctx context.Context, snapshotID, parentID string, offset int64) (io.ReadCloser, error)

	// GetSnapshotDiffInfo calculates the hash and size of a differential archive between two snapshots.
	GetSnapshotDiffInfo(ctx context.Context, snapshotID, parentID string) (*store.DiffInfo, error)
}

// SnapshotServer defines the interface for a server that can share snapshots
type SnapshotServer interface {
	// Start initializes and begins the server's operation, handling incoming requests or processes within the provided context.
	Start(ctx context.Context) error

	// Stop gracefully shuts down the server, releasing any allocated resources and ensuring all operations are completed.
	Stop(ctx context.Context) error
}

// SnapshotClient defines the interface for a client that can connect to a server and download snapshots
type SnapshotClient interface {
	// ListSnapshots retrieves a list of snapshots from the server
	ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error)

	// FetchSnapshotDetails retrieves detailed information about a specific snapshot
	FetchSnapshotDetails(ctx context.Context, snapshotID string) (*store.Snapshot, error)

	// DownloadSnapshotDiff downloads a differential archive between two snapshots to a target directory
	DownloadSnapshotDiff(ctx context.Context, snapshotID, localParentID, targetPath string) error

	// Close closes the connection to the server
	Close() error
}
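To show how the diff-related methods of this interface are meant to compose, here is a minimal, hypothetical sketch; the helper name `copyDiff`, the `dst` writer, and the assumption that `store.DiffInfo` exposes a `SizeBytes` field mirroring the proto message are illustrative, not part of the library:

```go
// Hypothetical sketch of using SnapshotManager's diff methods; copyDiff and
// the assumed store.DiffInfo.SizeBytes field are illustrative only.
package example

import (
	"context"
	"io"
	"log"

	"gitea.unprism.ru/KRBL/Agate/interfaces"
)

func copyDiff(ctx context.Context, mgr interfaces.SnapshotManager, snapshotID, parentID string, dst io.Writer) error {
	// Ask for the expected size/hash of the diff archive first (e.g. to verify the transfer).
	info, err := mgr.GetSnapshotDiffInfo(ctx, snapshotID, parentID)
	if err != nil {
		return err
	}
	log.Printf("streaming diff of %d bytes", info.SizeBytes) // SizeBytes is assumed from the proto's DiffInfo

	// Stream the archive from offset 0; the caller must close the reader.
	rc, err := mgr.StreamSnapshotDiff(ctx, snapshotID, parentID, 0)
	if err != nil {
		return err
	}
	defer rc.Close()

	_, err = io.Copy(dst, rc)
	return err
}
```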
376 manager.go
@@ -2,33 +2,51 @@ package agate

import (
	"archive/zip"
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"log"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"gitea.unprism.ru/KRBL/Agate/models"
	"github.com/google/uuid"

	"unprism.ru/KRBL/agate/archive"
	"gitea.unprism.ru/KRBL/Agate/archive"
	"unprism.ru/KRBL/agate/hash"
	"gitea.unprism.ru/KRBL/Agate/hash"
	"unprism.ru/KRBL/agate/store"
	"gitea.unprism.ru/KRBL/Agate/interfaces"
	"gitea.unprism.ru/KRBL/Agate/store"
)

type SnapshotManagerData struct {
	metadataStore store.MetadataStore
	blobStore     store.BlobStore
	logger        *log.Logger
	jobs          map[string]*store.SnapshotStatus
	jobsMutex     sync.RWMutex
}

func CreateSnapshotManager(metadataStore store.MetadataStore, blobStore store.BlobStore) (SnapshotManager, error) {
func CreateSnapshotManager(metadataStore store.MetadataStore, blobStore store.BlobStore, logger *log.Logger) (interfaces.SnapshotManager, error) {
	if metadataStore == nil || blobStore == nil {
		return nil, errors.New("parameters can't be nil")
	}

	return &SnapshotManagerData{metadataStore, blobStore}, nil
	// Ensure logger is never nil.
	if logger == nil {
		logger = log.New(io.Discard, "", 0)
	}

	return &SnapshotManagerData{
		metadataStore: metadataStore,
		blobStore:     blobStore,
		logger:        logger,
		jobs:          make(map[string]*store.SnapshotStatus),
	}, nil
}

func (data *SnapshotManagerData) CreateSnapshot(ctx context.Context, sourceDir string, name string, parentID string) (*store.Snapshot, error) {
@@ -36,40 +54,130 @@ func (data *SnapshotManagerData) CreateSnapshot(ctx context.Context, sourceDir s
	info, err := os.Stat(sourceDir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, ErrSourceNotFound
			return nil, models.ErrSourceNotFound
		}
		return nil, fmt.Errorf("failed to access source directory: %w", err)
	}

	if !info.IsDir() {
		return nil, ErrSourceNotDirectory
		return nil, models.ErrSourceNotDirectory
	}

	// Check if parent exists if specified
	if parentID != "" {
		_, err := data.metadataStore.GetSnapshotMetadata(ctx, parentID)
		if err != nil {
			if errors.Is(err, ErrNotFound) {
			fmt.Println("failed to check parent snapshot: %w", err)
				return nil, ErrParentNotFound
			parentID = ""
			}
			return nil, fmt.Errorf("failed to check parent snapshot: %w", err)
		}
	}

	// Generate a unique ID for the snapshot
	snapshotID := uuid.New().String()

	// Create a temporary file for the archive
	return data.createSnapshotInternal(ctx, sourceDir, name, parentID, snapshotID, nil)
	tempFile, err := os.CreateTemp("", "agate-snapshot-*.zip")
}

func (data *SnapshotManagerData) CreateSnapshotAsync(ctx context.Context, sourceDir string, name string, parentID string, onStart func(), onFinish func(string, error)) (string, error) {
	// Validate source directory
	info, err := os.Stat(sourceDir)
	if err != nil {
		return nil, fmt.Errorf("failed to create temporary file: %w", err)
		if os.IsNotExist(err) {
			return "", models.ErrSourceNotFound
		}
		return "", fmt.Errorf("failed to access source directory: %w", err)
	}

	if !info.IsDir() {
		return "", models.ErrSourceNotDirectory
	}

	// Check if parent exists if specified
	if parentID != "" {
		_, err := data.metadataStore.GetSnapshotMetadata(ctx, parentID)
		if err != nil {
			fmt.Println("failed to check parent snapshot: %w", err)
			parentID = ""
		}
	}

	snapshotID := uuid.New().String()

	data.jobsMutex.Lock()
	data.jobs[snapshotID] = &store.SnapshotStatus{
		ID:       snapshotID,
		Status:   "pending",
		Progress: 0,
	}
	data.jobsMutex.Unlock()

	go func() {
		if onStart != nil {
			onStart()
		}

		data.jobsMutex.Lock()
		if job, ok := data.jobs[snapshotID]; ok {
			job.Status = "running"
		}
		data.jobsMutex.Unlock()

		_, err := data.createSnapshotInternal(context.Background(), sourceDir, name, parentID, snapshotID, func(current, total int64) {
			data.jobsMutex.Lock()
			defer data.jobsMutex.Unlock()
			if job, ok := data.jobs[snapshotID]; ok {
				if total > 0 {
					job.Progress = float64(current) / float64(total)
				}
			}
		})

		data.jobsMutex.Lock()
		if job, ok := data.jobs[snapshotID]; ok {
			if err != nil {
				job.Status = "failed"
				job.Error = err.Error()
			} else {
				job.Status = "done"
				job.Progress = 1.0
			}
		}
		data.jobsMutex.Unlock()

		if onFinish != nil {
			onFinish(snapshotID, err)
		}
	}()

	return snapshotID, nil
}

func (data *SnapshotManagerData) GetSnapshotStatus(ctx context.Context, jobID string) (*store.SnapshotStatus, error) {
	data.jobsMutex.RLock()
	defer data.jobsMutex.RUnlock()

	job, ok := data.jobs[jobID]
	if !ok {
		return nil, models.ErrNotFound
	}

	// Return a copy to avoid race conditions
	statusCopy := *job
	return &statusCopy, nil
}
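For orientation, a hedged sketch of how a caller might drive the asynchronous API above; the `waitForSnapshot` helper, the `mgr` value, the snapshot name, and the polling interval are assumptions for illustration:

```go
// Hypothetical usage sketch for CreateSnapshotAsync and GetSnapshotStatus.
package example

import (
	"context"
	"errors"
	"log"
	"time"

	"gitea.unprism.ru/KRBL/Agate/interfaces"
)

func waitForSnapshot(ctx context.Context, mgr interfaces.SnapshotManager, sourceDir string) (string, error) {
	jobID, err := mgr.CreateSnapshotAsync(ctx, sourceDir, "nightly", "",
		func() { log.Println("snapshot started") },
		func(id string, err error) { log.Printf("snapshot %s finished: %v", id, err) },
	)
	if err != nil {
		return "", err
	}

	// Poll the job until it reaches a terminal state ("done" or "failed"),
	// matching the status strings set by the manager above.
	for {
		status, err := mgr.GetSnapshotStatus(ctx, jobID)
		if err != nil {
			return "", err
		}
		log.Printf("job %s: %s (%.0f%%)", jobID, status.Status, status.Progress*100)
		if status.Status == "done" {
			return jobID, nil
		}
		if status.Status == "failed" {
			return "", errors.New(status.Error)
		}
		time.Sleep(500 * time.Millisecond)
	}
}
```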
|
|
||||||
|
func (data *SnapshotManagerData) createSnapshotInternal(ctx context.Context, sourceDir, name, parentID, snapshotID string, onProgress func(current, total int64)) (*store.Snapshot, error) {
|
||||||
|
// Create a temporary file for the archive in the working directory
|
||||||
|
tempFilePath := filepath.Join(data.blobStore.GetBaseDir(), "temp-"+snapshotID+".zip")
|
||||||
|
tempFile, err := os.Create(tempFilePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create temporary file in working directory: %w", err)
|
||||||
}
|
}
|
||||||
tempFilePath := tempFile.Name()
|
|
||||||
tempFile.Close() // Close it as CreateArchive will reopen it
|
tempFile.Close() // Close it as CreateArchive will reopen it
|
||||||
defer os.Remove(tempFilePath) // Clean up temp file after we're done
|
defer os.Remove(tempFilePath) // Clean up temp file after we're done
|
||||||
|
|
||||||
// Create archive of the source directory
|
// Create archive of the source directory
|
||||||
if err := archive.CreateArchive(sourceDir, tempFilePath); err != nil {
|
if err := archive.CreateArchiveWithProgress(sourceDir, tempFilePath, onProgress); err != nil {
|
||||||
return nil, fmt.Errorf("failed to create archive: %w", err)
|
return nil, fmt.Errorf("failed to create archive: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -154,8 +262,8 @@ func (data *SnapshotManagerData) GetSnapshotDetails(ctx context.Context, snapsho
|
|||||||
// Retrieve snapshot metadata from the store
|
// Retrieve snapshot metadata from the store
|
||||||
snapshot, err := data.metadataStore.GetSnapshotMetadata(ctx, snapshotID)
|
snapshot, err := data.metadataStore.GetSnapshotMetadata(ctx, snapshotID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, ErrNotFound) {
|
if errors.Is(err, models.ErrNotFound) {
|
||||||
return nil, ErrNotFound
|
return nil, models.ErrNotFound
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("failed to retrieve snapshot details: %w", err)
|
return nil, fmt.Errorf("failed to retrieve snapshot details: %w", err)
|
||||||
}
|
}
|
||||||
@@ -163,9 +271,9 @@ func (data *SnapshotManagerData) GetSnapshotDetails(ctx context.Context, snapsho
|
|||||||
return snapshot, nil
|
return snapshot, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (data *SnapshotManagerData) ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error) {
|
func (data *SnapshotManagerData) ListSnapshots(ctx context.Context, opts store.ListOptions) ([]store.SnapshotInfo, error) {
|
||||||
// Retrieve list of snapshots from the metadata store
|
// Retrieve list of snapshots from the metadata store with the provided options
|
||||||
snapshots, err := data.metadataStore.ListSnapshotsMetadata(ctx)
|
snapshots, err := data.metadataStore.ListSnapshotsMetadata(ctx, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to list snapshots: %w", err)
|
return nil, fmt.Errorf("failed to list snapshots: %w", err)
|
||||||
}
|
}
|
||||||
@@ -178,27 +286,38 @@ func (data *SnapshotManagerData) DeleteSnapshot(ctx context.Context, snapshotID
|
|||||||
return errors.New("snapshot ID cannot be empty")
|
return errors.New("snapshot ID cannot be empty")
|
||||||
}
|
}
|
||||||
|
|
||||||
// First check if the snapshot exists
|
snapshot, err := data.metadataStore.GetSnapshotMetadata(ctx, snapshotID)
|
||||||
_, err := data.metadataStore.GetSnapshotMetadata(ctx, snapshotID)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, ErrNotFound) {
|
if errors.Is(err, models.ErrNotFound) {
|
||||||
// If snapshot doesn't exist, return success (idempotent operation)
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return fmt.Errorf("failed to check if snapshot exists: %w", err)
|
return fmt.Errorf("failed to check if snapshot exists: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete the metadata first
|
parentID := snapshot.ParentID
|
||||||
|
|
||||||
|
opts := store.ListOptions{}
|
||||||
|
allSnapshots, err := data.metadataStore.ListSnapshotsMetadata(ctx, opts)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to list snapshots: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, info := range allSnapshots {
|
||||||
|
if info.ParentID == snapshotID {
|
||||||
|
if err := data.metadataStore.UpdateSnapshotParentID(ctx, info.ID, parentID); err != nil {
|
||||||
|
data.logger.Printf("WARNING: failed to update parent reference for snapshot %s: %v", info.ID, err)
|
||||||
|
} else {
|
||||||
|
data.logger.Printf("Updated parent reference for snapshot %s from %s to %s", info.ID, snapshotID, parentID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if err := data.metadataStore.DeleteSnapshotMetadata(ctx, snapshotID); err != nil {
|
if err := data.metadataStore.DeleteSnapshotMetadata(ctx, snapshotID); err != nil {
|
||||||
return fmt.Errorf("failed to delete snapshot metadata: %w", err)
|
return fmt.Errorf("failed to delete snapshot metadata: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Then delete the blob
|
|
||||||
if err := data.blobStore.DeleteBlob(ctx, snapshotID); err != nil {
|
if err := data.blobStore.DeleteBlob(ctx, snapshotID); err != nil {
|
||||||
// Note: We don't return here because we've already deleted the metadata
|
data.logger.Printf("WARNING: failed to delete snapshot blob: %v", err)
|
||||||
// and the blob store should handle the case where the blob doesn't exist
|
|
||||||
// Log the error instead
|
|
||||||
fmt.Printf("Warning: failed to delete snapshot blob: %v\n", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -215,8 +334,8 @@ func (data *SnapshotManagerData) OpenFile(ctx context.Context, snapshotID string
	// First check if the snapshot exists
	_, err := data.metadataStore.GetSnapshotMetadata(ctx, snapshotID)
	if err != nil {
-		if errors.Is(err, ErrNotFound) {
-			return nil, ErrNotFound
+		if errors.Is(err, models.ErrNotFound) {
+			return nil, models.ErrNotFound
		}
		return nil, fmt.Errorf("failed to check if snapshot exists: %w", err)
	}
@@ -239,7 +358,7 @@ func (data *SnapshotManagerData) OpenFile(ctx context.Context, snapshotID string
		err := archive.ExtractFileFromArchive(blobPath, filePath, pw)
		if err != nil {
			if errors.Is(err, archive.ErrFileNotFoundInArchive) {
-				pw.CloseWithError(ErrFileNotFound)
+				pw.CloseWithError(models.ErrFileNotFound)
				return
			}
			pw.CloseWithError(fmt.Errorf("failed to extract file from archive: %w", err))
@@ -249,19 +368,21 @@ func (data *SnapshotManagerData) OpenFile(ctx context.Context, snapshotID string
	return pr, nil
}

-func (data *SnapshotManagerData) ExtractSnapshot(ctx context.Context, snapshotID string, path string) error {
+func (data *SnapshotManagerData) ExtractSnapshot(ctx context.Context, snapshotID string, path string, cleanTarget bool) error {
	if snapshotID == "" {
		return errors.New("snapshot ID cannot be empty")
	}

+	// If no specific path is provided, use the active directory
	if path == "" {
-		return errors.New("target path cannot be empty")
+		path = data.blobStore.GetActiveDir()
	}

	// First check if the snapshot exists
	_, err := data.metadataStore.GetSnapshotMetadata(ctx, snapshotID)
	if err != nil {
-		if errors.Is(err, ErrNotFound) {
-			return ErrNotFound
+		if errors.Is(err, models.ErrNotFound) {
+			return models.ErrNotFound
		}
		return fmt.Errorf("failed to check if snapshot exists: %w", err)
	}
@@ -272,10 +393,21 @@ func (data *SnapshotManagerData) ExtractSnapshot(ctx context.Context, snapshotID
		return fmt.Errorf("failed to get blob path: %w", err)
	}

-	// Ensure the target directory exists
+	// If cleanTarget is true, clean the target directory before extraction
+	if cleanTarget {
+		// Remove the directory and recreate it
+		if err := os.RemoveAll(path); err != nil {
+			return fmt.Errorf("failed to clean target directory: %w", err)
+		}
		if err := os.MkdirAll(path, 0755); err != nil {
			return fmt.Errorf("failed to create target directory: %w", err)
		}
+	} else {
+		// Just ensure the target directory exists
+		if err := os.MkdirAll(path, 0755); err != nil {
+			return fmt.Errorf("failed to create target directory: %w", err)
+		}
+	}

	// Extract the archive to the target directory
	if err := extractArchive(blobPath, path); err != nil {
@@ -351,22 +483,178 @@ func (data *SnapshotManagerData) UpdateSnapshotMetadata(ctx context.Context, sna
		return errors.New("new name cannot be empty")
	}

-	// Get the current snapshot metadata
	snapshot, err := data.metadataStore.GetSnapshotMetadata(ctx, snapshotID)
	if err != nil {
-		if errors.Is(err, ErrNotFound) {
-			return ErrNotFound
+		if errors.Is(err, models.ErrNotFound) {
+			return models.ErrNotFound
		}
		return fmt.Errorf("failed to get snapshot metadata: %w", err)
	}

-	// Update the name
	snapshot.Name = newName

-	// Save the updated metadata
	if err := data.metadataStore.SaveSnapshotMetadata(ctx, *snapshot); err != nil {
		return fmt.Errorf("failed to update snapshot metadata: %w", err)
	}

	return nil
}

+func (data *SnapshotManagerData) GetSnapshotDiffInfo(ctx context.Context, snapshotID, parentID string) (*store.DiffInfo, error) {
+	tempArchivePath, tempStagingDir, err := data.createDiffArchive(ctx, snapshotID, parentID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create diff archive for info: %w", err)
+	}
+
+	if tempArchivePath == "" {
+		return &store.DiffInfo{SHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", Size: 0}, nil // sha256 of empty string
+	}
+
+	defer os.Remove(tempArchivePath)
+	if tempStagingDir != "" {
+		defer os.RemoveAll(tempStagingDir)
+	}
+
+	hash, err := hash.CalculateFileHash(tempArchivePath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to calculate hash for diff archive: %w", err)
+	}
+
+	stat, err := os.Stat(tempArchivePath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get size of diff archive: %w", err)
+	}
+
+	return &store.DiffInfo{
+		SHA256: hash,
+		Size:   stat.Size(),
+	}, nil
+}
+
+// diffArchiveReader is a wrapper around an *os.File that handles cleanup of temporary files.
+type diffArchiveReader struct {
+	*os.File
+	tempArchive string
+	tempStaging string
+}
+
+// Close closes the file and removes the temporary archive and staging directory.
+func (r *diffArchiveReader) Close() error {
+	err := r.File.Close()
+	_ = os.Remove(r.tempArchive)
+	_ = os.RemoveAll(r.tempStaging)
+	return err
+}
+
+func (data *SnapshotManagerData) createDiffArchive(ctx context.Context, snapshotID, parentID string) (string, string, error) {
+	targetSnap, err := data.metadataStore.GetSnapshotMetadata(ctx, snapshotID)
+	if err != nil {
+		return "", "", fmt.Errorf("failed to get target snapshot metadata: %w", err)
+	}
+
+	parentFiles := make(map[string]string)
+	if parentID != "" {
+		parentSnap, err := data.metadataStore.GetSnapshotMetadata(ctx, parentID)
+		if err == nil {
+			for _, file := range parentSnap.Files {
+				if !file.IsDir {
+					parentFiles[file.Path] = file.SHA256
+				}
+			}
+		} else {
+			data.logger.Printf("Warning: failed to get parent snapshot %s, creating full diff: %v", parentID, err)
+		}
+	}
+
+	var filesToInclude []string
+	for _, file := range targetSnap.Files {
+		if file.IsDir {
+			continue
+		}
+		if parentHash, ok := parentFiles[file.Path]; !ok || parentHash != file.SHA256 {
+			filesToInclude = append(filesToInclude, file.Path)
+		}
+	}
+
+	if len(filesToInclude) == 0 {
+		return "", "", nil
+	}
+
+	tempStagingDir, err := os.MkdirTemp(data.blobStore.GetBaseDir(), "diff-staging-*")
+	if err != nil {
+		return "", "", fmt.Errorf("failed to create temp staging directory: %w", err)
+	}
+
+	targetBlobPath, err := data.blobStore.GetBlobPath(ctx, snapshotID)
+	if err != nil {
+		os.RemoveAll(tempStagingDir)
+		return "", "", err
+	}
+
+	for _, filePath := range filesToInclude {
+		destPath := filepath.Join(tempStagingDir, filePath)
+		if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
+			os.RemoveAll(tempStagingDir)
+			return "", "", fmt.Errorf("failed to create dir for diff file: %w", err)
+		}
+
+		fileWriter, err := os.Create(destPath)
+		if err != nil {
+			os.RemoveAll(tempStagingDir)
+			return "", "", err
+		}
+
+		err = archive.ExtractFileFromArchive(targetBlobPath, filePath, fileWriter)
+		fileWriter.Close()
+		if err != nil {
+			os.RemoveAll(tempStagingDir)
+			return "", "", fmt.Errorf("failed to extract file %s for diff: %w", filePath, err)
+		}
+	}
+
+	tempArchivePath := filepath.Join(data.blobStore.GetBaseDir(), "diff-"+snapshotID+".zip")
+	if err := archive.CreateArchive(tempStagingDir, tempArchivePath); err != nil {
+		_ = os.RemoveAll(tempStagingDir)
+		_ = os.Remove(tempArchivePath)
+		return "", "", fmt.Errorf("failed to create diff archive: %w", err)
+	}
+
+	return tempArchivePath, tempStagingDir, nil
+}
+
+func (data *SnapshotManagerData) StreamSnapshotDiff(ctx context.Context, snapshotID, parentID string, offset int64) (io.ReadCloser, error) {
+	tempArchivePath, tempStagingDir, err := data.createDiffArchive(ctx, snapshotID, parentID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create diff archive for streaming: %w", err)
+	}
+
+	if tempArchivePath == "" {
+		return io.NopCloser(bytes.NewReader(nil)), nil
+	}
+
+	archiveFile, err := os.Open(tempArchivePath)
+	if err != nil {
+		if tempStagingDir != "" {
+			os.RemoveAll(tempStagingDir)
+		}
+		os.Remove(tempArchivePath)
+		return nil, err
+	}
+
+	if offset > 0 {
+		if _, err := archiveFile.Seek(offset, io.SeekStart); err != nil {
+			archiveFile.Close()
+			if tempStagingDir != "" {
+				os.RemoveAll(tempStagingDir)
+			}
+			os.Remove(tempArchivePath)
+			return nil, fmt.Errorf("failed to seek in diff archive: %w", err)
+		}
+	}
+
+	return &diffArchiveReader{
+		File:        archiveFile,
+		tempArchive: tempArchivePath,
+		tempStaging: tempStagingDir,
+	}, nil
+}
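Before the new tests below, a brief orientation: the two manager methods reworked or added in this hunk are `ExtractSnapshot`, which now takes a `cleanTarget` flag, and `StreamSnapshotDiff`, whose returned reader also removes the temporary diff archive on `Close`. The following is a minimal, hypothetical usage sketch; the `diffStreamer` interface, the function name, and the paths are illustrative only and are not part of the library.

```go
package main

import (
	"context"
	"io"
	"log"
	"os"
)

// diffStreamer lists only the two methods this sketch exercises; the manager
// created by CreateSnapshotManager in this changeset provides both.
type diffStreamer interface {
	ExtractSnapshot(ctx context.Context, snapshotID string, path string, cleanTarget bool) error
	StreamSnapshotDiff(ctx context.Context, snapshotID, parentID string, offset int64) (io.ReadCloser, error)
}

// restoreAndExportDiff is a sketch, not library code: restore a snapshot next
// to existing files, then save the differential archive against parentID.
func restoreAndExportDiff(m diffStreamer, snapshotID, parentID string) error {
	ctx := context.Background()

	// cleanTarget=false keeps files already present in ./restore.
	if err := m.ExtractSnapshot(ctx, snapshotID, "./restore", false); err != nil {
		return err
	}

	// Stream only the files that differ from parentID, starting at offset 0.
	rc, err := m.StreamSnapshotDiff(ctx, snapshotID, parentID, 0)
	if err != nil {
		return err
	}
	defer rc.Close() // also removes the manager's temporary diff archive

	out, err := os.Create("diff-" + snapshotID + ".zip")
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, rc)
	return err
}

func main() {
	log.Println("restoreAndExportDiff is a sketch; wire it to a real SnapshotManager to use it")
}
```

Passing `true` for `cleanTarget` would wipe the target directory first, exactly as the hunk above implements.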
613
manager_test.go
Normal file
@@ -0,0 +1,613 @@
package agate

import (
	"context"
	"io"
	"os"
	"path/filepath"
	"testing"

	"gitea.unprism.ru/KRBL/Agate/store"
	"gitea.unprism.ru/KRBL/Agate/store/filesystem"
	"gitea.unprism.ru/KRBL/Agate/store/sqlite"
)

// setupTestEnvironment creates a temporary directory and initializes the stores
func setupTestEnvironment(t *testing.T) (string, store.MetadataStore, store.BlobStore, func()) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}

	// Create directories for metadata and blobs
	metadataDir := filepath.Join(tempDir, "metadata")
	blobsDir := filepath.Join(tempDir, "blobs")
	if err := os.MkdirAll(metadataDir, 0755); err != nil {
		os.RemoveAll(tempDir)
		t.Fatalf("Failed to create metadata directory: %v", err)
	}
	if err := os.MkdirAll(blobsDir, 0755); err != nil {
		os.RemoveAll(tempDir)
		t.Fatalf("Failed to create blobs directory: %v", err)
	}

	// Initialize the stores
	dbPath := filepath.Join(metadataDir, "snapshots.db")
	metadataStore, err := sqlite.NewSQLiteStore(dbPath)
	if err != nil {
		os.RemoveAll(tempDir)
		t.Fatalf("Failed to create metadata store: %v", err)
	}

	blobStore, err := filesystem.NewFileSystemStore(blobsDir)
	if err != nil {
		metadataStore.Close()
		os.RemoveAll(tempDir)
		t.Fatalf("Failed to create blob store: %v", err)
	}

	// Return a cleanup function
	cleanup := func() {
		metadataStore.Close()
		os.RemoveAll(tempDir)
	}

	return tempDir, metadataStore, blobStore, cleanup
}

// createTestFiles creates test files in the specified directory
func createTestFiles(t *testing.T, dir string) {
	// Create a subdirectory
	subDir := filepath.Join(dir, "subdir")
	if err := os.MkdirAll(subDir, 0755); err != nil {
		t.Fatalf("Failed to create subdirectory: %v", err)
	}

	// Create some test files
	testFiles := map[string]string{
		filepath.Join(dir, "file1.txt"):       "This is file 1",
		filepath.Join(dir, "file2.txt"):       "This is file 2",
		filepath.Join(subDir, "subfile1.txt"): "This is subfile 1",
		filepath.Join(subDir, "subfile2.txt"): "This is subfile 2",
	}

	for path, content := range testFiles {
		if err := os.WriteFile(path, []byte(content), 0644); err != nil {
			t.Fatalf("Failed to create test file %s: %v", path, err)
		}
	}
}

func TestCreateAndGetSnapshot(t *testing.T) {
	tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
	defer cleanup()

	// Create a source directory with test files
	sourceDir := filepath.Join(tempDir, "source")
	if err := os.MkdirAll(sourceDir, 0755); err != nil {
		t.Fatalf("Failed to create source directory: %v", err)
	}
	createTestFiles(t, sourceDir)

	// Create a snapshot manager with nil logger
	manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
	if err != nil {
		t.Fatalf("Failed to create snapshot manager: %v", err)
	}

	// Create a snapshot
	ctx := context.Background()
	snapshot, err := manager.CreateSnapshot(ctx, sourceDir, "Test Snapshot", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Check that the snapshot was created with the correct name
	if snapshot.Name != "Test Snapshot" {
		t.Errorf("Snapshot has wrong name: got %s, want %s", snapshot.Name, "Test Snapshot")
	}

	// Check that the snapshot has the correct number of files
	if len(snapshot.Files) != 5 { // 4 files + 1 directory
		t.Errorf("Snapshot has wrong number of files: got %d, want %d", len(snapshot.Files), 5)
	}

	// Get the snapshot details
	retrievedSnapshot, err := manager.GetSnapshotDetails(ctx, snapshot.ID)
	if err != nil {
		t.Fatalf("Failed to get snapshot details: %v", err)
	}

	// Check that the retrieved snapshot matches the original
	if retrievedSnapshot.ID != snapshot.ID {
		t.Errorf("Retrieved snapshot ID does not match: got %s, want %s", retrievedSnapshot.ID, snapshot.ID)
	}
	if retrievedSnapshot.Name != snapshot.Name {
		t.Errorf("Retrieved snapshot name does not match: got %s, want %s", retrievedSnapshot.Name, snapshot.Name)
	}
	if len(retrievedSnapshot.Files) != len(snapshot.Files) {
		t.Errorf("Retrieved snapshot has wrong number of files: got %d, want %d", len(retrievedSnapshot.Files), len(snapshot.Files))
	}
}

func TestListSnapshots(t *testing.T) {
	tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
	defer cleanup()

	// Create a source directory with test files
	sourceDir := filepath.Join(tempDir, "source")
	if err := os.MkdirAll(sourceDir, 0755); err != nil {
		t.Fatalf("Failed to create source directory: %v", err)
	}
	createTestFiles(t, sourceDir)

	// Create a snapshot manager with nil logger
	manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
	if err != nil {
		t.Fatalf("Failed to create snapshot manager: %v", err)
	}

	// Create multiple snapshots
	ctx := context.Background()
	snapshot1, err := manager.CreateSnapshot(ctx, sourceDir, "Snapshot 1", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Modify a file
	if err := os.WriteFile(filepath.Join(sourceDir, "file1.txt"), []byte("Modified file 1"), 0644); err != nil {
		t.Fatalf("Failed to modify test file: %v", err)
	}

	snapshot2, err := manager.CreateSnapshot(ctx, sourceDir, "Snapshot 2", snapshot1.ID)
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// List the snapshots with empty options
	snapshots, err := manager.ListSnapshots(ctx, store.ListOptions{})
	if err != nil {
		t.Fatalf("Failed to list snapshots: %v", err)
	}

	// Check that both snapshots are listed
	if len(snapshots) != 2 {
		t.Errorf("Wrong number of snapshots listed: got %d, want %d", len(snapshots), 2)
	}

	// Check that the snapshots have the correct information
	for _, snap := range snapshots {
		if snap.ID == snapshot1.ID {
			if snap.Name != "Snapshot 1" {
				t.Errorf("Snapshot 1 has wrong name: got %s, want %s", snap.Name, "Snapshot 1")
			}
			if snap.ParentID != "" {
				t.Errorf("Snapshot 1 has wrong parent ID: got %s, want %s", snap.ParentID, "")
			}
		} else if snap.ID == snapshot2.ID {
			if snap.Name != "Snapshot 2" {
				t.Errorf("Snapshot 2 has wrong name: got %s, want %s", snap.Name, "Snapshot 2")
			}
			if snap.ParentID != snapshot1.ID {
				t.Errorf("Snapshot 2 has wrong parent ID: got %s, want %s", snap.ParentID, snapshot1.ID)
			}
		} else {
			t.Errorf("Unexpected snapshot ID: %s", snap.ID)
		}
	}
}

func TestDeleteSnapshot(t *testing.T) {
	tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
	defer cleanup()

	// Create a source directory with test files
	sourceDir := filepath.Join(tempDir, "source")
	if err := os.MkdirAll(sourceDir, 0755); err != nil {
		t.Fatalf("Failed to create source directory: %v", err)
	}
	createTestFiles(t, sourceDir)

	// Create a snapshot manager with nil logger
	manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
	if err != nil {
		t.Fatalf("Failed to create snapshot manager: %v", err)
	}

	// Create a snapshot
	ctx := context.Background()
	snapshot, err := manager.CreateSnapshot(ctx, sourceDir, "Test Snapshot", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Delete the snapshot
	err = manager.DeleteSnapshot(ctx, snapshot.ID)
	if err != nil {
		t.Fatalf("Failed to delete snapshot: %v", err)
	}

	// Try to get the deleted snapshot
	_, err = manager.GetSnapshotDetails(ctx, snapshot.ID)
	if err == nil {
		t.Fatalf("Expected error when getting deleted snapshot, got nil")
	}

	// List snapshots to confirm it's gone
	snapshots, err := manager.ListSnapshots(ctx, store.ListOptions{})
	if err != nil {
		t.Fatalf("Failed to list snapshots: %v", err)
	}
	if len(snapshots) != 0 {
		t.Errorf("Expected 0 snapshots after deletion, got %d", len(snapshots))
	}
}

func TestOpenFile(t *testing.T) {
	_, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
	defer cleanup()

	// Create a source directory with test files
	sourceDir := filepath.Join(blobStore.GetActiveDir(), "source")
	if err := os.MkdirAll(sourceDir, 0755); err != nil {
		t.Fatalf("Failed to create source directory: %v", err)
	}
	createTestFiles(t, sourceDir)

	// Create a snapshot manager with nil logger
	manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
	if err != nil {
		t.Fatalf("Failed to create snapshot manager: %v", err)
	}

	// Create a snapshot
	ctx := context.Background()
	snapshot, err := manager.CreateSnapshot(ctx, sourceDir, "Test Snapshot", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Open a file from the snapshot
	fileReader, err := manager.OpenFile(ctx, snapshot.ID, "file1.txt")
	if err != nil {
		t.Fatalf("Failed to open file from snapshot: %v", err)
	}
	defer fileReader.Close()

	// Read the file content
	content, err := io.ReadAll(fileReader)
	if err != nil {
		t.Fatalf("Failed to read file content: %v", err)
	}

	// Check that the content matches the original
	if string(content) != "This is file 1" {
		t.Errorf("File content does not match: got %s, want %s", string(content), "This is file 1")
	}

	// Try to open a non-existent file
	pipe, err := manager.OpenFile(ctx, snapshot.ID, "nonexistent.txt")
	if err == nil {
		tmp := make([]byte, 1)
		_, err = pipe.Read(tmp)

		if err == nil {
			t.Fatalf("Expected error when opening non-existent file, got nil")
		}
	}
}

func TestExtractSnapshot(t *testing.T) {
	tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
	defer cleanup()

	// Create a source directory with test files
	sourceDir := filepath.Join(tempDir, "source")
	if err := os.MkdirAll(sourceDir, 0755); err != nil {
		t.Fatalf("Failed to create source directory: %v", err)
	}
	createTestFiles(t, sourceDir)

	// Create a snapshot manager with nil logger
	manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
	if err != nil {
		t.Fatalf("Failed to create snapshot manager: %v", err)
	}

	// Create a snapshot
	ctx := context.Background()
	snapshot, err := manager.CreateSnapshot(ctx, sourceDir, "Test Snapshot", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Create a target directory for extraction
	targetDir := filepath.Join(tempDir, "target")
	if err := os.MkdirAll(targetDir, 0755); err != nil {
		t.Fatalf("Failed to create target directory: %v", err)
	}

	// Extract the snapshot with default behavior (cleanTarget=false)
	err = manager.ExtractSnapshot(ctx, snapshot.ID, targetDir, false)
	if err != nil {
		t.Fatalf("Failed to extract snapshot: %v", err)
	}

	// Check that the files were extracted correctly
	testFiles := map[string]string{
		filepath.Join(targetDir, "file1.txt"):           "This is file 1",
		filepath.Join(targetDir, "file2.txt"):           "This is file 2",
		filepath.Join(targetDir, "subdir/subfile1.txt"): "This is subfile 1",
		filepath.Join(targetDir, "subdir/subfile2.txt"): "This is subfile 2",
	}

	for path, expectedContent := range testFiles {
		content, err := os.ReadFile(path)
		if err != nil {
			t.Fatalf("Failed to read extracted file %s: %v", path, err)
		}
		if string(content) != expectedContent {
			t.Errorf("Extracted file %s has wrong content: got %s, want %s", path, string(content), expectedContent)
		}
	}

	// Try to extract a non-existent snapshot
	err = manager.ExtractSnapshot(ctx, "nonexistent-id", targetDir, false)
	if err == nil {
		t.Fatalf("Expected error when extracting non-existent snapshot, got nil")
	}
}

// TestExtractSnapshot_SafeRestore tests that ExtractSnapshot with cleanTarget=false
// does not remove extra files in the target directory
func TestExtractSnapshot_SafeRestore(t *testing.T) {
	tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
	defer cleanup()

	// Create a source directory with test files
	sourceDir := filepath.Join(tempDir, "source")
	if err := os.MkdirAll(sourceDir, 0755); err != nil {
		t.Fatalf("Failed to create source directory: %v", err)
	}
	createTestFiles(t, sourceDir)

	// Create a snapshot manager with nil logger
	manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
	if err != nil {
		t.Fatalf("Failed to create snapshot manager: %v", err)
	}

	// Create a snapshot (snapshot A)
	ctx := context.Background()
	snapshot, err := manager.CreateSnapshot(ctx, sourceDir, "Snapshot A", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Create a target directory and place an "extra" file in it
	targetDir := filepath.Join(tempDir, "target")
	if err := os.MkdirAll(targetDir, 0755); err != nil {
		t.Fatalf("Failed to create target directory: %v", err)
	}
	extraFilePath := filepath.Join(targetDir, "extra.txt")
	if err := os.WriteFile(extraFilePath, []byte("This is an extra file"), 0644); err != nil {
		t.Fatalf("Failed to create extra file: %v", err)
	}

	// Extract the snapshot with cleanTarget=false
	err = manager.ExtractSnapshot(ctx, snapshot.ID, targetDir, false)
	if err != nil {
		t.Fatalf("Failed to extract snapshot: %v", err)
	}

	// Check that all files from the snapshot were restored
	testFiles := map[string]string{
		filepath.Join(targetDir, "file1.txt"):           "This is file 1",
		filepath.Join(targetDir, "file2.txt"):           "This is file 2",
		filepath.Join(targetDir, "subdir/subfile1.txt"): "This is subfile 1",
		filepath.Join(targetDir, "subdir/subfile2.txt"): "This is subfile 2",
	}

	for path, expectedContent := range testFiles {
		content, err := os.ReadFile(path)
		if err != nil {
			t.Fatalf("Failed to read extracted file %s: %v", path, err)
		}
		if string(content) != expectedContent {
			t.Errorf("Extracted file %s has wrong content: got %s, want %s", path, string(content), expectedContent)
		}
	}

	// Check that the extra file was NOT deleted
	if _, err := os.Stat(extraFilePath); os.IsNotExist(err) {
		t.Errorf("Extra file was deleted, but should have been preserved with cleanTarget=false")
	} else if err != nil {
		t.Fatalf("Failed to check if extra file exists: %v", err)
	} else {
		// Read the content to make sure it wasn't modified
		content, err := os.ReadFile(extraFilePath)
		if err != nil {
			t.Fatalf("Failed to read extra file: %v", err)
		}
		if string(content) != "This is an extra file" {
			t.Errorf("Extra file content was modified: got %s, want %s", string(content), "This is an extra file")
		}
	}
}

// TestExtractSnapshot_CleanRestore tests that ExtractSnapshot with cleanTarget=true
// completely cleans the target directory before restoration
func TestExtractSnapshot_CleanRestore(t *testing.T) {
	tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
	defer cleanup()

	// Create a source directory with test files
	sourceDir := filepath.Join(tempDir, "source")
	if err := os.MkdirAll(sourceDir, 0755); err != nil {
		t.Fatalf("Failed to create source directory: %v", err)
	}
	createTestFiles(t, sourceDir)

	// Create a snapshot manager with nil logger
	manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
	if err != nil {
		t.Fatalf("Failed to create snapshot manager: %v", err)
	}

	// Create a snapshot (snapshot A)
	ctx := context.Background()
	snapshot, err := manager.CreateSnapshot(ctx, sourceDir, "Snapshot A", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Create a target directory and place an "extra" file in it
	targetDir := filepath.Join(tempDir, "target")
	if err := os.MkdirAll(targetDir, 0755); err != nil {
		t.Fatalf("Failed to create target directory: %v", err)
	}
	extraFilePath := filepath.Join(targetDir, "extra.txt")
	if err := os.WriteFile(extraFilePath, []byte("This is an extra file"), 0644); err != nil {
		t.Fatalf("Failed to create extra file: %v", err)
	}

	// Extract the snapshot with cleanTarget=true
	err = manager.ExtractSnapshot(ctx, snapshot.ID, targetDir, true)
	if err != nil {
		t.Fatalf("Failed to extract snapshot: %v", err)
	}

	// Check that all files from the snapshot were restored
	testFiles := map[string]string{
		filepath.Join(targetDir, "file1.txt"):           "This is file 1",
		filepath.Join(targetDir, "file2.txt"):           "This is file 2",
		filepath.Join(targetDir, "subdir/subfile1.txt"): "This is subfile 1",
		filepath.Join(targetDir, "subdir/subfile2.txt"): "This is subfile 2",
	}

	for path, expectedContent := range testFiles {
		content, err := os.ReadFile(path)
		if err != nil {
			t.Fatalf("Failed to read extracted file %s: %v", path, err)
		}
		if string(content) != expectedContent {
			t.Errorf("Extracted file %s has wrong content: got %s, want %s", path, string(content), expectedContent)
		}
	}

	// Check that the extra file WAS deleted
	if _, err := os.Stat(extraFilePath); os.IsNotExist(err) {
		// This is the expected behavior
	} else if err != nil {
		t.Fatalf("Failed to check if extra file exists: %v", err)
	} else {
		t.Errorf("Extra file was not deleted, but should have been removed with cleanTarget=true")
	}
}

func TestUpdateSnapshotMetadata(t *testing.T) {
	tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
	defer cleanup()

	// Create a source directory with test files
	sourceDir := filepath.Join(tempDir, "source")
	if err := os.MkdirAll(sourceDir, 0755); err != nil {
		t.Fatalf("Failed to create source directory: %v", err)
	}
	createTestFiles(t, sourceDir)

	// Create a snapshot manager with nil logger
	manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
	if err != nil {
		t.Fatalf("Failed to create snapshot manager: %v", err)
	}

	// Create a snapshot
	ctx := context.Background()
	snapshot, err := manager.CreateSnapshot(ctx, sourceDir, "Test Snapshot", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Update the snapshot metadata
	newName := "Updated Snapshot Name"
	err = manager.UpdateSnapshotMetadata(ctx, snapshot.ID, newName)
	if err != nil {
		t.Fatalf("Failed to update snapshot metadata: %v", err)
	}

	// Get the updated snapshot
	updatedSnapshot, err := manager.GetSnapshotDetails(ctx, snapshot.ID)
	if err != nil {
		t.Fatalf("Failed to get updated snapshot: %v", err)
	}

	// Check that the name was updated
	if updatedSnapshot.Name != newName {
		t.Errorf("Snapshot name was not updated: got %s, want %s", updatedSnapshot.Name, newName)
	}

	// Try to update a non-existent snapshot
	err = manager.UpdateSnapshotMetadata(ctx, "nonexistent-id", "New Name")
	if err == nil {
		t.Fatalf("Expected error when updating non-existent snapshot, got nil")
	}
}

func TestStreamSnapshotDiff_EdgeCases(t *testing.T) {
	tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
	defer cleanup()

	sourceDir := filepath.Join(tempDir, "source")
	os.MkdirAll(sourceDir, 0755)
	createTestFiles(t, sourceDir)

	manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
	if err != nil {
		t.Fatalf("Failed to create snapshot manager: %v", err)
	}
	ctx := context.Background()

	// Create two identical snapshots
	snap1, _ := manager.CreateSnapshot(ctx, sourceDir, "Snap1", "")
	snap2, _ := manager.CreateSnapshot(ctx, sourceDir, "Snap2", snap1.ID)

	// Test 1: Diff between identical snapshots should be empty
	reader, err := manager.StreamSnapshotDiff(ctx, snap2.ID, snap1.ID, 0)
	if err != nil {
		t.Fatalf("Expected no error for identical snapshots, got %v", err)
	}
	defer reader.Close()
	data, _ := io.ReadAll(reader)
	if len(data) != 0 {
		t.Errorf("Expected empty diff for identical snapshots, got %d bytes", len(data))
	}

	// Test 2: Diff with a non-existent parent should be a full archive
	reader, err = manager.StreamSnapshotDiff(ctx, snap1.ID, "non-existent-parent", 0)
	if err != nil {
		t.Fatalf("Expected no error for non-existent parent, got %v", err)
	}
	defer reader.Close()
	data, _ = io.ReadAll(reader)
	if len(data) == 0 {
		t.Error("Expected full archive for non-existent parent, got empty diff")
	}

	// Create an empty source dir
	emptyDir := filepath.Join(tempDir, "empty_source")
	os.MkdirAll(emptyDir, 0755)
	emptySnap, _ := manager.CreateSnapshot(ctx, emptyDir, "EmptySnap", "")

	// Test 3: Diff of an empty snapshot should be empty
	reader, err = manager.StreamSnapshotDiff(ctx, emptySnap.ID, "", 0)
	if err != nil {
		t.Fatalf("Expected no error for empty snapshot, got %v", err)
	}
	defer reader.Close()
	data, _ = io.ReadAll(reader)
	if len(data) != 0 {
		t.Errorf("Expected empty diff for empty snapshot, got %d bytes", len(data))
	}
}
@@ -1,4 +1,4 @@
-package agate
+package models

import "errors"
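The hunk above only changes the package clause: the sentinel errors move from package `agate` into a `models` package, which is why the manager code earlier in this diff now matches `models.ErrNotFound` and `models.ErrFileNotFound`. A minimal caller-side sketch, assuming the package is importable as `gitea.unprism.ru/KRBL/Agate/models` (the import path is an assumption, not shown in this diff):

```go
package main

import (
	"errors"
	"fmt"

	// Assumed import path for the relocated errors package.
	"gitea.unprism.ru/KRBL/Agate/models"
)

// classify shows the intended check after the move: callers keep using
// errors.Is, only the package qualifier changes.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, models.ErrNotFound):
		return "snapshot not found"
	default:
		return "unexpected error"
	}
}

func main() {
	fmt.Println(classify(models.ErrNotFound))
}
```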
353
performance_test.go
Normal file
@@ -0,0 +1,353 @@
package agate

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"testing"
	"time"
)

// BenchmarkCreateSnapshot benchmarks the performance of creating snapshots with different numbers of files
func BenchmarkCreateSnapshot(b *testing.B) {
	// Skip in short mode
	if testing.Short() {
		b.Skip("Skipping benchmark in short mode")
	}

	// Test with different numbers of files
	fileCounts := []int{10, 100, 1000}
	for _, fileCount := range fileCounts {
		b.Run(fmt.Sprintf("Files-%d", fileCount), func(b *testing.B) {
			// Create a temporary directory for tests
			tempDir, err := os.MkdirTemp("", "agate-bench-*")
			if err != nil {
				b.Fatalf("Failed to create temp directory: %v", err)
			}
			defer os.RemoveAll(tempDir)

			// Create a data directory
			dataDir := filepath.Join(tempDir, "data")
			if err := os.MkdirAll(dataDir, 0755); err != nil {
				b.Fatalf("Failed to create data directory: %v", err)
			}

			// Create test files
			createBenchmarkFiles(b, dataDir, fileCount, 1024) // 1 KB per file

			// Create Agate options
			options := AgateOptions{
				WorkDir: dataDir,
				OpenFunc: func(dir string) error {
					return nil
				},
				CloseFunc: func() error {
					return nil
				},
			}

			// Create Agate instance
			ag, err := New(options)
			if err != nil {
				b.Fatalf("Failed to create Agate instance: %v", err)
			}
			defer ag.Close()

			// Reset the timer before the benchmark loop
			b.ResetTimer()

			// Run the benchmark
			for i := 0; i < b.N; i++ {
				ctx := context.Background()
				_, err := ag.SaveSnapshot(ctx, fmt.Sprintf("Benchmark Snapshot %d", i), "")
				if err != nil {
					b.Fatalf("Failed to create snapshot: %v", err)
				}
			}
		})
	}
}

// BenchmarkRestoreSnapshot benchmarks the performance of restoring snapshots with different numbers of files
func BenchmarkRestoreSnapshot(b *testing.B) {
	// Skip in short mode
	if testing.Short() {
		b.Skip("Skipping benchmark in short mode")
	}

	// Test with different numbers of files
	fileCounts := []int{10, 100, 1000}
	for _, fileCount := range fileCounts {
		b.Run(fmt.Sprintf("Files-%d", fileCount), func(b *testing.B) {
			// Create a temporary directory for tests
			tempDir, err := os.MkdirTemp("", "agate-bench-*")
			if err != nil {
				b.Fatalf("Failed to create temp directory: %v", err)
			}
			defer os.RemoveAll(tempDir)

			// Create a data directory
			dataDir := filepath.Join(tempDir, "data")
			if err := os.MkdirAll(dataDir, 0755); err != nil {
				b.Fatalf("Failed to create data directory: %v", err)
			}

			// Create test files
			createBenchmarkFiles(b, dataDir, fileCount, 1024) // 1 KB per file

			// Create Agate options
			options := AgateOptions{
				WorkDir: dataDir,
				OpenFunc: func(dir string) error {
					return nil
				},
				CloseFunc: func() error {
					return nil
				},
			}

			// Create Agate instance
			ag, err := New(options)
			if err != nil {
				b.Fatalf("Failed to create Agate instance: %v", err)
			}
			defer ag.Close()

			// Create a snapshot
			ctx := context.Background()
			snapshotID, err := ag.SaveSnapshot(ctx, "Benchmark Snapshot", "")
			if err != nil {
				b.Fatalf("Failed to create snapshot: %v", err)
			}

			// Modify some files
			for i := 0; i < fileCount/2; i++ {
				filePath := filepath.Join(dataDir, fmt.Sprintf("file_%d.txt", i))
				if err := os.WriteFile(filePath, []byte(fmt.Sprintf("Modified content %d", i)), 0644); err != nil {
					b.Fatalf("Failed to modify file: %v", err)
				}
			}

			// Reset the timer before the benchmark loop
			b.ResetTimer()

			// Run the benchmark
			for i := 0; i < b.N; i++ {
				err := ag.RestoreSnapshot(ctx, snapshotID)
				if err != nil {
					b.Fatalf("Failed to restore snapshot: %v", err)
				}
			}
		})
	}
}

// BenchmarkLargeFiles benchmarks the performance of creating and restoring snapshots with large files
func BenchmarkLargeFiles(b *testing.B) {
	// Skip in short mode
	if testing.Short() {
		b.Skip("Skipping benchmark in short mode")
	}

	// Test with different file sizes
	fileSizes := []int{1 * 1024 * 1024, 10 * 1024 * 1024, 100 * 1024 * 1024} // 1 MB, 10 MB, 100 MB
	for _, fileSize := range fileSizes {
		b.Run(fmt.Sprintf("Size-%dMB", fileSize/(1024*1024)), func(b *testing.B) {
			// Create a temporary directory for tests
			tempDir, err := os.MkdirTemp("", "agate-bench-*")
			if err != nil {
				b.Fatalf("Failed to create temp directory: %v", err)
			}
			defer os.RemoveAll(tempDir)

			// Create a data directory
			dataDir := filepath.Join(tempDir, "data")
			if err := os.MkdirAll(dataDir, 0755); err != nil {
				b.Fatalf("Failed to create data directory: %v", err)
			}

			// Create a large file
			largeFilePath := filepath.Join(dataDir, "large_file.bin")
			createLargeFile(b, largeFilePath, fileSize)

			// Create Agate options
			options := AgateOptions{
				WorkDir: dataDir,
				OpenFunc: func(dir string) error {
					return nil
				},
				CloseFunc: func() error {
					return nil
				},
			}

			// Create Agate instance
			ag, err := New(options)
			if err != nil {
				b.Fatalf("Failed to create Agate instance: %v", err)
			}
			defer ag.Close()

			// Create a snapshot
			ctx := context.Background()

			// Measure snapshot creation time
			b.Run("Create", func(b *testing.B) {
				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					_, err := ag.SaveSnapshot(ctx, fmt.Sprintf("Large File Snapshot %d", i), "")
					if err != nil {
						b.Fatalf("Failed to create snapshot: %v", err)
					}
				}
			})

			// Create a snapshot for restoration benchmark
			snapshotID, err := ag.SaveSnapshot(ctx, "Large File Snapshot", "")
			if err != nil {
				b.Fatalf("Failed to create snapshot: %v", err)
			}

			// Modify the large file
			if err := os.WriteFile(largeFilePath, []byte("Modified content"), 0644); err != nil {
				b.Fatalf("Failed to modify large file: %v", err)
			}

			// Measure snapshot restoration time
			b.Run("Restore", func(b *testing.B) {
				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					err := ag.RestoreSnapshot(ctx, snapshotID)
					if err != nil {
						b.Fatalf("Failed to restore snapshot: %v", err)
					}
				}
			})
		})
	}
}

// TestPerformanceMetrics runs performance tests and reports metrics
func TestPerformanceMetrics(t *testing.T) {
	// Skip in short mode
	if testing.Short() {
		t.Skip("Skipping performance metrics test in short mode")
	}

	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-perf-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir)

	// Create a data directory
	dataDir := filepath.Join(tempDir, "data")
	if err := os.MkdirAll(dataDir, 0755); err != nil {
		t.Fatalf("Failed to create data directory: %v", err)
	}

	// Test with different numbers of files
	fileCounts := []int{10, 100, 1000}
	for _, fileCount := range fileCounts {
		t.Run(fmt.Sprintf("Files-%d", fileCount), func(t *testing.T) {
			// Create test files
			createBenchmarkFiles(t, dataDir, fileCount, 1024) // 1 KB per file

			// Create Agate options
			options := AgateOptions{
				WorkDir: dataDir,
				OpenFunc: func(dir string) error {
					return nil
				},
				CloseFunc: func() error {
					return nil
				},
			}

			// Create Agate instance
			ag, err := New(options)
			if err != nil {
				t.Fatalf("Failed to create Agate instance: %v", err)
			}
			defer ag.Close()

			// Measure snapshot creation time
			ctx := context.Background()
			startTime := time.Now()
			snapshotID, err := ag.SaveSnapshot(ctx, "Performance Test Snapshot", "")
			if err != nil {
				t.Fatalf("Failed to create snapshot: %v", err)
			}
			createDuration := time.Since(startTime)
			t.Logf("Created snapshot with %d files in %v (%.2f files/sec)", fileCount, createDuration, float64(fileCount)/createDuration.Seconds())

			// Modify some files
			for i := 0; i < fileCount/2; i++ {
				filePath := filepath.Join(dataDir, fmt.Sprintf("file_%d.txt", i))
				if err := os.WriteFile(filePath, []byte(fmt.Sprintf("Modified content %d", i)), 0644); err != nil {
					t.Fatalf("Failed to modify file: %v", err)
				}
			}

			// Measure snapshot restoration time
			startTime = time.Now()
			err = ag.RestoreSnapshot(ctx, snapshotID)
			if err != nil {
				t.Fatalf("Failed to restore snapshot: %v", err)
			}
			restoreDuration := time.Since(startTime)
			t.Logf("Restored snapshot with %d files in %v (%.2f files/sec)", fileCount, restoreDuration, float64(fileCount)/restoreDuration.Seconds())
		})
	}
}

// Helper function to create benchmark files
func createBenchmarkFiles(tb testing.TB, dir string, count, size int) {
	tb.Helper()

	// Create files with sequential names
	for i := 0; i < count; i++ {
		filePath := filepath.Join(dir, fmt.Sprintf("file_%d.txt", i))

		// Create content of specified size
		content := make([]byte, size)
		for j := 0; j < size; j++ {
			content[j] = byte(j % 256)
		}

		if err := os.WriteFile(filePath, content, 0644); err != nil {
			tb.Fatalf("Failed to create benchmark file %s: %v", filePath, err)
		}
	}
}

// Helper function to create a large file
func createLargeFile(tb testing.TB, path string, size int) {
	tb.Helper()

	// Create the file
	file, err := os.Create(path)
	if err != nil {
		tb.Fatalf("Failed to create large file: %v", err)
	}
	defer file.Close()

	// Create a buffer with a pattern
	bufferSize := 8192 // 8 KB buffer
	buffer := make([]byte, bufferSize)
	for i := 0; i < bufferSize; i++ {
		buffer[i] = byte(i % 256)
	}

	// Write the buffer multiple times to reach the desired size
	bytesWritten := 0
	for bytesWritten < size {
		n, err := file.Write(buffer)
		if err != nil {
			tb.Fatalf("Failed to write to large file: %v", err)
		}
		bytesWritten += n
	}
}
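TestPerformanceMetrics above times each phase with `time.Now`/`time.Since` and logs files per second. The same measurement pattern, pulled out into a tiny standalone helper for reference; the helper name and the dummy phase are ours, not part of the library:

```go
package main

import (
	"fmt"
	"time"
)

// timePhase mirrors the measurement pattern used in TestPerformanceMetrics:
// run one phase, then report its duration and throughput in items per second.
func timePhase(name string, items int, phase func() error) error {
	start := time.Now()
	if err := phase(); err != nil {
		return err
	}
	elapsed := time.Since(start)
	fmt.Printf("%s: %d items in %v (%.2f items/sec)\n", name, items, elapsed, float64(items)/elapsed.Seconds())
	return nil
}

func main() {
	// Dummy phase so the sketch runs on its own; in the tests the phase is
	// ag.SaveSnapshot or ag.RestoreSnapshot.
	_ = timePhase("noop", 1000, func() error {
		time.Sleep(10 * time.Millisecond)
		return nil
	})
}
```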
177
remote/client.go
Normal file
@@ -0,0 +1,177 @@
|
|||||||
|
package remote
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
stdgrpc "google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
|
|
||||||
|
agateGrpc "gitea.unprism.ru/KRBL/Agate/grpc"
|
||||||
|
"gitea.unprism.ru/KRBL/Agate/hash"
|
||||||
|
"gitea.unprism.ru/KRBL/Agate/interfaces"
|
||||||
|
"gitea.unprism.ru/KRBL/Agate/store"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Client представляет клиент для подключения к удаленному серверу снапшотов.
|
||||||
|
type Client struct {
|
||||||
|
conn *stdgrpc.ClientConn
|
||||||
|
client agateGrpc.SnapshotServiceClient
|
||||||
|
}
|
||||||
|
|
||||||
|
// Убедимся, что Client реализует интерфейс interfaces.SnapshotClient
|
||||||
|
var _ interfaces.SnapshotClient = (*Client)(nil)
|
||||||
|
|
||||||
|
// NewClient создает нового клиента, подключенного к указанному адресу.
|
||||||
|
func NewClient(address string) (*Client, error) {
|
||||||
|
conn, err := stdgrpc.Dial(address, stdgrpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to connect to server at %s: %w", address, err)
|
||||||
|
}
|
||||||
|
client := agateGrpc.NewSnapshotServiceClient(conn)
|
||||||
|
return &Client{conn: conn, client: client}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close закрывает соединение с сервером.
|
||||||
|
func (c *Client) Close() error {
|
||||||
|
if c.conn != nil {
|
||||||
|
return c.conn.Close()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListSnapshots получает список снапшотов с удаленного сервера.
|
||||||
|
func (c *Client) ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error) {
|
||||||
|
response, err := c.client.ListSnapshots(ctx, &agateGrpc.ListSnapshotsRequest{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to list snapshots: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshots := make([]store.SnapshotInfo, 0, len(response.Snapshots))
|
||||||
|
for _, snapshot := range response.Snapshots {
|
||||||
|
snapshots = append(snapshots, store.SnapshotInfo{
|
||||||
|
ID: snapshot.Id,
|
||||||
|
Name: snapshot.Name,
|
||||||
|
ParentID: snapshot.ParentId,
|
||||||
|
CreationTime: snapshot.CreationTime.AsTime(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return snapshots, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FetchSnapshotDetails получает детальную информацию о конкретном снапшоте.
|
||||||
|
func (c *Client) FetchSnapshotDetails(ctx context.Context, snapshotID string) (*store.Snapshot, error) {
|
||||||
|
response, err := c.client.GetSnapshotDetails(ctx, &agateGrpc.GetSnapshotDetailsRequest{
|
||||||
|
SnapshotId: snapshotID,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get snapshot details: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
```go
	snapshot := &store.Snapshot{
		ID:           response.Info.Id,
		Name:         response.Info.Name,
		ParentID:     response.Info.ParentId,
		CreationTime: response.Info.CreationTime.AsTime(),
		Files:        make([]store.FileInfo, 0, len(response.Files)),
	}

	for _, file := range response.Files {
		snapshot.Files = append(snapshot.Files, store.FileInfo{
			Path:   file.Path,
			Size:   file.SizeBytes,
			IsDir:  file.IsDir,
			SHA256: file.Sha256Hash,
		})
	}

	return snapshot, nil
}

// GetDiffInfo gets the hash and size of a differential archive.
func (c *Client) GetDiffInfo(ctx context.Context, snapshotID, localParentID string) (*store.DiffInfo, error) {
	req := &agateGrpc.GetDiffInfoRequest{
		SnapshotId:    snapshotID,
		LocalParentId: localParentID,
	}

	info, err := c.client.GetDiffInfo(ctx, req)
	if err != nil {
		return nil, fmt.Errorf("failed to get diff info: %w", err)
	}

	return &store.DiffInfo{
		SHA256: info.Sha256Hash,
		Size:   info.SizeBytes,
	}, nil
}

// DownloadSnapshotDiff downloads the archive containing the difference between snapshots.
func (c *Client) DownloadSnapshotDiff(ctx context.Context, snapshotID, localParentID, targetPath string) error {
	// Check for local file and validate it
	if fileInfo, err := os.Stat(targetPath); err == nil {
		remoteDiffInfo, err := c.GetDiffInfo(ctx, snapshotID, localParentID)
		if err != nil {
			// Log the error but proceed with download
			fmt.Printf("could not get remote diff info: %v. proceeding with download.", err)
		} else {
			if fileInfo.Size() == remoteDiffInfo.Size {
				localHash, err := hash.CalculateFileHash(targetPath)
				if err == nil && localHash == remoteDiffInfo.SHA256 {
					fmt.Printf("local snapshot archive %s is valid, skipping download.", targetPath)
					return nil // File is valid, skip download
				}
			}
		}
	}

	var offset int64
	fileInfo, err := os.Stat(targetPath)
	if err == nil {
		offset = fileInfo.Size()
	} else if !os.IsNotExist(err) {
		return fmt.Errorf("failed to stat temporary file: %w", err)
	}

	req := &agateGrpc.DownloadSnapshotDiffRequest{
		SnapshotId:    snapshotID,
		LocalParentId: localParentID,
		Offset:        offset,
	}

	stream, err := c.client.DownloadSnapshotDiff(ctx, req)
	if err != nil {
		return fmt.Errorf("failed to start snapshot diff download: %w", err)
	}

	if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil {
		return fmt.Errorf("failed to create directory for %s: %w", targetPath, err)
	}

	file, err := os.OpenFile(targetPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return fmt.Errorf("failed to open file %s: %w", targetPath, err)
	}
	defer func() {
		if err := file.Close(); err != nil {
			fmt.Printf("failed to close file: %v", err)
		}
	}()

	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("error receiving diff chunk: %w", err)
		}
		if _, err := file.Write(resp.ChunkData); err != nil {
			return fmt.Errorf("error writing to file: %w", err)
		}
	}

	return nil
}
```
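For orientation, a minimal client-side sketch of the resumable diff download above. It assumes the package is importable as `gitea.unprism.ru/KRBL/Agate/remote` and that the real client exposes a `Close` method like the mock in the test file below; the address, snapshot IDs, and target path are placeholders, not values from this change.

```go
package main

import (
	"context"
	"log"

	"gitea.unprism.ru/KRBL/Agate/remote"
)

func main() {
	ctx := context.Background()

	// Address is a placeholder for a running Agate snapshot server.
	client, err := remote.NewClient("localhost:50051")
	if err != nil {
		log.Fatalf("connect: %v", err)
	}
	defer client.Close() // assumed to exist, mirroring MockClient.Close

	// Ask the server for the expected size and hash of the differential archive.
	info, err := client.GetDiffInfo(ctx, "snapshot-id", "local-parent-id")
	if err != nil {
		log.Fatalf("diff info: %v", err)
	}
	log.Printf("expecting %d bytes (sha256 %s)", info.Size, info.SHA256)

	// Downloads to the target path, resuming from a partial file if one exists.
	if err := client.DownloadSnapshotDiff(ctx, "snapshot-id", "local-parent-id", "/tmp/agate/diff.zip"); err != nil {
		log.Fatalf("download: %v", err)
	}
}
```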
**remote/remote_test.go** (new file, 115 lines)
```go
package remote

import (
	"context"
	"os"
	"path/filepath"
	"testing"

	"gitea.unprism.ru/KRBL/Agate/store"
)

// TestClientConnect tests that the client can connect to a server
func TestClientConnect(t *testing.T) {
	// Skip this test in short mode
	if testing.Short() {
		t.Skip("Skipping remote test in short mode")
	}

	// This test requires a running server
	// For a real test, you would need to start a server
	// Here we'll just test the client creation
	_, err := NewClient("localhost:50051")
	if err != nil {
		// It's expected that this will fail if no server is running
		t.Logf("Failed to connect to server: %v", err)
	}
}

// TestMockClient tests the client functionality with a mock
func TestMockClient(t *testing.T) {
	// Create a mock client
	client := &MockClient{}

	// Test ListSnapshots
	snapshots, err := client.ListSnapshots(context.Background())
	if err != nil {
		t.Fatalf("MockClient.ListSnapshots failed: %v", err)
	}
	if len(snapshots) != 1 {
		t.Errorf("Expected 1 snapshot, got %d", len(snapshots))
	}

	// Test FetchSnapshotDetails
	snapshot, err := client.FetchSnapshotDetails(context.Background(), "mock-snapshot-id")
	if err != nil {
		t.Fatalf("MockClient.FetchSnapshotDetails failed: %v", err)
	}
	if snapshot.ID != "mock-snapshot-id" {
		t.Errorf("Expected snapshot ID 'mock-snapshot-id', got '%s'", snapshot.ID)
	}

	// Test DownloadSnapshot
	tempDir, err := os.MkdirTemp("", "agate-mock-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir)

	err = client.DownloadSnapshot(context.Background(), "mock-snapshot-id", tempDir, "")
	if err != nil {
		t.Fatalf("MockClient.DownloadSnapshot failed: %v", err)
	}

	// Check that the mock file was created
	mockFilePath := filepath.Join(tempDir, "mock-file.txt")
	if _, err := os.Stat(mockFilePath); os.IsNotExist(err) {
		t.Errorf("Mock file was not created")
	}
}

// MockClient is a mock implementation of the Client for testing
type MockClient struct{}

// ListSnapshots returns a mock list of snapshots
func (m *MockClient) ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error) {
	return []store.SnapshotInfo{
		{
			ID:       "mock-snapshot-id",
			Name:     "Mock Snapshot",
			ParentID: "",
		},
	}, nil
}

// FetchSnapshotDetails returns mock snapshot details
func (m *MockClient) FetchSnapshotDetails(ctx context.Context, snapshotID string) (*store.Snapshot, error) {
	return &store.Snapshot{
		ID:       snapshotID,
		Name:     "Mock Snapshot",
		ParentID: "",
		Files: []store.FileInfo{
			{
				Path:   "mock-file.txt",
				Size:   100,
				IsDir:  false,
				SHA256: "mock-hash",
			},
		},
	}, nil
}

// DownloadSnapshot simulates downloading a snapshot
func (m *MockClient) DownloadSnapshot(ctx context.Context, snapshotID string, targetDir string, localParentID string) error {
	// Create a mock file
	mockFilePath := filepath.Join(targetDir, "mock-file.txt")
	if err := os.MkdirAll(filepath.Dir(mockFilePath), 0755); err != nil {
		return err
	}
	return os.WriteFile(mockFilePath, []byte("Mock file content"), 0644)
}

// Close is a no-op for the mock client
func (m *MockClient) Close() error {
	return nil
}
```
**remote/server.go** (new file, 182 lines)
```go
package remote

import (
	"context"
	"fmt"
	"io"
	"net"

	stdgrpc "google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/timestamppb"

	agateGrpc "gitea.unprism.ru/KRBL/Agate/grpc"
	"gitea.unprism.ru/KRBL/Agate/interfaces"
	"gitea.unprism.ru/KRBL/Agate/store"
)

// Server implements the gRPC server for snapshots.
type Server struct {
	agateGrpc.UnimplementedSnapshotServiceServer
	manager interfaces.SnapshotManager
	server  *stdgrpc.Server
}

// NewServer creates a new snapshot server.
func NewServer(manager interfaces.SnapshotManager) *Server {
	return &Server{
		manager: manager,
	}
}

// Start launches the gRPC server on the specified address.
func (s *Server) Start(ctx context.Context, address string) error {
	lis, err := net.Listen("tcp", address)
	if err != nil {
		return fmt.Errorf("failed to listen on %s: %w", address, err)
	}

	s.server = stdgrpc.NewServer()
	agateGrpc.RegisterSnapshotServiceServer(s.server, s)

	go func() {
		if err := s.server.Serve(lis); err != nil {
			fmt.Printf("Server error: %v\n", err)
		}
	}()

	fmt.Printf("Server started on %s\n", address)

	// Wait for context cancellation to stop the server
	<-ctx.Done()
	s.Stop()
	return nil
}

// Stop gracefully stops the server.
func (s *Server) Stop() {
	if s.server != nil {
		s.server.GracefulStop()
		fmt.Println("Server stopped")
	}
}

// ListSnapshots implements the ListSnapshots gRPC method.
func (s *Server) ListSnapshots(ctx context.Context, req *agateGrpc.ListSnapshotsRequest) (*agateGrpc.ListSnapshotsResponse, error) {
	opts := store.ListOptions{}
	snapshots, err := s.manager.ListSnapshots(ctx, opts)
	if err != nil {
		return nil, fmt.Errorf("failed to list snapshots: %w", err)
	}

	response := &agateGrpc.ListSnapshotsResponse{
		Snapshots: make([]*agateGrpc.SnapshotInfo, 0, len(snapshots)),
	}

	for _, snapshot := range snapshots {
		response.Snapshots = append(response.Snapshots, convertToGrpcSnapshotInfo(snapshot))
	}

	return response, nil
}

// GetSnapshotDetails implements the GetSnapshotDetails gRPC method.
func (s *Server) GetSnapshotDetails(ctx context.Context, req *agateGrpc.GetSnapshotDetailsRequest) (*agateGrpc.SnapshotDetails, error) {
	snapshot, err := s.manager.GetSnapshotDetails(ctx, req.SnapshotId)
	if err != nil {
		return nil, fmt.Errorf("failed to get snapshot details: %w", err)
	}

	response := &agateGrpc.SnapshotDetails{
		Info: convertToGrpcSnapshotInfo(store.SnapshotInfo{
			ID:           snapshot.ID,
			Name:         snapshot.Name,
			ParentID:     snapshot.ParentID,
			CreationTime: snapshot.CreationTime,
		}),
		Files: make([]*agateGrpc.FileInfo, 0, len(snapshot.Files)),
	}

	for _, file := range snapshot.Files {
		response.Files = append(response.Files, &agateGrpc.FileInfo{
			Path:       file.Path,
			SizeBytes:  file.Size,
			Sha256Hash: file.SHA256,
			IsDir:      file.IsDir,
		})
	}

	return response, nil
}

// DownloadFile implements the DownloadFile gRPC method.
func (s *Server) DownloadFile(req *agateGrpc.DownloadFileRequest, stream agateGrpc.SnapshotService_DownloadFileServer) error {
	fileReader, err := s.manager.OpenFile(context.Background(), req.SnapshotId, req.FilePath)
	if err != nil {
		return fmt.Errorf("failed to open file: %w", err)
	}
	defer fileReader.Close()

	buffer := make([]byte, 64*1024)
	for {
		n, err := fileReader.Read(buffer)
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("failed to read file: %w", err)
		}
		if err := stream.Send(&agateGrpc.DownloadFileResponse{ChunkData: buffer[:n]}); err != nil {
			return fmt.Errorf("failed to send chunk: %w", err)
		}
	}
	return nil
}

// DownloadSnapshotDiff implements the DownloadSnapshotDiff gRPC method.
func (s *Server) DownloadSnapshotDiff(req *agateGrpc.DownloadSnapshotDiffRequest, stream agateGrpc.SnapshotService_DownloadSnapshotDiffServer) error {
	diffReader, err := s.manager.StreamSnapshotDiff(context.Background(), req.SnapshotId, req.LocalParentId, req.Offset)
	if err != nil {
		return fmt.Errorf("failed to stream snapshot diff: %w", err)
	}
	defer diffReader.Close()

	buffer := make([]byte, 64*1024)
	for {
		n, err := diffReader.Read(buffer)
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("failed to read from diff stream: %w", err)
		}
		if n > 0 {
			if err := stream.Send(&agateGrpc.DownloadFileResponse{ChunkData: buffer[:n]}); err != nil {
				return fmt.Errorf("failed to send diff chunk: %w", err)
			}
		}
	}
	return nil
}

// GetDiffInfo implements the GetDiffInfo gRPC method.
func (s *Server) GetDiffInfo(ctx context.Context, req *agateGrpc.GetDiffInfoRequest) (*agateGrpc.DiffInfo, error) {
	diffInfo, err := s.manager.GetSnapshotDiffInfo(ctx, req.SnapshotId, req.LocalParentId)
	if err != nil {
		return nil, fmt.Errorf("failed to get diff info: %w", err)
	}

	return &agateGrpc.DiffInfo{
		Sha256Hash: diffInfo.SHA256,
		SizeBytes:  diffInfo.Size,
	}, nil
}

// Helper function that converts store.SnapshotInfo to grpc.SnapshotInfo
func convertToGrpcSnapshotInfo(info store.SnapshotInfo) *agateGrpc.SnapshotInfo {
	return &agateGrpc.SnapshotInfo{
		Id:           info.ID,
		Name:         info.Name,
		ParentId:     info.ParentID,
		CreationTime: timestamppb.New(info.CreationTime),
	}
}
```
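A minimal sketch of wiring this server into an application; the `runServer` helper and its package name are illustrative only, while `NewServer` and `Start` come from the code above (`Start` blocks until the context is cancelled and then stops the server).

```go
// Package serverutil is a hypothetical wrapper used only for illustration.
package serverutil

import (
	"context"

	"gitea.unprism.ru/KRBL/Agate/interfaces"
	"gitea.unprism.ru/KRBL/Agate/remote"
)

// runServer starts the snapshot gRPC server on addr and blocks until ctx is
// cancelled; Start serves in a background goroutine and calls Stop on exit.
func runServer(ctx context.Context, manager interfaces.SnapshotManager, addr string) error {
	srv := remote.NewServer(manager)
	return srv.Start(ctx, addr)
}
```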
**snapshot.go** (deleted, 53 lines)
```go
package agate

import (
	"context"
	"io"
	"unprism.ru/KRBL/agate/store"
)

// SnapshotManager is an interface that defines operations for managing and interacting with snapshots.
type SnapshotManager interface {
	// CreateSnapshot creates a new snapshot from the specified source directory, associating it with a given name and parent ID.
	// Returns the created Snapshot with its metadata or an error if the process fails.
	CreateSnapshot(ctx context.Context, sourceDir string, name string, parentID string) (*store.Snapshot, error)

	// GetSnapshotDetails retrieves detailed metadata for a specific snapshot identified by its unique snapshotID.
	// Returns a Snapshot object containing metadata
	GetSnapshotDetails(ctx context.Context, snapshotID string) (*store.Snapshot, error)

	// ListSnapshots retrieves a list of all available snapshots, returning their basic information as SnapshotInfo.
	ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error)

	// DeleteSnapshot removes a snapshot identified by snapshotID. Returns an error if the snapshot does not exist or cannot be deleted.
	DeleteSnapshot(ctx context.Context, snapshotID string) error

	// OpenFile retrieves and opens a file from the specified snapshot, returning a readable stream and an error, if any.
	OpenFile(ctx context.Context, snapshotID string, filePath string) (io.ReadCloser, error)

	// ExtractSnapshot extracts the contents of a specified snapshot to a target directory at the given path.
	// Returns an error if the snapshot ID is invalid or the extraction fails.
	ExtractSnapshot(ctx context.Context, snapshotID string, path string) error

	// UpdateSnapshotMetadata updates the metadata of an existing snapshot, allowing changes to its name.
	UpdateSnapshotMetadata(ctx context.Context, snapshotID string, newName string) error
}

type SnapshotServer interface {
	// Start initializes and begins the server's operation, handling incoming requests or processes within the provided context.
	Start(ctx context.Context) error

	// Stop gracefully shuts down the server, releasing any allocated resources and ensuring all operations are completed.
	Stop(ctx context.Context) error
}

type SnapshotClient interface {
	// ListSnapshots retrieves a list of snapshots containing basic metadata, such as ID, name, parent ID, and creation time.
	ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error)

	// FetchSnapshotDetails retrieves detailed metadata about a specific snapshot identified by snapshotID.
	FetchSnapshotDetails(ctx context.Context, snapshotID string) (*store.Snapshot, error)

	// DownloadSnapshot retrieves the snapshot content for the given snapshotID and returns it as an io.ReadCloser.
	DownloadSnapshot(ctx context.Context, snapshotID string) (io.ReadCloser, error)
}
```
```diff
@@ -6,8 +6,9 @@ import (
 	"io"
 	"os"
 	"path/filepath"
-	"unprism.ru/KRBL/agate"
-	"unprism.ru/KRBL/agate/store"
+
+	"gitea.unprism.ru/KRBL/Agate/models"
+	"gitea.unprism.ru/KRBL/Agate/store"
 )
 
 const blobExtension = ".zip"
@@ -15,15 +16,26 @@ const blobExtension = ".zip"
 // fileSystemStore implements the store.BlobStore interface using the local filesystem.
 type fileSystemStore struct {
 	baseDir   string // Directory for storing blobs (archives)
+	activeDir string // Directory for active operations (creation and restore)
 }
 
 // NewFileSystemStore creates a new blob store in the specified directory.
 func NewFileSystemStore(baseDir string) (store.BlobStore, error) {
-	// Make sure the directory exists
+	// Make sure the base directory exists
 	if err := os.MkdirAll(baseDir, 0755); err != nil {
 		return nil, fmt.Errorf("failed to create base directory %s for filesystem blob store: %w", baseDir, err)
 	}
-	return &fileSystemStore{baseDir: baseDir}, nil
+
+	// Create the directory for active operations inside the base directory
+	activeDir := filepath.Join(baseDir, "active")
+	if err := os.MkdirAll(activeDir, 0755); err != nil {
+		return nil, fmt.Errorf("failed to create active directory %s for filesystem blob store: %w", activeDir, err)
+	}
+
+	return &fileSystemStore{
+		baseDir:   baseDir,
+		activeDir: activeDir,
+	}, nil
 }
 
 // getBlobPath builds the full path to a blob file.
@@ -64,7 +76,7 @@ func (fs *fileSystemStore) RetrieveBlob(ctx context.Context, snapshotID string)
 	if err != nil {
 		if os.IsNotExist(err) {
 			// If the file is not found, return the custom error
-			return nil, agate.ErrNotFound
+			return nil, models.ErrNotFound
 		}
 		return nil, fmt.Errorf("failed to open blob file %s: %w", blobPath, err)
 	}
@@ -98,7 +110,7 @@ func (fs *fileSystemStore) GetBlobPath(ctx context.Context, snapshotID string) (
 	// Check that the file exists
 	if _, err := os.Stat(blobPath); err != nil {
 		if os.IsNotExist(err) {
-			return "", agate.ErrNotFound
+			return "", models.ErrNotFound
 		}
 		return "", fmt.Errorf("failed to stat blob file %s: %w", blobPath, err)
 	}
@@ -106,3 +118,32 @@ func (fs *fileSystemStore) GetBlobPath(ctx context.Context, snapshotID string) (
 	// The file exists, return the path
 	return blobPath, nil
 }
+
+// GetBaseDir returns the path to the base storage directory.
+func (fs *fileSystemStore) GetBaseDir() string {
+	return fs.baseDir
+}
+
+// GetActiveDir returns the path to the directory for active operations.
+func (fs *fileSystemStore) GetActiveDir() string {
+	return fs.activeDir
+}
+
+// CleanActiveDir clears the directory for active operations.
+// This is useful before starting new operations to avoid conflicts.
+func (fs *fileSystemStore) CleanActiveDir(ctx context.Context) error {
+	// Remove all files in the active directory but keep the directory itself
+	entries, err := os.ReadDir(fs.activeDir)
+	if err != nil {
+		return fmt.Errorf("failed to read active directory: %w", err)
+	}
+
+	for _, entry := range entries {
+		path := filepath.Join(fs.activeDir, entry.Name())
+		if err := os.RemoveAll(path); err != nil {
+			return fmt.Errorf("failed to remove %s from active directory: %w", path, err)
+		}
+	}
+
+	return nil
+}
```
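The hunks above add a dedicated `active` working directory alongside the stored archives. A small illustrative sketch of how a caller might use it; the `prepareWorkDir` helper, its package name, and the `create` sub-directory are assumptions for the example, while `NewFileSystemStore`, `CleanActiveDir`, and `GetActiveDir` come from this change.

```go
// Package example is a hypothetical caller used only for illustration.
package example

import (
	"context"
	"fmt"
	"path/filepath"

	"gitea.unprism.ru/KRBL/Agate/store"
	"gitea.unprism.ru/KRBL/Agate/store/filesystem"
)

// prepareWorkDir opens the blob store and returns a clean scratch directory
// under <baseDir>/active for building the next archive.
func prepareWorkDir(ctx context.Context, baseDir string) (store.BlobStore, string, error) {
	blobs, err := filesystem.NewFileSystemStore(baseDir)
	if err != nil {
		return nil, "", err
	}
	// Drop leftovers from interrupted operations before starting a new one.
	if err := blobs.CleanActiveDir(ctx); err != nil {
		return nil, "", fmt.Errorf("clean active dir: %w", err)
	}
	// Work happens inside the active directory; finished archives live in baseDir.
	workDir := filepath.Join(blobs.GetActiveDir(), "create")
	return blobs, workDir, nil
}
```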
**store/filesystem/filesystem_test.go** (new file, 228 lines)
```go
package filesystem

import (
	"bytes"
	"context"
	"io"
	"os"
	"path/filepath"
	"testing"
)

func TestNewFileSystemStore(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	store, err := NewFileSystemStore(tempDir)
	if err != nil {
		t.Fatalf("Failed to create filesystem store: %v", err)
	}

	// Check that directories were created
	if _, err := os.Stat(tempDir); os.IsNotExist(err) {
		t.Fatalf("Base directory was not created")
	}

	// Check that the store's base directory matches the expected path
	if store.GetBaseDir() != tempDir {
		t.Fatalf("Store base directory does not match: got %s, want %s", store.GetBaseDir(), tempDir)
	}

	activeDir := filepath.Join(tempDir, "active")
	if _, err := os.Stat(activeDir); os.IsNotExist(err) {
		t.Fatalf("Active directory was not created")
	}

	// Check that the store's active directory matches the expected path
	if store.GetActiveDir() != activeDir {
		t.Fatalf("Store active directory does not match: got %s, want %s", store.GetActiveDir(), activeDir)
	}
}

func TestStoreAndRetrieveBlob(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	store, err := NewFileSystemStore(tempDir)
	if err != nil {
		t.Fatalf("Failed to create filesystem store: %v", err)
	}

	// Create test data
	testData := []byte("test data for blob")
	reader := bytes.NewReader(testData)
	ctx := context.Background()

	// Store the blob
	snapshotID := "test-snapshot-id"
	path, err := store.StoreBlob(ctx, snapshotID, reader)
	if err != nil {
		t.Fatalf("Failed to store blob: %v", err)
	}

	// Check that the file was created
	if _, err := os.Stat(path); os.IsNotExist(err) {
		t.Fatalf("Blob file was not created")
	}

	// Retrieve the blob
	blobReader, err := store.RetrieveBlob(ctx, snapshotID)
	if err != nil {
		t.Fatalf("Failed to retrieve blob: %v", err)
	}
	defer blobReader.Close()

	// Read the data
	retrievedData, err := io.ReadAll(blobReader)
	if err != nil {
		t.Fatalf("Failed to read blob data: %v", err)
	}

	// Check that the data matches
	if !bytes.Equal(testData, retrievedData) {
		t.Fatalf("Retrieved data does not match original data")
	}
}

func TestDeleteBlob(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	store, err := NewFileSystemStore(tempDir)
	if err != nil {
		t.Fatalf("Failed to create filesystem store: %v", err)
	}

	// Create test data
	testData := []byte("test data for blob")
	reader := bytes.NewReader(testData)
	ctx := context.Background()

	// Store the blob
	snapshotID := "test-snapshot-id"
	path, err := store.StoreBlob(ctx, snapshotID, reader)
	if err != nil {
		t.Fatalf("Failed to store blob: %v", err)
	}

	// Delete the blob
	err = store.DeleteBlob(ctx, snapshotID)
	if err != nil {
		t.Fatalf("Failed to delete blob: %v", err)
	}

	// Check that the file was deleted
	if _, err := os.Stat(path); !os.IsNotExist(err) {
		t.Fatalf("Blob file was not deleted")
	}

	// Deleting a non-existent blob should not return an error
	err = store.DeleteBlob(ctx, "non-existent-id")
	if err != nil {
		t.Fatalf("DeleteBlob returned an error for non-existent blob: %v", err)
	}
}

func TestGetBlobPath(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	store, err := NewFileSystemStore(tempDir)
	if err != nil {
		t.Fatalf("Failed to create filesystem store: %v", err)
	}

	// Create test data
	testData := []byte("test data for blob")
	reader := bytes.NewReader(testData)
	ctx := context.Background()

	// Store the blob
	snapshotID := "test-snapshot-id"
	expectedPath, err := store.StoreBlob(ctx, snapshotID, reader)
	if err != nil {
		t.Fatalf("Failed to store blob: %v", err)
	}

	// Get the blob path
	path, err := store.GetBlobPath(ctx, snapshotID)
	if err != nil {
		t.Fatalf("Failed to get blob path: %v", err)
	}

	// Check that the path matches
	if path != expectedPath {
		t.Fatalf("GetBlobPath returned incorrect path: got %s, want %s", path, expectedPath)
	}

	// Getting path for non-existent blob should return ErrNotFound
	_, err = store.GetBlobPath(ctx, "non-existent-id")
	if err == nil {
		t.Fatalf("GetBlobPath did not return an error for non-existent blob")
	}
}

func TestCleanActiveDir(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	store, err := NewFileSystemStore(tempDir)
	if err != nil {
		t.Fatalf("Failed to create filesystem store: %v", err)
	}

	// Get the active directory
	activeDir := store.GetActiveDir()

	// Create some test files in the active directory
	testFile1 := filepath.Join(activeDir, "test1.txt")
	testFile2 := filepath.Join(activeDir, "test2.txt")

	if err := os.WriteFile(testFile1, []byte("test1"), 0644); err != nil {
		t.Fatalf("Failed to create test file: %v", err)
	}
	if err := os.WriteFile(testFile2, []byte("test2"), 0644); err != nil {
		t.Fatalf("Failed to create test file: %v", err)
	}

	// Clean the active directory
	ctx := context.Background()
	err = store.CleanActiveDir(ctx)
	if err != nil {
		t.Fatalf("Failed to clean active directory: %v", err)
	}

	// Check that the files were deleted
	entries, err := os.ReadDir(activeDir)
	if err != nil {
		t.Fatalf("Failed to read active directory: %v", err)
	}
	if len(entries) > 0 {
		t.Fatalf("Active directory was not cleaned, %d files remain", len(entries))
	}
}
```
```diff
@@ -6,12 +6,13 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	_ "github.com/mattn/go-sqlite3"
 	"os"
 	"path/filepath"
 	"time"
-	"unprism.ru/KRBL/agate"
-	"unprism.ru/KRBL/agate/store"
+
+	"gitea.unprism.ru/KRBL/Agate/models"
+	"gitea.unprism.ru/KRBL/Agate/store"
+	_ "github.com/mattn/go-sqlite3"
 )
 
 const (
@@ -131,7 +132,7 @@ func (s *sqliteStore) GetSnapshotMetadata(ctx context.Context, snapshotID string
 	if err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
 			// If the record is not found, return the custom error
-			return nil, agate.ErrNotFound
+			return nil, models.ErrNotFound
 		}
 		return nil, fmt.Errorf("failed to query snapshot %s: %w", snapshotID, err)
 	}
@@ -168,58 +169,83 @@ func (s *sqliteStore) GetSnapshotMetadata(ctx context.Context, snapshotID string
 	return &snap, nil
 }
 
-// ListSnapshotsMetadata retrieves basic information about all snapshots.
-func (s *sqliteStore) ListSnapshotsMetadata(ctx context.Context) ([]store.SnapshotInfo, error) {
-	// Simplified implementation to debug the issue
-	fmt.Println("ListSnapshotsMetadata called")
+// ListSnapshotsMetadata retrieves basic information about snapshots with filtering and pagination.
+func (s *sqliteStore) ListSnapshotsMetadata(ctx context.Context, opts store.ListOptions) ([]store.SnapshotInfo, error) {
+	// Build the query with optional filtering
+	var query string
+	var args []interface{}
 
-	// Get all snapshot IDs first
-	query := `SELECT id FROM snapshots ORDER BY creation_time DESC;`
-	fmt.Println("Executing query:", query)
+	if opts.FilterByName != "" {
+		query = `SELECT id, name, parent_id, creation_time FROM snapshots WHERE name LIKE ? ORDER BY creation_time DESC`
+		args = append(args, "%"+opts.FilterByName+"%")
+	} else {
+		query = `SELECT id, name, parent_id, creation_time FROM snapshots ORDER BY creation_time DESC`
+	}
 
-	rows, err := s.db.QueryContext(ctx, query)
+	// Add pagination if specified
+	if opts.Limit > 0 {
+		query += " LIMIT ?"
+		args = append(args, opts.Limit)
+
+		if opts.Offset > 0 {
+			query += " OFFSET ?"
+			args = append(args, opts.Offset)
+		}
+	}
+
+	// Execute the query
+	rows, err := s.db.QueryContext(ctx, query, args...)
 	if err != nil {
-		return nil, fmt.Errorf("failed to query snapshot IDs: %w", err)
+		return nil, fmt.Errorf("failed to query snapshots: %w", err)
 	}
 	defer rows.Close()
 
 	var snapshots []store.SnapshotInfo
 
-	// For each ID, get the full snapshot details
+	// Iterate through the results
 	for rows.Next() {
-		var id string
-		if err := rows.Scan(&id); err != nil {
-			return nil, fmt.Errorf("failed to scan snapshot ID: %w", err)
+		var info store.SnapshotInfo
+		var parentID sql.NullString
+		var creationTimeStr string
+
+		if err := rows.Scan(&info.ID, &info.Name, &parentID, &creationTimeStr); err != nil {
+			return nil, fmt.Errorf("failed to scan snapshot row: %w", err)
 		}
 
-		// Get the full snapshot details
-		snapshot, err := s.GetSnapshotMetadata(ctx, id)
-		if err != nil {
-			return nil, fmt.Errorf("failed to get snapshot details for ID %s: %w", id, err)
+		// Set parent ID if not NULL
+		if parentID.Valid {
+			info.ParentID = parentID.String
 		}
 
-		// Convert to SnapshotInfo
-		info := store.SnapshotInfo{
-			ID:           snapshot.ID,
-			Name:         snapshot.Name,
-			ParentID:     snapshot.ParentID,
-			CreationTime: snapshot.CreationTime,
+		// Parse creation time
+		const sqliteLayout = "2006-01-02 15:04:05" // Standard SQLite DATETIME format without timezone
+		t, parseErr := time.Parse(sqliteLayout, creationTimeStr)
+		if parseErr != nil {
+			// Try format with milliseconds if the first one didn't work
+			const sqliteLayoutWithMs = "2006-01-02 15:04:05.999999999"
+			t, parseErr = time.Parse(sqliteLayoutWithMs, creationTimeStr)
+			if parseErr != nil {
+				// Try RFC3339 if saved as UTC().Format(time.RFC3339)
+				t, parseErr = time.Parse(time.RFC3339, creationTimeStr)
+				if parseErr != nil {
+					return nil, fmt.Errorf("failed to parse creation time '%s' for snapshot %s: %w", creationTimeStr, info.ID, parseErr)
+				}
+			}
 		}
+		info.CreationTime = t.UTC() // Store as UTC
 
 		snapshots = append(snapshots, info)
 	}
 
 	if err := rows.Err(); err != nil {
-		return nil, fmt.Errorf("error iterating snapshot IDs: %w", err)
+		return nil, fmt.Errorf("error iterating snapshot rows: %w", err)
 	}
 
 	// If no snapshots found, return an empty slice
 	if len(snapshots) == 0 {
-		fmt.Println("No snapshots found")
 		return []store.SnapshotInfo{}, nil
 	}
 
-	fmt.Printf("Found %d snapshots\n", len(snapshots))
 	return snapshots, nil
 }
 
@@ -241,3 +267,13 @@ func (s *sqliteStore) DeleteSnapshotMetadata(ctx context.Context, snapshotID str
 
 	return nil // Do not return an error if the record is not found
 }
+
+// UpdateSnapshotParentID updates the ParentID for the specified snapshot.
+func (s *sqliteStore) UpdateSnapshotParentID(ctx context.Context, snapshotID, newParentID string) error {
+	query := `UPDATE snapshots SET parent_id = ? WHERE id = ?;`
+	_, err := s.db.ExecContext(ctx, query, newParentID, snapshotID)
+	if err != nil {
+		return fmt.Errorf("failed to update parent ID for snapshot %s: %w", snapshotID, err)
+	}
+	return nil
+}
```
**store/sqlite/sqlite_test.go** (new file, 399 lines)
```go
package sqlite

import (
	"context"
	"os"
	"path/filepath"
	"testing"
	"time"

	"gitea.unprism.ru/KRBL/Agate/store"
)

func TestNewSQLiteStore(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	dbPath := filepath.Join(tempDir, "test.db")
	s, err := NewSQLiteStore(dbPath)
	if err != nil {
		t.Fatalf("Failed to create SQLite store: %v", err)
	}
	defer s.Close()

	// Check that the database file was created
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		t.Fatalf("Database file was not created")
	}
}

func TestSaveAndGetSnapshotMetadata(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	dbPath := filepath.Join(tempDir, "test.db")
	s, err := NewSQLiteStore(dbPath)
	if err != nil {
		t.Fatalf("Failed to create SQLite store: %v", err)
	}
	defer s.Close()

	// Create a test snapshot
	now := time.Now().UTC().Truncate(time.Second) // SQLite doesn't store nanoseconds
	testSnapshot := store.Snapshot{
		ID:           "test-snapshot-id",
		Name:         "Test Snapshot",
		ParentID:     "parent-snapshot-id",
		CreationTime: now,
		Files: []store.FileInfo{
			{
				Path:   "/test/file1.txt",
				Size:   100,
				IsDir:  false,
				SHA256: "hash1",
			},
			{
				Path:   "/test/dir1",
				Size:   0,
				IsDir:  true,
				SHA256: "",
			},
		},
	}

	// Save the snapshot
	ctx := context.Background()
	err = s.SaveSnapshotMetadata(ctx, testSnapshot)
	if err != nil {
		t.Fatalf("Failed to save snapshot metadata: %v", err)
	}

	// Retrieve the snapshot
	retrievedSnapshot, err := s.GetSnapshotMetadata(ctx, testSnapshot.ID)
	if err != nil {
		t.Fatalf("Failed to retrieve snapshot metadata: %v", err)
	}

	// Check that the retrieved snapshot matches the original
	if retrievedSnapshot.ID != testSnapshot.ID {
		t.Errorf("Retrieved snapshot ID does not match: got %s, want %s", retrievedSnapshot.ID, testSnapshot.ID)
	}
	if retrievedSnapshot.Name != testSnapshot.Name {
		t.Errorf("Retrieved snapshot name does not match: got %s, want %s", retrievedSnapshot.Name, testSnapshot.Name)
	}
	if retrievedSnapshot.ParentID != testSnapshot.ParentID {
		t.Errorf("Retrieved snapshot parent ID does not match: got %s, want %s", retrievedSnapshot.ParentID, testSnapshot.ParentID)
	}
	if !retrievedSnapshot.CreationTime.Equal(testSnapshot.CreationTime) {
		t.Errorf("Retrieved snapshot creation time does not match: got %v, want %v", retrievedSnapshot.CreationTime, testSnapshot.CreationTime)
	}
	if len(retrievedSnapshot.Files) != len(testSnapshot.Files) {
		t.Errorf("Retrieved snapshot has wrong number of files: got %d, want %d", len(retrievedSnapshot.Files), len(testSnapshot.Files))
	}
}

func TestListSnapshotsMetadata(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	dbPath := filepath.Join(tempDir, "test.db")
	s, err := NewSQLiteStore(dbPath)
	if err != nil {
		t.Fatalf("Failed to create SQLite store: %v", err)
	}
	defer s.Close()

	// Create test snapshots
	ctx := context.Background()
	now := time.Now().UTC().Truncate(time.Second)

	testSnapshots := []store.Snapshot{
		{
			ID:           "snapshot-1",
			Name:         "Snapshot 1",
			ParentID:     "",
			CreationTime: now.Add(-2 * time.Hour),
			Files:        []store.FileInfo{},
		},
		{
			ID:           "snapshot-2",
			Name:         "Snapshot 2",
			ParentID:     "snapshot-1",
			CreationTime: now.Add(-1 * time.Hour),
			Files:        []store.FileInfo{},
		},
		{
			ID:           "snapshot-3",
			Name:         "Snapshot 3",
			ParentID:     "snapshot-2",
			CreationTime: now,
			Files:        []store.FileInfo{},
		},
	}

	// Save the snapshots
	for _, snap := range testSnapshots {
		err = s.SaveSnapshotMetadata(ctx, snap)
		if err != nil {
			t.Fatalf("Failed to save snapshot metadata: %v", err)
		}
	}

	// List the snapshots with empty options
	snapshots, err := s.ListSnapshotsMetadata(ctx, store.ListOptions{})
	if err != nil {
		t.Fatalf("Failed to list snapshots: %v", err)
	}

	// Check that all snapshots are listed
	if len(snapshots) != len(testSnapshots) {
		t.Errorf("Wrong number of snapshots listed: got %d, want %d", len(snapshots), len(testSnapshots))
	}

	// Check that the snapshots have the correct information
	for i, snap := range testSnapshots {
		found := false
		for _, listedSnap := range snapshots {
			if listedSnap.ID == snap.ID {
				found = true
				if listedSnap.Name != snap.Name {
					t.Errorf("Snapshot %d has wrong name: got %s, want %s", i, listedSnap.Name, snap.Name)
				}
				if listedSnap.ParentID != snap.ParentID {
					t.Errorf("Snapshot %d has wrong parent ID: got %s, want %s", i, listedSnap.ParentID, snap.ParentID)
				}
				if !listedSnap.CreationTime.Equal(snap.CreationTime) {
					t.Errorf("Snapshot %d has wrong creation time: got %v, want %v", i, listedSnap.CreationTime, snap.CreationTime)
				}
				break
			}
		}
		if !found {
			t.Errorf("Snapshot %d (%s) not found in listed snapshots", i, snap.ID)
		}
	}
}

func TestListSnapshotsMetadata_WithOptions(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	dbPath := filepath.Join(tempDir, "test.db")
	s, err := NewSQLiteStore(dbPath)
	if err != nil {
		t.Fatalf("Failed to create SQLite store: %v", err)
	}
	defer s.Close()

	// Create test snapshots with different names
	ctx := context.Background()
	now := time.Now().UTC().Truncate(time.Second)

	testSnapshots := []store.Snapshot{
		{
			ID:           "alpha-1",
			Name:         "alpha-1",
			ParentID:     "",
			CreationTime: now.Add(-3 * time.Hour),
			Files:        []store.FileInfo{},
		},
		{
			ID:           "alpha-2",
			Name:         "alpha-2",
			ParentID:     "alpha-1",
			CreationTime: now.Add(-2 * time.Hour),
			Files:        []store.FileInfo{},
		},
		{
			ID:           "beta-1",
			Name:         "beta-1",
			ParentID:     "",
			CreationTime: now.Add(-1 * time.Hour),
			Files:        []store.FileInfo{},
		},
	}

	// Save the snapshots
	for _, snap := range testSnapshots {
		err = s.SaveSnapshotMetadata(ctx, snap)
		if err != nil {
			t.Fatalf("Failed to save snapshot metadata: %v", err)
		}
	}

	// Test different ListOptions scenarios
	t.Run("FilterByName", func(t *testing.T) {
		// Filter snapshots by name "alpha"
		opts := store.ListOptions{
			FilterByName: "alpha",
		}
		snapshots, err := s.ListSnapshotsMetadata(ctx, opts)
		if err != nil {
			t.Fatalf("Failed to list snapshots with filter: %v", err)
		}

		// Should return 2 snapshots (alpha-1 and alpha-2)
		if len(snapshots) != 2 {
			t.Errorf("Wrong number of snapshots returned: got %d, want %d", len(snapshots), 2)
		}

		// Check that only alpha snapshots are returned
		for _, snap := range snapshots {
			if snap.ID != "alpha-1" && snap.ID != "alpha-2" {
				t.Errorf("Unexpected snapshot ID in filtered results: %s", snap.ID)
			}
		}
	})

	t.Run("Limit", func(t *testing.T) {
		// Limit to 1 snapshot (should return the newest one)
		opts := store.ListOptions{
			Limit: 1,
		}
		snapshots, err := s.ListSnapshotsMetadata(ctx, opts)
		if err != nil {
			t.Fatalf("Failed to list snapshots with limit: %v", err)
		}

		// Should return 1 snapshot
		if len(snapshots) != 1 {
			t.Errorf("Wrong number of snapshots returned: got %d, want %d", len(snapshots), 1)
		}

		// The newest snapshot should be beta-1
		if snapshots[0].ID != "beta-1" {
			t.Errorf("Wrong snapshot returned with limit: got %s, want %s", snapshots[0].ID, "beta-1")
		}
	})

	t.Run("Offset", func(t *testing.T) {
		// Limit to 1 snapshot with offset 1 (should return the second newest)
		opts := store.ListOptions{
			Limit:  1,
			Offset: 1,
		}
		snapshots, err := s.ListSnapshotsMetadata(ctx, opts)
		if err != nil {
			t.Fatalf("Failed to list snapshots with offset: %v", err)
		}

		// Should return 1 snapshot
		if len(snapshots) != 1 {
			t.Errorf("Wrong number of snapshots returned: got %d, want %d", len(snapshots), 1)
		}

		// The second newest snapshot should be alpha-2
		if snapshots[0].ID != "alpha-2" {
			t.Errorf("Wrong snapshot returned with offset: got %s, want %s", snapshots[0].ID, "alpha-2")
		}
	})

	t.Run("FilterAndPagination", func(t *testing.T) {
		// Filter by "alpha" with limit 1
		opts := store.ListOptions{
			FilterByName: "alpha",
			Limit:        1,
		}
		snapshots, err := s.ListSnapshotsMetadata(ctx, opts)
		if err != nil {
			t.Fatalf("Failed to list snapshots with filter and pagination: %v", err)
		}

		// Should return 1 snapshot
		if len(snapshots) != 1 {
			t.Errorf("Wrong number of snapshots returned: got %d, want %d", len(snapshots), 1)
		}

		// The newest alpha snapshot should be alpha-2
		if snapshots[0].ID != "alpha-2" {
			t.Errorf("Wrong snapshot returned with filter and limit: got %s, want %s", snapshots[0].ID, "alpha-2")
		}
	})

	t.Run("NoResults", func(t *testing.T) {
		// Filter by a name that doesn't exist
		opts := store.ListOptions{
			FilterByName: "gamma",
		}
		snapshots, err := s.ListSnapshotsMetadata(ctx, opts)
		if err != nil {
			t.Fatalf("Failed to list snapshots with non-matching filter: %v", err)
		}

		// Should return 0 snapshots
		if len(snapshots) != 0 {
			t.Errorf("Expected 0 snapshots, got %d", len(snapshots))
		}
	})
}

func TestDeleteSnapshotMetadata(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	dbPath := filepath.Join(tempDir, "test.db")
	s, err := NewSQLiteStore(dbPath)
	if err != nil {
		t.Fatalf("Failed to create SQLite store: %v", err)
	}
	defer s.Close()

	// Create a test snapshot
	ctx := context.Background()
	testSnapshot := store.Snapshot{
		ID:           "test-snapshot-id",
		Name:         "Test Snapshot",
		ParentID:     "",
		CreationTime: time.Now().UTC().Truncate(time.Second),
		Files:        []store.FileInfo{},
	}

	// Save the snapshot
	err = s.SaveSnapshotMetadata(ctx, testSnapshot)
	if err != nil {
		t.Fatalf("Failed to save snapshot metadata: %v", err)
	}

	// Delete the snapshot
	err = s.DeleteSnapshotMetadata(ctx, testSnapshot.ID)
	if err != nil {
		t.Fatalf("Failed to delete snapshot metadata: %v", err)
	}

	// Try to retrieve the deleted snapshot
	_, err = s.GetSnapshotMetadata(ctx, testSnapshot.ID)
	if err == nil {
		t.Fatalf("Expected error when retrieving deleted snapshot, got nil")
	}

	// Deleting a non-existent snapshot should not return an error
	err = s.DeleteSnapshotMetadata(ctx, "non-existent-id")
	if err != nil {
		t.Fatalf("DeleteSnapshotMetadata returned an error for non-existent snapshot: %v", err)
	}
}
```
```diff
@@ -6,6 +6,12 @@ import (
 	"time"
 )
 
+// DiffInfo represents metadata about a differential archive.
+type DiffInfo struct {
+	SHA256 string
+	Size   int64
+}
+
 // FileInfo represents metadata and attributes of a file or directory.
 type FileInfo struct {
 	Path string // Path represents the relative or absolute location of the file or directory in the filesystem.
@@ -31,6 +37,21 @@ type SnapshotInfo struct {
 	CreationTime time.Time // Creation time
 }
 
+// SnapshotStatus represents the status of an asynchronous snapshot creation process.
+type SnapshotStatus struct {
+	ID       string  // Unique identifier of the job (usually same as Snapshot ID)
+	Status   string  // Current status: "pending", "running", "done", "failed"
+	Progress float64 // Progress from 0.0 to 1.0
+	Error    string  // Error message if failed
+}
+
+// ListOptions provides options for filtering and paginating snapshot lists
+type ListOptions struct {
+	FilterByName string // Filter snapshots by name (substring match)
+	Limit        int    // Maximum number of snapshots to return
+	Offset       int    // Number of snapshots to skip
+}
+
 // MetadataStore defines the interface for storing and retrieving snapshot metadata.
 type MetadataStore interface {
 	// SaveSnapshotMetadata saves the full snapshot metadata, including the file list.
@@ -41,13 +62,16 @@ type MetadataStore interface {
 	// Returns agate.ErrNotFound if the snapshot is not found.
 	GetSnapshotMetadata(ctx context.Context, snapshotID string) (*Snapshot, error)
 
-	// ListSnapshotsMetadata retrieves basic information about all snapshots.
-	ListSnapshotsMetadata(ctx context.Context) ([]SnapshotInfo, error)
+	// ListSnapshotsMetadata retrieves basic information about snapshots with filtering and pagination.
+	ListSnapshotsMetadata(ctx context.Context, opts ListOptions) ([]SnapshotInfo, error)
 
 	// DeleteSnapshotMetadata deletes snapshot metadata by its ID.
 	// Must not return an error if the snapshot is not found.
 	DeleteSnapshotMetadata(ctx context.Context, snapshotID string) error
 
+	// UpdateSnapshotParentID updates the ParentID of the specified snapshot.
+	UpdateSnapshotParentID(ctx context.Context, snapshotID, newParentID string) error
+
 	// Close closes the connection to the metadata store.
 	Close() error
 }
@@ -71,4 +95,14 @@ type BlobStore interface {
 	// This can be useful for archive package functions that work with paths.
 	// Returns agate.ErrNotFound if the blob is not found.
 	GetBlobPath(ctx context.Context, snapshotID string) (string, error)
+
+	// GetBaseDir returns the path to the base directory.
+	GetBaseDir() string
+
+	// GetActiveDir returns the path to the directory for active operations.
+	GetActiveDir() string
+
+	// CleanActiveDir clears the directory for active operations.
+	// This is useful before starting new operations to avoid conflicts.
+	CleanActiveDir(ctx context.Context) error
 }
```
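A brief illustrative sketch of how the new `ListOptions` flow through a `MetadataStore`. The `listRecent` helper and its package name are hypothetical; `NewSQLiteStore`, `ListSnapshotsMetadata`, and the option fields come from the changes and tests above.

```go
// Package example is a hypothetical caller used only for illustration.
package example

import (
	"context"
	"fmt"

	"gitea.unprism.ru/KRBL/Agate/store"
	"gitea.unprism.ru/KRBL/Agate/store/sqlite"
)

// listRecent prints up to limit snapshots whose names contain name,
// newest first, skipping the first offset matches.
func listRecent(ctx context.Context, dbPath, name string, limit, offset int) error {
	meta, err := sqlite.NewSQLiteStore(dbPath)
	if err != nil {
		return err
	}
	defer meta.Close()

	infos, err := meta.ListSnapshotsMetadata(ctx, store.ListOptions{
		FilterByName: name,
		Limit:        limit,
		Offset:       offset,
	})
	if err != nil {
		return err
	}
	for _, info := range infos {
		fmt.Printf("%s\t%s\t%v\n", info.ID, info.Name, info.CreationTime)
	}
	return nil
}
```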
```diff
@@ -4,9 +4,9 @@ import (
 	"fmt"
 	"path/filepath"
 
-	"unprism.ru/KRBL/agate/store"
-	"unprism.ru/KRBL/agate/store/filesystem"
-	"unprism.ru/KRBL/agate/store/sqlite"
+	"gitea.unprism.ru/KRBL/Agate/store"
+	"gitea.unprism.ru/KRBL/Agate/store/filesystem"
+	"gitea.unprism.ru/KRBL/Agate/store/sqlite"
 )
 
 // NewDefaultMetadataStore creates a new SQLite-based metadata store.
```