Compare commits

17 commits: main...v0.3.0-alp

aaf227f25a
efa2bec38b
8fe593bb6f
223a63ee6d
19c02d3573
1f837afadc
3489703ba7
0a9285c05e
b14a04c163
047e8d2df0
65b1daa52c
fb2ae39b47
3efa753394
f7c1e461e6
b05058b5cd
9d04f43104
13744f0500
.gitignore (vendored) — 2 lines changed

@@ -2,3 +2,5 @@ grpc/google
grpc/grafeas

.idea

coverage.*
Makefile — 37 lines changed

@@ -6,7 +6,7 @@ download-third-party:
	mv ./grpc/third_party/googleapis/grafeas ./grpc
	rm -rf ./grpc/third_party

gen-proto-geolocation:
gen-proto:
	mkdir -p ./grpc

	@protoc -I ./grpc \
@@ -14,3 +14,38 @@ gen-proto-geolocation:
	--go-grpc_out=grpc --go-grpc_opt paths=source_relative \
	--grpc-gateway_out=grpc --grpc-gateway_opt paths=source_relative \
	./grpc/snapshot.proto

# Run all tests
test:
	go test -v ./...

# Run unit tests
test-unit:
	go test -v ./store/... ./hash/... ./archive/...

# Run integration tests
test-integration:
	go test -v -tags=integration ./...

# Run functional tests
test-functional:
	go test -v -run TestFull ./...

# Run performance tests
test-performance:
	go test -v -run TestPerformanceMetrics ./...
	go test -v -bench=. ./...

# Run tests with code coverage
test-coverage:
	go test -v -coverprofile=coverage.out ./...
	go tool cover -html=coverage.out -o coverage.html

# Run linter
lint:
	golangci-lint run

# Run all checks (tests + linter)
check: test lint

.PHONY: download-third-party gen-proto test test-unit test-integration test-functional test-performance test-coverage lint check
README.md — new file, 343 lines

@@ -0,0 +1,343 @@
# Agate

Agate is a Go library for creating, managing, and sharing snapshots of directories. It provides functionality for creating incremental snapshots, storing them efficiently, and sharing them over a network using gRPC.

## Installation

```bash
go get gitea.unprism.ru/KRBL/Agate
```

## Features

- Create snapshots of directories
- Incremental snapshots (only store changes)
- Restore snapshots
- List and manage snapshots
- Share snapshots over a network using gRPC
- Connect to remote snapshot repositories

## Basic Usage

### Creating a Snapshot Repository

To create a snapshot repository, initialize the Agate library with the appropriate options:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"gitea.unprism.ru/KRBL/Agate"
	"gitea.unprism.ru/KRBL/Agate/stores"
)

func main() {
	// Create directories for your repository
	workDir := "/path/to/your/repository"
	if err := os.MkdirAll(workDir, 0755); err != nil {
		log.Fatalf("Failed to create work directory: %v", err)
	}

	// Initialize the default stores
	metadataStore, blobStore, err := stores.InitDefaultStores(workDir)
	if err != nil {
		log.Fatalf("Failed to initialize stores: %v", err)
	}
	defer metadataStore.Close()

	// Initialize Agate
	agateOptions := agate.AgateOptions{
		WorkDir:       workDir,
		MetadataStore: metadataStore,
		BlobStore:     blobStore,
	}

	ag, err := agate.New(agateOptions)
	if err != nil {
		log.Fatalf("Failed to initialize Agate: %v", err)
	}
	defer ag.Close()

	// Create a snapshot
	ctx := context.Background()
	snapshotID, err := ag.SaveSnapshot(ctx, "My First Snapshot", "")
	if err != nil {
		log.Fatalf("Failed to create snapshot: %v", err)
	}
	fmt.Printf("Created snapshot with ID: %s\n", snapshotID)

	// List snapshots
	snapshots, err := ag.ListSnapshots(ctx)
	if err != nil {
		log.Fatalf("Failed to list snapshots: %v", err)
	}
	fmt.Printf("Found %d snapshots:\n", len(snapshots))
	for _, s := range snapshots {
		fmt.Printf(" - %s: %s (created at %s)\n", s.ID, s.Name, s.CreationTime.Format("2006-01-02 15:04:05"))
	}
}
```

### Hosting a Snapshot Repository

To host a snapshot repository and make it available over the network, use the `StartServer` method:

```go
package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"syscall"

	"gitea.unprism.ru/KRBL/Agate"
	"gitea.unprism.ru/KRBL/Agate/stores"
)

func main() {
	// Create directories for your repository
	workDir := "/path/to/your/repository"
	if err := os.MkdirAll(workDir, 0755); err != nil {
		log.Fatalf("Failed to create work directory: %v", err)
	}

	// Initialize the default stores
	metadataStore, blobStore, err := stores.InitDefaultStores(workDir)
	if err != nil {
		log.Fatalf("Failed to initialize stores: %v", err)
	}
	defer metadataStore.Close()

	// Initialize Agate
	agateOptions := agate.AgateOptions{
		WorkDir:       workDir,
		MetadataStore: metadataStore,
		BlobStore:     blobStore,
	}

	ag, err := agate.New(agateOptions)
	if err != nil {
		log.Fatalf("Failed to initialize Agate: %v", err)
	}
	defer ag.Close()

	// Start the gRPC server
	ctx := context.Background()
	address := "0.0.0.0:50051" // Listen on all interfaces, port 50051
	if err := ag.StartServer(ctx, address); err != nil {
		log.Fatalf("Failed to start server: %v", err)
	}

	log.Printf("Server started on %s", address)

	// Wait for termination signal
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
	<-sigCh

	log.Println("Shutting down...")
}
```

### Connecting to a Hosted Snapshot Repository

To connect to a hosted snapshot repository and retrieve snapshots:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"gitea.unprism.ru/KRBL/Agate"
	"gitea.unprism.ru/KRBL/Agate/stores"
)

func main() {
	// Create directories for your local repository
	workDir := "/path/to/your/local/repository"
	if err := os.MkdirAll(workDir, 0755); err != nil {
		log.Fatalf("Failed to create work directory: %v", err)
	}

	// Initialize the default stores
	metadataStore, blobStore, err := stores.InitDefaultStores(workDir)
	if err != nil {
		log.Fatalf("Failed to initialize stores: %v", err)
	}
	defer metadataStore.Close()

	// Initialize Agate
	agateOptions := agate.AgateOptions{
		WorkDir:       workDir,
		MetadataStore: metadataStore,
		BlobStore:     blobStore,
	}

	ag, err := agate.New(agateOptions)
	if err != nil {
		log.Fatalf("Failed to initialize Agate: %v", err)
	}
	defer ag.Close()

	// Connect to a remote server
	ctx := context.Background()
	remoteAddress := "remote-server:50051"

	// List snapshots from the remote server
	snapshots, err := ag.GetRemoteSnapshotList(ctx, remoteAddress)
	if err != nil {
		log.Fatalf("Failed to list remote snapshots: %v", err)
	}

	fmt.Printf("Found %d remote snapshots:\n", len(snapshots))
	for _, s := range snapshots {
		fmt.Printf(" - %s: %s (created at %s)\n", s.ID, s.Name, s.CreationTime.Format("2006-01-02 15:04:05"))
	}

	// Download a specific snapshot
	if len(snapshots) > 0 {
		snapshotID := snapshots[0].ID
		fmt.Printf("Downloading snapshot %s...\n", snapshotID)

		// Download the snapshot (pass empty string as localParentID if this is the first download)
		if err := ag.GetRemoteSnapshot(ctx, remoteAddress, snapshotID, ""); err != nil {
			log.Fatalf("Failed to download snapshot: %v", err)
		}

		fmt.Printf("Successfully downloaded snapshot %s\n", snapshotID)
	}
}
```

## Advanced Usage

### Registering a Local Snapshot

You can register a local snapshot from an existing archive file with a specified UUID:

```go
// Register a local snapshot from an archive file
archivePath := "/path/to/your/archive.zip"
snapshotID := "custom-uuid-for-snapshot"
snapshotName := "My Local Snapshot"

if err := ag.RegisterLocalSnapshot(ctx, archivePath, snapshotID, snapshotName); err != nil {
	log.Fatalf("Failed to register local snapshot: %v", err)
}
```

### Downloading Only Snapshot Metadata

You can download only the metadata of a snapshot from a remote server without downloading the actual files:

```go
// Download only the metadata of a snapshot from a remote server
remoteAddress := "remote-server:50051"
snapshotID := "snapshot-id-to-download"

if err := ag.GetRemoteSnapshotMetadata(ctx, remoteAddress, snapshotID); err != nil {
	log.Fatalf("Failed to download snapshot metadata: %v", err)
}

// If you have a local blob but missing metadata, you can restore the metadata
// by passing an empty address
if err := ag.GetRemoteSnapshotMetadata(ctx, "", snapshotID); err != nil {
	log.Fatalf("Failed to restore snapshot metadata: %v", err)
}
```

### Creating Incremental Snapshots

You can create incremental snapshots by specifying a parent snapshot ID:

```go
// Create a first snapshot
snapshotID1, err := ag.SaveSnapshot(ctx, "First Snapshot", "")
if err != nil {
	log.Fatalf("Failed to create first snapshot: %v", err)
}

// Make some changes to your files...

// Create a second snapshot with the first one as parent
snapshotID2, err := ag.SaveSnapshot(ctx, "Second Snapshot", snapshotID1)
if err != nil {
	log.Fatalf("Failed to create second snapshot: %v", err)
}
```

### Restoring a Snapshot

To restore a snapshot:

```go
if err := ag.RestoreSnapshot(ctx, snapshotID); err != nil {
	log.Fatalf("Failed to restore snapshot: %v", err)
}
```

### Getting Snapshot Details

To get detailed information about a snapshot:

```go
snapshot, err := ag.GetSnapshotDetails(ctx, snapshotID)
if err != nil {
	log.Fatalf("Failed to get snapshot details: %v", err)
}

fmt.Printf("Snapshot: %s\n", snapshot.Name)
fmt.Printf("Created: %s\n", snapshot.CreationTime.Format("2006-01-02 15:04:05"))
fmt.Printf("Files: %d\n", len(snapshot.Files))
```

### Deleting a Snapshot

To delete a snapshot:

```go
if err := ag.DeleteSnapshot(ctx, snapshotID); err != nil {
	log.Fatalf("Failed to delete snapshot: %v", err)
}
```

## API Reference

### Agate

The main entry point for the library.

- `New(options AgateOptions) (*Agate, error)` - Create a new Agate instance
- `SaveSnapshot(ctx context.Context, name string, parentID string) (string, error)` - Create a new snapshot
- `RestoreSnapshot(ctx context.Context, snapshotID string) error` - Restore a snapshot
- `ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error)` - List all snapshots
- `GetSnapshotDetails(ctx context.Context, snapshotID string) (*store.Snapshot, error)` - Get details of a snapshot
- `DeleteSnapshot(ctx context.Context, snapshotID string) error` - Delete a snapshot
- `StartServer(ctx context.Context, address string) error` - Start a gRPC server to share snapshots
- `ConnectRemote(address string) (*grpc.SnapshotClient, error)` - Connect to a remote server
- `GetRemoteSnapshotList(ctx context.Context, address string) ([]store.SnapshotInfo, error)` - List snapshots from a remote server
- `GetRemoteSnapshot(ctx context.Context, address string, snapshotID string, localParentID string) error` - Download a snapshot from a remote server
- `RegisterLocalSnapshot(ctx context.Context, archivePath string, snapshotID string, name string) error` - Register a local snapshot from an archive path with a specified UUID
- `GetRemoteSnapshotMetadata(ctx context.Context, address string, snapshotID string) error` - Download only the metadata of a snapshot from a remote server

### AgateOptions

Configuration options for the Agate library.

- `WorkDir string` - Directory where snapshots will be stored and managed
- `OpenFunc func(dir string) error` - Called after a snapshot is restored
- `CloseFunc func() error` - Called before a snapshot is created or restored
- `MetadataStore store.MetadataStore` - Implementation of the metadata store
- `BlobStore store.BlobStore` - Implementation of the blob store
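The `OpenFunc` and `CloseFunc` hooks are how an application releases and re-acquires its resources around snapshot operations, but the README never shows them doing real work. The sketch below is illustrative only: the `state.db` file and the surrounding wiring are hypothetical, and it simply assumes the hook semantics documented above (CloseFunc before a snapshot is created or restored, OpenFunc afterwards with the active directory).

```go
package main

import (
	"context"
	"log"
	"os"
	"path/filepath"

	"gitea.unprism.ru/KRBL/Agate"
	"gitea.unprism.ru/KRBL/Agate/stores"
)

func main() {
	workDir := "/path/to/your/repository"

	metadataStore, blobStore, err := stores.InitDefaultStores(workDir)
	if err != nil {
		log.Fatalf("Failed to initialize stores: %v", err)
	}
	defer metadataStore.Close()

	// Hypothetical resource that must not be held open while a snapshot is taken.
	var stateFile *os.File

	ag, err := agate.New(agate.AgateOptions{
		WorkDir:       workDir,
		MetadataStore: metadataStore,
		BlobStore:     blobStore,
		// CloseFunc runs before a snapshot is created or restored.
		CloseFunc: func() error {
			if stateFile != nil {
				return stateFile.Close()
			}
			return nil
		},
		// OpenFunc runs after a restore, receiving the directory to reopen resources in.
		OpenFunc: func(dir string) error {
			f, err := os.OpenFile(filepath.Join(dir, "state.db"), os.O_RDWR|os.O_CREATE, 0644)
			if err != nil {
				return err
			}
			stateFile = f
			return nil
		},
	})
	if err != nil {
		log.Fatalf("Failed to initialize Agate: %v", err)
	}
	defer ag.Close()

	if _, err := ag.SaveSnapshot(context.Background(), "With hooks", ""); err != nil {
		log.Fatalf("Failed to create snapshot: %v", err)
	}
}
```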
api.go — 330 lines changed

@@ -4,10 +4,17 @@ import (
	"context"
	"errors"
	"fmt"
	"gitea.unprism.ru/KRBL/Agate/archive"
	"gitea.unprism.ru/KRBL/Agate/interfaces"
	"gitea.unprism.ru/KRBL/Agate/remote"
	"io"
	"log"
	"os"
	"path/filepath"
	"sync"

	"unprism.ru/KRBL/agate/store"
	"gitea.unprism.ru/KRBL/Agate/store"
	"gitea.unprism.ru/KRBL/Agate/stores"
)

// AgateOptions defines configuration options for the Agate library.
@@ -23,22 +30,33 @@ type AgateOptions struct {
	CloseFunc func() error

	// MetadataStore is the implementation of the metadata store to use.
	// Use the stores package to initialize the default implementation:
	// If nil, a default SQLite-based metadata store will be created automatically.
	// Use the stores package to initialize a custom implementation:
	// metadataStore, err := stores.NewDefaultMetadataStore(metadataDir)
	MetadataStore store.MetadataStore

	// BlobStore is the implementation of the blob store to use.
	// Use the stores package to initialize the default implementation:
	// If nil, a default filesystem-based blob store will be created automatically.
	// Use the stores package to initialize a custom implementation:
	// blobStore, err := stores.NewDefaultBlobStore(blobsDir)
	BlobStore store.BlobStore

	// CleanOnRestore specifies whether the target directory should be cleaned before restoring a snapshot.
	CleanOnRestore bool

	// Logger is the logger to use for output. If nil, logging is disabled.
	Logger *log.Logger
}

// Agate is the main entry point for the snapshot library.
type Agate struct {
	manager     SnapshotManager
	options     AgateOptions
	metadataDir string
	blobsDir    string
	mutex       sync.Mutex
	manager           interfaces.SnapshotManager
	options           AgateOptions
	metadataDir       string
	blobsDir          string
	currentSnapshotID string
	currentIDFile     string
}

// New initializes a new instance of the Agate library with the given options.
@@ -47,6 +65,11 @@ func New(options AgateOptions) (*Agate, error) {
		return nil, errors.New("work directory cannot be empty")
	}

	// Initialize logger if not provided
	if options.Logger == nil {
		options.Logger = log.New(io.Discard, "", 0)
	}

	// Create the work directory if it doesn't exist
	if err := os.MkdirAll(options.WorkDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create work directory: %w", err)
@@ -69,38 +92,96 @@ func New(options AgateOptions) (*Agate, error) {
	var err error

	// Use provided stores or initialize default ones
	if options.MetadataStore != nil {
	if options.MetadataStore != nil && options.BlobStore != nil {
		// Use the provided stores
		metadataStore = options.MetadataStore
	} else {
		// For default implementation, the user needs to initialize and provide the stores
		return nil, errors.New("metadata store must be provided")
	}

	if options.BlobStore != nil {
		blobStore = options.BlobStore
	} else if options.MetadataStore == nil && options.BlobStore == nil {
		// Initialize both stores with default implementations
		metadataStore, blobStore, err = stores.InitDefaultStores(options.WorkDir)
		if err != nil {
			return nil, fmt.Errorf("failed to initialize default stores: %w", err)
		}
		// Update options with the created stores
		options.MetadataStore = metadataStore
		options.BlobStore = blobStore
	} else if options.MetadataStore == nil {
		// Initialize only the metadata store
		metadataStore, err = stores.NewDefaultMetadataStore(metadataDir)
		if err != nil {
			return nil, fmt.Errorf("failed to initialize default metadata store: %w", err)
		}
		blobStore = options.BlobStore
		// Update options with the created metadata store
		options.MetadataStore = metadataStore
	} else {
		// For default implementation, the user needs to initialize and provide the stores
		return nil, errors.New("blob store must be provided")
		// Initialize only the blob store
		blobStore, err = stores.NewDefaultBlobStore(blobsDir)
		if err != nil {
			return nil, fmt.Errorf("failed to initialize default blob store: %w", err)
		}
		metadataStore = options.MetadataStore
		// Update options with the created blob store
		options.BlobStore = blobStore
	}

	// Create the snapshot manager
	manager, err := CreateSnapshotManager(metadataStore, blobStore)
	manager, err := CreateSnapshotManager(metadataStore, blobStore, options.Logger)
	if err != nil {
		return nil, fmt.Errorf("failed to create snapshot manager: %w", err)
	}

	return &Agate{
		manager:     manager,
		options:     options,
		metadataDir: metadataDir,
		blobsDir:    blobsDir,
	}, nil
	// Create a file path for storing the current snapshot ID
	currentIDFile := filepath.Join(options.WorkDir, "current_snapshot_id")

	agate := &Agate{
		manager:       manager,
		options:       options,
		metadataDir:   metadataDir,
		blobsDir:      blobsDir,
		currentIDFile: currentIDFile,
	}

	// Load the current snapshot ID if it exists
	if _, err := os.Stat(currentIDFile); err == nil {
		data, err := os.ReadFile(currentIDFile)
		if err == nil && len(data) > 0 {
			agate.currentSnapshotID = string(data)
		}
	}

	// Call OpenFunc if provided to initialize resources in the active directory
	if options.OpenFunc != nil {
		if err := options.OpenFunc(blobStore.GetActiveDir()); err != nil {
			return nil, fmt.Errorf("failed to open resources during initialization: %w", err)
		}
	}

	return agate, nil
}

// SaveSnapshot creates a new snapshot from the current state of the work directory.
func (a *Agate) GetActiveDir() string {
	return a.options.BlobStore.GetActiveDir()
}

func (a *Agate) GetMetadataDir() string {
	return a.metadataDir
}

func (a *Agate) GetBlobsDir() string {
	return a.blobsDir
}

// SaveSnapshot creates a new snapshot from the current state of the active directory.
// If parentID is provided, it will be set as the parent of the new snapshot.
// If parentID is empty, it will use the ID of the snapshot currently loaded in the active directory.
// Returns the ID of the created snapshot.
func (a *Agate) SaveSnapshot(ctx context.Context, name string, parentID string) (string, error) {
	a.mutex.Lock()
	defer a.mutex.Unlock()

	a.options.Logger.Printf("Creating new snapshot with name: %s", name)

	// Call CloseFunc if provided
	if a.options.CloseFunc != nil {
		if err := a.options.CloseFunc(); err != nil {
@@ -108,24 +189,48 @@ func (a *Agate) SaveSnapshot(ctx context.Context, name string, parentID string)
		}
	}

	defer func() {
		if a.options.OpenFunc != nil {
			if err := a.options.OpenFunc(a.options.BlobStore.GetActiveDir()); err != nil {
				a.options.Logger.Printf("ERROR: failed to open resources after snapshot creation: %v", err)
			}
		}
	}()

	// If parentID is not provided, use the current snapshot ID
	if parentID == "" {
		parentID = a.currentSnapshotID
	}

	effectiveParentID := parentID

	// Create the snapshot
	snapshot, err := a.manager.CreateSnapshot(ctx, a.options.WorkDir, name, parentID)
	snapshot, err := a.manager.CreateSnapshot(ctx, a.options.BlobStore.GetActiveDir(), name, effectiveParentID)
	if err != nil {
		a.options.Logger.Printf("ERROR: failed to create snapshot: %v", err)
		return "", fmt.Errorf("failed to create snapshot: %w", err)
	}

	// Call OpenFunc if provided
	if a.options.OpenFunc != nil {
		if err := a.options.OpenFunc(a.options.WorkDir); err != nil {
			return "", fmt.Errorf("failed to open resources after snapshot: %w", err)
		}
	a.options.Logger.Printf("Successfully created snapshot with ID: %s", snapshot.ID)

	// Update the current snapshot ID to the newly created snapshot
	a.currentSnapshotID = snapshot.ID

	// Save the current snapshot ID to a file
	if err := a.saveCurrentSnapshotID(); err != nil {
		return "", fmt.Errorf("failed to save current snapshot ID: %w", err)
	}

	return snapshot.ID, nil
}

// RestoreSnapshot extracts a snapshot to the work directory.
// RestoreSnapshot extracts a snapshot to the active directory.
func (a *Agate) RestoreSnapshot(ctx context.Context, snapshotID string) error {
	a.mutex.Lock()
	defer a.mutex.Unlock()

	a.options.Logger.Printf("Restoring snapshot with ID: %s", snapshotID)

	// Call CloseFunc if provided
	if a.options.CloseFunc != nil {
		if err := a.options.CloseFunc(); err != nil {
@@ -134,13 +239,25 @@ func (a *Agate) RestoreSnapshot(ctx context.Context, snapshotID string) error {
	}

	// Extract the snapshot
	if err := a.manager.ExtractSnapshot(ctx, snapshotID, a.options.WorkDir); err != nil {
	if err := a.manager.ExtractSnapshot(ctx, snapshotID, a.options.BlobStore.GetActiveDir(), a.options.CleanOnRestore); err != nil {
		a.options.Logger.Printf("ERROR: failed to extract snapshot: %v", err)
		return fmt.Errorf("failed to extract snapshot: %w", err)
	}

	a.options.Logger.Printf("Successfully restored snapshot with ID: %s", snapshotID)

	// Save the ID of the snapshot that was restored
	a.currentSnapshotID = snapshotID

	// Save the current snapshot ID to a file
	if err := a.saveCurrentSnapshotID(); err != nil {
		return fmt.Errorf("failed to save current snapshot ID: %w", err)
	}

	// Call OpenFunc if provided
	if a.options.OpenFunc != nil {
		if err := a.options.OpenFunc(a.options.WorkDir); err != nil {
		if err := a.options.OpenFunc(a.options.BlobStore.GetActiveDir()); err != nil {
			a.options.Logger.Printf("ERROR: failed to open resources after restore: %v", err)
			return fmt.Errorf("failed to open resources after restore: %w", err)
		}
	}
@@ -148,9 +265,49 @@ func (a *Agate) RestoreSnapshot(ctx context.Context, snapshotID string) error {
	return nil
}

// RestoreSnapshotToDir extracts a snapshot to the given directory.
func (a *Agate) RestoreSnapshotToDir(ctx context.Context, snapshotID string, dir string) error {
	a.mutex.Lock()
	defer a.mutex.Unlock()

	// Call CloseFunc if provided
	if a.options.CloseFunc != nil {
		if err := a.options.CloseFunc(); err != nil {
			return fmt.Errorf("failed to close resources before restore: %w", err)
		}
	}

	defer func() {
		if a.options.OpenFunc != nil {
			if err := a.options.OpenFunc(dir); err != nil {
				a.options.Logger.Printf("ERROR: failed to open resources after snapshot restore: %v", err)
			}
		}
	}()

	// Extract the snapshot
	if err := a.manager.ExtractSnapshot(ctx, snapshotID, dir, a.options.CleanOnRestore); err != nil {
		return fmt.Errorf("failed to extract snapshot: %w", err)
	}

	// If restoring to the active directory, save the snapshot ID
	if dir == a.options.BlobStore.GetActiveDir() {
		a.currentSnapshotID = snapshotID

		// Save the current snapshot ID to a file
		if err := a.saveCurrentSnapshotID(); err != nil {
			return fmt.Errorf("failed to save current snapshot ID: %w", err)
		}
	}

	return nil
}

// ListSnapshots returns a list of all available snapshots.
func (a *Agate) ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error) {
	return a.manager.ListSnapshots(ctx)
	// Create empty ListOptions since we don't have filtering/pagination in this API yet
	opts := store.ListOptions{}
	return a.manager.ListSnapshots(ctx, opts)
}

// GetSnapshotDetails returns detailed information about a specific snapshot.
@@ -163,15 +320,112 @@ func (a *Agate) DeleteSnapshot(ctx context.Context, snapshotID string) error {
	return a.manager.DeleteSnapshot(ctx, snapshotID)
}

// saveCurrentSnapshotID saves the current snapshot ID to a file in the WorkDir
func (a *Agate) saveCurrentSnapshotID() error {
	if a.currentSnapshotID == "" {
		// If there's no current snapshot ID, remove the file if it exists
		if _, err := os.Stat(a.currentIDFile); err == nil {
			return os.Remove(a.currentIDFile)
		}
		return nil
	}

	// Write the current snapshot ID to the file
	return os.WriteFile(a.currentIDFile, []byte(a.currentSnapshotID), 0644)
}

func (a *Agate) Open() error {
	return a.options.OpenFunc(a.GetActiveDir())
}

// Close releases all resources used by the Agate instance.
func (a *Agate) Close() error {
	// Currently, we don't have a way to close the manager directly
	// This would be a good addition in the future
	if a.options.CloseFunc != nil {
		return a.options.CloseFunc()
	}
	return nil
}

// StartServer starts a gRPC server to share snapshots.
// This is a placeholder for future implementation.
func (a *Agate) StartServer(ctx context.Context, address string) error {
	return errors.New("server functionality not implemented yet")
	// Use the new remote.Server
	server := remote.NewServer(a.manager)
	return server.Start(ctx, address)
}

// GetRemoteSnapshot downloads a snapshot from a remote server, using an efficient differential update.
func (a *Agate) GetRemoteSnapshot(ctx context.Context, address string, snapshotID string, localParentID string) error {
	client, err := remote.NewClient(address)
	if err != nil {
		return err
	}
	defer client.Close()

	remoteSnapshot, err := client.FetchSnapshotDetails(ctx, snapshotID)
	if err != nil {
		return fmt.Errorf("failed to get remote snapshot details: %w", err)
	}

	// 1. Preparation
	tempDownloadDir := filepath.Join(a.options.WorkDir, "temp_download")
	if err := os.MkdirAll(tempDownloadDir, 0755); err != nil {
		return fmt.Errorf("failed to create temp download dir: %w", err)
	}
	diffArchivePath := filepath.Join(tempDownloadDir, snapshotID+"_diff.zip")
	diffPartPath := diffArchivePath + ".part"

	// 2. Download the diff, with resume support
	a.options.Logger.Printf("Downloading diff for snapshot %s from parent %s", snapshotID, localParentID)
	if err := client.DownloadSnapshotDiff(ctx, snapshotID, localParentID, diffPartPath); err != nil {
		return fmt.Errorf("failed to download snapshot diff: %w", err)
	}
	if err := os.Rename(diffPartPath, diffArchivePath); err != nil {
		return fmt.Errorf("failed to finalize downloaded diff: %w", err)
	}
	defer os.Remove(diffArchivePath)

	// 3. Apply atomically
	// Create a new directory for the snapshot
	newSnapshotDir := filepath.Join(tempDownloadDir, "new_content_"+snapshotID)
	if err := os.MkdirAll(newSnapshotDir, 0755); err != nil {
		return fmt.Errorf("failed to create new snapshot directory: %w", err)
	}
	defer os.RemoveAll(newSnapshotDir)

	// If a parent exists, extract its contents
	if localParentID != "" {
		if err := a.manager.ExtractSnapshot(ctx, localParentID, newSnapshotDir, false); err != nil {
			a.options.Logger.Printf("Warning: failed to extract local parent snapshot %s: %v", localParentID, err)
		}
	}

	// Unpack the diff archive on top
	if err := extractArchive(diffArchivePath, newSnapshotDir); err != nil {
		return fmt.Errorf("failed to extract diff archive: %w", err)
	}

	// 4. Create the final archive and register the snapshot
	finalArchivePath := filepath.Join(tempDownloadDir, snapshotID+".zip")
	if err := archive.CreateArchive(newSnapshotDir, finalArchivePath); err != nil {
		return fmt.Errorf("failed to create final snapshot archive: %w", err)
	}
	defer os.Remove(finalArchivePath)

	finalArchiveFile, err := os.Open(finalArchivePath)
	if err != nil {
		return fmt.Errorf("failed to open final archive: %w", err)
	}
	defer finalArchiveFile.Close()

	if _, err := a.options.BlobStore.StoreBlob(ctx, snapshotID, finalArchiveFile); err != nil {
		return fmt.Errorf("failed to store final blob: %w", err)
	}

	if err := a.options.MetadataStore.SaveSnapshotMetadata(ctx, *remoteSnapshot); err != nil {
		a.options.BlobStore.DeleteBlob(ctx, snapshotID) // Roll back
		return fmt.Errorf("failed to save snapshot metadata: %w", err)
	}

	a.options.Logger.Printf("Successfully imported remote snapshot %s", snapshotID)
	return nil
}
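A practical consequence of the `New()` change above is that both store fields may now be left nil: Agate falls back to the default SQLite metadata store and filesystem blob store under `WorkDir` (the functional test below uses exactly this form). A minimal sketch of that usage, with the directory path as a placeholder:

```go
package main

import (
	"context"
	"log"

	"gitea.unprism.ru/KRBL/Agate"
)

func main() {
	// With MetadataStore and BlobStore left nil, New() initializes the
	// default stores under WorkDir (see the api.go changes above).
	ag, err := agate.New(agate.AgateOptions{
		WorkDir:        "/path/to/your/repository",
		CleanOnRestore: true,
	})
	if err != nil {
		log.Fatalf("Failed to initialize Agate: %v", err)
	}
	defer ag.Close()

	id, err := ag.SaveSnapshot(context.Background(), "Defaults only", "")
	if err != nil {
		log.Fatalf("Failed to create snapshot: %v", err)
	}
	log.Printf("Created snapshot %s; active directory is %s", id, ag.GetActiveDir())
}
```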
api_test.go — new file, 374 lines

@@ -0,0 +1,374 @@
package agate

import (
	"bytes"
	"context"
	"log"
	"os"
	"path/filepath"
	"strings"
	"testing"
)

// setupTestAPI creates a temporary directory and initializes an Agate instance
func setupTestAPI(t *testing.T) (*Agate, string, func()) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}

	// Create a data directory
	dataDir := filepath.Join(tempDir)
	if err := os.MkdirAll(dataDir, 0755); err != nil {
		os.RemoveAll(tempDir)
		t.Fatalf("Failed to create data directory: %v", err)
	}

	// Create test files
	createAPITestFiles(t, filepath.Join(dataDir, "blobs", "active"))

	// Create Agate options
	options := AgateOptions{
		WorkDir: dataDir,
		OpenFunc: func(dir string) error {
			return nil
		},
		CloseFunc: func() error {
			return nil
		},
	}

	// Create Agate instance
	ag, err := New(options)
	if err != nil {
		os.RemoveAll(tempDir)
		t.Fatalf("Failed to create Agate instance: %v", err)
	}

	// Return a cleanup function
	cleanup := func() {
		ag.Close()
		os.RemoveAll(tempDir)
	}

	return ag, tempDir, cleanup
}

// createAPITestFiles creates test files in the specified directory
func createAPITestFiles(t *testing.T, dir string) {
	// Create a subdirectory
	subDir := filepath.Join(dir, "subdir")
	if err := os.MkdirAll(subDir, 0755); err != nil {
		t.Fatalf("Failed to create subdirectory: %v", err)
	}

	// Create some test files
	testFiles := map[string]string{
		filepath.Join(dir, "file1.txt"):       "This is file 1",
		filepath.Join(dir, "file2.txt"):       "This is file 2",
		filepath.Join(subDir, "subfile1.txt"): "This is subfile 1",
		filepath.Join(subDir, "subfile2.txt"): "This is subfile 2",
	}

	for path, content := range testFiles {
		if err := os.WriteFile(path, []byte(content), 0644); err != nil {
			t.Fatalf("Failed to create test file %s: %v", path, err)
		}
	}
}

func TestNewAgate(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir)

	// Create a data directory
	dataDir := filepath.Join(tempDir, "data")
	if err := os.MkdirAll(dataDir, 0755); err != nil {
		t.Fatalf("Failed to create data directory: %v", err)
	}

	// Create Agate options
	options := AgateOptions{
		WorkDir: dataDir,
		OpenFunc: func(dir string) error {
			return nil
		},
		CloseFunc: func() error {
			return nil
		},
	}

	// Create Agate instance
	ag, err := New(options)
	if err != nil {
		t.Fatalf("Failed to create Agate instance: %v", err)
	}
	defer ag.Close()

	// Check that the Agate instance was created successfully
	if ag == nil {
		t.Fatalf("Agate instance is nil")
	}
}

func TestSaveAndRestoreSnapshot(t *testing.T) {
	ag, _, cleanup := setupTestAPI(t)
	defer cleanup()

	// Create a snapshot
	ctx := context.Background()
	snapshotID, err := ag.SaveSnapshot(ctx, "Test Snapshot", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Check that the snapshot was created with the correct name
	snapshot, err := ag.GetSnapshotDetails(ctx, snapshotID)
	if err != nil {
		t.Fatalf("Failed to get snapshot details: %v", err)
	}
	if snapshot.Name != "Test Snapshot" {
		t.Errorf("Snapshot has wrong name: got %s, want %s", snapshot.Name, "Test Snapshot")
	}

	// Modify a file
	dataDir := ag.options.BlobStore.GetActiveDir()
	if err := os.WriteFile(filepath.Join(dataDir, "file1.txt"), []byte("Modified file 1"), 0644); err != nil {
		t.Fatalf("Failed to modify test file: %v", err)
	}

	// Restore the snapshot
	err = ag.RestoreSnapshot(ctx, snapshotID)
	if err != nil {
		t.Fatalf("Failed to restore snapshot: %v", err)
	}

	// Check that the file was restored
	content, err := os.ReadFile(filepath.Join(dataDir, "file1.txt"))
	if err != nil {
		t.Fatalf("Failed to read restored file: %v", err)
	}
	if string(content) != "This is file 1" {
		t.Errorf("File content was not restored: got %s, want %s", string(content), "This is file 1")
	}
}

func TestRestoreSnapshotToDir(t *testing.T) {
	ag, tempDir, cleanup := setupTestAPI(t)
	defer cleanup()

	// Create a snapshot
	ctx := context.Background()
	snapshotID, err := ag.SaveSnapshot(ctx, "Test Snapshot", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Create a target directory
	targetDir := filepath.Join(tempDir, "target")
	if err := os.MkdirAll(targetDir, 0755); err != nil {
		t.Fatalf("Failed to create target directory: %v", err)
	}

	// Restore the snapshot to the target directory
	err = ag.RestoreSnapshotToDir(ctx, snapshotID, targetDir)
	if err != nil {
		t.Fatalf("Failed to restore snapshot to directory: %v", err)
	}

	// Check that the files were restored
	testFiles := map[string]string{
		filepath.Join(targetDir, "file1.txt"):            "This is file 1",
		filepath.Join(targetDir, "file2.txt"):            "This is file 2",
		filepath.Join(targetDir, "subdir/subfile1.txt"):  "This is subfile 1",
		filepath.Join(targetDir, "subdir/subfile2.txt"):  "This is subfile 2",
	}

	for path, expectedContent := range testFiles {
		content, err := os.ReadFile(path)
		if err != nil {
			t.Fatalf("Failed to read restored file %s: %v", path, err)
		}
		if string(content) != expectedContent {
			t.Errorf("Restored file %s has wrong content: got %s, want %s", path, string(content), expectedContent)
		}
	}
}

func TestAPIListSnapshots(t *testing.T) {
	ag, _, cleanup := setupTestAPI(t)
	defer cleanup()

	// Create multiple snapshots
	ctx := context.Background()
	snapshotID1, err := ag.SaveSnapshot(ctx, "Snapshot 1", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Modify a file
	dataDir := ag.options.WorkDir
	if err := os.WriteFile(filepath.Join(dataDir, "file1.txt"), []byte("Modified file 1"), 0644); err != nil {
		t.Fatalf("Failed to modify test file: %v", err)
	}

	snapshotID2, err := ag.SaveSnapshot(ctx, "Snapshot 2", snapshotID1)
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// List the snapshots
	snapshots, err := ag.ListSnapshots(ctx)
	if err != nil {
		t.Fatalf("Failed to list snapshots: %v", err)
	}

	// Check that both snapshots are listed
	if len(snapshots) != 2 {
		t.Errorf("Wrong number of snapshots listed: got %d, want %d", len(snapshots), 2)
	}

	// Check that the snapshots have the correct information
	for _, snap := range snapshots {
		if snap.ID == snapshotID1 {
			if snap.Name != "Snapshot 1" {
				t.Errorf("Snapshot 1 has wrong name: got %s, want %s", snap.Name, "Snapshot 1")
			}
			if snap.ParentID != "" {
				t.Errorf("Snapshot 1 has wrong parent ID: got %s, want %s", snap.ParentID, "")
			}
		} else if snap.ID == snapshotID2 {
			if snap.Name != "Snapshot 2" {
				t.Errorf("Snapshot 2 has wrong name: got %s, want %s", snap.Name, "Snapshot 2")
			}
			if snap.ParentID != snapshotID1 {
				t.Errorf("Snapshot 2 has wrong parent ID: got %s, want %s", snap.ParentID, snapshotID1)
			}
		} else {
			t.Errorf("Unexpected snapshot ID: %s", snap.ID)
		}
	}
}

func TestAgate_Logging(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir)

	// Create a data directory
	dataDir := filepath.Join(tempDir, "data")
	if err := os.MkdirAll(dataDir, 0755); err != nil {
		t.Fatalf("Failed to create data directory: %v", err)
	}

	// Create test files in the active directory
	activeDir := filepath.Join(dataDir, "blobs", "active")
	if err := os.MkdirAll(activeDir, 0755); err != nil {
		t.Fatalf("Failed to create active directory: %v", err)
	}
	createAPITestFiles(t, activeDir)

	// Create a buffer to capture log output
	var logBuffer bytes.Buffer
	logger := log.New(&logBuffer, "", 0)

	// Create Agate options with the logger
	options := AgateOptions{
		WorkDir: dataDir,
		OpenFunc: func(dir string) error {
			return nil
		},
		CloseFunc: func() error {
			return nil
		},
		Logger: logger,
	}

	// Create Agate instance
	ag, err := New(options)
	if err != nil {
		t.Fatalf("Failed to create Agate instance: %v", err)
	}
	defer ag.Close()

	// Perform operations that should generate logs
	ctx := context.Background()

	// Save a snapshot
	snapshotID, err := ag.SaveSnapshot(ctx, "Test Snapshot", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Restore the snapshot
	err = ag.RestoreSnapshot(ctx, snapshotID)
	if err != nil {
		t.Fatalf("Failed to restore snapshot: %v", err)
	}

	// Check that logs were generated
	logs := logBuffer.String()
	if logs == "" {
		t.Errorf("No logs were generated")
	}

	// Check for expected log messages
	expectedLogMessages := []string{
		"Creating new snapshot",
		"Restoring snapshot",
	}

	for _, msg := range expectedLogMessages {
		if !strings.Contains(logs, msg) {
			t.Errorf("Expected log message '%s' not found in logs", msg)
		}
	}
}

// Note: This test is a placeholder for when the ListSnapshots method is updated to accept ListOptions.
// Currently, the ListSnapshots method in api.go doesn't accept ListOptions, so we can't test that functionality directly.
// The test for ListOptions functionality is covered in TestListSnapshotsMetadata_WithOptions in store/sqlite/sqlite_test.go.
func TestAgate_ListSnapshotsWithOptions(t *testing.T) {
	t.Skip("Skipping test as ListSnapshots in api.go doesn't yet support ListOptions")
}

func TestAPIDeleteSnapshot(t *testing.T) {
	ag, _, cleanup := setupTestAPI(t)
	defer cleanup()

	// Create a snapshot
	ctx := context.Background()
	snapshotID, err := ag.SaveSnapshot(ctx, "Test Snapshot", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Delete the snapshot
	err = ag.DeleteSnapshot(ctx, snapshotID)
	if err != nil {
		t.Fatalf("Failed to delete snapshot: %v", err)
	}

	// Try to get the deleted snapshot
	_, err = ag.GetSnapshotDetails(ctx, snapshotID)
	if err == nil {
		t.Fatalf("Expected error when getting deleted snapshot, got nil")
	}

	// List snapshots to confirm it's gone
	snapshots, err := ag.ListSnapshots(ctx)
	if err != nil {
		t.Fatalf("Failed to list snapshots: %v", err)
	}
	if len(snapshots) != 0 {
		t.Errorf("Expected 0 snapshots after deletion, got %d", len(snapshots))
	}
}
archive/archive_test.go — new file, 236 lines

@@ -0,0 +1,236 @@
package archive

import (
	"bytes"
	"os"
	"path/filepath"
	"testing"
)

func TestCreateArchive(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a source directory with some files
	sourceDir := filepath.Join(tempDir, "source")
	if err := os.MkdirAll(sourceDir, 0755); err != nil {
		t.Fatalf("Failed to create source directory: %v", err)
	}

	// Create a subdirectory
	subDir := filepath.Join(sourceDir, "subdir")
	if err := os.MkdirAll(subDir, 0755); err != nil {
		t.Fatalf("Failed to create subdirectory: %v", err)
	}

	// Create some test files
	testFiles := map[string]string{
		filepath.Join(sourceDir, "file1.txt"): "This is file 1",
		filepath.Join(sourceDir, "file2.txt"): "This is file 2",
		filepath.Join(subDir, "subfile1.txt"): "This is subfile 1",
		filepath.Join(subDir, "subfile2.txt"): "This is subfile 2",
	}

	for path, content := range testFiles {
		if err := os.WriteFile(path, []byte(content), 0644); err != nil {
			t.Fatalf("Failed to create test file %s: %v", path, err)
		}
	}

	// Create the archive
	archivePath := filepath.Join(tempDir, "archive.zip")
	err = CreateArchive(sourceDir, archivePath)
	if err != nil {
		t.Fatalf("Failed to create archive: %v", err)
	}

	// Check that the archive file was created
	if _, err := os.Stat(archivePath); os.IsNotExist(err) {
		t.Fatalf("Archive file was not created")
	}

	// Test creating archive with non-existent source directory
	err = CreateArchive(filepath.Join(tempDir, "nonexistent"), archivePath)
	if err == nil {
		t.Fatalf("Expected error when creating archive from non-existent directory, got nil")
	}

	// Test creating archive with a file as source
	fileSourcePath := filepath.Join(tempDir, "file_source.txt")
	if err := os.WriteFile(fileSourcePath, []byte("This is a file"), 0644); err != nil {
		t.Fatalf("Failed to create test file: %v", err)
	}
	err = CreateArchive(fileSourcePath, archivePath)
	if err == nil {
		t.Fatalf("Expected error when creating archive from a file, got nil")
	}
}

func TestListArchiveContents(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a source directory with some files
	sourceDir := filepath.Join(tempDir, "source")
	if err := os.MkdirAll(sourceDir, 0755); err != nil {
		t.Fatalf("Failed to create source directory: %v", err)
	}

	// Create a subdirectory
	subDir := filepath.Join(sourceDir, "subdir")
	if err := os.MkdirAll(subDir, 0755); err != nil {
		t.Fatalf("Failed to create subdirectory: %v", err)
	}

	// Create some test files
	testFiles := map[string]string{
		filepath.Join(sourceDir, "file1.txt"): "This is file 1",
		filepath.Join(sourceDir, "file2.txt"): "This is file 2",
		filepath.Join(subDir, "subfile1.txt"): "This is subfile 1",
		filepath.Join(subDir, "subfile2.txt"): "This is subfile 2",
	}

	for path, content := range testFiles {
		if err := os.WriteFile(path, []byte(content), 0644); err != nil {
			t.Fatalf("Failed to create test file %s: %v", path, err)
		}
	}

	// Create the archive
	archivePath := filepath.Join(tempDir, "archive.zip")
	err = CreateArchive(sourceDir, archivePath)
	if err != nil {
		t.Fatalf("Failed to create archive: %v", err)
	}

	// List the archive contents
	entries, err := ListArchiveContents(archivePath)
	if err != nil {
		t.Fatalf("Failed to list archive contents: %v", err)
	}

	// Check that all files and directories are listed
	expectedEntries := map[string]bool{
		"file1.txt":           false,
		"file2.txt":           false,
		"subdir/":             true,
		"subdir/subfile1.txt": false,
		"subdir/subfile2.txt": false,
	}

	if len(entries) != len(expectedEntries) {
		t.Errorf("Wrong number of entries: got %d, want %d", len(entries), len(expectedEntries))
	}

	for _, entry := range entries {
		isDir, exists := expectedEntries[entry.Path]
		if !exists {
			t.Errorf("Unexpected entry in archive: %s", entry.Path)
			continue
		}
		if entry.IsDir != isDir {
			t.Errorf("Entry %s has wrong IsDir value: got %v, want %v", entry.Path, entry.IsDir, isDir)
		}
	}

	// Test listing contents of non-existent archive
	_, err = ListArchiveContents(filepath.Join(tempDir, "nonexistent.zip"))
	if err == nil {
		t.Fatalf("Expected error when listing contents of non-existent archive, got nil")
	}
}

func TestExtractFileFromArchive(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a source directory with some files
	sourceDir := filepath.Join(tempDir, "source")
	if err := os.MkdirAll(sourceDir, 0755); err != nil {
		t.Fatalf("Failed to create source directory: %v", err)
	}

	// Create a subdirectory
	subDir := filepath.Join(sourceDir, "subdir")
	if err := os.MkdirAll(subDir, 0755); err != nil {
		t.Fatalf("Failed to create subdirectory: %v", err)
	}

	// Create some test files
	testFiles := map[string]string{
		filepath.Join(sourceDir, "file1.txt"): "This is file 1",
		filepath.Join(sourceDir, "file2.txt"): "This is file 2",
		filepath.Join(subDir, "subfile1.txt"): "This is subfile 1",
		filepath.Join(subDir, "subfile2.txt"): "This is subfile 2",
	}

	for path, content := range testFiles {
		if err := os.WriteFile(path, []byte(content), 0644); err != nil {
			t.Fatalf("Failed to create test file %s: %v", path, err)
		}
	}

	// Create the archive
	archivePath := filepath.Join(tempDir, "archive.zip")
	err = CreateArchive(sourceDir, archivePath)
	if err != nil {
		t.Fatalf("Failed to create archive: %v", err)
	}

	// Extract a file from the archive
	var buf bytes.Buffer
	err = ExtractFileFromArchive(archivePath, "file1.txt", &buf)
	if err != nil {
		t.Fatalf("Failed to extract file from archive: %v", err)
	}

	// Check that the extracted content matches the original
	if buf.String() != "This is file 1" {
		t.Errorf("Extracted content does not match: got %s, want %s", buf.String(), "This is file 1")
	}

	// Extract a file from a subdirectory
	buf.Reset()
	err = ExtractFileFromArchive(archivePath, "subdir/subfile1.txt", &buf)
	if err != nil {
		t.Fatalf("Failed to extract file from archive: %v", err)
	}

	// Check that the extracted content matches the original
	if buf.String() != "This is subfile 1" {
		t.Errorf("Extracted content does not match: got %s, want %s", buf.String(), "This is subfile 1")
	}

	// Try to extract a non-existent file
	buf.Reset()
	err = ExtractFileFromArchive(archivePath, "nonexistent.txt", &buf)
	if err != ErrFileNotFoundInArchive {
		t.Fatalf("Expected ErrFileNotFoundInArchive when extracting non-existent file, got: %v", err)
	}

	// Try to extract a directory
	buf.Reset()
	err = ExtractFileFromArchive(archivePath, "subdir/", &buf)
	if err == nil {
		t.Fatalf("Expected error when extracting a directory, got nil")
	}

	// Try to extract from a non-existent archive
	buf.Reset()
	err = ExtractFileFromArchive(filepath.Join(tempDir, "nonexistent.zip"), "file1.txt", &buf)
	if err == nil {
		t.Fatalf("Expected error when extracting from non-existent archive, got nil")
	}
}
basic_usage (BIN) — Binary file not shown.
@@ -7,8 +7,8 @@ import (
	"os"
	"path/filepath"

	"unprism.ru/KRBL/agate"
	"unprism.ru/KRBL/agate/stores"
	"gitea.unprism.ru/KRBL/Agate"
	"gitea.unprism.ru/KRBL/Agate/stores"
)

func main() {
functional_test.go — new file, 330 lines

@@ -0,0 +1,330 @@
package agate

import (
	"context"
	"os"
	"path/filepath"
	"testing"
	"time"
)

// TestFullWorkflow tests a complete workflow of creating snapshots, modifying files,
// creating more snapshots, and restoring snapshots.
func TestFullWorkflow(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir)

	// Create Agate options
	options := AgateOptions{
		WorkDir:        tempDir,
		CleanOnRestore: true,
	}

	// Create Agate instance
	ag, err := New(options)
	if err != nil {
		t.Fatalf("Failed to create Agate instance: %v", err)
	}
	defer ag.Close()

	// Create a data directory
	dataDir := ag.options.BlobStore.GetActiveDir()
	if err := os.MkdirAll(dataDir, 0755); err != nil {
		t.Fatalf("Failed to create data directory: %v", err)
	}

	// Create initial test files
	initialFiles := map[string]string{
		filepath.Join(dataDir, "file1.txt"):           "Initial content of file 1",
		filepath.Join(dataDir, "file2.txt"):           "Initial content of file 2",
		filepath.Join(dataDir, "subdir", "file3.txt"): "Initial content of file 3",
	}

	// Create subdirectory
	if err := os.MkdirAll(filepath.Join(dataDir, "subdir"), 0755); err != nil {
		t.Fatalf("Failed to create subdirectory: %v", err)
	}

	// Create the files
	for path, content := range initialFiles {
		if err := os.WriteFile(path, []byte(content), 0644); err != nil {
			t.Fatalf("Failed to create test file %s: %v", path, err)
		}
	}

	// Step 1: Create the first snapshot
	ctx := context.Background()
	snapshot1ID, err := ag.SaveSnapshot(ctx, "Snapshot 1", "")
	if err != nil {
		t.Fatalf("Failed to create first snapshot: %v", err)
	}
	t.Logf("Created first snapshot with ID: %s", snapshot1ID)

	// Step 2: Modify some files and add a new file
	modifiedFiles := map[string]string{
		filepath.Join(dataDir, "file1.txt"): "Modified content of file 1",
		filepath.Join(dataDir, "file4.txt"): "Content of new file 4",
	}

	for path, content := range modifiedFiles {
		if err := os.WriteFile(path, []byte(content), 0644); err != nil {
			t.Fatalf("Failed to modify/create test file %s: %v", path, err)
		}
	}

	// Step 3: Create the second snapshot
	snapshot2ID, err := ag.SaveSnapshot(ctx, "Snapshot 2", snapshot1ID)
	if err != nil {
		t.Fatalf("Failed to create second snapshot: %v", err)
	}
	t.Logf("Created second snapshot with ID: %s", snapshot2ID)

	// Step 4: Delete a file and modify another
	if err := os.Remove(filepath.Join(dataDir, "file2.txt")); err != nil {
		t.Fatalf("Failed to delete test file: %v", err)
	}

	if err := os.WriteFile(filepath.Join(dataDir, "subdir/file3.txt"), []byte("Modified content of file 3"), 0644); err != nil {
		t.Fatalf("Failed to modify test file: %v", err)
	}

	// Step 5: Create the third snapshot
	snapshot3ID, err := ag.SaveSnapshot(ctx, "Snapshot 3", snapshot2ID)
	if err != nil {
		t.Fatalf("Failed to create third snapshot: %v", err)
	}
	t.Logf("Created third snapshot with ID: %s", snapshot3ID)

	// Step 6: List all snapshots
	snapshots, err := ag.ListSnapshots(ctx)
	if err != nil {
		t.Fatalf("Failed to list snapshots: %v", err)
	}

	if len(snapshots) != 3 {
		t.Errorf("Expected 3 snapshots, got %d", len(snapshots))
	}

	// Step 7: Restore the first snapshot
	err = ag.RestoreSnapshot(ctx, snapshot1ID)
	if err != nil {
		t.Fatalf("Failed to restore first snapshot: %v", err)
	}
	t.Logf("Restored first snapshot")

	// Step 8: Verify the restored files match the initial state
	for path, expectedContent := range initialFiles {
		content, err := os.ReadFile(path)
		if err != nil {
			t.Fatalf("Failed to read restored file %s: %v", path, err)
		}
		if string(content) != expectedContent {
			t.Errorf("Restored file %s has wrong content: got %s, want %s", path, string(content), expectedContent)
		} else {
			t.Logf("SUCCESS: Restored file %s has correct content after restoring first snapshot", path)
		}
	}

	// Check that file4.txt doesn't exist
	file4Path := filepath.Join(dataDir, "file4.txt")
	_, err = os.Stat(file4Path)
	if err == nil {
		t.Errorf("File4.txt should not exist after restoring first snapshot")
	} else if !os.IsNotExist(err) {
		t.Errorf("Unexpected error checking if File4.txt exists: %v", err)
	} else {
		t.Logf("SUCCESS: File4.txt correctly does not exist after restoring first snapshot")
	}

	// Step 9: Restore the third snapshot
	err = ag.RestoreSnapshot(ctx, snapshot3ID)
	if err != nil {
		t.Fatalf("Failed to restore third snapshot: %v", err)
	}
	t.Logf("Restored third snapshot")

	// Step 10: Verify the restored files match the final state
	expectedFiles := map[string]string{
		filepath.Join(dataDir, "file1.txt"):        "Modified content of file 1",
		filepath.Join(dataDir, "file4.txt"):        "Content of new file 4",
		filepath.Join(dataDir, "subdir/file3.txt"): "Modified content of file 3",
	}

	for path, expectedContent := range expectedFiles {
		content, err := os.ReadFile(path)
		if err != nil {
			t.Fatalf("Failed to read restored file %s: %v", path, err)
		}
		if string(content) != expectedContent {
			t.Errorf("Restored file %s has wrong content: got %s, want %s", path, string(content), expectedContent)
		} else {
			t.Logf("SUCCESS: Restored file %s has correct content after restoring third snapshot", path)
		}
	}

	// Check that file2.txt doesn't exist
	file2Path := filepath.Join(dataDir, "file2.txt")
	_, err = os.Stat(file2Path)
	if err == nil {
		t.Errorf("File2.txt should not exist after restoring third snapshot")
	} else if !os.IsNotExist(err) {
		t.Errorf("Unexpected error checking if File2.txt exists: %v", err)
	} else {
		t.Logf("SUCCESS: File2.txt correctly does not exist after restoring third snapshot")
	}

	// Step 11: Delete a snapshot
	err = ag.DeleteSnapshot(ctx, snapshot2ID)
	if err != nil {
		t.Fatalf("Failed to delete snapshot: %v", err)
	}
	t.Logf("Deleted second snapshot")

	// Step 12: Verify the snapshot was deleted
snapshots, err = ag.ListSnapshots(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to list snapshots: %v", err)
|
||||
}
|
||||
|
||||
// Debug output
|
||||
t.Logf("After deletion, found %d snapshots:", len(snapshots))
|
||||
for i, snap := range snapshots {
|
||||
t.Logf(" Snapshot %d: ID=%s, Name=%s, ParentID=%s", i+1, snap.ID, snap.Name, snap.ParentID)
|
||||
}
|
||||
|
||||
// Get detailed information about snapshot 3
|
||||
snapshot3, err := ag.GetSnapshotDetails(ctx, snapshot3ID)
|
||||
if err != nil {
|
||||
t.Logf("Failed to get snapshot 3 details: %v", err)
|
||||
} else {
|
||||
t.Logf("Snapshot 3 details: ID=%s, Name=%s, ParentID=%s", snapshot3.ID, snapshot3.Name, snapshot3.ParentID)
|
||||
}
|
||||
|
||||
// Verify that snapshot 3's parent ID has been updated to point to snapshot 1
|
||||
if snapshot3 != nil && snapshot3.ParentID != snapshot1ID {
|
||||
t.Errorf("Snapshot 3's parent ID should be updated to point to Snapshot 1 after Snapshot 2 is deleted. Got ParentID=%s, want ParentID=%s", snapshot3.ParentID, snapshot1ID)
|
||||
} else {
|
||||
t.Logf("SUCCESS: Snapshot 3's parent ID has been correctly updated to point to Snapshot 1: %s", snapshot3.ParentID)
|
||||
}
|
||||
|
||||
if len(snapshots) != 2 {
|
||||
t.Errorf("Expected 2 snapshots after deletion, got %d", len(snapshots))
|
||||
} else {
|
||||
t.Logf("SUCCESS: Found correct number of snapshots after deletion: %d", len(snapshots))
|
||||
}
|
||||
|
||||
foundDeletedSnapshot := false
|
||||
for _, snap := range snapshots {
|
||||
if snap.ID == snapshot2ID {
|
||||
foundDeletedSnapshot = true
|
||||
t.Errorf("Snapshot 2 (ID=%s) should have been deleted", snapshot2ID)
|
||||
}
|
||||
}
|
||||
if !foundDeletedSnapshot {
|
||||
t.Logf("SUCCESS: Snapshot 2 (ID=%s) was correctly deleted", snapshot2ID)
|
||||
}
|
||||
}
|
||||
|
||||
// TestLargeFiles tests creating and restoring snapshots with large files
|
||||
func TestLargeFiles(t *testing.T) {
|
||||
// Skip this test in short mode
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping large file test in short mode")
|
||||
}
|
||||
|
||||
// Create a temporary directory for tests
|
||||
tempDir, err := os.MkdirTemp("", "agate-test-*")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temp directory: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
// Create Agate options
|
||||
options := AgateOptions{
|
||||
WorkDir: tempDir,
|
||||
CleanOnRestore: true,
|
||||
OpenFunc: func(dir string) error {
|
||||
return nil
|
||||
},
|
||||
CloseFunc: func() error {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// Create Agate instance
|
||||
ag, err := New(options)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create Agate instance: %v", err)
|
||||
}
|
||||
defer ag.Close()
|
||||
|
||||
// Create a data directory
|
||||
dataDir := ag.options.BlobStore.GetActiveDir()
|
||||
if err := os.MkdirAll(dataDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create data directory: %v", err)
|
||||
}
|
||||
|
||||
// Create a large file (10 MB)
|
||||
largeFilePath := filepath.Join(dataDir, "large_file.bin")
|
||||
largeFileSize := 10 * 1024 * 1024 // 10 MB
|
||||
largeFile, err := os.Create(largeFilePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create large test file: %v", err)
|
||||
}
|
||||
|
||||
// Fill the file with a repeating pattern
|
||||
pattern := []byte("0123456789ABCDEF")
|
||||
buffer := make([]byte, 8192) // 8 KB buffer
|
||||
for i := 0; i < len(buffer); i += len(pattern) {
|
||||
copy(buffer[i:], pattern)
|
||||
}
|
||||
|
||||
// Write the buffer multiple times to reach the desired size
|
||||
bytesWritten := 0
|
||||
for bytesWritten < largeFileSize {
|
||||
n, err := largeFile.Write(buffer)
|
||||
if err != nil {
|
||||
largeFile.Close()
|
||||
t.Fatalf("Failed to write to large test file: %v", err)
|
||||
}
|
||||
bytesWritten += n
|
||||
}
|
||||
largeFile.Close()
|
||||
|
||||
// Create a snapshot
|
||||
ctx := context.Background()
|
||||
startTime := time.Now()
|
||||
snapshotID, err := ag.SaveSnapshot(ctx, "Large File Snapshot", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
duration := time.Since(startTime)
|
||||
t.Logf("Created snapshot with large file in %v", duration)
|
||||
|
||||
// Modify the large file
|
||||
if err := os.WriteFile(largeFilePath, []byte("Modified content"), 0644); err != nil {
|
||||
t.Fatalf("Failed to modify large file: %v", err)
|
||||
}
|
||||
|
||||
// Restore the snapshot
|
||||
startTime = time.Now()
|
||||
err = ag.RestoreSnapshot(ctx, snapshotID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to restore snapshot: %v", err)
|
||||
}
|
||||
duration = time.Since(startTime)
|
||||
t.Logf("Restored snapshot with large file in %v", duration)
|
||||
|
||||
// Verify the file size is correct
|
||||
fileInfo, err := os.Stat(largeFilePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to stat restored large file: %v", err)
|
||||
}
|
||||
if fileInfo.Size() != int64(largeFileSize) {
|
||||
t.Errorf("Restored large file has wrong size: got %d, want %d", fileInfo.Size(), largeFileSize)
|
||||
}
|
||||
}
|
211
go.mod
@ -1,19 +1,216 @@
module unprism.ru/KRBL/agate
module gitea.unprism.ru/KRBL/Agate

go 1.24.0
go 1.24.3

tool github.com/golangci/golangci-lint/v2/cmd/golangci-lint

require (
	github.com/google/uuid v1.6.0
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3
	github.com/mattn/go-sqlite3 v1.14.28
	google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f
	google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2
	google.golang.org/grpc v1.72.0
	google.golang.org/protobuf v1.36.6
)

require (
	golang.org/x/net v0.39.0 // indirect
	golang.org/x/sys v0.32.0 // indirect
	golang.org/x/text v0.24.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect
	4d63.com/gocheckcompilerdirectives v1.3.0 // indirect
	4d63.com/gochecknoglobals v0.2.2 // indirect
	github.com/4meepo/tagalign v1.4.2 // indirect
	github.com/Abirdcfly/dupword v0.1.3 // indirect
	github.com/Antonboom/errname v1.1.0 // indirect
	github.com/Antonboom/nilnil v1.1.0 // indirect
	github.com/Antonboom/testifylint v1.6.1 // indirect
	github.com/BurntSushi/toml v1.5.0 // indirect
	github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
	github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 // indirect
	github.com/Masterminds/semver/v3 v3.3.1 // indirect
	github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect
	github.com/alecthomas/chroma/v2 v2.17.2 // indirect
	github.com/alecthomas/go-check-sumtype v0.3.1 // indirect
	github.com/alexkohler/nakedret/v2 v2.0.6 // indirect
	github.com/alexkohler/prealloc v1.0.0 // indirect
	github.com/alingse/asasalint v0.0.11 // indirect
	github.com/alingse/nilnesserr v0.2.0 // indirect
	github.com/ashanbrown/forbidigo v1.6.0 // indirect
	github.com/ashanbrown/makezero v1.2.0 // indirect
	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/bkielbasa/cyclop v1.2.3 // indirect
	github.com/blizzy78/varnamelen v0.8.0 // indirect
	github.com/bombsimon/wsl/v4 v4.7.0 // indirect
	github.com/breml/bidichk v0.3.3 // indirect
	github.com/breml/errchkjson v0.4.1 // indirect
	github.com/butuzov/ireturn v0.4.0 // indirect
	github.com/butuzov/mirror v1.3.0 // indirect
	github.com/catenacyber/perfsprint v0.9.1 // indirect
	github.com/ccojocar/zxcvbn-go v1.0.2 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/charithe/durationcheck v0.0.10 // indirect
	github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
	github.com/charmbracelet/lipgloss v1.1.0 // indirect
	github.com/charmbracelet/x/ansi v0.8.0 // indirect
	github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
	github.com/charmbracelet/x/term v0.2.1 // indirect
	github.com/chavacava/garif v0.1.0 // indirect
	github.com/ckaznocha/intrange v0.3.1 // indirect
	github.com/curioswitch/go-reassign v0.3.0 // indirect
	github.com/daixiang0/gci v0.13.6 // indirect
	github.com/dave/dst v0.27.3 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/denis-tingaikin/go-header v0.5.0 // indirect
	github.com/dlclark/regexp2 v1.11.5 // indirect
	github.com/ettle/strcase v0.2.0 // indirect
	github.com/fatih/color v1.18.0 // indirect
	github.com/fatih/structtag v1.2.0 // indirect
	github.com/firefart/nonamedreturns v1.0.6 // indirect
	github.com/fsnotify/fsnotify v1.5.4 // indirect
	github.com/fzipp/gocyclo v0.6.0 // indirect
	github.com/ghostiam/protogetter v0.3.15 // indirect
	github.com/go-critic/go-critic v0.13.0 // indirect
	github.com/go-toolsmith/astcast v1.1.0 // indirect
	github.com/go-toolsmith/astcopy v1.1.0 // indirect
	github.com/go-toolsmith/astequal v1.2.0 // indirect
	github.com/go-toolsmith/astfmt v1.1.0 // indirect
	github.com/go-toolsmith/astp v1.1.0 // indirect
	github.com/go-toolsmith/strparse v1.1.0 // indirect
	github.com/go-toolsmith/typep v1.1.0 // indirect
	github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
	github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect
	github.com/gobwas/glob v0.2.3 // indirect
	github.com/gofrs/flock v0.12.1 // indirect
	github.com/golang/protobuf v1.5.4 // indirect
	github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect
	github.com/golangci/go-printf-func-name v0.1.0 // indirect
	github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect
	github.com/golangci/golangci-lint/v2 v2.1.6 // indirect
	github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95 // indirect
	github.com/golangci/misspell v0.6.0 // indirect
	github.com/golangci/plugin-module-register v0.1.1 // indirect
	github.com/golangci/revgrep v0.8.0 // indirect
	github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e // indirect
	github.com/google/go-cmp v0.7.0 // indirect
	github.com/gordonklaus/ineffassign v0.1.0 // indirect
	github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
	github.com/gostaticanalysis/comment v1.5.0 // indirect
	github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect
	github.com/gostaticanalysis/nilerr v0.1.1 // indirect
	github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect
	github.com/hashicorp/go-version v1.7.0 // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
	github.com/hashicorp/hcl v1.0.0 // indirect
	github.com/hexops/gotextdiff v1.0.3 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/jgautheron/goconst v1.8.1 // indirect
	github.com/jingyugao/rowserrcheck v1.1.1 // indirect
	github.com/jjti/go-spancheck v0.6.4 // indirect
	github.com/julz/importas v0.2.0 // indirect
	github.com/karamaru-alpha/copyloopvar v1.2.1 // indirect
	github.com/kisielk/errcheck v1.9.0 // indirect
	github.com/kkHAIKE/contextcheck v1.1.6 // indirect
	github.com/kulti/thelper v0.6.3 // indirect
	github.com/kunwardeep/paralleltest v1.0.14 // indirect
	github.com/lasiar/canonicalheader v1.1.2 // indirect
	github.com/ldez/exptostd v0.4.3 // indirect
	github.com/ldez/gomoddirectives v0.6.1 // indirect
	github.com/ldez/grignotin v0.9.0 // indirect
	github.com/ldez/tagliatelle v0.7.1 // indirect
	github.com/ldez/usetesting v0.4.3 // indirect
	github.com/leonklingele/grouper v1.1.2 // indirect
	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
	github.com/macabu/inamedparam v0.2.0 // indirect
	github.com/magiconair/properties v1.8.6 // indirect
	github.com/manuelarte/funcorder v0.2.1 // indirect
	github.com/maratori/testableexamples v1.0.0 // indirect
	github.com/maratori/testpackage v1.1.1 // indirect
	github.com/matoous/godox v1.1.0 // indirect
	github.com/mattn/go-colorable v0.1.14 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/mattn/go-runewidth v0.0.16 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
	github.com/mgechev/revive v1.9.0 // indirect
	github.com/mitchellh/go-homedir v1.1.0 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/moricho/tparallel v0.3.2 // indirect
	github.com/muesli/termenv v0.16.0 // indirect
	github.com/nakabonne/nestif v0.3.1 // indirect
	github.com/nishanths/exhaustive v0.12.0 // indirect
	github.com/nishanths/predeclared v0.2.2 // indirect
	github.com/nunnatsa/ginkgolinter v0.19.1 // indirect
	github.com/olekukonko/tablewriter v0.0.5 // indirect
	github.com/pelletier/go-toml v1.9.5 // indirect
	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/polyfloyd/go-errorlint v1.8.0 // indirect
	github.com/prometheus/client_golang v1.12.1 // indirect
	github.com/prometheus/client_model v0.2.0 // indirect
	github.com/prometheus/common v0.32.1 // indirect
	github.com/prometheus/procfs v0.7.3 // indirect
	github.com/quasilyte/go-ruleguard v0.4.4 // indirect
	github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect
	github.com/quasilyte/gogrep v0.5.0 // indirect
	github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
	github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
	github.com/raeperd/recvcheck v0.2.0 // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/rogpeppe/go-internal v1.14.1 // indirect
	github.com/ryancurrah/gomodguard v1.4.1 // indirect
	github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect
	github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect
	github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect
	github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
	github.com/sashamelentyev/usestdlibvars v1.28.0 // indirect
	github.com/securego/gosec/v2 v2.22.3 // indirect
	github.com/sirupsen/logrus v1.9.3 // indirect
	github.com/sivchari/containedctx v1.0.3 // indirect
	github.com/sonatard/noctx v0.1.0 // indirect
	github.com/sourcegraph/go-diff v0.7.0 // indirect
	github.com/spf13/afero v1.14.0 // indirect
	github.com/spf13/cast v1.5.0 // indirect
	github.com/spf13/cobra v1.9.1 // indirect
	github.com/spf13/jwalterweatherman v1.1.0 // indirect
	github.com/spf13/pflag v1.0.6 // indirect
	github.com/spf13/viper v1.12.0 // indirect
	github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
	github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect
	github.com/stretchr/objx v0.5.2 // indirect
	github.com/stretchr/testify v1.10.0 // indirect
	github.com/subosito/gotenv v1.4.1 // indirect
	github.com/tdakkota/asciicheck v0.4.1 // indirect
	github.com/tetafro/godot v1.5.1 // indirect
	github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 // indirect
	github.com/timonwong/loggercheck v0.11.0 // indirect
	github.com/tomarrell/wrapcheck/v2 v2.11.0 // indirect
	github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
	github.com/ultraware/funlen v0.2.0 // indirect
	github.com/ultraware/whitespace v0.2.0 // indirect
	github.com/uudashr/gocognit v1.2.0 // indirect
	github.com/uudashr/iface v1.3.1 // indirect
	github.com/xen0n/gosmopolitan v1.3.0 // indirect
	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
	github.com/yagipy/maintidx v1.0.0 // indirect
	github.com/yeya24/promlinter v0.3.0 // indirect
	github.com/ykadowak/zerologlint v0.1.5 // indirect
	gitlab.com/bosi/decorder v0.4.2 // indirect
	go-simpler.org/musttag v0.13.1 // indirect
	go-simpler.org/sloglint v0.11.0 // indirect
	go.augendre.info/fatcontext v0.8.0 // indirect
	go.uber.org/atomic v1.7.0 // indirect
	go.uber.org/automaxprocs v1.6.0 // indirect
	go.uber.org/multierr v1.6.0 // indirect
	go.uber.org/zap v1.24.0 // indirect
	golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect
	golang.org/x/mod v0.24.0 // indirect
	golang.org/x/net v0.40.0 // indirect
	golang.org/x/sync v0.14.0 // indirect
	golang.org/x/sys v0.33.0 // indirect
	golang.org/x/text v0.25.0 // indirect
	golang.org/x/tools v0.32.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250505200425-f936aa4a68b2 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	honnef.co/go/tools v0.6.1 // indirect
	mvdan.cc/gofumpt v0.8.0 // indirect
	mvdan.cc/unparam v0.0.0-20250301125049-0df0534333a4 // indirect
)
@ -26,10 +26,10 @@ const (
// File metadata inside a snapshot
type FileInfo struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Relative path of the file inside the snapshot
	SizeBytes int64 `protobuf:"varint,2,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` // File size in bytes
	Sha256Hash string `protobuf:"bytes,3,opt,name=sha256_hash,json=sha256Hash,proto3" json:"sha256_hash,omitempty"` // File checksum (SHA-256)
	IsDir bool `protobuf:"varint,4,opt,name=is_dir,json=isDir,proto3" json:"is_dir,omitempty"` // Whether the entry is a directory
	Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	SizeBytes int64 `protobuf:"varint,2,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"`
	Sha256Hash string `protobuf:"bytes,3,opt,name=sha256_hash,json=sha256Hash,proto3" json:"sha256_hash,omitempty"`
	IsDir bool `protobuf:"varint,4,opt,name=is_dir,json=isDir,proto3" json:"is_dir,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
@ -95,10 +95,10 @@ func (x *FileInfo) GetIsDir() bool {
// Short information about a snapshot
type SnapshotInfo struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // Unique snapshot ID (UUID)
	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // Snapshot name
	ParentId string `protobuf:"bytes,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` // ID of the parent snapshot (may be empty)
	CreationTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` // Creation time
	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
	ParentId string `protobuf:"bytes,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
	CreationTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
@ -164,8 +164,8 @@ func (x *SnapshotInfo) GetCreationTime() *timestamppb.Timestamp {
// Detailed information about a snapshot
type SnapshotDetails struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Info *SnapshotInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` // Short information
	Files []*FileInfo `protobuf:"bytes,2,rep,name=files,proto3" json:"files,omitempty"` // List of files in the snapshot
	Info *SnapshotInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"`
	Files []*FileInfo `protobuf:"bytes,2,rep,name=files,proto3" json:"files,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
@ -214,7 +214,7 @@ func (x *SnapshotDetails) GetFiles() []*FileInfo {
	return nil
}

// Request for the list of snapshots (filters/pagination could be added)
// Request for the list of snapshots
type ListSnapshotsRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
@ -254,7 +254,7 @@ func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) {
// Response with the list of snapshots
type ListSnapshotsResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Snapshots []*SnapshotInfo `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` // string next_page_token = 2;
	Snapshots []*SnapshotInfo `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
@ -299,7 +299,7 @@ func (x *ListSnapshotsResponse) GetSnapshots() []*SnapshotInfo {
// Request for snapshot details
type GetSnapshotDetailsRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` // ID of the requested snapshot
	SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
@ -344,8 +344,8 @@ func (x *GetSnapshotDetailsRequest) GetSnapshotId() string {
// Request to download a file
type DownloadFileRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` // Snapshot ID
	FilePath string `protobuf:"bytes,2,opt,name=file_path,json=filePath,proto3" json:"file_path,omitempty"` // Path of the file inside the snapshot
	SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"`
	FilePath string `protobuf:"bytes,2,opt,name=file_path,json=filePath,proto3" json:"file_path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
@ -397,7 +397,7 @@ func (x *DownloadFileRequest) GetFilePath() string {
// Response (file chunk) when downloading
type DownloadFileResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	ChunkData []byte `protobuf:"bytes,1,opt,name=chunk_data,json=chunkData,proto3" json:"chunk_data,omitempty"` // Chunk of file data
	ChunkData []byte `protobuf:"bytes,1,opt,name=chunk_data,json=chunkData,proto3" json:"chunk_data,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
@ -439,6 +439,67 @@ func (x *DownloadFileResponse) GetChunkData() []byte {
	return nil
}

// Request to download the difference between snapshots
type DownloadSnapshotDiffRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` // ID of the target snapshot
	LocalParentId string `protobuf:"bytes,2,opt,name=local_parent_id,json=localParentId,proto3" json:"local_parent_id,omitempty"` // ID of the snapshot the client already has
	Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` // Byte offset for resuming the download
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

func (x *DownloadSnapshotDiffRequest) Reset() {
	*x = DownloadSnapshotDiffRequest{}
	mi := &file_snapshot_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DownloadSnapshotDiffRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DownloadSnapshotDiffRequest) ProtoMessage() {}

func (x *DownloadSnapshotDiffRequest) ProtoReflect() protoreflect.Message {
	mi := &file_snapshot_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DownloadSnapshotDiffRequest.ProtoReflect.Descriptor instead.
func (*DownloadSnapshotDiffRequest) Descriptor() ([]byte, []int) {
	return file_snapshot_proto_rawDescGZIP(), []int{8}
}

func (x *DownloadSnapshotDiffRequest) GetSnapshotId() string {
	if x != nil {
		return x.SnapshotId
	}
	return ""
}

func (x *DownloadSnapshotDiffRequest) GetLocalParentId() string {
	if x != nil {
		return x.LocalParentId
	}
	return ""
}

func (x *DownloadSnapshotDiffRequest) GetOffset() int64 {
	if x != nil {
		return x.Offset
	}
	return 0
}

var File_snapshot_proto protoreflect.FileDescriptor

const file_snapshot_proto_rawDesc = "" +
@ -472,11 +533,17 @@ const file_snapshot_proto_rawDesc = "" +
	"\tfile_path\x18\x02 \x01(\tR\bfilePath\"5\n" +
	"\x14DownloadFileResponse\x12\x1d\n" +
	"\n" +
	"chunk_data\x18\x01 \x01(\fR\tchunkData2\x8a\x03\n" +
	"chunk_data\x18\x01 \x01(\fR\tchunkData\"~\n" +
	"\x1bDownloadSnapshotDiffRequest\x12\x1f\n" +
	"\vsnapshot_id\x18\x01 \x01(\tR\n" +
	"snapshotId\x12&\n" +
	"\x0flocal_parent_id\x18\x02 \x01(\tR\rlocalParentId\x12\x16\n" +
	"\x06offset\x18\x03 \x01(\x03R\x06offset2\xf1\x03\n" +
	"\x0fSnapshotService\x12k\n" +
	"\rListSnapshots\x12 .agate.grpc.ListSnapshotsRequest\x1a!.agate.grpc.ListSnapshotsResponse\"\x15\x82\xd3\xe4\x93\x02\x0f\x12\r/v1/snapshots\x12}\n" +
	"\x12GetSnapshotDetails\x12%.agate.grpc.GetSnapshotDetailsRequest\x1a\x1b.agate.grpc.SnapshotDetails\"#\x82\xd3\xe4\x93\x02\x1d\x12\x1b/v1/snapshots/{snapshot_id}\x12\x8a\x01\n" +
	"\fDownloadFile\x12\x1f.agate.grpc.DownloadFileRequest\x1a .agate.grpc.DownloadFileResponse\"5\x82\xd3\xe4\x93\x02/\x12-/v1/snapshots/{snapshot_id}/files/{file_path}0\x01B\x1cZ\x1aunprism.ru/KRBL/agate/grpcb\x06proto3"
	"\fDownloadFile\x12\x1f.agate.grpc.DownloadFileRequest\x1a .agate.grpc.DownloadFileResponse\"5\x82\xd3\xe4\x93\x02/\x12-/v1/snapshots/{snapshot_id}/files/{file_path}0\x01\x12e\n" +
	"\x14DownloadSnapshotDiff\x12'.agate.grpc.DownloadSnapshotDiffRequest\x1a .agate.grpc.DownloadFileResponse\"\x000\x01B\"Z gitea.unprism.ru/KRBL/Agate/grpcb\x06proto3"

var (
	file_snapshot_proto_rawDescOnce sync.Once
@ -490,31 +557,34 @@ func file_snapshot_proto_rawDescGZIP() []byte {
	return file_snapshot_proto_rawDescData
}

var file_snapshot_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_snapshot_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
var file_snapshot_proto_goTypes = []any{
	(*FileInfo)(nil),                  // 0: agate.grpc.FileInfo
	(*SnapshotInfo)(nil),              // 1: agate.grpc.SnapshotInfo
	(*SnapshotDetails)(nil),           // 2: agate.grpc.SnapshotDetails
	(*ListSnapshotsRequest)(nil),      // 3: agate.grpc.ListSnapshotsRequest
	(*ListSnapshotsResponse)(nil),     // 4: agate.grpc.ListSnapshotsResponse
	(*GetSnapshotDetailsRequest)(nil), // 5: agate.grpc.GetSnapshotDetailsRequest
	(*DownloadFileRequest)(nil),       // 6: agate.grpc.DownloadFileRequest
	(*DownloadFileResponse)(nil),      // 7: agate.grpc.DownloadFileResponse
	(*timestamppb.Timestamp)(nil),     // 8: google.protobuf.Timestamp
	(*FileInfo)(nil),                    // 0: agate.grpc.FileInfo
	(*SnapshotInfo)(nil),                // 1: agate.grpc.SnapshotInfo
	(*SnapshotDetails)(nil),             // 2: agate.grpc.SnapshotDetails
	(*ListSnapshotsRequest)(nil),        // 3: agate.grpc.ListSnapshotsRequest
	(*ListSnapshotsResponse)(nil),       // 4: agate.grpc.ListSnapshotsResponse
	(*GetSnapshotDetailsRequest)(nil),   // 5: agate.grpc.GetSnapshotDetailsRequest
	(*DownloadFileRequest)(nil),         // 6: agate.grpc.DownloadFileRequest
	(*DownloadFileResponse)(nil),        // 7: agate.grpc.DownloadFileResponse
	(*DownloadSnapshotDiffRequest)(nil), // 8: agate.grpc.DownloadSnapshotDiffRequest
	(*timestamppb.Timestamp)(nil),       // 9: google.protobuf.Timestamp
}
var file_snapshot_proto_depIdxs = []int32{
	8, // 0: agate.grpc.SnapshotInfo.creation_time:type_name -> google.protobuf.Timestamp
	9, // 0: agate.grpc.SnapshotInfo.creation_time:type_name -> google.protobuf.Timestamp
	1, // 1: agate.grpc.SnapshotDetails.info:type_name -> agate.grpc.SnapshotInfo
	0, // 2: agate.grpc.SnapshotDetails.files:type_name -> agate.grpc.FileInfo
	1, // 3: agate.grpc.ListSnapshotsResponse.snapshots:type_name -> agate.grpc.SnapshotInfo
	3, // 4: agate.grpc.SnapshotService.ListSnapshots:input_type -> agate.grpc.ListSnapshotsRequest
	5, // 5: agate.grpc.SnapshotService.GetSnapshotDetails:input_type -> agate.grpc.GetSnapshotDetailsRequest
	6, // 6: agate.grpc.SnapshotService.DownloadFile:input_type -> agate.grpc.DownloadFileRequest
	4, // 7: agate.grpc.SnapshotService.ListSnapshots:output_type -> agate.grpc.ListSnapshotsResponse
	2, // 8: agate.grpc.SnapshotService.GetSnapshotDetails:output_type -> agate.grpc.SnapshotDetails
	7, // 9: agate.grpc.SnapshotService.DownloadFile:output_type -> agate.grpc.DownloadFileResponse
	7, // [7:10] is the sub-list for method output_type
	4, // [4:7] is the sub-list for method input_type
	8, // 7: agate.grpc.SnapshotService.DownloadSnapshotDiff:input_type -> agate.grpc.DownloadSnapshotDiffRequest
	4, // 8: agate.grpc.SnapshotService.ListSnapshots:output_type -> agate.grpc.ListSnapshotsResponse
	2, // 9: agate.grpc.SnapshotService.GetSnapshotDetails:output_type -> agate.grpc.SnapshotDetails
	7, // 10: agate.grpc.SnapshotService.DownloadFile:output_type -> agate.grpc.DownloadFileResponse
	7, // 11: agate.grpc.SnapshotService.DownloadSnapshotDiff:output_type -> agate.grpc.DownloadFileResponse
	8, // [8:12] is the sub-list for method output_type
	4, // [4:8] is the sub-list for method input_type
	4, // [4:4] is the sub-list for extension type_name
	4, // [4:4] is the sub-list for extension extendee
	0, // [0:4] is the sub-list for field type_name
@ -531,7 +601,7 @@ func file_snapshot_proto_init() {
	GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
	RawDescriptor: unsafe.Slice(unsafe.StringData(file_snapshot_proto_rawDesc), len(file_snapshot_proto_rawDesc)),
	NumEnums: 0,
	NumMessages: 8,
	NumMessages: 9,
	NumExtensions: 0,
	NumServices: 1,
	},
@ -3,9 +3,9 @@ syntax = "proto3";
package agate.grpc;

import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto"; // Added for the HTTP mapping
import "google/api/annotations.proto";

option go_package = "unprism.ru/KRBL/agate/grpc";
option go_package = "gitea.unprism.ru/KRBL/Agate/grpc";

// Service for managing snapshots
service SnapshotService {
@ -30,77 +30,59 @@ service SnapshotService {
  };
}

// --- Management methods (optional; may be left out of the public client API) ---
// Create a new snapshot from a directory (if the server is allowed to initiate it)
// rpc CreateSnapshot(CreateSnapshotRequest) returns (Snapshot);
// Delete a snapshot (if required)
// rpc DeleteSnapshot(DeleteSnapshotRequest) returns (DeleteSnapshotResponse);
// Download an archive containing only the difference between two snapshots
rpc DownloadSnapshotDiff(DownloadSnapshotDiffRequest) returns (stream DownloadFileResponse) {}
}

// File metadata inside a snapshot
message FileInfo {
  string path = 1; // Relative path of the file inside the snapshot
  int64 size_bytes = 2; // File size in bytes
  string sha256_hash = 3; // File checksum (SHA-256)
  bool is_dir = 4; // Whether the entry is a directory
  string path = 1;
  int64 size_bytes = 2;
  string sha256_hash = 3;
  bool is_dir = 4;
}

// Short information about a snapshot
message SnapshotInfo {
  string id = 1; // Unique snapshot ID (UUID)
  string name = 2; // Snapshot name
  string parent_id = 3; // ID of the parent snapshot (may be empty)
  google.protobuf.Timestamp creation_time = 4; // Creation time
  string id = 1;
  string name = 2;
  string parent_id = 3;
  google.protobuf.Timestamp creation_time = 4;
}

// Detailed information about a snapshot
message SnapshotDetails {
  SnapshotInfo info = 1; // Short information
  repeated FileInfo files = 2; // List of files in the snapshot
  SnapshotInfo info = 1;
  repeated FileInfo files = 2;
}

// Request for the list of snapshots (filters/pagination could be added)
message ListSnapshotsRequest {
  // string filter_by_name = 1;
  // int32 page_size = 2;
  // string page_token = 3;
}
// Request for the list of snapshots
message ListSnapshotsRequest {}

// Response with the list of snapshots
message ListSnapshotsResponse {
  repeated SnapshotInfo snapshots = 1;
  // string next_page_token = 2;
}

// Request for snapshot details
message GetSnapshotDetailsRequest {
  string snapshot_id = 1; // ID of the requested snapshot
  string snapshot_id = 1;
}

// Request to download a file
message DownloadFileRequest {
  string snapshot_id = 1; // Snapshot ID
  string file_path = 2; // Path of the file inside the snapshot
  string snapshot_id = 1;
  string file_path = 2;
}

// Response (a file chunk) when downloading
message DownloadFileResponse {
  bytes chunk_data = 1; // Chunk of file data
  bytes chunk_data = 1;
}

// --- Messages for the optional management methods ---
/*
message CreateSnapshotRequest {
  string source_path = 1; // Path to the directory on the server
  string name = 2;
  string parent_id = 3; // Optional
// Request to download the difference between snapshots
message DownloadSnapshotDiffRequest {
  string snapshot_id = 1; // ID of the target snapshot
  string local_parent_id = 2; // ID of the snapshot the client already has
  int64 offset = 3; // Byte offset for resuming the download
}

message DeleteSnapshotRequest {
  string snapshot_id = 1;
}

message DeleteSnapshotResponse {
  bool success = 1;
}
*/
@ -19,9 +19,10 @@ import (
const _ = grpc.SupportPackageIsVersion9

const (
	SnapshotService_ListSnapshots_FullMethodName = "/agate.grpc.SnapshotService/ListSnapshots"
	SnapshotService_GetSnapshotDetails_FullMethodName = "/agate.grpc.SnapshotService/GetSnapshotDetails"
	SnapshotService_DownloadFile_FullMethodName = "/agate.grpc.SnapshotService/DownloadFile"
	SnapshotService_ListSnapshots_FullMethodName = "/agate.grpc.SnapshotService/ListSnapshots"
	SnapshotService_GetSnapshotDetails_FullMethodName = "/agate.grpc.SnapshotService/GetSnapshotDetails"
	SnapshotService_DownloadFile_FullMethodName = "/agate.grpc.SnapshotService/DownloadFile"
	SnapshotService_DownloadSnapshotDiff_FullMethodName = "/agate.grpc.SnapshotService/DownloadSnapshotDiff"
)

// SnapshotServiceClient is the client API for SnapshotService service.
@ -36,6 +37,8 @@ type SnapshotServiceClient interface {
	GetSnapshotDetails(ctx context.Context, in *GetSnapshotDetailsRequest, opts ...grpc.CallOption) (*SnapshotDetails, error)
	// Download a specific file from a snapshot (streamed)
	DownloadFile(ctx context.Context, in *DownloadFileRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[DownloadFileResponse], error)
	// Download an archive containing only the difference between two snapshots
	DownloadSnapshotDiff(ctx context.Context, in *DownloadSnapshotDiffRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[DownloadFileResponse], error)
}

type snapshotServiceClient struct {
@ -85,6 +88,25 @@ func (c *snapshotServiceClient) DownloadFile(ctx context.Context, in *DownloadFi
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SnapshotService_DownloadFileClient = grpc.ServerStreamingClient[DownloadFileResponse]

func (c *snapshotServiceClient) DownloadSnapshotDiff(ctx context.Context, in *DownloadSnapshotDiffRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[DownloadFileResponse], error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	stream, err := c.cc.NewStream(ctx, &SnapshotService_ServiceDesc.Streams[1], SnapshotService_DownloadSnapshotDiff_FullMethodName, cOpts...)
	if err != nil {
		return nil, err
	}
	x := &grpc.GenericClientStream[DownloadSnapshotDiffRequest, DownloadFileResponse]{ClientStream: stream}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SnapshotService_DownloadSnapshotDiffClient = grpc.ServerStreamingClient[DownloadFileResponse]

// SnapshotServiceServer is the server API for SnapshotService service.
// All implementations must embed UnimplementedSnapshotServiceServer
// for forward compatibility.
@ -97,6 +119,8 @@ type SnapshotServiceServer interface {
	GetSnapshotDetails(context.Context, *GetSnapshotDetailsRequest) (*SnapshotDetails, error)
	// Download a specific file from a snapshot (streamed)
	DownloadFile(*DownloadFileRequest, grpc.ServerStreamingServer[DownloadFileResponse]) error
	// Download an archive containing only the difference between two snapshots
	DownloadSnapshotDiff(*DownloadSnapshotDiffRequest, grpc.ServerStreamingServer[DownloadFileResponse]) error
	mustEmbedUnimplementedSnapshotServiceServer()
}

@ -116,6 +140,9 @@ func (UnimplementedSnapshotServiceServer) GetSnapshotDetails(context.Context, *G
func (UnimplementedSnapshotServiceServer) DownloadFile(*DownloadFileRequest, grpc.ServerStreamingServer[DownloadFileResponse]) error {
	return status.Errorf(codes.Unimplemented, "method DownloadFile not implemented")
}
func (UnimplementedSnapshotServiceServer) DownloadSnapshotDiff(*DownloadSnapshotDiffRequest, grpc.ServerStreamingServer[DownloadFileResponse]) error {
	return status.Errorf(codes.Unimplemented, "method DownloadSnapshotDiff not implemented")
}
func (UnimplementedSnapshotServiceServer) mustEmbedUnimplementedSnapshotServiceServer() {}
func (UnimplementedSnapshotServiceServer) testEmbeddedByValue() {}

@ -184,6 +211,17 @@ func _SnapshotService_DownloadFile_Handler(srv interface{}, stream grpc.ServerSt
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SnapshotService_DownloadFileServer = grpc.ServerStreamingServer[DownloadFileResponse]

func _SnapshotService_DownloadSnapshotDiff_Handler(srv interface{}, stream grpc.ServerStream) error {
	m := new(DownloadSnapshotDiffRequest)
	if err := stream.RecvMsg(m); err != nil {
		return err
	}
	return srv.(SnapshotServiceServer).DownloadSnapshotDiff(m, &grpc.GenericServerStream[DownloadSnapshotDiffRequest, DownloadFileResponse]{ServerStream: stream})
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SnapshotService_DownloadSnapshotDiffServer = grpc.ServerStreamingServer[DownloadFileResponse]

// SnapshotService_ServiceDesc is the grpc.ServiceDesc for SnapshotService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@ -206,6 +244,11 @@ var SnapshotService_ServiceDesc = grpc.ServiceDesc{
			Handler: _SnapshotService_DownloadFile_Handler,
			ServerStreams: true,
		},
		{
			StreamName: "DownloadSnapshotDiff",
			Handler: _SnapshotService_DownloadSnapshotDiff_Handler,
			ServerStreams: true,
		},
	},
	Metadata: "snapshot.proto",
}
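The generated client above exposes the new streaming RPC directly. As a rough, illustrative sketch (not part of this change set), a caller could resume an interrupted diff download by measuring how much of the partial archive is already on disk and passing that as `Offset`; the server address, snapshot IDs, partial-file path, and the `agategrpc` import alias below are assumptions for the example, and the `remote` package exercised in the tests that follow wraps this same call at a higher level.

```go
// Sketch only: raw use of the generated SnapshotServiceClient to resume a diff download.
// Address, snapshot IDs, and partPath are placeholders, not values from this change.
package main

import (
	"context"
	"io"
	"log"
	"os"

	agategrpc "gitea.unprism.ru/KRBL/Agate/grpc" // generated package from snapshot.proto
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := agategrpc.NewSnapshotServiceClient(conn)

	// Resume from however many bytes of the diff archive are already on disk.
	partPath := "diff.zip.part"
	var offset int64
	if fi, err := os.Stat(partPath); err == nil {
		offset = fi.Size()
	}

	stream, err := client.DownloadSnapshotDiff(context.Background(), &agategrpc.DownloadSnapshotDiffRequest{
		SnapshotId:    "target-snapshot-id",
		LocalParentId: "parent-snapshot-id",
		Offset:        offset,
	})
	if err != nil {
		log.Fatal(err)
	}

	out, err := os.OpenFile(partPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Append chunks until the server closes the stream.
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if _, err := out.Write(resp.GetChunkData()); err != nil {
			log.Fatal(err)
		}
	}
}
```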
263
grpc_test.go
Normal file
@ -0,0 +1,263 @@
package agate

import (
	"context"
	"crypto/sha256"
	"io"
	"log"
	"os"
	"path/filepath"
	"testing"
	"time"

	"gitea.unprism.ru/KRBL/Agate/remote"
)

// TestGRPCServerClient tests the interaction between a gRPC server and client.
// It creates multiple snapshots with different content on the server,
// connects a client to the server, downloads the latest snapshot,
// and verifies the contents of the files.
func TestGRPCServerClient(t *testing.T) {
	// Skip this test in short mode
	if testing.Short() {
		t.Skip("Skipping gRPC server-client test in short mode")
	}

	// --- Setup Server ---
	serverDir, err := os.MkdirTemp("", "agate-server-*")
	if err != nil {
		t.Fatalf("Failed to create server temp directory: %v", err)
	}
	defer os.RemoveAll(serverDir)

	serverAgate, err := New(AgateOptions{WorkDir: serverDir})
	if err != nil {
		t.Fatalf("Failed to create server Agate instance: %v", err)
	}
	defer serverAgate.Close()

	dataDir := serverAgate.options.BlobStore.GetActiveDir()
	if err := os.MkdirAll(dataDir, 0755); err != nil {
		t.Fatalf("Failed to create data directory: %v", err)
	}

	// Create initial test files for the first snapshot
	if err := os.WriteFile(filepath.Join(dataDir, "file1.txt"), []byte("content1"), 0644); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(dataDir, "file2.txt"), []byte("content2"), 0644); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	snapshot1ID, err := serverAgate.SaveSnapshot(ctx, "Snapshot 1", "")
	if err != nil {
		t.Fatalf("Failed to create first snapshot: %v", err)
	}
	t.Logf("Created first snapshot with ID: %s", snapshot1ID)

	// Modify content for the second snapshot
	if err := os.WriteFile(filepath.Join(dataDir, "file1.txt"), []byte("modified content1"), 0644); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(dataDir, "file3.txt"), []byte("new file3"), 0644); err != nil {
		t.Fatal(err)
	}

	snapshot2ID, err := serverAgate.SaveSnapshot(ctx, "Snapshot 2", snapshot1ID)
	if err != nil {
		t.Fatalf("Failed to create second snapshot: %v", err)
	}
	t.Logf("Created second snapshot with ID: %s", snapshot2ID)

	// Start the gRPC server
	serverAddress := "localhost:50051"
	server := remote.NewServer(serverAgate.manager)
	go func() {
		if err := server.Start(ctx, serverAddress); err != nil {
			log.Printf("Server start error: %v", err)
		}
	}()
	defer server.Stop()
	time.Sleep(100 * time.Millisecond)

	// --- Setup Client ---
	clientDir, err := os.MkdirTemp("", "agate-client-*")
	if err != nil {
		t.Fatalf("Failed to create client temp directory: %v", err)
	}
	defer os.RemoveAll(clientDir)

	clientAgate, err := New(AgateOptions{WorkDir: clientDir, CleanOnRestore: true})
	if err != nil {
		t.Fatalf("Failed to create client Agate instance: %v", err)
	}
	defer clientAgate.Close()

	// --- Test Scenario ---
	// 1. Client downloads the first snapshot completely
	t.Log("Client downloading Snapshot 1...")
	if err := clientAgate.GetRemoteSnapshot(ctx, serverAddress, snapshot1ID, ""); err != nil {
		t.Fatalf("Client failed to get snapshot 1: %v", err)
	}

	// Verify content of snapshot 1
	if err := clientAgate.RestoreSnapshot(ctx, snapshot1ID); err != nil {
		t.Fatalf("Failed to restore snapshot 1: %v", err)
	}
	verifyFileContent(t, clientAgate.GetActiveDir(), "file1.txt", "content1")
	verifyFileContent(t, clientAgate.GetActiveDir(), "file2.txt", "content2")

	// 2. Client downloads the second snapshot incrementally
	t.Log("Client downloading Snapshot 2 (incrementally)...")
	if err := clientAgate.GetRemoteSnapshot(ctx, serverAddress, snapshot2ID, snapshot1ID); err != nil {
		t.Fatalf("Client failed to get snapshot 2: %v", err)
	}

	// Verify content of snapshot 2
	if err := clientAgate.RestoreSnapshot(ctx, snapshot2ID); err != nil {
		t.Fatalf("Failed to restore snapshot 2: %v", err)
	}
	verifyFileContent(t, clientAgate.GetActiveDir(), "file1.txt", "modified content1")
	verifyFileContent(t, clientAgate.GetActiveDir(), "file3.txt", "new file3")
	// Snapshot 2 is built on top of snapshot 1 and the diff logic is additive (parent + diff = new),
	// so file2.txt, which was not changed, should still be present even with CleanOnRestore enabled.
	verifyFileContent(t, clientAgate.GetActiveDir(), "file2.txt", "content2")
}

func verifyFileContent(t *testing.T, dir, filename, expectedContent string) {
	t.Helper()
	content, err := os.ReadFile(filepath.Join(dir, filename))
	if err != nil {
		t.Fatalf("Failed to read file %s: %v", filename, err)
	}
	if string(content) != expectedContent {
		t.Errorf("File %s has wrong content: got '%s', want '%s'", filename, string(content), expectedContent)
	}
}

// TestGRPC_GetRemoteSnapshot_FullDownload tests a full download when no parent is specified.
func TestGRPC_GetRemoteSnapshot_FullDownload(t *testing.T) {
	// --- Setup Server ---
	serverDir, _ := os.MkdirTemp("", "agate-server-*")
	defer os.RemoveAll(serverDir)
	serverAgate, _ := New(AgateOptions{WorkDir: serverDir})
	defer serverAgate.Close()
	dataDir := serverAgate.options.BlobStore.GetActiveDir()
	os.MkdirAll(dataDir, 0755)
	os.WriteFile(filepath.Join(dataDir, "file1.txt"), []byte("full download"), 0644)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	snapshotID, err := serverAgate.SaveSnapshot(ctx, "FullSnapshot", "")
	if err != nil {
		t.Fatalf("Failed to create snapshot: %v", err)
	}

	// Start Server
	serverAddress := "localhost:50052"
	server := remote.NewServer(serverAgate.manager)
	go func() { server.Start(ctx, serverAddress) }()
	defer server.Stop()
	time.Sleep(100 * time.Millisecond)

	// --- Setup Client ---
	clientDir, _ := os.MkdirTemp("", "agate-client-*")
	defer os.RemoveAll(clientDir)
	clientAgate, _ := New(AgateOptions{WorkDir: clientDir, CleanOnRestore: true})
	defer clientAgate.Close()

	// --- Test Scenario ---
	t.Log("Client performing full download...")
	if err := clientAgate.GetRemoteSnapshot(ctx, serverAddress, snapshotID, ""); err != nil {
		t.Fatalf("Client failed to get snapshot: %v", err)
	}

	// Verify content
	if err := clientAgate.RestoreSnapshot(ctx, snapshotID); err != nil {
		t.Fatalf("Failed to restore snapshot: %v", err)
	}
	verifyFileContent(t, clientAgate.GetActiveDir(), "file1.txt", "full download")
}

// TestGRPC_DownloadSnapshotDiff_Resumption tests the download resumption logic.
func TestGRPC_DownloadSnapshotDiff_Resumption(t *testing.T) {
	// --- Setup Server ---
	serverDir, _ := os.MkdirTemp("", "agate-server-*")
	defer os.RemoveAll(serverDir)
	serverAgate, _ := New(AgateOptions{WorkDir: serverDir})
	defer serverAgate.Close()
	dataDir := serverAgate.options.BlobStore.GetActiveDir()
	os.MkdirAll(dataDir, 0755)
	os.WriteFile(filepath.Join(dataDir, "file1.txt"), []byte("content1"), 0644)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Snap 1
	snapshot1ID, _ := serverAgate.SaveSnapshot(ctx, "Snap1", "")
	// Snap 2 (with changes)
	os.WriteFile(filepath.Join(dataDir, "file2.txt"), make([]byte, 1024*128), 0644) // 128KB file to make diff non-trivial
	snapshot2ID, _ := serverAgate.SaveSnapshot(ctx, "Snap2", snapshot1ID)

	// Start Server
	serverAddress := "localhost:50053"
	server := remote.NewServer(serverAgate.manager)
	go func() { server.Start(ctx, serverAddress) }()
	defer server.Stop()
	time.Sleep(100 * time.Millisecond)

	// --- Setup Client ---
	clientDir, _ := os.MkdirTemp("", "agate-client-*")
	defer os.RemoveAll(clientDir)
	rClient, err := remote.NewClient(serverAddress)
	if err != nil {
		t.Fatalf("Failed to create remote client: %v", err)
	}
	defer rClient.Close()

	// --- Test Scenario ---
	// 1. Manually download first part of the diff archive
	diffPath := filepath.Join(clientDir, "diff.zip.part")
	diffReader, err := serverAgate.manager.StreamSnapshotDiff(ctx, snapshot2ID, snapshot1ID, 0)
	if err != nil {
		t.Fatalf("Failed to get diff stream from manager: %v", err)
	}
	defer diffReader.Close()

	// Read first 64KB
	firstChunk := make([]byte, 64*1024)
	n, err := io.ReadFull(diffReader, firstChunk)
	if err != nil && err != io.ErrUnexpectedEOF {
		t.Fatalf("Failed to read first chunk: %v, read %d bytes", err, n)
	}
	if err := os.WriteFile(diffPath, firstChunk[:n], 0644); err != nil {
		t.Fatalf("Failed to write partial file: %v", err)
	}
	diffReader.Close() // Simulate connection drop

	// 2. Resume download using the client
	t.Log("Resuming download...")
	if err := rClient.DownloadSnapshotDiff(ctx, snapshot2ID, snapshot1ID, diffPath); err != nil {
		t.Fatalf("Failed to resume download: %v", err)
	}

	// 3. Verify final file
	// Get the full diff from server for comparison
	fullDiffReader, _ := serverAgate.manager.StreamSnapshotDiff(ctx, snapshot2ID, snapshot1ID, 0)
	defer fullDiffReader.Close()
	fullDiffData, _ := io.ReadAll(fullDiffReader)

	resumedData, _ := os.ReadFile(diffPath)

	if len(resumedData) != len(fullDiffData) {
		t.Errorf("Resumed file size is incorrect. Got %d, want %d", len(resumedData), len(fullDiffData))
	}

	if sha256.Sum256(resumedData) != sha256.Sum256(fullDiffData) {
		t.Error("File content mismatch after resumption")
	}
}
95
hash/hash_test.go
Normal file
95
hash/hash_test.go
Normal file
@ -0,0 +1,95 @@
|
||||
package hash
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCalculateFileHash(t *testing.T) {
|
||||
// Create a temporary directory for tests
|
||||
tempDir, err := os.MkdirTemp("", "agate-test-*")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temp directory: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir) // Clean up after test
|
||||
|
||||
// Create a test file with known content
|
||||
testContent := "This is a test file for hashing"
|
||||
testFilePath := filepath.Join(tempDir, "test_file.txt")
|
||||
if err := os.WriteFile(testFilePath, []byte(testContent), 0644); err != nil {
|
||||
t.Fatalf("Failed to create test file: %v", err)
|
||||
}
|
||||
|
||||
// Calculate the expected hash manually
|
||||
hasher := sha256.New()
|
||||
hasher.Write([]byte(testContent))
|
||||
expectedHash := hex.EncodeToString(hasher.Sum(nil))
|
||||
|
||||
// Calculate the hash using the function
|
||||
hash, err := CalculateFileHash(testFilePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate file hash: %v", err)
|
||||
}
|
||||
|
||||
// Check that the hash matches the expected value
|
||||
if hash != expectedHash {
|
||||
t.Errorf("Hash does not match: got %s, want %s", hash, expectedHash)
|
||||
}
|
||||
|
||||
// Test with a non-existent file
|
||||
_, err = CalculateFileHash(filepath.Join(tempDir, "nonexistent.txt"))
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error when calculating hash of non-existent file, got nil")
|
||||
}
|
||||
|
||||
// Test with a directory
|
||||
dirPath := filepath.Join(tempDir, "test_dir")
|
||||
if err := os.MkdirAll(dirPath, 0755); err != nil {
|
||||
t.Fatalf("Failed to create test directory: %v", err)
|
||||
}
|
||||
_, err = CalculateFileHash(dirPath)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error when calculating hash of a directory, got nil")
|
||||
}
|
||||
|
||||
// Test with an empty file
|
||||
emptyFilePath := filepath.Join(tempDir, "empty_file.txt")
|
||||
if err := os.WriteFile(emptyFilePath, []byte{}, 0644); err != nil {
|
||||
t.Fatalf("Failed to create empty test file: %v", err)
|
||||
}
|
||||
emptyHash, err := CalculateFileHash(emptyFilePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate hash of empty file: %v", err)
|
||||
}
|
||||
// The SHA-256 hash of an empty string is e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
|
||||
expectedEmptyHash := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||
if emptyHash != expectedEmptyHash {
|
||||
t.Errorf("Empty file hash does not match: got %s, want %s", emptyHash, expectedEmptyHash)
|
||||
}
|
||||
|
||||
// Test with a large file
|
||||
largeFilePath := filepath.Join(tempDir, "large_file.bin")
|
||||
largeFileSize := 1024 * 1024 // 1 MB
|
||||
largeFile, err := os.Create(largeFilePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create large test file: %v", err)
|
||||
}
|
||||
// Fill the file with a repeating pattern
|
||||
pattern := []byte("0123456789")
|
||||
for i := 0; i < largeFileSize/len(pattern); i++ {
|
||||
if _, err := largeFile.Write(pattern); err != nil {
|
||||
largeFile.Close()
|
||||
t.Fatalf("Failed to write to large test file: %v", err)
|
||||
}
|
||||
}
|
||||
largeFile.Close()
|
||||
|
||||
// Calculate the hash of the large file
|
||||
_, err = CalculateFileHash(largeFilePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate hash of large file: %v", err)
|
||||
}
|
||||
}
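
The test above fixes the contract of `hash.CalculateFileHash`: the SHA-256 hex digest of a file's contents, an error for missing paths and for directories, and the well-known empty-file digest. A minimal sketch that would satisfy those assertions (the actual implementation in `hash/` may differ in details) could look like this:

```go
package hash

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// CalculateFileHash returns the SHA-256 hex digest of the file at path.
// Directories and missing files produce an error, matching the test's expectations.
func CalculateFileHash(path string) (string, error) {
	info, err := os.Stat(path)
	if err != nil {
		return "", fmt.Errorf("failed to stat %s: %w", path, err)
	}
	if info.IsDir() {
		return "", fmt.Errorf("cannot hash a directory: %s", path)
	}

	file, err := os.Open(path)
	if err != nil {
		return "", fmt.Errorf("failed to open %s: %w", path, err)
	}
	defer file.Close()

	// Stream the file through the hasher so large files do not load into memory.
	hasher := sha256.New()
	if _, err := io.Copy(hasher, file); err != nil {
		return "", fmt.Errorf("failed to read %s: %w", path, err)
	}
	return hex.EncodeToString(hasher.Sum(nil)), nil
}
```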
@ -1,9 +1,10 @@
package agate
|
||||
package interfaces
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"unprism.ru/KRBL/agate/store"
|
||||
|
||||
"gitea.unprism.ru/KRBL/Agate/store"
|
||||
)
|
||||
|
||||
// SnapshotManager is an interface that defines operations for managing and interacting with snapshots.
|
||||
@ -16,8 +17,8 @@ type SnapshotManager interface {
|
||||
// Returns a Snapshot object containing metadata
|
||||
GetSnapshotDetails(ctx context.Context, snapshotID string) (*store.Snapshot, error)
|
||||
|
||||
// ListSnapshots retrieves a list of all available snapshots, returning their basic information as SnapshotInfo.
|
||||
ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error)
|
||||
// ListSnapshots retrieves a list of available snapshots with filtering and pagination options.
|
||||
ListSnapshots(ctx context.Context, opts store.ListOptions) ([]store.SnapshotInfo, error)
|
||||
|
||||
// DeleteSnapshot removes a snapshot identified by snapshotID. Returns an error if the snapshot does not exist or cannot be deleted.
|
||||
DeleteSnapshot(ctx context.Context, snapshotID string) error
|
||||
@ -26,13 +27,20 @@ type SnapshotManager interface {
|
||||
OpenFile(ctx context.Context, snapshotID string, filePath string) (io.ReadCloser, error)
|
||||
|
||||
// ExtractSnapshot extracts the contents of a specified snapshot to a target directory at the given path.
|
||||
// If cleanTarget is true, the target directory will be cleaned before extraction.
|
||||
// Returns an error if the snapshot ID is invalid or the extraction fails.
|
||||
ExtractSnapshot(ctx context.Context, snapshotID string, path string) error
|
||||
ExtractSnapshot(ctx context.Context, snapshotID string, path string, cleanTarget bool) error
|
||||
|
||||
// UpdateSnapshotMetadata updates the metadata of an existing snapshot, allowing changes to its name.
|
||||
UpdateSnapshotMetadata(ctx context.Context, snapshotID string, newName string) error
|
||||
|
||||
// StreamSnapshotDiff creates and streams a differential archive between two snapshots.
|
||||
// It returns an io.ReadCloser for the archive stream and an error.
|
||||
// The caller is responsible for closing the reader, which will also handle cleanup of temporary resources.
|
||||
StreamSnapshotDiff(ctx context.Context, snapshotID, parentID string, offset int64) (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
// SnapshotServer defines the interface for a server that can share snapshots
|
||||
type SnapshotServer interface {
|
||||
// Start initializes and begins the server's operation, handling incoming requests or processes within the provided context.
|
||||
Start(ctx context.Context) error
|
||||
@ -41,13 +49,17 @@ type SnapshotServer interface {
|
||||
Stop(ctx context.Context) error
|
||||
}
|
||||
|
||||
// SnapshotClient defines the interface for a client that can connect to a server and download snapshots
|
||||
type SnapshotClient interface {
|
||||
// ListSnapshots retrieves a list of snapshots containing basic metadata, such as ID, name, parent ID, and creation time.
|
||||
// ListSnapshots retrieves a list of snapshots from the server
|
||||
ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error)
|
||||
|
||||
// FetchSnapshotDetails retrieves detailed metadata about a specific snapshot identified by snapshotID.
|
||||
// FetchSnapshotDetails retrieves detailed information about a specific snapshot
|
||||
FetchSnapshotDetails(ctx context.Context, snapshotID string) (*store.Snapshot, error)
|
||||
|
||||
// DownloadSnapshot retrieves the snapshot content for the given snapshotID and returns it as an io.ReadCloser.
|
||||
DownloadSnapshot(ctx context.Context, snapshotID string) (io.ReadCloser, error)
|
||||
// DownloadSnapshotDiff downloads a differential archive between two snapshots to a target directory
|
||||
DownloadSnapshotDiff(ctx context.Context, snapshotID, localParentID, targetPath string) error
|
||||
|
||||
// Close closes the connection to the server
|
||||
Close() error
|
||||
}
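
To illustrate how the updated interfaces compose, here is a hedged usage sketch: it lists remote snapshots, then downloads the diff between the newest one and a locally known parent. The package name, the "latest is last" ordering, and the target path are illustrative assumptions, not part of the library.

```go
package example

import (
	"context"
	"path/filepath"

	"gitea.unprism.ru/KRBL/Agate/interfaces"
)

// pullLatestDiff downloads a differential archive for the newest remote snapshot.
// It assumes the server returns snapshots in creation order, which the
// SnapshotClient interface does not guarantee.
func pullLatestDiff(ctx context.Context, client interfaces.SnapshotClient, localParentID, targetDir string) error {
	infos, err := client.ListSnapshots(ctx)
	if err != nil {
		return err
	}
	if len(infos) == 0 {
		return nil // nothing to pull
	}
	latest := infos[len(infos)-1]

	// DownloadSnapshotDiff appends to the target file, so repeating this call
	// after an interrupted transfer resumes from the bytes already on disk.
	diffPath := filepath.Join(targetDir, latest.ID+".diff.zip")
	return client.DownloadSnapshotDiff(ctx, latest.ID, localParentID, diffPath)
}
```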
213
manager.go
213
manager.go
@ -2,10 +2,12 @@ package agate
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@ -13,22 +15,33 @@ import (
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"unprism.ru/KRBL/agate/archive"
|
||||
"unprism.ru/KRBL/agate/hash"
|
||||
"unprism.ru/KRBL/agate/store"
|
||||
"gitea.unprism.ru/KRBL/Agate/archive"
|
||||
"gitea.unprism.ru/KRBL/Agate/hash"
|
||||
"gitea.unprism.ru/KRBL/Agate/interfaces"
|
||||
"gitea.unprism.ru/KRBL/Agate/store"
|
||||
)
|
||||
|
||||
type SnapshotManagerData struct {
|
||||
metadataStore store.MetadataStore
|
||||
blobStore store.BlobStore
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
func CreateSnapshotManager(metadataStore store.MetadataStore, blobStore store.BlobStore) (SnapshotManager, error) {
|
||||
func CreateSnapshotManager(metadataStore store.MetadataStore, blobStore store.BlobStore, logger *log.Logger) (interfaces.SnapshotManager, error) {
|
||||
if metadataStore == nil || blobStore == nil {
|
||||
return nil, errors.New("parameters can't be nil")
|
||||
}
|
||||
|
||||
return &SnapshotManagerData{metadataStore, blobStore}, nil
|
||||
// Ensure logger is never nil.
|
||||
if logger == nil {
|
||||
logger = log.New(io.Discard, "", 0)
|
||||
}
|
||||
|
||||
return &SnapshotManagerData{
|
||||
metadataStore: metadataStore,
|
||||
blobStore: blobStore,
|
||||
logger: logger,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (data *SnapshotManagerData) CreateSnapshot(ctx context.Context, sourceDir string, name string, parentID string) (*store.Snapshot, error) {
|
||||
@ -49,22 +62,20 @@ func (data *SnapshotManagerData) CreateSnapshot(ctx context.Context, sourceDir s
|
||||
if parentID != "" {
|
||||
_, err := data.metadataStore.GetSnapshotMetadata(ctx, parentID)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, ErrParentNotFound
|
||||
}
|
||||
return nil, fmt.Errorf("failed to check parent snapshot: %w", err)
|
||||
fmt.Println("failed to check parent snapshot: %w", err)
|
||||
parentID = ""
|
||||
}
|
||||
}
|
||||
|
||||
// Generate a unique ID for the snapshot
|
||||
snapshotID := uuid.New().String()
|
||||
|
||||
// Create a temporary file for the archive
|
||||
tempFile, err := os.CreateTemp("", "agate-snapshot-*.zip")
|
||||
// Create a temporary file for the archive in the working directory
|
||||
tempFilePath := filepath.Join(data.blobStore.GetBaseDir(), "temp-"+snapshotID+".zip")
|
||||
tempFile, err := os.Create(tempFilePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temporary file: %w", err)
|
||||
return nil, fmt.Errorf("failed to create temporary file in working directory: %w", err)
|
||||
}
|
||||
tempFilePath := tempFile.Name()
|
||||
tempFile.Close() // Close it as CreateArchive will reopen it
|
||||
defer os.Remove(tempFilePath) // Clean up temp file after we're done
|
||||
|
||||
@ -163,9 +174,9 @@ func (data *SnapshotManagerData) GetSnapshotDetails(ctx context.Context, snapsho
|
||||
return snapshot, nil
|
||||
}
|
||||
|
||||
func (data *SnapshotManagerData) ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error) {
|
||||
// Retrieve list of snapshots from the metadata store
|
||||
snapshots, err := data.metadataStore.ListSnapshotsMetadata(ctx)
|
||||
func (data *SnapshotManagerData) ListSnapshots(ctx context.Context, opts store.ListOptions) ([]store.SnapshotInfo, error) {
|
||||
// Retrieve list of snapshots from the metadata store with the provided options
|
||||
snapshots, err := data.metadataStore.ListSnapshotsMetadata(ctx, opts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list snapshots: %w", err)
|
||||
}
|
||||
@ -178,27 +189,38 @@ func (data *SnapshotManagerData) DeleteSnapshot(ctx context.Context, snapshotID
|
||||
return errors.New("snapshot ID cannot be empty")
|
||||
}
|
||||
|
||||
// First check if the snapshot exists
|
||||
_, err := data.metadataStore.GetSnapshotMetadata(ctx, snapshotID)
|
||||
snapshot, err := data.metadataStore.GetSnapshotMetadata(ctx, snapshotID)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
// If snapshot doesn't exist, return success (idempotent operation)
|
||||
if errors.Is(err, store.ErrNotFound) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("failed to check if snapshot exists: %w", err)
|
||||
}
|
||||
|
||||
// Delete the metadata first
|
||||
parentID := snapshot.ParentID
|
||||
|
||||
opts := store.ListOptions{}
|
||||
allSnapshots, err := data.metadataStore.ListSnapshotsMetadata(ctx, opts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list snapshots: %w", err)
|
||||
}
|
||||
|
||||
for _, info := range allSnapshots {
|
||||
if info.ParentID == snapshotID {
|
||||
if err := data.metadataStore.UpdateSnapshotParentID(ctx, info.ID, parentID); err != nil {
|
||||
data.logger.Printf("WARNING: failed to update parent reference for snapshot %s: %v", info.ID, err)
|
||||
} else {
|
||||
data.logger.Printf("Updated parent reference for snapshot %s from %s to %s", info.ID, snapshotID, parentID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := data.metadataStore.DeleteSnapshotMetadata(ctx, snapshotID); err != nil {
|
||||
return fmt.Errorf("failed to delete snapshot metadata: %w", err)
|
||||
}
|
||||
|
||||
// Then delete the blob
|
||||
if err := data.blobStore.DeleteBlob(ctx, snapshotID); err != nil {
|
||||
// Note: We don't return here because we've already deleted the metadata
|
||||
// and the blob store should handle the case where the blob doesn't exist
|
||||
// Log the error instead
|
||||
fmt.Printf("Warning: failed to delete snapshot blob: %v\n", err)
|
||||
data.logger.Printf("WARNING: failed to delete snapshot blob: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -249,12 +271,14 @@ func (data *SnapshotManagerData) OpenFile(ctx context.Context, snapshotID string
|
||||
return pr, nil
|
||||
}
|
||||
|
||||
func (data *SnapshotManagerData) ExtractSnapshot(ctx context.Context, snapshotID string, path string) error {
|
||||
func (data *SnapshotManagerData) ExtractSnapshot(ctx context.Context, snapshotID string, path string, cleanTarget bool) error {
|
||||
if snapshotID == "" {
|
||||
return errors.New("snapshot ID cannot be empty")
|
||||
}
|
||||
|
||||
// If no specific path is provided, use the active directory
|
||||
if path == "" {
|
||||
return errors.New("target path cannot be empty")
|
||||
path = data.blobStore.GetActiveDir()
|
||||
}
|
||||
|
||||
// First check if the snapshot exists
|
||||
@ -272,9 +296,20 @@ func (data *SnapshotManagerData) ExtractSnapshot(ctx context.Context, snapshotID
|
||||
return fmt.Errorf("failed to get blob path: %w", err)
|
||||
}
|
||||
|
||||
// Ensure the target directory exists
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create target directory: %w", err)
|
||||
// If cleanTarget is true, clean the target directory before extraction
|
||||
if cleanTarget {
|
||||
// Remove the directory and recreate it
|
||||
if err := os.RemoveAll(path); err != nil {
|
||||
return fmt.Errorf("failed to clean target directory: %w", err)
|
||||
}
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create target directory: %w", err)
|
||||
}
|
||||
} else {
|
||||
// Just ensure the target directory exists
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create target directory: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Extract the archive to the target directory
|
||||
@ -351,22 +386,130 @@ func (data *SnapshotManagerData) UpdateSnapshotMetadata(ctx context.Context, sna
|
||||
return errors.New("new name cannot be empty")
|
||||
}
|
||||
|
||||
// Get the current snapshot metadata
|
||||
snapshot, err := data.metadataStore.GetSnapshotMetadata(ctx, snapshotID)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
if errors.Is(err, store.ErrNotFound) {
|
||||
return ErrNotFound
|
||||
}
|
||||
return fmt.Errorf("failed to get snapshot metadata: %w", err)
|
||||
}
|
||||
|
||||
// Update the name
|
||||
snapshot.Name = newName
|
||||
|
||||
// Save the updated metadata
|
||||
if err := data.metadataStore.SaveSnapshotMetadata(ctx, *snapshot); err != nil {
|
||||
return fmt.Errorf("failed to update snapshot metadata: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// diffArchiveReader is a wrapper around an *os.File that handles cleanup of temporary files.
|
||||
type diffArchiveReader struct {
|
||||
*os.File
|
||||
tempArchive string
|
||||
tempStaging string
|
||||
}
|
||||
|
||||
// Close closes the file and removes the temporary archive and staging directory.
|
||||
func (r *diffArchiveReader) Close() error {
|
||||
err := r.File.Close()
|
||||
os.Remove(r.tempArchive)
|
||||
os.RemoveAll(r.tempStaging)
|
||||
return err
|
||||
}
|
||||
|
||||
func (data *SnapshotManagerData) StreamSnapshotDiff(ctx context.Context, snapshotID, parentID string, offset int64) (io.ReadCloser, error) {
|
||||
targetSnap, err := data.metadataStore.GetSnapshotMetadata(ctx, snapshotID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get target snapshot metadata: %w", err)
|
||||
}
|
||||
|
||||
parentFiles := make(map[string]string)
|
||||
if parentID != "" {
|
||||
parentSnap, err := data.metadataStore.GetSnapshotMetadata(ctx, parentID)
|
||||
if err == nil {
|
||||
for _, file := range parentSnap.Files {
|
||||
if !file.IsDir {
|
||||
parentFiles[file.Path] = file.SHA256
|
||||
}
|
||||
}
|
||||
} else {
|
||||
data.logger.Printf("Warning: failed to get parent snapshot %s, creating full diff: %v", parentID, err)
|
||||
}
|
||||
}
|
||||
|
||||
var filesToInclude []string
|
||||
for _, file := range targetSnap.Files {
|
||||
if file.IsDir {
|
||||
continue
|
||||
}
|
||||
if parentHash, ok := parentFiles[file.Path]; !ok || parentHash != file.SHA256 {
|
||||
filesToInclude = append(filesToInclude, file.Path)
|
||||
}
|
||||
}
|
||||
|
||||
if len(filesToInclude) == 0 {
|
||||
return io.NopCloser(bytes.NewReader(nil)), nil
|
||||
}
|
||||
|
||||
tempStagingDir, err := os.MkdirTemp(data.blobStore.GetBaseDir(), "diff-staging-*")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temp staging directory: %w", err)
|
||||
}
|
||||
|
||||
targetBlobPath, err := data.blobStore.GetBlobPath(ctx, snapshotID)
|
||||
if err != nil {
|
||||
os.RemoveAll(tempStagingDir)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, filePath := range filesToInclude {
|
||||
destPath := filepath.Join(tempStagingDir, filePath)
|
||||
if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
|
||||
os.RemoveAll(tempStagingDir)
|
||||
return nil, fmt.Errorf("failed to create dir for diff file: %w", err)
|
||||
}
|
||||
|
||||
fileWriter, err := os.Create(destPath)
|
||||
if err != nil {
|
||||
os.RemoveAll(tempStagingDir)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = archive.ExtractFileFromArchive(targetBlobPath, filePath, fileWriter)
|
||||
fileWriter.Close()
|
||||
if err != nil {
|
||||
os.RemoveAll(tempStagingDir)
|
||||
return nil, fmt.Errorf("failed to extract file %s for diff: %w", filePath, err)
|
||||
}
|
||||
}
|
||||
|
||||
tempArchivePath := filepath.Join(data.blobStore.GetBaseDir(), "diff-"+snapshotID+".zip")
|
||||
if err := archive.CreateArchive(tempStagingDir, tempArchivePath); err != nil {
|
||||
os.RemoveAll(tempStagingDir)
|
||||
os.Remove(tempArchivePath)
|
||||
return nil, fmt.Errorf("failed to create diff archive: %w", err)
|
||||
}
|
||||
|
||||
archiveFile, err := os.Open(tempArchivePath)
|
||||
if err != nil {
|
||||
os.RemoveAll(tempStagingDir)
|
||||
os.Remove(tempArchivePath)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if offset > 0 {
|
||||
if _, err := archiveFile.Seek(offset, io.SeekStart); err != nil {
|
||||
archiveFile.Close()
|
||||
os.RemoveAll(tempStagingDir)
|
||||
os.Remove(tempArchivePath)
|
||||
return nil, fmt.Errorf("failed to seek in diff archive: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &diffArchiveReader{
|
||||
File: archiveFile,
|
||||
tempArchive: tempArchivePath,
|
||||
tempStaging: tempStagingDir,
|
||||
}, nil
|
||||
}
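
StreamSnapshotDiff returns a ReadCloser over a temporary zip containing only the files that changed relative to the parent, optionally resumed from a byte offset; closing it removes the temporary archive and staging directory. A hedged sketch of a consumer (the `send` callback stands in for a gRPC stream's Send call and is an assumption for illustration):

```go
package example

import (
	"context"
	"io"

	"gitea.unprism.ru/KRBL/Agate/interfaces"
)

// streamDiff reads the differential archive in 64 KB chunks and forwards each
// chunk through send. Deferring Close on the reader also cleans up the
// temporary archive and staging directory created by the manager.
func streamDiff(ctx context.Context, mgr interfaces.SnapshotManager, snapshotID, parentID string, offset int64, send func([]byte) error) error {
	reader, err := mgr.StreamSnapshotDiff(ctx, snapshotID, parentID, offset)
	if err != nil {
		return err
	}
	defer reader.Close()

	buf := make([]byte, 64*1024)
	for {
		n, readErr := reader.Read(buf)
		if n > 0 {
			if err := send(buf[:n]); err != nil {
				return err
			}
		}
		if readErr == io.EOF {
			return nil
		}
		if readErr != nil {
			return readErr
		}
	}
}
```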
613
manager_test.go
Normal file
613
manager_test.go
Normal file
@ -0,0 +1,613 @@
package agate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"gitea.unprism.ru/KRBL/Agate/store"
|
||||
"gitea.unprism.ru/KRBL/Agate/store/filesystem"
|
||||
"gitea.unprism.ru/KRBL/Agate/store/sqlite"
|
||||
)
|
||||
|
||||
// setupTestEnvironment creates a temporary directory and initializes the stores
|
||||
func setupTestEnvironment(t *testing.T) (string, store.MetadataStore, store.BlobStore, func()) {
|
||||
// Create a temporary directory for tests
|
||||
tempDir, err := os.MkdirTemp("", "agate-test-*")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temp directory: %v", err)
|
||||
}
|
||||
|
||||
// Create directories for metadata and blobs
|
||||
metadataDir := filepath.Join(tempDir, "metadata")
|
||||
blobsDir := filepath.Join(tempDir, "blobs")
|
||||
if err := os.MkdirAll(metadataDir, 0755); err != nil {
|
||||
os.RemoveAll(tempDir)
|
||||
t.Fatalf("Failed to create metadata directory: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(blobsDir, 0755); err != nil {
|
||||
os.RemoveAll(tempDir)
|
||||
t.Fatalf("Failed to create blobs directory: %v", err)
|
||||
}
|
||||
|
||||
// Initialize the stores
|
||||
dbPath := filepath.Join(metadataDir, "snapshots.db")
|
||||
metadataStore, err := sqlite.NewSQLiteStore(dbPath)
|
||||
if err != nil {
|
||||
os.RemoveAll(tempDir)
|
||||
t.Fatalf("Failed to create metadata store: %v", err)
|
||||
}
|
||||
|
||||
blobStore, err := filesystem.NewFileSystemStore(blobsDir)
|
||||
if err != nil {
|
||||
metadataStore.Close()
|
||||
os.RemoveAll(tempDir)
|
||||
t.Fatalf("Failed to create blob store: %v", err)
|
||||
}
|
||||
|
||||
// Return a cleanup function
|
||||
cleanup := func() {
|
||||
metadataStore.Close()
|
||||
os.RemoveAll(tempDir)
|
||||
}
|
||||
|
||||
return tempDir, metadataStore, blobStore, cleanup
|
||||
}
|
||||
|
||||
// createTestFiles creates test files in the specified directory
|
||||
func createTestFiles(t *testing.T, dir string) {
|
||||
// Create a subdirectory
|
||||
subDir := filepath.Join(dir, "subdir")
|
||||
if err := os.MkdirAll(subDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create subdirectory: %v", err)
|
||||
}
|
||||
|
||||
// Create some test files
|
||||
testFiles := map[string]string{
|
||||
filepath.Join(dir, "file1.txt"): "This is file 1",
|
||||
filepath.Join(dir, "file2.txt"): "This is file 2",
|
||||
filepath.Join(subDir, "subfile1.txt"): "This is subfile 1",
|
||||
filepath.Join(subDir, "subfile2.txt"): "This is subfile 2",
|
||||
}
|
||||
|
||||
for path, content := range testFiles {
|
||||
if err := os.WriteFile(path, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("Failed to create test file %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateAndGetSnapshot(t *testing.T) {
|
||||
tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
|
||||
defer cleanup()
|
||||
|
||||
// Create a source directory with test files
|
||||
sourceDir := filepath.Join(tempDir, "source")
|
||||
if err := os.MkdirAll(sourceDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create source directory: %v", err)
|
||||
}
|
||||
createTestFiles(t, sourceDir)
|
||||
|
||||
// Create a snapshot manager with nil logger
|
||||
manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot manager: %v", err)
|
||||
}
|
||||
|
||||
// Create a snapshot
|
||||
ctx := context.Background()
|
||||
snapshot, err := manager.CreateSnapshot(ctx, sourceDir, "Test Snapshot", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Check that the snapshot was created with the correct name
|
||||
if snapshot.Name != "Test Snapshot" {
|
||||
t.Errorf("Snapshot has wrong name: got %s, want %s", snapshot.Name, "Test Snapshot")
|
||||
}
|
||||
|
||||
// Check that the snapshot has the correct number of files
|
||||
if len(snapshot.Files) != 5 { // 4 files + 1 directory
|
||||
t.Errorf("Snapshot has wrong number of files: got %d, want %d", len(snapshot.Files), 5)
|
||||
}
|
||||
|
||||
// Get the snapshot details
|
||||
retrievedSnapshot, err := manager.GetSnapshotDetails(ctx, snapshot.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get snapshot details: %v", err)
|
||||
}
|
||||
|
||||
// Check that the retrieved snapshot matches the original
|
||||
if retrievedSnapshot.ID != snapshot.ID {
|
||||
t.Errorf("Retrieved snapshot ID does not match: got %s, want %s", retrievedSnapshot.ID, snapshot.ID)
|
||||
}
|
||||
if retrievedSnapshot.Name != snapshot.Name {
|
||||
t.Errorf("Retrieved snapshot name does not match: got %s, want %s", retrievedSnapshot.Name, snapshot.Name)
|
||||
}
|
||||
if len(retrievedSnapshot.Files) != len(snapshot.Files) {
|
||||
t.Errorf("Retrieved snapshot has wrong number of files: got %d, want %d", len(retrievedSnapshot.Files), len(snapshot.Files))
|
||||
}
|
||||
}
|
||||
|
||||
func TestListSnapshots(t *testing.T) {
|
||||
tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
|
||||
defer cleanup()
|
||||
|
||||
// Create a source directory with test files
|
||||
sourceDir := filepath.Join(tempDir, "source")
|
||||
if err := os.MkdirAll(sourceDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create source directory: %v", err)
|
||||
}
|
||||
createTestFiles(t, sourceDir)
|
||||
|
||||
// Create a snapshot manager with nil logger
|
||||
manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot manager: %v", err)
|
||||
}
|
||||
|
||||
// Create multiple snapshots
|
||||
ctx := context.Background()
|
||||
snapshot1, err := manager.CreateSnapshot(ctx, sourceDir, "Snapshot 1", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Modify a file
|
||||
if err := os.WriteFile(filepath.Join(sourceDir, "file1.txt"), []byte("Modified file 1"), 0644); err != nil {
|
||||
t.Fatalf("Failed to modify test file: %v", err)
|
||||
}
|
||||
|
||||
snapshot2, err := manager.CreateSnapshot(ctx, sourceDir, "Snapshot 2", snapshot1.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
|
||||
// List the snapshots with empty options
|
||||
snapshots, err := manager.ListSnapshots(ctx, store.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to list snapshots: %v", err)
|
||||
}
|
||||
|
||||
// Check that both snapshots are listed
|
||||
if len(snapshots) != 2 {
|
||||
t.Errorf("Wrong number of snapshots listed: got %d, want %d", len(snapshots), 2)
|
||||
}
|
||||
|
||||
// Check that the snapshots have the correct information
|
||||
for _, snap := range snapshots {
|
||||
if snap.ID == snapshot1.ID {
|
||||
if snap.Name != "Snapshot 1" {
|
||||
t.Errorf("Snapshot 1 has wrong name: got %s, want %s", snap.Name, "Snapshot 1")
|
||||
}
|
||||
if snap.ParentID != "" {
|
||||
t.Errorf("Snapshot 1 has wrong parent ID: got %s, want %s", snap.ParentID, "")
|
||||
}
|
||||
} else if snap.ID == snapshot2.ID {
|
||||
if snap.Name != "Snapshot 2" {
|
||||
t.Errorf("Snapshot 2 has wrong name: got %s, want %s", snap.Name, "Snapshot 2")
|
||||
}
|
||||
if snap.ParentID != snapshot1.ID {
|
||||
t.Errorf("Snapshot 2 has wrong parent ID: got %s, want %s", snap.ParentID, snapshot1.ID)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("Unexpected snapshot ID: %s", snap.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteSnapshot(t *testing.T) {
|
||||
tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
|
||||
defer cleanup()
|
||||
|
||||
// Create a source directory with test files
|
||||
sourceDir := filepath.Join(tempDir, "source")
|
||||
if err := os.MkdirAll(sourceDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create source directory: %v", err)
|
||||
}
|
||||
createTestFiles(t, sourceDir)
|
||||
|
||||
// Create a snapshot manager with nil logger
|
||||
manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot manager: %v", err)
|
||||
}
|
||||
|
||||
// Create a snapshot
|
||||
ctx := context.Background()
|
||||
snapshot, err := manager.CreateSnapshot(ctx, sourceDir, "Test Snapshot", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Delete the snapshot
|
||||
err = manager.DeleteSnapshot(ctx, snapshot.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to delete snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Try to get the deleted snapshot
|
||||
_, err = manager.GetSnapshotDetails(ctx, snapshot.ID)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error when getting deleted snapshot, got nil")
|
||||
}
|
||||
|
||||
// List snapshots to confirm it's gone
|
||||
snapshots, err := manager.ListSnapshots(ctx, store.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to list snapshots: %v", err)
|
||||
}
|
||||
if len(snapshots) != 0 {
|
||||
t.Errorf("Expected 0 snapshots after deletion, got %d", len(snapshots))
|
||||
}
|
||||
}
|
||||
|
||||
func TestOpenFile(t *testing.T) {
|
||||
_, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
|
||||
defer cleanup()
|
||||
|
||||
// Create a source directory with test files
|
||||
sourceDir := filepath.Join(blobStore.GetActiveDir(), "source")
|
||||
if err := os.MkdirAll(sourceDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create source directory: %v", err)
|
||||
}
|
||||
createTestFiles(t, sourceDir)
|
||||
|
||||
// Create a snapshot manager with nil logger
|
||||
manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot manager: %v", err)
|
||||
}
|
||||
|
||||
// Create a snapshot
|
||||
ctx := context.Background()
|
||||
snapshot, err := manager.CreateSnapshot(ctx, sourceDir, "Test Snapshot", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Open a file from the snapshot
|
||||
fileReader, err := manager.OpenFile(ctx, snapshot.ID, "file1.txt")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open file from snapshot: %v", err)
|
||||
}
|
||||
defer fileReader.Close()
|
||||
|
||||
// Read the file content
|
||||
content, err := io.ReadAll(fileReader)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read file content: %v", err)
|
||||
}
|
||||
|
||||
// Check that the content matches the original
|
||||
if string(content) != "This is file 1" {
|
||||
t.Errorf("File content does not match: got %s, want %s", string(content), "This is file 1")
|
||||
}
|
||||
|
||||
// Try to open a non-existent file
|
||||
pipe, err := manager.OpenFile(ctx, snapshot.ID, "nonexistent.txt")
|
||||
if err == nil {
|
||||
tmp := make([]byte, 1)
|
||||
_, err = pipe.Read(tmp)
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error when opening non-existent file, got nil")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractSnapshot(t *testing.T) {
|
||||
tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
|
||||
defer cleanup()
|
||||
|
||||
// Create a source directory with test files
|
||||
sourceDir := filepath.Join(tempDir, "source")
|
||||
if err := os.MkdirAll(sourceDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create source directory: %v", err)
|
||||
}
|
||||
createTestFiles(t, sourceDir)
|
||||
|
||||
// Create a snapshot manager with nil logger
|
||||
manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot manager: %v", err)
|
||||
}
|
||||
|
||||
// Create a snapshot
|
||||
ctx := context.Background()
|
||||
snapshot, err := manager.CreateSnapshot(ctx, sourceDir, "Test Snapshot", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Create a target directory for extraction
|
||||
targetDir := filepath.Join(tempDir, "target")
|
||||
if err := os.MkdirAll(targetDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create target directory: %v", err)
|
||||
}
|
||||
|
||||
// Extract the snapshot with default behavior (cleanTarget=false)
|
||||
err = manager.ExtractSnapshot(ctx, snapshot.ID, targetDir, false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to extract snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Check that the files were extracted correctly
|
||||
testFiles := map[string]string{
|
||||
filepath.Join(targetDir, "file1.txt"): "This is file 1",
|
||||
filepath.Join(targetDir, "file2.txt"): "This is file 2",
|
||||
filepath.Join(targetDir, "subdir/subfile1.txt"): "This is subfile 1",
|
||||
filepath.Join(targetDir, "subdir/subfile2.txt"): "This is subfile 2",
|
||||
}
|
||||
|
||||
for path, expectedContent := range testFiles {
|
||||
content, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read extracted file %s: %v", path, err)
|
||||
}
|
||||
if string(content) != expectedContent {
|
||||
t.Errorf("Extracted file %s has wrong content: got %s, want %s", path, string(content), expectedContent)
|
||||
}
|
||||
}
|
||||
|
||||
// Try to extract a non-existent snapshot
|
||||
err = manager.ExtractSnapshot(ctx, "nonexistent-id", targetDir, false)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error when extracting non-existent snapshot, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
// TestExtractSnapshot_SafeRestore tests that ExtractSnapshot with cleanTarget=false
|
||||
// does not remove extra files in the target directory
|
||||
func TestExtractSnapshot_SafeRestore(t *testing.T) {
|
||||
tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
|
||||
defer cleanup()
|
||||
|
||||
// Create a source directory with test files
|
||||
sourceDir := filepath.Join(tempDir, "source")
|
||||
if err := os.MkdirAll(sourceDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create source directory: %v", err)
|
||||
}
|
||||
createTestFiles(t, sourceDir)
|
||||
|
||||
// Create a snapshot manager with nil logger
|
||||
manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot manager: %v", err)
|
||||
}
|
||||
|
||||
// Create a snapshot (snapshot A)
|
||||
ctx := context.Background()
|
||||
snapshot, err := manager.CreateSnapshot(ctx, sourceDir, "Snapshot A", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Create a target directory and place an "extra" file in it
|
||||
targetDir := filepath.Join(tempDir, "target")
|
||||
if err := os.MkdirAll(targetDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create target directory: %v", err)
|
||||
}
|
||||
extraFilePath := filepath.Join(targetDir, "extra.txt")
|
||||
if err := os.WriteFile(extraFilePath, []byte("This is an extra file"), 0644); err != nil {
|
||||
t.Fatalf("Failed to create extra file: %v", err)
|
||||
}
|
||||
|
||||
// Extract the snapshot with cleanTarget=false
|
||||
err = manager.ExtractSnapshot(ctx, snapshot.ID, targetDir, false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to extract snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Check that all files from the snapshot were restored
|
||||
testFiles := map[string]string{
|
||||
filepath.Join(targetDir, "file1.txt"): "This is file 1",
|
||||
filepath.Join(targetDir, "file2.txt"): "This is file 2",
|
||||
filepath.Join(targetDir, "subdir/subfile1.txt"): "This is subfile 1",
|
||||
filepath.Join(targetDir, "subdir/subfile2.txt"): "This is subfile 2",
|
||||
}
|
||||
|
||||
for path, expectedContent := range testFiles {
|
||||
content, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read extracted file %s: %v", path, err)
|
||||
}
|
||||
if string(content) != expectedContent {
|
||||
t.Errorf("Extracted file %s has wrong content: got %s, want %s", path, string(content), expectedContent)
|
||||
}
|
||||
}
|
||||
|
||||
// Check that the extra file was NOT deleted
|
||||
if _, err := os.Stat(extraFilePath); os.IsNotExist(err) {
|
||||
t.Errorf("Extra file was deleted, but should have been preserved with cleanTarget=false")
|
||||
} else if err != nil {
|
||||
t.Fatalf("Failed to check if extra file exists: %v", err)
|
||||
} else {
|
||||
// Read the content to make sure it wasn't modified
|
||||
content, err := os.ReadFile(extraFilePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read extra file: %v", err)
|
||||
}
|
||||
if string(content) != "This is an extra file" {
|
||||
t.Errorf("Extra file content was modified: got %s, want %s", string(content), "This is an extra file")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestExtractSnapshot_CleanRestore tests that ExtractSnapshot with cleanTarget=true
|
||||
// completely cleans the target directory before restoration
|
||||
func TestExtractSnapshot_CleanRestore(t *testing.T) {
|
||||
tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
|
||||
defer cleanup()
|
||||
|
||||
// Create a source directory with test files
|
||||
sourceDir := filepath.Join(tempDir, "source")
|
||||
if err := os.MkdirAll(sourceDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create source directory: %v", err)
|
||||
}
|
||||
createTestFiles(t, sourceDir)
|
||||
|
||||
// Create a snapshot manager with nil logger
|
||||
manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot manager: %v", err)
|
||||
}
|
||||
|
||||
// Create a snapshot (snapshot A)
|
||||
ctx := context.Background()
|
||||
snapshot, err := manager.CreateSnapshot(ctx, sourceDir, "Snapshot A", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Create a target directory and place an "extra" file in it
|
||||
targetDir := filepath.Join(tempDir, "target")
|
||||
if err := os.MkdirAll(targetDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create target directory: %v", err)
|
||||
}
|
||||
extraFilePath := filepath.Join(targetDir, "extra.txt")
|
||||
if err := os.WriteFile(extraFilePath, []byte("This is an extra file"), 0644); err != nil {
|
||||
t.Fatalf("Failed to create extra file: %v", err)
|
||||
}
|
||||
|
||||
// Extract the snapshot with cleanTarget=true
|
||||
err = manager.ExtractSnapshot(ctx, snapshot.ID, targetDir, true)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to extract snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Check that all files from the snapshot were restored
|
||||
testFiles := map[string]string{
|
||||
filepath.Join(targetDir, "file1.txt"): "This is file 1",
|
||||
filepath.Join(targetDir, "file2.txt"): "This is file 2",
|
||||
filepath.Join(targetDir, "subdir/subfile1.txt"): "This is subfile 1",
|
||||
filepath.Join(targetDir, "subdir/subfile2.txt"): "This is subfile 2",
|
||||
}
|
||||
|
||||
for path, expectedContent := range testFiles {
|
||||
content, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read extracted file %s: %v", path, err)
|
||||
}
|
||||
if string(content) != expectedContent {
|
||||
t.Errorf("Extracted file %s has wrong content: got %s, want %s", path, string(content), expectedContent)
|
||||
}
|
||||
}
|
||||
|
||||
// Check that the extra file WAS deleted
|
||||
if _, err := os.Stat(extraFilePath); os.IsNotExist(err) {
|
||||
// This is the expected behavior
|
||||
} else if err != nil {
|
||||
t.Fatalf("Failed to check if extra file exists: %v", err)
|
||||
} else {
|
||||
t.Errorf("Extra file was not deleted, but should have been removed with cleanTarget=true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateSnapshotMetadata(t *testing.T) {
|
||||
tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
|
||||
defer cleanup()
|
||||
|
||||
// Create a source directory with test files
|
||||
sourceDir := filepath.Join(tempDir, "source")
|
||||
if err := os.MkdirAll(sourceDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create source directory: %v", err)
|
||||
}
|
||||
createTestFiles(t, sourceDir)
|
||||
|
||||
// Create a snapshot manager with nil logger
|
||||
manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot manager: %v", err)
|
||||
}
|
||||
|
||||
// Create a snapshot
|
||||
ctx := context.Background()
|
||||
snapshot, err := manager.CreateSnapshot(ctx, sourceDir, "Test Snapshot", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Update the snapshot metadata
|
||||
newName := "Updated Snapshot Name"
|
||||
err = manager.UpdateSnapshotMetadata(ctx, snapshot.ID, newName)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to update snapshot metadata: %v", err)
|
||||
}
|
||||
|
||||
// Get the updated snapshot
|
||||
updatedSnapshot, err := manager.GetSnapshotDetails(ctx, snapshot.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get updated snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Check that the name was updated
|
||||
if updatedSnapshot.Name != newName {
|
||||
t.Errorf("Snapshot name was not updated: got %s, want %s", updatedSnapshot.Name, newName)
|
||||
}
|
||||
|
||||
// Try to update a non-existent snapshot
|
||||
err = manager.UpdateSnapshotMetadata(ctx, "nonexistent-id", "New Name")
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error when updating non-existent snapshot, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStreamSnapshotDiff_EdgeCases(t *testing.T) {
|
||||
tempDir, metadataStore, blobStore, cleanup := setupTestEnvironment(t)
|
||||
defer cleanup()
|
||||
|
||||
sourceDir := filepath.Join(tempDir, "source")
|
||||
os.MkdirAll(sourceDir, 0755)
|
||||
createTestFiles(t, sourceDir)
|
||||
|
||||
manager, err := CreateSnapshotManager(metadataStore, blobStore, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot manager: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
// Create two identical snapshots
|
||||
snap1, _ := manager.CreateSnapshot(ctx, sourceDir, "Snap1", "")
|
||||
snap2, _ := manager.CreateSnapshot(ctx, sourceDir, "Snap2", snap1.ID)
|
||||
|
||||
// Test 1: Diff between identical snapshots should be empty
|
||||
reader, err := manager.StreamSnapshotDiff(ctx, snap2.ID, snap1.ID, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error for identical snapshots, got %v", err)
|
||||
}
|
||||
defer reader.Close()
|
||||
data, _ := io.ReadAll(reader)
|
||||
if len(data) != 0 {
|
||||
t.Errorf("Expected empty diff for identical snapshots, got %d bytes", len(data))
|
||||
}
|
||||
|
||||
// Test 2: Diff with a non-existent parent should be a full archive
|
||||
reader, err = manager.StreamSnapshotDiff(ctx, snap1.ID, "non-existent-parent", 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error for non-existent parent, got %v", err)
|
||||
}
|
||||
defer reader.Close()
|
||||
data, _ = io.ReadAll(reader)
|
||||
if len(data) == 0 {
|
||||
t.Error("Expected full archive for non-existent parent, got empty diff")
|
||||
}
|
||||
|
||||
// Create an empty source dir
|
||||
emptyDir := filepath.Join(tempDir, "empty_source")
|
||||
os.MkdirAll(emptyDir, 0755)
|
||||
emptySnap, _ := manager.CreateSnapshot(ctx, emptyDir, "EmptySnap", "")
|
||||
|
||||
// Test 3: Diff of an empty snapshot should be empty
|
||||
reader, err = manager.StreamSnapshotDiff(ctx, emptySnap.ID, "", 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error for empty snapshot, got %v", err)
|
||||
}
|
||||
defer reader.Close()
|
||||
data, _ = io.ReadAll(reader)
|
||||
if len(data) != 0 {
|
||||
t.Errorf("Expected empty diff for empty snapshot, got %d bytes", len(data))
|
||||
}
|
||||
}
353
performance_test.go
Normal file
353
performance_test.go
Normal file
@ -0,0 +1,353 @@
package agate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BenchmarkCreateSnapshot benchmarks the performance of creating snapshots with different numbers of files
|
||||
func BenchmarkCreateSnapshot(b *testing.B) {
|
||||
// Skip in short mode
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping benchmark in short mode")
|
||||
}
|
||||
|
||||
// Test with different numbers of files
|
||||
fileCounts := []int{10, 100, 1000}
|
||||
for _, fileCount := range fileCounts {
|
||||
b.Run(fmt.Sprintf("Files-%d", fileCount), func(b *testing.B) {
|
||||
// Create a temporary directory for tests
|
||||
tempDir, err := os.MkdirTemp("", "agate-bench-*")
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to create temp directory: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
// Create a data directory
|
||||
dataDir := filepath.Join(tempDir, "data")
|
||||
if err := os.MkdirAll(dataDir, 0755); err != nil {
|
||||
b.Fatalf("Failed to create data directory: %v", err)
|
||||
}
|
||||
|
||||
// Create test files
|
||||
createBenchmarkFiles(b, dataDir, fileCount, 1024) // 1 KB per file
|
||||
|
||||
// Create Agate options
|
||||
options := AgateOptions{
|
||||
WorkDir: dataDir,
|
||||
OpenFunc: func(dir string) error {
|
||||
return nil
|
||||
},
|
||||
CloseFunc: func() error {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// Create Agate instance
|
||||
ag, err := New(options)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to create Agate instance: %v", err)
|
||||
}
|
||||
defer ag.Close()
|
||||
|
||||
// Reset the timer before the benchmark loop
|
||||
b.ResetTimer()
|
||||
|
||||
// Run the benchmark
|
||||
for i := 0; i < b.N; i++ {
|
||||
ctx := context.Background()
|
||||
_, err := ag.SaveSnapshot(ctx, fmt.Sprintf("Benchmark Snapshot %d", i), "")
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkRestoreSnapshot benchmarks the performance of restoring snapshots with different numbers of files
|
||||
func BenchmarkRestoreSnapshot(b *testing.B) {
|
||||
// Skip in short mode
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping benchmark in short mode")
|
||||
}
|
||||
|
||||
// Test with different numbers of files
|
||||
fileCounts := []int{10, 100, 1000}
|
||||
for _, fileCount := range fileCounts {
|
||||
b.Run(fmt.Sprintf("Files-%d", fileCount), func(b *testing.B) {
|
||||
// Create a temporary directory for tests
|
||||
tempDir, err := os.MkdirTemp("", "agate-bench-*")
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to create temp directory: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
// Create a data directory
|
||||
dataDir := filepath.Join(tempDir, "data")
|
||||
if err := os.MkdirAll(dataDir, 0755); err != nil {
|
||||
b.Fatalf("Failed to create data directory: %v", err)
|
||||
}
|
||||
|
||||
// Create test files
|
||||
createBenchmarkFiles(b, dataDir, fileCount, 1024) // 1 KB per file
|
||||
|
||||
// Create Agate options
|
||||
options := AgateOptions{
|
||||
WorkDir: dataDir,
|
||||
OpenFunc: func(dir string) error {
|
||||
return nil
|
||||
},
|
||||
CloseFunc: func() error {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// Create Agate instance
|
||||
ag, err := New(options)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to create Agate instance: %v", err)
|
||||
}
|
||||
defer ag.Close()
|
||||
|
||||
// Create a snapshot
|
||||
ctx := context.Background()
|
||||
snapshotID, err := ag.SaveSnapshot(ctx, "Benchmark Snapshot", "")
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Modify some files
|
||||
for i := 0; i < fileCount/2; i++ {
|
||||
filePath := filepath.Join(dataDir, fmt.Sprintf("file_%d.txt", i))
|
||||
if err := os.WriteFile(filePath, []byte(fmt.Sprintf("Modified content %d", i)), 0644); err != nil {
|
||||
b.Fatalf("Failed to modify file: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Reset the timer before the benchmark loop
|
||||
b.ResetTimer()
|
||||
|
||||
// Run the benchmark
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := ag.RestoreSnapshot(ctx, snapshotID)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to restore snapshot: %v", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkLargeFiles benchmarks the performance of creating and restoring snapshots with large files
|
||||
func BenchmarkLargeFiles(b *testing.B) {
|
||||
// Skip in short mode
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping benchmark in short mode")
|
||||
}
|
||||
|
||||
// Test with different file sizes
|
||||
fileSizes := []int{1 * 1024 * 1024, 10 * 1024 * 1024, 100 * 1024 * 1024} // 1 MB, 10 MB, 100 MB
|
||||
for _, fileSize := range fileSizes {
|
||||
b.Run(fmt.Sprintf("Size-%dMB", fileSize/(1024*1024)), func(b *testing.B) {
|
||||
// Create a temporary directory for tests
|
||||
tempDir, err := os.MkdirTemp("", "agate-bench-*")
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to create temp directory: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
// Create a data directory
|
||||
dataDir := filepath.Join(tempDir, "data")
|
||||
if err := os.MkdirAll(dataDir, 0755); err != nil {
|
||||
b.Fatalf("Failed to create data directory: %v", err)
|
||||
}
|
||||
|
||||
// Create a large file
|
||||
largeFilePath := filepath.Join(dataDir, "large_file.bin")
|
||||
createLargeFile(b, largeFilePath, fileSize)
|
||||
|
||||
// Create Agate options
|
||||
options := AgateOptions{
|
||||
WorkDir: dataDir,
|
||||
OpenFunc: func(dir string) error {
|
||||
return nil
|
||||
},
|
||||
CloseFunc: func() error {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// Create Agate instance
|
||||
ag, err := New(options)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to create Agate instance: %v", err)
|
||||
}
|
||||
defer ag.Close()
|
||||
|
||||
// Create a snapshot
|
||||
ctx := context.Background()
|
||||
|
||||
// Measure snapshot creation time
|
||||
b.Run("Create", func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := ag.SaveSnapshot(ctx, fmt.Sprintf("Large File Snapshot %d", i), "")
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// Create a snapshot for restoration benchmark
|
||||
snapshotID, err := ag.SaveSnapshot(ctx, "Large File Snapshot", "")
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
|
||||
// Modify the large file
|
||||
if err := os.WriteFile(largeFilePath, []byte("Modified content"), 0644); err != nil {
|
||||
b.Fatalf("Failed to modify large file: %v", err)
|
||||
}
|
||||
|
||||
// Measure snapshot restoration time
|
||||
b.Run("Restore", func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := ag.RestoreSnapshot(ctx, snapshotID)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to restore snapshot: %v", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestPerformanceMetrics runs performance tests and reports metrics
|
||||
func TestPerformanceMetrics(t *testing.T) {
|
||||
// Skip in short mode
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping performance metrics test in short mode")
|
||||
}
|
||||
|
||||
// Create a temporary directory for tests
|
||||
tempDir, err := os.MkdirTemp("", "agate-perf-*")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temp directory: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
// Create a data directory
|
||||
dataDir := filepath.Join(tempDir, "data")
|
||||
if err := os.MkdirAll(dataDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create data directory: %v", err)
|
||||
}
|
||||
|
||||
// Test with different numbers of files
|
||||
fileCounts := []int{10, 100, 1000}
|
||||
for _, fileCount := range fileCounts {
|
||||
t.Run(fmt.Sprintf("Files-%d", fileCount), func(t *testing.T) {
|
||||
// Create test files
|
||||
createBenchmarkFiles(t, dataDir, fileCount, 1024) // 1 KB per file
|
||||
|
||||
// Create Agate options
|
||||
options := AgateOptions{
|
||||
WorkDir: dataDir,
|
||||
OpenFunc: func(dir string) error {
|
||||
return nil
|
||||
},
|
||||
CloseFunc: func() error {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// Create Agate instance
|
||||
ag, err := New(options)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create Agate instance: %v", err)
|
||||
}
|
||||
defer ag.Close()
|
||||
|
||||
// Measure snapshot creation time
|
||||
ctx := context.Background()
|
||||
startTime := time.Now()
|
||||
snapshotID, err := ag.SaveSnapshot(ctx, "Performance Test Snapshot", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create snapshot: %v", err)
|
||||
}
|
||||
createDuration := time.Since(startTime)
|
||||
t.Logf("Created snapshot with %d files in %v (%.2f files/sec)", fileCount, createDuration, float64(fileCount)/createDuration.Seconds())
|
||||
|
||||
// Modify some files
|
||||
for i := 0; i < fileCount/2; i++ {
|
||||
filePath := filepath.Join(dataDir, fmt.Sprintf("file_%d.txt", i))
|
||||
if err := os.WriteFile(filePath, []byte(fmt.Sprintf("Modified content %d", i)), 0644); err != nil {
|
||||
t.Fatalf("Failed to modify file: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Measure snapshot restoration time
|
||||
startTime = time.Now()
|
||||
err = ag.RestoreSnapshot(ctx, snapshotID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to restore snapshot: %v", err)
|
||||
}
|
||||
restoreDuration := time.Since(startTime)
|
||||
t.Logf("Restored snapshot with %d files in %v (%.2f files/sec)", fileCount, restoreDuration, float64(fileCount)/restoreDuration.Seconds())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to create benchmark files
|
||||
func createBenchmarkFiles(tb testing.TB, dir string, count, size int) {
|
||||
tb.Helper()
|
||||
|
||||
// Create files with sequential names
|
||||
for i := 0; i < count; i++ {
|
||||
filePath := filepath.Join(dir, fmt.Sprintf("file_%d.txt", i))
|
||||
|
||||
// Create content of specified size
|
||||
content := make([]byte, size)
|
||||
for j := 0; j < size; j++ {
|
||||
content[j] = byte(j % 256)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(filePath, content, 0644); err != nil {
|
||||
tb.Fatalf("Failed to create benchmark file %s: %v", filePath, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to create a large file
|
||||
func createLargeFile(tb testing.TB, path string, size int) {
|
||||
tb.Helper()
|
||||
|
||||
// Create the file
|
||||
file, err := os.Create(path)
|
||||
if err != nil {
|
||||
tb.Fatalf("Failed to create large file: %v", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Create a buffer with a pattern
|
||||
bufferSize := 8192 // 8 KB buffer
|
||||
buffer := make([]byte, bufferSize)
|
||||
for i := 0; i < bufferSize; i++ {
|
||||
buffer[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Write the buffer multiple times to reach the desired size
|
||||
bytesWritten := 0
|
||||
for bytesWritten < size {
|
||||
n, err := file.Write(buffer)
|
||||
if err != nil {
|
||||
tb.Fatalf("Failed to write to large file: %v", err)
|
||||
}
|
||||
bytesWritten += n
|
||||
}
|
||||
}
137
remote/client.go
Normal file
137
remote/client.go
Normal file
@ -0,0 +1,137 @@
package remote
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
stdgrpc "google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
|
||||
agateGrpc "gitea.unprism.ru/KRBL/Agate/grpc"
|
||||
"gitea.unprism.ru/KRBL/Agate/interfaces"
|
||||
"gitea.unprism.ru/KRBL/Agate/store"
|
||||
)
|
||||
|
||||
// Client represents a client for connecting to a remote snapshot server.
|
||||
type Client struct {
|
||||
conn *stdgrpc.ClientConn
|
||||
client agateGrpc.SnapshotServiceClient
|
||||
}
|
||||
|
||||
// Ensure that Client implements the interfaces.SnapshotClient interface.
|
||||
var _ interfaces.SnapshotClient = (*Client)(nil)
|
||||
|
||||
// NewClient creates a new client connected to the specified address.
|
||||
func NewClient(address string) (*Client, error) {
|
||||
conn, err := stdgrpc.Dial(address, stdgrpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to server at %s: %w", address, err)
|
||||
}
|
||||
client := agateGrpc.NewSnapshotServiceClient(conn)
|
||||
return &Client{conn: conn, client: client}, nil
|
||||
}
|
||||
|
||||
// Close closes the connection to the server.
|
||||
func (c *Client) Close() error {
|
||||
if c.conn != nil {
|
||||
return c.conn.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListSnapshots retrieves the list of snapshots from the remote server.
|
||||
func (c *Client) ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error) {
|
||||
response, err := c.client.ListSnapshots(ctx, &agateGrpc.ListSnapshotsRequest{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list snapshots: %w", err)
|
||||
}
|
||||
|
||||
snapshots := make([]store.SnapshotInfo, 0, len(response.Snapshots))
|
||||
for _, snapshot := range response.Snapshots {
|
||||
snapshots = append(snapshots, store.SnapshotInfo{
|
||||
ID: snapshot.Id,
|
||||
Name: snapshot.Name,
|
||||
ParentID: snapshot.ParentId,
|
||||
CreationTime: snapshot.CreationTime.AsTime(),
|
||||
})
|
||||
}
|
||||
return snapshots, nil
|
||||
}
|
||||
|
||||
// FetchSnapshotDetails получает детальную информацию о конкретном снапшоте.
|
||||
func (c *Client) FetchSnapshotDetails(ctx context.Context, snapshotID string) (*store.Snapshot, error) {
|
||||
response, err := c.client.GetSnapshotDetails(ctx, &agateGrpc.GetSnapshotDetailsRequest{
|
||||
SnapshotId: snapshotID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get snapshot details: %w", err)
|
||||
}
|
||||
|
||||
snapshot := &store.Snapshot{
|
||||
ID: response.Info.Id,
|
||||
Name: response.Info.Name,
|
||||
ParentID: response.Info.ParentId,
|
||||
CreationTime: response.Info.CreationTime.AsTime(),
|
||||
Files: make([]store.FileInfo, 0, len(response.Files)),
|
||||
}
|
||||
|
||||
for _, file := range response.Files {
|
||||
snapshot.Files = append(snapshot.Files, store.FileInfo{
|
||||
Path: file.Path,
|
||||
Size: file.SizeBytes,
|
||||
IsDir: file.IsDir,
|
||||
SHA256: file.Sha256Hash,
|
||||
})
|
||||
}
|
||||
return snapshot, nil
|
||||
}
|
||||
|
||||
// DownloadSnapshotDiff скачивает архив с разницей между снапшотами.
|
||||
func (c *Client) DownloadSnapshotDiff(ctx context.Context, snapshotID, localParentID, targetPath string) error {
|
||||
var offset int64
|
||||
fileInfo, err := os.Stat(targetPath)
|
||||
if err == nil {
|
||||
offset = fileInfo.Size()
|
||||
} else if !os.IsNotExist(err) {
|
||||
return fmt.Errorf("failed to stat temporary file: %w", err)
|
||||
}
|
||||
|
||||
req := &agateGrpc.DownloadSnapshotDiffRequest{
|
||||
SnapshotId: snapshotID,
|
||||
LocalParentId: localParentID,
|
||||
Offset: offset,
|
||||
}
|
||||
|
||||
stream, err := c.client.DownloadSnapshotDiff(ctx, req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start snapshot diff download: %w", err)
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil {
|
||||
return fmt.Errorf("failed to create directory for %s: %w", targetPath, err)
|
||||
}
|
||||
|
||||
file, err := os.OpenFile(targetPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open file %s: %w", targetPath, err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
for {
|
||||
resp, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("error receiving diff chunk: %w", err)
|
||||
}
|
||||
if _, err := file.Write(resp.ChunkData); err != nil {
|
||||
return fmt.Errorf("error writing to file: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
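For context, a minimal sketch of how this client might be used from application code; the import path, server address, and output format are assumptions, not part of this changeset:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"gitea.unprism.ru/KRBL/Agate/remote"
)

func main() {
	// Connect to a snapshot server (address is a placeholder).
	client, err := remote.NewClient("localhost:50051")
	if err != nil {
		log.Fatalf("connect: %v", err)
	}
	defer client.Close()

	// List snapshots available on the remote side.
	snapshots, err := client.ListSnapshots(context.Background())
	if err != nil {
		log.Fatalf("list snapshots: %v", err)
	}
	for _, s := range snapshots {
		fmt.Printf("%s  %s  (parent: %s)\n", s.ID, s.Name, s.ParentID)
	}
}
```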
115
remote/remote_test.go
Normal file
@ -0,0 +1,115 @@
package remote

import (
	"context"
	"os"
	"path/filepath"
	"testing"

	"gitea.unprism.ru/KRBL/Agate/store"
)

// TestClientConnect tests that the client can connect to a server
func TestClientConnect(t *testing.T) {
	// Skip this test in short mode
	if testing.Short() {
		t.Skip("Skipping remote test in short mode")
	}

	// This test requires a running server
	// For a real test, you would need to start a server
	// Here we'll just test the client creation
	_, err := NewClient("localhost:50051")
	if err != nil {
		// It's expected that this will fail if no server is running
		t.Logf("Failed to connect to server: %v", err)
	}
}

// TestMockClient tests the client functionality with a mock
func TestMockClient(t *testing.T) {
	// Create a mock client
	client := &MockClient{}

	// Test ListSnapshots
	snapshots, err := client.ListSnapshots(context.Background())
	if err != nil {
		t.Fatalf("MockClient.ListSnapshots failed: %v", err)
	}
	if len(snapshots) != 1 {
		t.Errorf("Expected 1 snapshot, got %d", len(snapshots))
	}

	// Test FetchSnapshotDetails
	snapshot, err := client.FetchSnapshotDetails(context.Background(), "mock-snapshot-id")
	if err != nil {
		t.Fatalf("MockClient.FetchSnapshotDetails failed: %v", err)
	}
	if snapshot.ID != "mock-snapshot-id" {
		t.Errorf("Expected snapshot ID 'mock-snapshot-id', got '%s'", snapshot.ID)
	}

	// Test DownloadSnapshot
	tempDir, err := os.MkdirTemp("", "agate-mock-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir)

	err = client.DownloadSnapshot(context.Background(), "mock-snapshot-id", tempDir, "")
	if err != nil {
		t.Fatalf("MockClient.DownloadSnapshot failed: %v", err)
	}

	// Check that the mock file was created
	mockFilePath := filepath.Join(tempDir, "mock-file.txt")
	if _, err := os.Stat(mockFilePath); os.IsNotExist(err) {
		t.Errorf("Mock file was not created")
	}
}

// MockClient is a mock implementation of the Client for testing
type MockClient struct{}

// ListSnapshots returns a mock list of snapshots
func (m *MockClient) ListSnapshots(ctx context.Context) ([]store.SnapshotInfo, error) {
	return []store.SnapshotInfo{
		{
			ID:       "mock-snapshot-id",
			Name:     "Mock Snapshot",
			ParentID: "",
		},
	}, nil
}

// FetchSnapshotDetails returns mock snapshot details
func (m *MockClient) FetchSnapshotDetails(ctx context.Context, snapshotID string) (*store.Snapshot, error) {
	return &store.Snapshot{
		ID:       snapshotID,
		Name:     "Mock Snapshot",
		ParentID: "",
		Files: []store.FileInfo{
			{
				Path:   "mock-file.txt",
				Size:   100,
				IsDir:  false,
				SHA256: "mock-hash",
			},
		},
	}, nil
}

// DownloadSnapshot simulates downloading a snapshot
func (m *MockClient) DownloadSnapshot(ctx context.Context, snapshotID string, targetDir string, localParentID string) error {
	// Create a mock file
	mockFilePath := filepath.Join(targetDir, "mock-file.txt")
	if err := os.MkdirAll(filepath.Dir(mockFilePath), 0755); err != nil {
		return err
	}
	return os.WriteFile(mockFilePath, []byte("Mock file content"), 0644)
}

// Close is a no-op for the mock client
func (m *MockClient) Close() error {
	return nil
}
169
remote/server.go
Normal file
@ -0,0 +1,169 @@
package remote

import (
	"context"
	"fmt"
	"io"
	"net"

	stdgrpc "google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/timestamppb"

	agateGrpc "gitea.unprism.ru/KRBL/Agate/grpc"
	"gitea.unprism.ru/KRBL/Agate/interfaces"
	"gitea.unprism.ru/KRBL/Agate/store"
)

// Server implements the gRPC server for snapshots.
type Server struct {
	agateGrpc.UnimplementedSnapshotServiceServer
	manager interfaces.SnapshotManager
	server  *stdgrpc.Server
}

// NewServer creates a new snapshot server.
func NewServer(manager interfaces.SnapshotManager) *Server {
	return &Server{
		manager: manager,
	}
}

// Start starts the gRPC server on the specified address.
func (s *Server) Start(ctx context.Context, address string) error {
	lis, err := net.Listen("tcp", address)
	if err != nil {
		return fmt.Errorf("failed to listen on %s: %w", address, err)
	}

	s.server = stdgrpc.NewServer()
	agateGrpc.RegisterSnapshotServiceServer(s.server, s)

	go func() {
		if err := s.server.Serve(lis); err != nil {
			fmt.Printf("Server error: %v\n", err)
		}
	}()

	fmt.Printf("Server started on %s\n", address)

	// Wait for context cancellation to stop the server
	<-ctx.Done()
	s.Stop()
	return nil
}

// Stop gracefully stops the server.
func (s *Server) Stop() {
	if s.server != nil {
		s.server.GracefulStop()
		fmt.Println("Server stopped")
	}
}

// ListSnapshots implements the ListSnapshots gRPC method.
func (s *Server) ListSnapshots(ctx context.Context, req *agateGrpc.ListSnapshotsRequest) (*agateGrpc.ListSnapshotsResponse, error) {
	opts := store.ListOptions{}
	snapshots, err := s.manager.ListSnapshots(ctx, opts)
	if err != nil {
		return nil, fmt.Errorf("failed to list snapshots: %w", err)
	}

	response := &agateGrpc.ListSnapshotsResponse{
		Snapshots: make([]*agateGrpc.SnapshotInfo, 0, len(snapshots)),
	}

	for _, snapshot := range snapshots {
		response.Snapshots = append(response.Snapshots, convertToGrpcSnapshotInfo(snapshot))
	}

	return response, nil
}

// GetSnapshotDetails implements the GetSnapshotDetails gRPC method.
func (s *Server) GetSnapshotDetails(ctx context.Context, req *agateGrpc.GetSnapshotDetailsRequest) (*agateGrpc.SnapshotDetails, error) {
	snapshot, err := s.manager.GetSnapshotDetails(ctx, req.SnapshotId)
	if err != nil {
		return nil, fmt.Errorf("failed to get snapshot details: %w", err)
	}

	response := &agateGrpc.SnapshotDetails{
		Info: convertToGrpcSnapshotInfo(store.SnapshotInfo{
			ID:           snapshot.ID,
			Name:         snapshot.Name,
			ParentID:     snapshot.ParentID,
			CreationTime: snapshot.CreationTime,
		}),
		Files: make([]*agateGrpc.FileInfo, 0, len(snapshot.Files)),
	}

	for _, file := range snapshot.Files {
		response.Files = append(response.Files, &agateGrpc.FileInfo{
			Path:       file.Path,
			SizeBytes:  file.Size,
			Sha256Hash: file.SHA256,
			IsDir:      file.IsDir,
		})
	}

	return response, nil
}

// DownloadFile implements the DownloadFile gRPC method.
func (s *Server) DownloadFile(req *agateGrpc.DownloadFileRequest, stream agateGrpc.SnapshotService_DownloadFileServer) error {
	fileReader, err := s.manager.OpenFile(context.Background(), req.SnapshotId, req.FilePath)
	if err != nil {
		return fmt.Errorf("failed to open file: %w", err)
	}
	defer fileReader.Close()

	buffer := make([]byte, 64*1024)
	for {
		n, err := fileReader.Read(buffer)
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("failed to read file: %w", err)
		}
		if err := stream.Send(&agateGrpc.DownloadFileResponse{ChunkData: buffer[:n]}); err != nil {
			return fmt.Errorf("failed to send chunk: %w", err)
		}
	}
	return nil
}

// DownloadSnapshotDiff implements the DownloadSnapshotDiff gRPC method.
func (s *Server) DownloadSnapshotDiff(req *agateGrpc.DownloadSnapshotDiffRequest, stream agateGrpc.SnapshotService_DownloadSnapshotDiffServer) error {
	diffReader, err := s.manager.StreamSnapshotDiff(context.Background(), req.SnapshotId, req.LocalParentId, req.Offset)
	if err != nil {
		return fmt.Errorf("failed to stream snapshot diff: %w", err)
	}
	defer diffReader.Close()

	buffer := make([]byte, 64*1024)
	for {
		n, err := diffReader.Read(buffer)
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("failed to read from diff stream: %w", err)
		}
		if n > 0 {
			if err := stream.Send(&agateGrpc.DownloadFileResponse{ChunkData: buffer[:n]}); err != nil {
				return fmt.Errorf("failed to send diff chunk: %w", err)
			}
		}
	}
	return nil
}

// Helper function for converting store.SnapshotInfo to grpc.SnapshotInfo
func convertToGrpcSnapshotInfo(info store.SnapshotInfo) *agateGrpc.SnapshotInfo {
	return &agateGrpc.SnapshotInfo{
		Id:           info.ID,
		Name:         info.Name,
		ParentId:     info.ParentID,
		CreationTime: timestamppb.New(info.CreationTime),
	}
}
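A brief sketch of wiring this server into an application, assuming some implementation of `interfaces.SnapshotManager` is available (the function name and listen address below are illustrative):

```go
package main

import (
	"context"
	"log"

	"gitea.unprism.ru/KRBL/Agate/interfaces"
	"gitea.unprism.ru/KRBL/Agate/remote"
)

// serveSnapshots runs the gRPC snapshot server until ctx is cancelled.
func serveSnapshots(ctx context.Context, manager interfaces.SnapshotManager) {
	srv := remote.NewServer(manager)
	// Start blocks until the context is cancelled, then stops gracefully.
	if err := srv.Start(ctx, "0.0.0.0:50051"); err != nil {
		log.Printf("snapshot server stopped: %v", err)
	}
}
```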
9
store/errors.go
Normal file
@ -0,0 +1,9 @@
package store

import "errors"

// Common errors that can be used by store implementations
var (
	// ErrNotFound means that a requested resource was not found
	ErrNotFound = errors.New("resource not found")
)
@ -6,24 +6,35 @@ import (
	"io"
	"os"
	"path/filepath"
	"unprism.ru/KRBL/agate"
	"unprism.ru/KRBL/agate/store"

	"gitea.unprism.ru/KRBL/Agate/store"
)

const blobExtension = ".zip"

// fileSystemStore implements the store.BlobStore interface using the local filesystem.
type fileSystemStore struct {
	baseDir string // Directory for storing blobs (archives)
	baseDir   string // Directory for storing blobs (archives)
	activeDir string // Directory for active operations (creation and restore)
}

// NewFileSystemStore creates a new blob store in the specified directory.
func NewFileSystemStore(baseDir string) (store.BlobStore, error) {
	// Make sure the directory exists
	// Make sure the base directory exists
	if err := os.MkdirAll(baseDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create base directory %s for filesystem blob store: %w", baseDir, err)
	}
	return &fileSystemStore{baseDir: baseDir}, nil

	// Create a directory for active operations inside the base directory
	activeDir := filepath.Join(baseDir, "active")
	if err := os.MkdirAll(activeDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create active directory %s for filesystem blob store: %w", activeDir, err)
	}

	return &fileSystemStore{
		baseDir:   baseDir,
		activeDir: activeDir,
	}, nil
}

// getBlobPath builds the full path to a blob file.
@ -64,7 +75,7 @@ func (fs *fileSystemStore) RetrieveBlob(ctx context.Context, snapshotID string)
	if err != nil {
		if os.IsNotExist(err) {
			// If the file is not found, return the custom error
			return nil, agate.ErrNotFound
			return nil, store.ErrNotFound
		}
		return nil, fmt.Errorf("failed to open blob file %s: %w", blobPath, err)
	}
@ -98,7 +109,7 @@ func (fs *fileSystemStore) GetBlobPath(ctx context.Context, snapshotID string) (
	// Check that the file exists
	if _, err := os.Stat(blobPath); err != nil {
		if os.IsNotExist(err) {
			return "", agate.ErrNotFound
			return "", store.ErrNotFound
		}
		return "", fmt.Errorf("failed to stat blob file %s: %w", blobPath, err)
	}
@ -106,3 +117,32 @@ func (fs *fileSystemStore) GetBlobPath(ctx context.Context, snapshotID string) (
	// The file exists, return the path
	return blobPath, nil
}

// GetBaseDir returns the path to the base directory.
func (fs *fileSystemStore) GetBaseDir() string {
	return fs.baseDir
}

// GetActiveDir returns the path to the directory for active operations.
func (fs *fileSystemStore) GetActiveDir() string {
	return fs.activeDir
}

// CleanActiveDir clears the directory for active operations.
// This is useful before starting new operations to avoid conflicts.
func (fs *fileSystemStore) CleanActiveDir(ctx context.Context) error {
	// Remove all files in the active directory but keep the directory itself
	entries, err := os.ReadDir(fs.activeDir)
	if err != nil {
		return fmt.Errorf("failed to read active directory: %w", err)
	}

	for _, entry := range entries {
		path := filepath.Join(fs.activeDir, entry.Name())
		if err := os.RemoveAll(path); err != nil {
			return fmt.Errorf("failed to remove %s from active directory: %w", path, err)
		}
	}

	return nil
}
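The new `active` directory acts as a scratch area for create/restore operations. A hedged sketch of how calling code might prepare it before staging a new archive (the function name is illustrative):

```go
// prepareActiveDir clears leftovers from interrupted operations and returns
// the directory where temporary archives can be assembled.
func prepareActiveDir(ctx context.Context, blobs store.BlobStore) (string, error) {
	if err := blobs.CleanActiveDir(ctx); err != nil {
		return "", fmt.Errorf("clean active dir: %w", err)
	}
	return blobs.GetActiveDir(), nil
}
```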
228
store/filesystem/filesystem_test.go
Normal file
@ -0,0 +1,228 @@
package filesystem

import (
	"bytes"
	"context"
	"io"
	"os"
	"path/filepath"
	"testing"
)

func TestNewFileSystemStore(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	store, err := NewFileSystemStore(tempDir)
	if err != nil {
		t.Fatalf("Failed to create filesystem store: %v", err)
	}

	// Check that directories were created
	if _, err := os.Stat(tempDir); os.IsNotExist(err) {
		t.Fatalf("Base directory was not created")
	}

	// Check that the store's base directory matches the expected path
	if store.GetBaseDir() != tempDir {
		t.Fatalf("Store base directory does not match: got %s, want %s", store.GetBaseDir(), tempDir)
	}

	activeDir := filepath.Join(tempDir, "active")
	if _, err := os.Stat(activeDir); os.IsNotExist(err) {
		t.Fatalf("Active directory was not created")
	}

	// Check that the store's active directory matches the expected path
	if store.GetActiveDir() != activeDir {
		t.Fatalf("Store active directory does not match: got %s, want %s", store.GetActiveDir(), activeDir)
	}
}

func TestStoreAndRetrieveBlob(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	store, err := NewFileSystemStore(tempDir)
	if err != nil {
		t.Fatalf("Failed to create filesystem store: %v", err)
	}

	// Create test data
	testData := []byte("test data for blob")
	reader := bytes.NewReader(testData)
	ctx := context.Background()

	// Store the blob
	snapshotID := "test-snapshot-id"
	path, err := store.StoreBlob(ctx, snapshotID, reader)
	if err != nil {
		t.Fatalf("Failed to store blob: %v", err)
	}

	// Check that the file was created
	if _, err := os.Stat(path); os.IsNotExist(err) {
		t.Fatalf("Blob file was not created")
	}

	// Retrieve the blob
	blobReader, err := store.RetrieveBlob(ctx, snapshotID)
	if err != nil {
		t.Fatalf("Failed to retrieve blob: %v", err)
	}
	defer blobReader.Close()

	// Read the data
	retrievedData, err := io.ReadAll(blobReader)
	if err != nil {
		t.Fatalf("Failed to read blob data: %v", err)
	}

	// Check that the data matches
	if !bytes.Equal(testData, retrievedData) {
		t.Fatalf("Retrieved data does not match original data")
	}
}

func TestDeleteBlob(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	store, err := NewFileSystemStore(tempDir)
	if err != nil {
		t.Fatalf("Failed to create filesystem store: %v", err)
	}

	// Create test data
	testData := []byte("test data for blob")
	reader := bytes.NewReader(testData)
	ctx := context.Background()

	// Store the blob
	snapshotID := "test-snapshot-id"
	path, err := store.StoreBlob(ctx, snapshotID, reader)
	if err != nil {
		t.Fatalf("Failed to store blob: %v", err)
	}

	// Delete the blob
	err = store.DeleteBlob(ctx, snapshotID)
	if err != nil {
		t.Fatalf("Failed to delete blob: %v", err)
	}

	// Check that the file was deleted
	if _, err := os.Stat(path); !os.IsNotExist(err) {
		t.Fatalf("Blob file was not deleted")
	}

	// Deleting a non-existent blob should not return an error
	err = store.DeleteBlob(ctx, "non-existent-id")
	if err != nil {
		t.Fatalf("DeleteBlob returned an error for non-existent blob: %v", err)
	}
}

func TestGetBlobPath(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	store, err := NewFileSystemStore(tempDir)
	if err != nil {
		t.Fatalf("Failed to create filesystem store: %v", err)
	}

	// Create test data
	testData := []byte("test data for blob")
	reader := bytes.NewReader(testData)
	ctx := context.Background()

	// Store the blob
	snapshotID := "test-snapshot-id"
	expectedPath, err := store.StoreBlob(ctx, snapshotID, reader)
	if err != nil {
		t.Fatalf("Failed to store blob: %v", err)
	}

	// Get the blob path
	path, err := store.GetBlobPath(ctx, snapshotID)
	if err != nil {
		t.Fatalf("Failed to get blob path: %v", err)
	}

	// Check that the path matches
	if path != expectedPath {
		t.Fatalf("GetBlobPath returned incorrect path: got %s, want %s", path, expectedPath)
	}

	// Getting path for non-existent blob should return ErrNotFound
	_, err = store.GetBlobPath(ctx, "non-existent-id")
	if err == nil {
		t.Fatalf("GetBlobPath did not return an error for non-existent blob")
	}
}

func TestCleanActiveDir(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	store, err := NewFileSystemStore(tempDir)
	if err != nil {
		t.Fatalf("Failed to create filesystem store: %v", err)
	}

	// Get the active directory
	activeDir := store.GetActiveDir()

	// Create some test files in the active directory
	testFile1 := filepath.Join(activeDir, "test1.txt")
	testFile2 := filepath.Join(activeDir, "test2.txt")

	if err := os.WriteFile(testFile1, []byte("test1"), 0644); err != nil {
		t.Fatalf("Failed to create test file: %v", err)
	}
	if err := os.WriteFile(testFile2, []byte("test2"), 0644); err != nil {
		t.Fatalf("Failed to create test file: %v", err)
	}

	// Clean the active directory
	ctx := context.Background()
	err = store.CleanActiveDir(ctx)
	if err != nil {
		t.Fatalf("Failed to clean active directory: %v", err)
	}

	// Check that the files were deleted
	entries, err := os.ReadDir(activeDir)
	if err != nil {
		t.Fatalf("Failed to read active directory: %v", err)
	}
	if len(entries) > 0 {
		t.Fatalf("Active directory was not cleaned, %d files remain", len(entries))
	}
}
@ -6,12 +6,11 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"gitea.unprism.ru/KRBL/Agate/store"
	_ "github.com/mattn/go-sqlite3"
	"os"
	"path/filepath"
	"time"
	"unprism.ru/KRBL/agate"
	"unprism.ru/KRBL/agate/store"
)

const (
@ -131,7 +130,7 @@ func (s *sqliteStore) GetSnapshotMetadata(ctx context.Context, snapshotID string
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			// If the record is not found, return the custom error
			return nil, agate.ErrNotFound
			return nil, store.ErrNotFound
		}
		return nil, fmt.Errorf("failed to query snapshot %s: %w", snapshotID, err)
	}
@ -168,58 +167,83 @@ func (s *sqliteStore) GetSnapshotMetadata(ctx context.Context, snapshotID string
	return &snap, nil
}

// ListSnapshotsMetadata retrieves brief information about all snapshots.
func (s *sqliteStore) ListSnapshotsMetadata(ctx context.Context) ([]store.SnapshotInfo, error) {
	// Simplified implementation to debug the issue
	fmt.Println("ListSnapshotsMetadata called")
// ListSnapshotsMetadata retrieves basic information about snapshots with filtering and pagination.
func (s *sqliteStore) ListSnapshotsMetadata(ctx context.Context, opts store.ListOptions) ([]store.SnapshotInfo, error) {
	// Build the query with optional filtering
	var query string
	var args []interface{}

	// Get all snapshot IDs first
	query := `SELECT id FROM snapshots ORDER BY creation_time DESC;`
	fmt.Println("Executing query:", query)
	if opts.FilterByName != "" {
		query = `SELECT id, name, parent_id, creation_time FROM snapshots WHERE name LIKE ? ORDER BY creation_time DESC`
		args = append(args, "%"+opts.FilterByName+"%")
	} else {
		query = `SELECT id, name, parent_id, creation_time FROM snapshots ORDER BY creation_time DESC`
	}

	rows, err := s.db.QueryContext(ctx, query)
	// Add pagination if specified
	if opts.Limit > 0 {
		query += " LIMIT ?"
		args = append(args, opts.Limit)

		if opts.Offset > 0 {
			query += " OFFSET ?"
			args = append(args, opts.Offset)
		}
	}

	// Execute the query
	rows, err := s.db.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to query snapshot IDs: %w", err)
		return nil, fmt.Errorf("failed to query snapshots: %w", err)
	}
	defer rows.Close()

	var snapshots []store.SnapshotInfo

	// For each ID, get the full snapshot details
	// Iterate through the results
	for rows.Next() {
		var id string
		if err := rows.Scan(&id); err != nil {
			return nil, fmt.Errorf("failed to scan snapshot ID: %w", err)
		var info store.SnapshotInfo
		var parentID sql.NullString
		var creationTimeStr string

		if err := rows.Scan(&info.ID, &info.Name, &parentID, &creationTimeStr); err != nil {
			return nil, fmt.Errorf("failed to scan snapshot row: %w", err)
		}

		// Get the full snapshot details
		snapshot, err := s.GetSnapshotMetadata(ctx, id)
		if err != nil {
			return nil, fmt.Errorf("failed to get snapshot details for ID %s: %w", id, err)
		// Set parent ID if not NULL
		if parentID.Valid {
			info.ParentID = parentID.String
		}

		// Convert to SnapshotInfo
		info := store.SnapshotInfo{
			ID:           snapshot.ID,
			Name:         snapshot.Name,
			ParentID:     snapshot.ParentID,
			CreationTime: snapshot.CreationTime,
		// Parse creation time
		const sqliteLayout = "2006-01-02 15:04:05" // Standard SQLite DATETIME format without timezone
		t, parseErr := time.Parse(sqliteLayout, creationTimeStr)
		if parseErr != nil {
			// Try format with milliseconds if the first one didn't work
			const sqliteLayoutWithMs = "2006-01-02 15:04:05.999999999"
			t, parseErr = time.Parse(sqliteLayoutWithMs, creationTimeStr)
			if parseErr != nil {
				// Try RFC3339 if saved as UTC().Format(time.RFC3339)
				t, parseErr = time.Parse(time.RFC3339, creationTimeStr)
				if parseErr != nil {
					return nil, fmt.Errorf("failed to parse creation time '%s' for snapshot %s: %w", creationTimeStr, info.ID, parseErr)
				}
			}
		}
		info.CreationTime = t.UTC() // Store as UTC

		snapshots = append(snapshots, info)
	}

	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating snapshot IDs: %w", err)
		return nil, fmt.Errorf("error iterating snapshot rows: %w", err)
	}

	// If no snapshots found, return an empty slice
	if len(snapshots) == 0 {
		fmt.Println("No snapshots found")
		return []store.SnapshotInfo{}, nil
	}

	fmt.Printf("Found %d snapshots\n", len(snapshots))
	return snapshots, nil
}

@ -241,3 +265,13 @@ func (s *sqliteStore) DeleteSnapshotMetadata(ctx context.Context, snapshotID str

	return nil // Do not return an error if the record is not found
}

// UpdateSnapshotParentID updates the ParentID of the specified snapshot.
func (s *sqliteStore) UpdateSnapshotParentID(ctx context.Context, snapshotID, newParentID string) error {
	query := `UPDATE snapshots SET parent_id = ? WHERE id = ?;`
	_, err := s.db.ExecContext(ctx, query, newParentID, snapshotID)
	if err != nil {
		return fmt.Errorf("failed to update parent ID for snapshot %s: %w", snapshotID, err)
	}
	return nil
}
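To make the query construction above concrete, a hedged sketch of a call that exercises both the name filter and pagination (the function name is illustrative; the SQL shown in the comment is what the builder above produces):

```go
// listAlphaPage lists the third page of 10 snapshots whose name contains "alpha".
func listAlphaPage(ctx context.Context, ms store.MetadataStore) ([]store.SnapshotInfo, error) {
	// Resulting SQL (parameters bound separately):
	//   SELECT id, name, parent_id, creation_time FROM snapshots
	//   WHERE name LIKE ? ORDER BY creation_time DESC LIMIT ? OFFSET ?
	// with args ["%alpha%", 10, 20]; OFFSET is only appended when Limit > 0.
	return ms.ListSnapshotsMetadata(ctx, store.ListOptions{
		FilterByName: "alpha",
		Limit:        10,
		Offset:       20,
	})
}
```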
399
store/sqlite/sqlite_test.go
Normal file
@ -0,0 +1,399 @@
package sqlite

import (
	"context"
	"os"
	"path/filepath"
	"testing"
	"time"

	"gitea.unprism.ru/KRBL/Agate/store"
)

func TestNewSQLiteStore(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	dbPath := filepath.Join(tempDir, "test.db")
	s, err := NewSQLiteStore(dbPath)
	if err != nil {
		t.Fatalf("Failed to create SQLite store: %v", err)
	}
	defer s.Close()

	// Check that the database file was created
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		t.Fatalf("Database file was not created")
	}
}

func TestSaveAndGetSnapshotMetadata(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	dbPath := filepath.Join(tempDir, "test.db")
	s, err := NewSQLiteStore(dbPath)
	if err != nil {
		t.Fatalf("Failed to create SQLite store: %v", err)
	}
	defer s.Close()

	// Create a test snapshot
	now := time.Now().UTC().Truncate(time.Second) // SQLite doesn't store nanoseconds
	testSnapshot := store.Snapshot{
		ID:           "test-snapshot-id",
		Name:         "Test Snapshot",
		ParentID:     "parent-snapshot-id",
		CreationTime: now,
		Files: []store.FileInfo{
			{
				Path:   "/test/file1.txt",
				Size:   100,
				IsDir:  false,
				SHA256: "hash1",
			},
			{
				Path:   "/test/dir1",
				Size:   0,
				IsDir:  true,
				SHA256: "",
			},
		},
	}

	// Save the snapshot
	ctx := context.Background()
	err = s.SaveSnapshotMetadata(ctx, testSnapshot)
	if err != nil {
		t.Fatalf("Failed to save snapshot metadata: %v", err)
	}

	// Retrieve the snapshot
	retrievedSnapshot, err := s.GetSnapshotMetadata(ctx, testSnapshot.ID)
	if err != nil {
		t.Fatalf("Failed to retrieve snapshot metadata: %v", err)
	}

	// Check that the retrieved snapshot matches the original
	if retrievedSnapshot.ID != testSnapshot.ID {
		t.Errorf("Retrieved snapshot ID does not match: got %s, want %s", retrievedSnapshot.ID, testSnapshot.ID)
	}
	if retrievedSnapshot.Name != testSnapshot.Name {
		t.Errorf("Retrieved snapshot name does not match: got %s, want %s", retrievedSnapshot.Name, testSnapshot.Name)
	}
	if retrievedSnapshot.ParentID != testSnapshot.ParentID {
		t.Errorf("Retrieved snapshot parent ID does not match: got %s, want %s", retrievedSnapshot.ParentID, testSnapshot.ParentID)
	}
	if !retrievedSnapshot.CreationTime.Equal(testSnapshot.CreationTime) {
		t.Errorf("Retrieved snapshot creation time does not match: got %v, want %v", retrievedSnapshot.CreationTime, testSnapshot.CreationTime)
	}
	if len(retrievedSnapshot.Files) != len(testSnapshot.Files) {
		t.Errorf("Retrieved snapshot has wrong number of files: got %d, want %d", len(retrievedSnapshot.Files), len(testSnapshot.Files))
	}
}

func TestListSnapshotsMetadata(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	dbPath := filepath.Join(tempDir, "test.db")
	s, err := NewSQLiteStore(dbPath)
	if err != nil {
		t.Fatalf("Failed to create SQLite store: %v", err)
	}
	defer s.Close()

	// Create test snapshots
	ctx := context.Background()
	now := time.Now().UTC().Truncate(time.Second)

	testSnapshots := []store.Snapshot{
		{
			ID:           "snapshot-1",
			Name:         "Snapshot 1",
			ParentID:     "",
			CreationTime: now.Add(-2 * time.Hour),
			Files:        []store.FileInfo{},
		},
		{
			ID:           "snapshot-2",
			Name:         "Snapshot 2",
			ParentID:     "snapshot-1",
			CreationTime: now.Add(-1 * time.Hour),
			Files:        []store.FileInfo{},
		},
		{
			ID:           "snapshot-3",
			Name:         "Snapshot 3",
			ParentID:     "snapshot-2",
			CreationTime: now,
			Files:        []store.FileInfo{},
		},
	}

	// Save the snapshots
	for _, snap := range testSnapshots {
		err = s.SaveSnapshotMetadata(ctx, snap)
		if err != nil {
			t.Fatalf("Failed to save snapshot metadata: %v", err)
		}
	}

	// List the snapshots with empty options
	snapshots, err := s.ListSnapshotsMetadata(ctx, store.ListOptions{})
	if err != nil {
		t.Fatalf("Failed to list snapshots: %v", err)
	}

	// Check that all snapshots are listed
	if len(snapshots) != len(testSnapshots) {
		t.Errorf("Wrong number of snapshots listed: got %d, want %d", len(snapshots), len(testSnapshots))
	}

	// Check that the snapshots have the correct information
	for i, snap := range testSnapshots {
		found := false
		for _, listedSnap := range snapshots {
			if listedSnap.ID == snap.ID {
				found = true
				if listedSnap.Name != snap.Name {
					t.Errorf("Snapshot %d has wrong name: got %s, want %s", i, listedSnap.Name, snap.Name)
				}
				if listedSnap.ParentID != snap.ParentID {
					t.Errorf("Snapshot %d has wrong parent ID: got %s, want %s", i, listedSnap.ParentID, snap.ParentID)
				}
				if !listedSnap.CreationTime.Equal(snap.CreationTime) {
					t.Errorf("Snapshot %d has wrong creation time: got %v, want %v", i, listedSnap.CreationTime, snap.CreationTime)
				}
				break
			}
		}
		if !found {
			t.Errorf("Snapshot %d (%s) not found in listed snapshots", i, snap.ID)
		}
	}
}

func TestListSnapshotsMetadata_WithOptions(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	dbPath := filepath.Join(tempDir, "test.db")
	s, err := NewSQLiteStore(dbPath)
	if err != nil {
		t.Fatalf("Failed to create SQLite store: %v", err)
	}
	defer s.Close()

	// Create test snapshots with different names
	ctx := context.Background()
	now := time.Now().UTC().Truncate(time.Second)

	testSnapshots := []store.Snapshot{
		{
			ID:           "alpha-1",
			Name:         "alpha-1",
			ParentID:     "",
			CreationTime: now.Add(-3 * time.Hour),
			Files:        []store.FileInfo{},
		},
		{
			ID:           "alpha-2",
			Name:         "alpha-2",
			ParentID:     "alpha-1",
			CreationTime: now.Add(-2 * time.Hour),
			Files:        []store.FileInfo{},
		},
		{
			ID:           "beta-1",
			Name:         "beta-1",
			ParentID:     "",
			CreationTime: now.Add(-1 * time.Hour),
			Files:        []store.FileInfo{},
		},
	}

	// Save the snapshots
	for _, snap := range testSnapshots {
		err = s.SaveSnapshotMetadata(ctx, snap)
		if err != nil {
			t.Fatalf("Failed to save snapshot metadata: %v", err)
		}
	}

	// Test different ListOptions scenarios
	t.Run("FilterByName", func(t *testing.T) {
		// Filter snapshots by name "alpha"
		opts := store.ListOptions{
			FilterByName: "alpha",
		}
		snapshots, err := s.ListSnapshotsMetadata(ctx, opts)
		if err != nil {
			t.Fatalf("Failed to list snapshots with filter: %v", err)
		}

		// Should return 2 snapshots (alpha-1 and alpha-2)
		if len(snapshots) != 2 {
			t.Errorf("Wrong number of snapshots returned: got %d, want %d", len(snapshots), 2)
		}

		// Check that only alpha snapshots are returned
		for _, snap := range snapshots {
			if snap.ID != "alpha-1" && snap.ID != "alpha-2" {
				t.Errorf("Unexpected snapshot ID in filtered results: %s", snap.ID)
			}
		}
	})

	t.Run("Limit", func(t *testing.T) {
		// Limit to 1 snapshot (should return the newest one)
		opts := store.ListOptions{
			Limit: 1,
		}
		snapshots, err := s.ListSnapshotsMetadata(ctx, opts)
		if err != nil {
			t.Fatalf("Failed to list snapshots with limit: %v", err)
		}

		// Should return 1 snapshot
		if len(snapshots) != 1 {
			t.Errorf("Wrong number of snapshots returned: got %d, want %d", len(snapshots), 1)
		}

		// The newest snapshot should be beta-1
		if snapshots[0].ID != "beta-1" {
			t.Errorf("Wrong snapshot returned with limit: got %s, want %s", snapshots[0].ID, "beta-1")
		}
	})

	t.Run("Offset", func(t *testing.T) {
		// Limit to 1 snapshot with offset 1 (should return the second newest)
		opts := store.ListOptions{
			Limit:  1,
			Offset: 1,
		}
		snapshots, err := s.ListSnapshotsMetadata(ctx, opts)
		if err != nil {
			t.Fatalf("Failed to list snapshots with offset: %v", err)
		}

		// Should return 1 snapshot
		if len(snapshots) != 1 {
			t.Errorf("Wrong number of snapshots returned: got %d, want %d", len(snapshots), 1)
		}

		// The second newest snapshot should be alpha-2
		if snapshots[0].ID != "alpha-2" {
			t.Errorf("Wrong snapshot returned with offset: got %s, want %s", snapshots[0].ID, "alpha-2")
		}
	})

	t.Run("FilterAndPagination", func(t *testing.T) {
		// Filter by "alpha" with limit 1
		opts := store.ListOptions{
			FilterByName: "alpha",
			Limit:        1,
		}
		snapshots, err := s.ListSnapshotsMetadata(ctx, opts)
		if err != nil {
			t.Fatalf("Failed to list snapshots with filter and pagination: %v", err)
		}

		// Should return 1 snapshot
		if len(snapshots) != 1 {
			t.Errorf("Wrong number of snapshots returned: got %d, want %d", len(snapshots), 1)
		}

		// The newest alpha snapshot should be alpha-2
		if snapshots[0].ID != "alpha-2" {
			t.Errorf("Wrong snapshot returned with filter and limit: got %s, want %s", snapshots[0].ID, "alpha-2")
		}
	})

	t.Run("NoResults", func(t *testing.T) {
		// Filter by a name that doesn't exist
		opts := store.ListOptions{
			FilterByName: "gamma",
		}
		snapshots, err := s.ListSnapshotsMetadata(ctx, opts)
		if err != nil {
			t.Fatalf("Failed to list snapshots with non-matching filter: %v", err)
		}

		// Should return 0 snapshots
		if len(snapshots) != 0 {
			t.Errorf("Expected 0 snapshots, got %d", len(snapshots))
		}
	})
}

func TestDeleteSnapshotMetadata(t *testing.T) {
	// Create a temporary directory for tests
	tempDir, err := os.MkdirTemp("", "agate-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create a new store
	dbPath := filepath.Join(tempDir, "test.db")
	s, err := NewSQLiteStore(dbPath)
	if err != nil {
		t.Fatalf("Failed to create SQLite store: %v", err)
	}
	defer s.Close()

	// Create a test snapshot
	ctx := context.Background()
	testSnapshot := store.Snapshot{
		ID:           "test-snapshot-id",
		Name:         "Test Snapshot",
		ParentID:     "",
		CreationTime: time.Now().UTC().Truncate(time.Second),
		Files:        []store.FileInfo{},
	}

	// Save the snapshot
	err = s.SaveSnapshotMetadata(ctx, testSnapshot)
	if err != nil {
		t.Fatalf("Failed to save snapshot metadata: %v", err)
	}

	// Delete the snapshot
	err = s.DeleteSnapshotMetadata(ctx, testSnapshot.ID)
	if err != nil {
		t.Fatalf("Failed to delete snapshot metadata: %v", err)
	}

	// Try to retrieve the deleted snapshot
	_, err = s.GetSnapshotMetadata(ctx, testSnapshot.ID)
	if err == nil {
		t.Fatalf("Expected error when retrieving deleted snapshot, got nil")
	}

	// Deleting a non-existent snapshot should not return an error
	err = s.DeleteSnapshotMetadata(ctx, "non-existent-id")
	if err != nil {
		t.Fatalf("DeleteSnapshotMetadata returned an error for non-existent snapshot: %v", err)
	}
}
@ -31,6 +31,13 @@ type SnapshotInfo struct {
	CreationTime time.Time // Creation time
}

// ListOptions provides options for filtering and paginating snapshot lists
type ListOptions struct {
	FilterByName string // Filter snapshots by name (substring match)
	Limit        int    // Maximum number of snapshots to return
	Offset       int    // Number of snapshots to skip
}

// MetadataStore defines the interface for storing and retrieving snapshot metadata.
type MetadataStore interface {
	// SaveSnapshotMetadata saves the full snapshot metadata, including the file list.
@ -41,13 +48,16 @@ type MetadataStore interface {
	// Returns agate.ErrNotFound if the snapshot is not found.
	GetSnapshotMetadata(ctx context.Context, snapshotID string) (*Snapshot, error)

	// ListSnapshotsMetadata retrieves brief information about all snapshots.
	ListSnapshotsMetadata(ctx context.Context) ([]SnapshotInfo, error)
	// ListSnapshotsMetadata retrieves brief information about snapshots with filtering and pagination.
	ListSnapshotsMetadata(ctx context.Context, opts ListOptions) ([]SnapshotInfo, error)

	// DeleteSnapshotMetadata deletes snapshot metadata by its ID.
	// Must not return an error if the snapshot is not found.
	DeleteSnapshotMetadata(ctx context.Context, snapshotID string) error

	// UpdateSnapshotParentID updates the ParentID of the specified snapshot.
	UpdateSnapshotParentID(ctx context.Context, snapshotID, newParentID string) error

	// Close closes the connection to the metadata store.
	Close() error
}
@ -71,4 +81,14 @@ type BlobStore interface {
	// This can be useful for archive package functions that work with paths.
	// Returns agate.ErrNotFound if the blob is not found.
	GetBlobPath(ctx context.Context, snapshotID string) (string, error)

	// GetBaseDir returns the path to the base directory
	GetBaseDir() string

	// GetActiveDir returns the path to the directory for active operations.
	GetActiveDir() string

	// CleanActiveDir clears the directory for active operations.
	// This is useful before starting new operations to avoid conflicts.
	CleanActiveDir(ctx context.Context) error
}
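A hedged sketch of how a consumer might page through snapshots with the new `ListOptions`-based signature (the helper function below is illustrative, not part of this changeset):

```go
// allSnapshots walks every snapshot page by page using ListOptions.
func allSnapshots(ctx context.Context, ms store.MetadataStore) ([]store.SnapshotInfo, error) {
	const pageSize = 100
	var all []store.SnapshotInfo
	for offset := 0; ; offset += pageSize {
		page, err := ms.ListSnapshotsMetadata(ctx, store.ListOptions{Limit: pageSize, Offset: offset})
		if err != nil {
			return nil, err
		}
		all = append(all, page...)
		if len(page) < pageSize {
			return all, nil // last (possibly partial) page reached
		}
	}
}
```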
@ -4,9 +4,9 @@ import (
	"fmt"
	"path/filepath"

	"unprism.ru/KRBL/agate/store"
	"unprism.ru/KRBL/agate/store/filesystem"
	"unprism.ru/KRBL/agate/store/sqlite"
	"gitea.unprism.ru/KRBL/Agate/store"
	"gitea.unprism.ru/KRBL/Agate/store/filesystem"
	"gitea.unprism.ru/KRBL/Agate/store/sqlite"
)

// NewDefaultMetadataStore creates a new SQLite-based metadata store.
@ -39,4 +39,4 @@ func InitDefaultStores(baseDir string) (store.MetadataStore, store.BlobStore, er
}

	return metadataStore, blobStore, nil
}
}