2023-12-06 04:47:41 +00:00
|
|
|
package cache
|
|
|
|
|
|
|
|
import (
|
2023-12-16 14:19:53 +00:00
|
|
|
"errors"
|
2023-12-06 04:47:41 +00:00
|
|
|
"fmt"
|
|
|
|
"io"
|
2023-12-28 00:13:09 +00:00
|
|
|
"log/slog"
|
2023-12-06 04:47:41 +00:00
|
|
|
"net/http"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2023-12-16 11:59:58 +00:00
|
|
|
"sync"
|
2023-12-28 00:13:09 +00:00
|
|
|
"time"
|
2023-12-06 04:47:41 +00:00
|
|
|
|
2023-12-28 00:13:09 +00:00
|
|
|
"github.com/avast/retry-go"
|
2023-12-16 11:59:58 +00:00
|
|
|
"github.com/puzpuzpuz/xsync/v3"
|
2023-12-06 04:47:41 +00:00
|
|
|
"github.com/spf13/viper"
|
|
|
|
|
|
|
|
"github.com/satisfactorymodding/ficsit-cli/utils"
|
|
|
|
)
|
|
|
|
|
2023-12-16 11:59:58 +00:00
|
|
|
// downloadGroup tracks a single in-flight download shared by all concurrent
// callers of DownloadOrCache that request the same cache key. The first caller
// performs the download; later callers block on wait and then read err/size.
type downloadGroup struct {
	// err holds the terminal download error, if any; valid only after wait is closed.
	err error
	// wait is closed by the downloading goroutine when the download finishes
	// (successfully or not); joiners block on it.
	wait chan bool
	// hash is the expected content hash; joiners with a different hash are rejected.
	hash string
	// updates is the fan-out list of progress channels for all interested callers.
	updates []chan<- utils.GenericProgress
	// size is the downloaded byte count; valid only after wait is closed without err.
	size int64
}
|
|
|
|
|
|
|
|
// downloadSync deduplicates concurrent downloads keyed by cache key.
// Note: keep this as the *xsync.MapOf pointer returned by the constructor —
// dereferencing it (as the previous code did) copies the map value, and an
// xsync.MapOf must not be copied after first use.
var downloadSync = xsync.NewMapOf[string, *downloadGroup]()
|
|
|
|
|
2023-12-07 16:57:31 +00:00
|
|
|
// DownloadOrCache returns an open handle to the file identified by cacheKey,
// downloading it from url into the on-disk download cache if necessary.
// Concurrent calls with the same cacheKey are coalesced: only one download
// runs, and every caller's updates channel (if non-nil) receives progress.
// Callers joining an in-flight download must pass the same hash, or an error
// is returned. downloadSemaphore (optional) bounds concurrent downloads.
// On success the returned *os.File is open for reading and the int64 is the
// downloaded size in bytes; the caller owns closing the file.
func DownloadOrCache(cacheKey string, hash string, url string, updates chan<- utils.GenericProgress, downloadSemaphore chan int) (*os.File, int64, error) {
	// Atomically either create a new download group (we become the downloader)
	// or join an existing one (loaded == true).
	group, loaded := downloadSync.LoadOrCompute(cacheKey, func() *downloadGroup {
		return &downloadGroup{
			hash:    hash,
			updates: make([]chan<- utils.GenericProgress, 0),
			wait:    make(chan bool),
		}
	})

	if updates != nil {
		// Register our progress channel under the map's lock so the fan-out
		// goroutine never races with this append.
		_, _ = downloadSync.Compute(cacheKey, func(oldValue *downloadGroup, loaded bool) (*downloadGroup, bool) {
			oldValue.updates = append(oldValue.updates, updates)
			return oldValue, false
		})
	}

	downloadCache := filepath.Join(viper.GetString("cache-dir"), "downloadCache")
	if err := os.MkdirAll(downloadCache, 0o777); err != nil {
		if !os.IsExist(err) {
			return nil, 0, fmt.Errorf("failed creating download cache: %w", err)
		}
	}

	location := filepath.Join(downloadCache, cacheKey)

	if loaded {
		// We joined someone else's download: sanity-check the hash, then wait
		// for the downloader to close group.wait and publish err/size.
		if group.hash != hash {
			return nil, 0, errors.New("hash mismatch in download group")
		}

		<-group.wait

		if group.err != nil {
			return nil, 0, group.err
		}

		f, err := os.Open(location)
		if err != nil {
			return nil, 0, fmt.Errorf("failed to open file: %s: %w", location, err)
		}

		return f, group.size, nil
	}

	// We are the downloader: remove the group on exit so a later request for
	// the same key starts fresh (the file itself stays cached on disk).
	defer downloadSync.Delete(cacheKey)

	// upstreamUpdates receives raw progress from downloadInternal; the
	// goroutine below fans it out to every registered caller channel.
	upstreamUpdates := make(chan utils.GenericProgress)
	defer close(upstreamUpdates)

	// upstreamWaiter signals the fan-out goroutine to stop after a successful
	// download (on failure the deferred close of upstreamUpdates stops it).
	upstreamWaiter := make(chan bool)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()

	outer:
		for {
			select {
			case update, ok := <-upstreamUpdates:
				if !ok {
					break outer
				}

				// Forward progress to every caller that registered a channel.
				for _, u := range group.updates {
					u <- update
				}
			case <-upstreamWaiter:
				break outer
			}
		}
	}()

	var size int64

	// Retry transient failures: up to 5 attempts, fixed 1s delay between them.
	err := retry.Do(func() error {
		var err error
		size, err = downloadInternal(cacheKey, location, hash, url, upstreamUpdates, downloadSemaphore)
		if err != nil {
			return fmt.Errorf("internal download error: %w", err)
		}
		return nil
	},
		retry.Attempts(5),
		retry.Delay(time.Second),
		retry.DelayType(retry.FixedDelay),
		retry.OnRetry(func(n uint, err error) {
			if n > 0 {
				slog.Info("retrying download", slog.Uint64("n", uint64(n)), slog.String("cacheKey", cacheKey))
			}
		}),
	)
	if err != nil {
		// Publish the failure to joiners, then release them.
		group.err = err
		close(group.wait)
		return nil, 0, err // nolint
	}

	// Stop the fan-out goroutine and wait for it before publishing success,
	// so no forward is in flight when joiners wake up.
	close(upstreamWaiter)
	wg.Wait()

	group.size = size
	close(group.wait)

	f, err := os.Open(location)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to open file: %s: %w", location, err)
	}

	return f, size, nil
}
|
|
|
|
|
|
|
|
func downloadInternal(cacheKey string, location string, hash string, url string, updates chan<- utils.GenericProgress, downloadSemaphore chan int) (int64, error) {
|
2023-12-06 04:47:41 +00:00
|
|
|
stat, err := os.Stat(location)
|
|
|
|
if err == nil {
|
2023-12-29 15:15:18 +00:00
|
|
|
matches, err := compareHash(hash, location)
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
2023-12-06 04:47:41 +00:00
|
|
|
}
|
|
|
|
|
2023-12-29 15:15:18 +00:00
|
|
|
if matches {
|
2023-12-16 11:59:58 +00:00
|
|
|
return stat.Size(), nil
|
2023-12-06 04:47:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if err := os.Remove(location); err != nil {
|
2023-12-16 14:19:53 +00:00
|
|
|
return 0, fmt.Errorf("failed to delete file: %s: %w", location, err)
|
2023-12-06 04:47:41 +00:00
|
|
|
}
|
|
|
|
} else if !os.IsNotExist(err) {
|
2023-12-16 14:19:53 +00:00
|
|
|
return 0, fmt.Errorf("failed to stat file: %s: %w", location, err)
|
2023-12-06 04:47:41 +00:00
|
|
|
}
|
|
|
|
|
2023-12-06 23:39:34 +00:00
|
|
|
if updates != nil {
|
|
|
|
headResp, err := http.Head(url)
|
|
|
|
if err != nil {
|
2023-12-16 14:19:53 +00:00
|
|
|
return 0, fmt.Errorf("failed to head: %s: %w", url, err)
|
2023-12-06 23:39:34 +00:00
|
|
|
}
|
|
|
|
defer headResp.Body.Close()
|
|
|
|
updates <- utils.GenericProgress{Total: headResp.ContentLength}
|
|
|
|
}
|
|
|
|
|
|
|
|
if downloadSemaphore != nil {
|
|
|
|
downloadSemaphore <- 1
|
|
|
|
defer func() { <-downloadSemaphore }()
|
|
|
|
}
|
|
|
|
|
2023-12-06 04:47:41 +00:00
|
|
|
out, err := os.Create(location)
|
|
|
|
if err != nil {
|
2023-12-16 14:19:53 +00:00
|
|
|
return 0, fmt.Errorf("failed creating file at: %s: %w", location, err)
|
2023-12-06 04:47:41 +00:00
|
|
|
}
|
|
|
|
defer out.Close()
|
|
|
|
|
|
|
|
resp, err := http.Get(url)
|
|
|
|
if err != nil {
|
2023-12-16 14:19:53 +00:00
|
|
|
return 0, fmt.Errorf("failed to fetch: %s: %w", url, err)
|
2023-12-06 04:47:41 +00:00
|
|
|
}
|
|
|
|
defer resp.Body.Close()
|
|
|
|
|
|
|
|
if resp.StatusCode != http.StatusOK {
|
2023-12-16 11:59:58 +00:00
|
|
|
return 0, fmt.Errorf("bad status: %s on url: %s", resp.Status, url)
|
2023-12-06 04:47:41 +00:00
|
|
|
}
|
|
|
|
|
2023-12-06 23:39:34 +00:00
|
|
|
progresser := &utils.Progresser{
|
|
|
|
Total: resp.ContentLength,
|
|
|
|
Updates: updates,
|
2023-12-06 04:47:41 +00:00
|
|
|
}
|
|
|
|
|
2023-12-28 00:13:09 +00:00
|
|
|
_, err = io.Copy(io.MultiWriter(out, progresser), resp.Body)
|
2023-12-06 04:47:41 +00:00
|
|
|
if err != nil {
|
2023-12-16 14:19:53 +00:00
|
|
|
return 0, fmt.Errorf("failed writing file to disk: %w", err)
|
2023-12-06 04:47:41 +00:00
|
|
|
}
|
|
|
|
|
2023-12-28 00:13:09 +00:00
|
|
|
_ = out.Sync()
|
|
|
|
|
2023-12-06 04:47:41 +00:00
|
|
|
if updates != nil {
|
2023-12-07 16:57:31 +00:00
|
|
|
updates <- utils.GenericProgress{Completed: resp.ContentLength, Total: resp.ContentLength}
|
2023-12-06 04:47:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
_, err = addFileToCache(cacheKey)
|
|
|
|
if err != nil {
|
2023-12-16 14:19:53 +00:00
|
|
|
return 0, fmt.Errorf("failed to add file to cache: %w", err)
|
2023-12-06 04:47:41 +00:00
|
|
|
}
|
|
|
|
|
2023-12-16 11:59:58 +00:00
|
|
|
return resp.ContentLength, nil
|
2023-12-06 04:47:41 +00:00
|
|
|
}
|
2023-12-29 15:15:18 +00:00
|
|
|
|
|
|
|
func compareHash(hash string, location string) (bool, error) {
|
|
|
|
existingHash := ""
|
|
|
|
|
|
|
|
if hash != "" {
|
|
|
|
f, err := os.Open(location)
|
|
|
|
if err != nil {
|
|
|
|
return false, fmt.Errorf("failed to open file: %s: %w", location, err)
|
|
|
|
}
|
|
|
|
defer f.Close()
|
|
|
|
|
|
|
|
existingHash, err = utils.SHA256Data(f)
|
|
|
|
if err != nil {
|
|
|
|
return false, fmt.Errorf("could not compute hash for file: %s: %w", location, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return hash == existingHash, nil
|
|
|
|
}
|