updated pacman database creation operation to form or update the database file in memory, added download counter related to pkg.tar.zst files

This commit is contained in:
dancheg97 2023-06-24 13:37:33 +03:00
parent 41667b2dd1
commit e399ce8452
4 changed files with 112 additions and 125 deletions
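For orientation, an illustrative summary of the change (editorial note, not part of the diff); names refer to functions in the files below:

// old flow: cs.GetStrBytes -> write temp db.tar.gz -> arch.UnpackDb -> md.PutToDb -> arch.PackDb -> save archive and .db symlink
// new flow: cs.Get -> io.ReadAll -> arch.UpdatePacmanDbEntry(dbData, md) -> cs.Save(dbkey, updatedData)
// in addition, LoadPackageFile now increments a download counter whenever a *.pkg.tar.zst file is served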

View File

@@ -4,16 +4,16 @@
package arch
import (
"archive/tar"
"bytes"
"compress/gzip"
"crypto/md5"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"io/fs"
"os"
"path"
"strconv"
"strings"
"time"
@@ -209,56 +209,6 @@ func rmEmptyStrings(s []string) []string {
return r
}
// Function takes path to directory with pacman database and updates it with
// current package metadata.
func (m *Metadata) PutToDb(dir string, mode fs.FileMode) error {
descdir := path.Join(dir, m.Name+"-"+m.Version)
err := os.MkdirAll(descdir, mode)
if err != nil {
return err
}
return os.WriteFile(path.Join(descdir, "desc"), []byte(m.GetDbDesc()), mode)
}
// Function takes raw database archive bytes and destination directory as
// arguments and unpacks database contents to destination directory.
func UnpackDb(src, dst string) error {
return archiver.DefaultTarGz.Unarchive(src, dst)
}
// Function takes path to source directory with raw pacman description files
// for pacman database, creates db.tar.gz archive and related symlink for
// provided path.
func PackDb(src, dst string) error {
if !strings.HasSuffix(dst, ".db.tar.gz") {
return fmt.Errorf("dst should end with '.db.tar.gz': %s", dst)
}
symlink := strings.TrimSuffix(dst, ".tar.gz")
if _, err := os.Stat(dst); err == nil {
err = os.RemoveAll(dst)
if err != nil {
return err
}
err = os.RemoveAll(symlink)
if err != nil {
return err
}
}
des, err := os.ReadDir(src)
if err != nil {
return err
}
var pkgdescs []string
for _, de := range des {
pkgdescs = append(pkgdescs, path.Join(src, de.Name()))
}
err = archiver.DefaultTarGz.Archive(pkgdescs, dst)
if err != nil {
return err
}
return os.Symlink(dst, symlink)
}
// Join database or package names to prevent collisions with same packages in
// different user spaces. Skips empty strings and returns name joined with
// dots.
@@ -276,3 +226,86 @@ func Join(s ...string) string {
}
return rez
}
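Illustrative aside (not part of the diff): per the doc comment above, Join would behave roughly like this, with assumed sample values:

// Join("user", "archlinux", "", "x86_64") == "user.archlinux.x86_64"
// empty strings are skipped and the remaining parts are joined with dots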
// Add or update an existing package entry in the archived database data.
func UpdatePacmanDbEntry(db []byte, md *Metadata) ([]byte, error) {
// Read existing entries in archive.
entries, err := readEntries(db)
if err != nil {
return nil, err
}
// Add new package entry to list.
entries[md.Name+"-"+md.Version+"/desc"] = []byte(md.GetDbDesc())
var out bytes.Buffer
// Write entries to new buffer and return it.
err = writeToArchive(entries, &out)
if err != nil {
return nil, err
}
return out.Bytes(), nil
}
// Read database entries contained in the pacman database archive.
func readEntries(dbarchive []byte) (map[string][]byte, error) {
gzf, err := gzip.NewReader(bytes.NewReader(dbarchive))
if err != nil {
return nil, err
}
var entries = map[string][]byte{}
tarReader := tar.NewReader(gzf)
for {
header, err := tarReader.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
if header.Typeflag == tar.TypeReg {
content, err := io.ReadAll(tarReader)
if err != nil {
return nil, err
}
entries[header.Name] = content
}
}
return entries, nil
}
// Write pacman package entries to the provided writer as a gzip-compressed tar archive.
func writeToArchive(files map[string][]byte, buf io.Writer) error {
gw := gzip.NewWriter(buf)
defer gw.Close()
tw := tar.NewWriter(gw)
defer tw.Close()
for name, content := range files {
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
Mode: int64(os.ModePerm),
}
err := tw.WriteHeader(hdr)
if err != nil {
return err
}
_, err = io.Copy(tw, bytes.NewReader(content))
if err != nil {
return err
}
}
return nil
}
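Illustrative aside, not part of this commit: a minimal in-package test sketch showing how the three new helpers fit together. The file name, sample Metadata values, and the use of an empty archive as the starting point are assumptions.

// hypothetical db_test.go in package arch
package arch

import (
	"bytes"
	"testing"
)

func TestUpdatePacmanDbEntryRoundTrip(t *testing.T) {
	// Start from an empty gzipped tar produced by the new writer helper.
	var empty bytes.Buffer
	if err := writeToArchive(map[string][]byte{}, &empty); err != nil {
		t.Fatal(err)
	}
	// Assumed sample metadata; only Name and Version determine the entry path.
	md := &Metadata{Name: "example-pkg", Version: "1.0.0-1"}
	udata, err := UpdatePacmanDbEntry(empty.Bytes(), md)
	if err != nil {
		t.Fatal(err)
	}
	// Reading the rewritten archive back should yield exactly one desc entry.
	entries, err := readEntries(udata)
	if err != nil {
		t.Fatal(err)
	}
	if _, ok := entries["example-pkg-1.0.0-1/desc"]; !ok || len(entries) != 1 {
		t.Fatalf("unexpected entries: %v", entries)
	}
}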

View File

@@ -4,7 +4,6 @@
package packages
import (
"bytes"
"io"
"path"
"strings"
@@ -64,17 +63,3 @@ func RelativePathToKey(relativePath string) (BlobHash256Key, error) {
return BlobHash256Key(parts[2]), nil
}
// Save data with specified string key.
func (s *ContentStore) SaveStrBytes(key string, data []byte) error {
return s.Save(BlobHash256Key(key), bytes.NewReader(data), int64(len(data)))
}
// Get data related to provided key.
func (s *ContentStore) GetStrBytes(key string) ([]byte, error) {
obj, err := s.Get(BlobHash256Key(key))
if err != nil {
return nil, err
}
return io.ReadAll(obj)
}

View File

@@ -33,7 +33,7 @@ func Push(ctx *context.Context) {
return
}
// Read package to memory and create plain GPG message to validate signature.
// Read package to memory for signature validation.
pkgdata, err := io.ReadAll(ctx.Req.Body)
if err != nil {
apiError(ctx, http.StatusInternalServerError, err)
@@ -48,7 +48,7 @@ func Push(ctx *context.Context) {
return
}
// Validate package signature with user's GnuPG key.
// Validate package signature with any of user's GnuPG keys.
err = arch_service.ValidatePackageSignature(ctx, pkgdata, sigdata, user)
if err != nil {
apiError(ctx, http.StatusUnauthorized, err)
@@ -149,8 +149,11 @@ func Get(ctx *context.Context) {
apiError(ctx, http.StatusInternalServerError, err)
return
}
ctx.Resp.WriteHeader(http.StatusOK)
return
}
ctx.Resp.WriteHeader(http.StatusNotFound)
}

View File

@@ -4,6 +4,7 @@
package arch
import (
"bytes"
"fmt"
"io"
"os"
@@ -19,7 +20,8 @@ import (
"github.com/google/uuid"
)
// Get data related to provided file name and distribution.
// Get data related to provided file name and distribution, and update the
// download counter if an actual package file is retrieved from the database.
func LoadPackageFile(ctx *context.Context, distro, file string) ([]byte, error) {
db := db.GetEngine(ctx)
@@ -35,6 +37,13 @@ func LoadPackageFile(ctx *context.Context, distro, file string) ([]byte, error)
return nil, err
}
if strings.HasSuffix(file, ".pkg.tar.zst") {
err = pkg_mdl.IncrementDownloadCounter(ctx, pkgfile.VersionID)
if err != nil {
return nil, err
}
}
cs := packages.NewContentStore()
obj, err := cs.Get(packages.BlobHash256Key(blob.HashSHA256))
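Illustrative aside (file names are assumed examples): the suffix check above means only fetches of the package archive itself count as downloads; signature and other requests served by this function leave the counter untouched.

// strings.HasSuffix("gitea-1.20.1-1-x86_64.pkg.tar.zst", ".pkg.tar.zst")     // true  -> counter incremented
// strings.HasSuffix("gitea-1.20.1-1-x86_64.pkg.tar.zst.sig", ".pkg.tar.zst") // false -> counter unchanged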
@@ -47,12 +56,13 @@ func LoadPackageFile(ctx *context.Context, distro, file string) ([]byte, error)
// Get data related to pacman database file or symlink.
func LoadPacmanDatabase(ctx *context.Context, owner, distro, architecture, file string) ([]byte, error) {
cs := packages.NewContentStore()
file = strings.TrimPrefix(file, owner+".")
obj, err := cs.Get(packages.BlobHash256Key(arch.Join(owner, distro, architecture, file)))
dbname := strings.TrimSuffix(arch.Join(owner, distro, architecture, file), ".tar.gz")
obj, err := cs.Get(packages.BlobHash256Key(dbname))
if err != nil {
return nil, err
}
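Illustrative aside (the concrete name is an assumed example): because of the TrimSuffix above, requests for the .db.tar.gz alias and for the plain .db file resolve to the same blob key under which UpdatePacmanDatabases stores the in-memory database.

// "user.archlinux.x86_64.example.com.db.tar.gz" -> key "user.archlinux.x86_64.example.com.db"
// "user.archlinux.x86_64.example.com.db"        -> key "user.archlinux.x86_64.example.com.db"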
@@ -72,7 +82,7 @@ func UpdatePacmanDatabases(ctx *context.Context, md *arch.Metadata, distro, owne
defer os.RemoveAll(tmpdir)
// If architecture is not specified or any, package will be automatically
// saved to databases with most popular architectures.
// saved to pacman databases with most popular architectures.
var architectures = md.Arch
if len(md.Arch) == 0 || md.Arch[0] == "any" {
architectures = []string{
@@ -83,75 +93,31 @@
cs := packages.NewContentStore()
// Update pacman database files for each architecture.
for _, architecture := range architectures {
var (
db = arch.Join(owner, distro, architecture, setting.Domain, "db.tar.gz")
dbpth = path.Join(tmpdir, db)
dbf = path.Join(tmpdir, db) + ".folder"
sbsl = strings.TrimSuffix(db, ".tar.gz")
slpth = path.Join(tmpdir, sbsl)
)
db := arch.Join(owner, distro, architecture, setting.Domain, "db")
dbkey := packages.BlobHash256Key(db)
// Get existing pacman database, or create empty folder for it.
dbdata, err := cs.GetStrBytes(db)
if err == nil {
err = os.WriteFile(dbpth, dbdata, os.ModePerm)
o, err := cs.Get(dbkey)
if err != nil {
return err
}
err = arch.UnpackDb(dbpth, dbf)
data, err := io.ReadAll(o)
if err != nil {
return err
}
}
udata, err := arch.UpdatePacmanDbEntry(data, md)
if err != nil {
err = os.MkdirAll(dbf, os.ModePerm)
return err
}
err = cs.Save(dbkey, bytes.NewReader(udata), int64(len(udata)))
if err != nil {
return err
}
}
// Update database folder with metadata for new package.
err = md.PutToDb(dbf, os.ModePerm)
if err != nil {
return err
}
// Create database archive and related symlink.
err = arch.PackDb(dbf, dbpth)
if err != nil {
return err
}
// Save database file.
f, err := os.Open(dbpth)
if err != nil {
return err
}
defer f.Close()
dbfi, err := f.Stat()
if err != nil {
return err
}
err = cs.Save(packages.BlobHash256Key(db), f, dbfi.Size())
if err != nil {
return err
}
// Save database symlink file.
f, err = os.Open(slpth)
if err != nil {
return err
}
defer f.Close()
dbarchivefi, err := f.Stat()
if err != nil {
return err
}
err = cs.Save(packages.BlobHash256Key(sbsl), f, dbarchivefi.Size())
if err != nil {
return err
}
}
return nil
}