Merge branch 'main' into lunny/commit_status_webhook

Lunny Xiao 2024-06-13 09:31:22 +08:00 committed by GitHub
commit 5e39456e42
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
497 changed files with 6987 additions and 3553 deletions

View File

@ -10,7 +10,8 @@
"ghcr.io/devcontainers-contrib/features/poetry:2": {},
"ghcr.io/devcontainers/features/python:1": {
"version": "3.12"
}
},
"ghcr.io/warrenbuckley/codespace-features/sqlite:1": {}
},
"customizations": {
"vscode": {
@ -25,8 +26,9 @@
"Vue.volar",
"ms-azuretools.vscode-docker",
"vitest.explorer",
"qwtel.sqlite-viewer",
"GitHub.vscode-pull-request-github"
"cweijan.vscode-database-client2",
"GitHub.vscode-pull-request-github",
"Azurite.azurite"
]
}
},

View File

@ -324,7 +324,7 @@ rules:
jquery/no-sizzle: [2]
jquery/no-slide: [2]
jquery/no-submit: [2]
jquery/no-text: [0]
jquery/no-text: [2]
jquery/no-toggle: [2]
jquery/no-trigger: [0]
jquery/no-trim: [2]
@ -477,7 +477,7 @@ rules:
no-jquery/no-slide: [2]
no-jquery/no-sub: [2]
no-jquery/no-support: [2]
no-jquery/no-text: [0]
no-jquery/no-text: [2]
no-jquery/no-trigger: [0]
no-jquery/no-trim: [2]
no-jquery/no-type: [2]
@ -798,7 +798,7 @@ rules:
unicorn/prefer-object-has-own: [0]
unicorn/prefer-optional-catch-binding: [2]
unicorn/prefer-prototype-methods: [0]
unicorn/prefer-query-selector: [0]
unicorn/prefer-query-selector: [2]
unicorn/prefer-reflect-apply: [0]
unicorn/prefer-regexp-test: [2]
unicorn/prefer-set-has: [0]
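The `jquery/no-text`, `no-jquery/no-text` and `unicorn/prefer-query-selector` rules are switched here from off (`[0]`) to error (`[2]`). Illustrative only (not code from this changeset), the newly flagged patterns and a plain-DOM equivalent look like:

```js
// previously allowed, now rejected:
$('#user-name').text('gitea');         // jquery/no-text, no-jquery/no-text
document.getElementById('user-name');  // unicorn/prefer-query-selector

// preferred plain-DOM equivalent:
document.querySelector('#user-name').textContent = 'gitea';
```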

View File

@ -3,7 +3,7 @@
<!--
1. Please speak English, this is the language all maintainers can speak and write.
2. Please ask questions or configuration/deploy problems on our Discord
server (https://discord.gg/gitea) or forum (https://discourse.gitea.io).
server (https://discord.gg/gitea) or forum (https://forum.gitea.com).
3. Please take a moment to check that your issue doesn't already exist.
4. Make sure it's not mentioned in the FAQ (https://docs.gitea.com/help/faq)
5. Please give all relevant information below for bug reports, because
@ -21,7 +21,7 @@
- [ ] MySQL
- [ ] MSSQL
- [ ] SQLite
- Can you reproduce the bug at https://try.gitea.io:
- Can you reproduce the bug at https://demo.gitea.com:
- [ ] Yes (provide example URL)
- [ ] No
- Log gist:

View File

@ -37,7 +37,7 @@ body:
label: Can you reproduce the bug on the Gitea demo site?
description: |
If so, please provide a URL in the Description field
URL of Gitea demo: https://try.gitea.io
URL of Gitea demo: https://demo.gitea.com
options:
- "Yes"
- "No"
@ -74,7 +74,7 @@ body:
attributes:
label: How are you running Gitea?
description: |
Please include information on whether you built Gitea yourself, used one of our downloads, are using https://try.gitea.io or are using some other package
Please include information on whether you built Gitea yourself, used one of our downloads, are using https://demo.gitea.com or are using some other package
Please also tell us how you are running Gitea, e.g. if it is being run from docker, a command-line, systemd etc.
If you are using a package or systemd tell us what distribution you are using
validations:

View File

@ -46,7 +46,7 @@ body:
label: Can you reproduce the bug on the Gitea demo site?
description: |
If so, please provide a URL in the Description field
URL of Gitea demo: https://try.gitea.io
URL of Gitea demo: https://demo.gitea.com
options:
- "Yes"
- "No"

View File

@ -119,6 +119,10 @@ jobs:
MINIO_SECRET_KEY: 12345678
ports:
- "9000:9000"
devstoreaccount1.azurite.local: # https://github.com/Azure/Azurite/issues/1583
image: mcr.microsoft.com/azure-storage/azurite:latest
ports:
- 10000:10000
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
@ -126,7 +130,7 @@ jobs:
go-version-file: go.mod
check-latest: true
- name: Add hosts to /etc/hosts
run: '[ -e "/.dockerenv" ] || [ -e "/run/.containerenv" ] || echo "127.0.0.1 mysql elasticsearch meilisearch smtpimap" | sudo tee -a /etc/hosts'
run: '[ -e "/.dockerenv" ] || [ -e "/run/.containerenv" ] || echo "127.0.0.1 minio devstoreaccount1.azurite.local mysql elasticsearch meilisearch smtpimap" | sudo tee -a /etc/hosts'
- run: make deps-backend
- run: make backend
env:
@ -204,6 +208,10 @@ jobs:
SA_PASSWORD: MwantsaSecurePassword1
ports:
- "1433:1433"
devstoreaccount1.azurite.local: # https://github.com/Azure/Azurite/issues/1583
image: mcr.microsoft.com/azure-storage/azurite:latest
ports:
- 10000:10000
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
@ -211,7 +219,7 @@ jobs:
go-version-file: go.mod
check-latest: true
- name: Add hosts to /etc/hosts
run: '[ -e "/.dockerenv" ] || [ -e "/run/.containerenv" ] || echo "127.0.0.1 mssql" | sudo tee -a /etc/hosts'
run: '[ -e "/.dockerenv" ] || [ -e "/run/.containerenv" ] || echo "127.0.0.1 mssql devstoreaccount1.azurite.local" | sudo tee -a /etc/hosts'
- run: make deps-backend
- run: make backend
env:
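The new `devstoreaccount1.azurite.local` service container plus the extra `/etc/hosts` entry make an Azure Blob emulator reachable from the test jobs. As a sketch only (not taken from this changeset), test configuration could point the new `azureblob` storage type at it using Azurite's well-known development account:

```ini
; Sketch only: azureblob storage against the Azurite emulator started above.
; devstoreaccount1 and this key are Azurite's published development defaults, not secrets.
[storage]
STORAGE_TYPE = azureblob
AZURE_BLOB_ENDPOINT = http://devstoreaccount1.azurite.local:10000/devstoreaccount1
AZURE_BLOB_ACCOUNT_NAME = devstoreaccount1
AZURE_BLOB_ACCOUNT_KEY = Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
AZURE_BLOB_CONTAINER = gitea
```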

View File

@ -47,7 +47,7 @@ jobs:
run: |
REF_NAME=$(echo "${{ github.ref }}" | sed -e 's/refs\/heads\///' -e 's/refs\/tags\///' -e 's/release\/v//')
echo "Cleaned name is ${REF_NAME}"
echo "branch=${REF_NAME}" >> "$GITHUB_OUTPUT"
echo "branch=${REF_NAME}-nightly" >> "$GITHUB_OUTPUT"
- name: configure aws
uses: aws-actions/configure-aws-credentials@v4
with:
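The only change here is appending `-nightly` to the cleaned ref name. Running the same `sed` pipeline locally shows the resulting output values (illustration only, not part of the workflow):

```sh
for ref in refs/heads/main refs/heads/release/v1.22; do
  name=$(echo "$ref" | sed -e 's/refs\/heads\///' -e 's/refs\/tags\///' -e 's/release\/v//')
  echo "branch=${name}-nightly"
done
# branch=main-nightly
# branch=1.22-nightly
```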

View File

@ -43,7 +43,7 @@ vscode:
- Vue.volar
- ms-azuretools.vscode-docker
- vitest.explorer
- qwtel.sqlite-viewer
- cweijan.vscode-database-client2
- GitHub.vscode-pull-request-github
ports:

View File

@ -22,6 +22,7 @@ linters:
- typecheck
- unconvert
- unused
- unparam
- wastedassign
run:
@ -29,6 +30,8 @@ run:
output:
sort-results: true
sort-order: [file]
show-stats: true
linters-settings:
stylecheck:
@ -40,11 +43,7 @@ linters-settings:
- ifElseChain
- singleCaseSwitch # Every time this occurred in the code, there was no other way.
revive:
ignore-generated-header: false
severity: warning
confidence: 0.8
errorCode: 1
warningCode: 1
severity: error
rules:
- name: atomic
- name: bare-return

View File

@ -77,7 +77,7 @@ If your issue has not been reported yet, [open an issue](https://github.com/go-g
and answer the questions so we can understand and reproduce the problematic behavior. \
Please write clear and concise instructions so that we can reproduce the behavior — even if it seems obvious. \
The more detailed and specific you are, the faster we can fix the issue. \
It is really helpful if you can reproduce your problem on a site running on the latest commits, i.e. <https://try.gitea.io>, as perhaps your problem has already been fixed on a current version. \
It is really helpful if you can reproduce your problem on a site running on the latest commits, i.e. <https://demo.gitea.com>, as perhaps your problem has already been fixed on a current version. \
Please follow the guidelines described in [How to Report Bugs Effectively](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html) for your report.
Please be kind, remember that Gitea comes at no cost to you, and you're getting free help.
@ -362,7 +362,7 @@ If you add a new feature or change an existing aspect of Gitea, the documentatio
## API v1
The API is documented by [swagger](http://try.gitea.io/api/swagger) and is based on [the GitHub API](https://docs.github.com/en/rest).
The API is documented by [swagger](https://gitea.com/api/swagger) and is based on [the GitHub API](https://docs.github.com/en/rest).
### GitHub API compatibility

View File

@ -1,5 +1,5 @@
# Build stage
FROM docker.io/library/golang:1.22-alpine3.19 AS build-env
FROM docker.io/library/golang:1.22-alpine3.20 AS build-env
ARG GOPROXY
ENV GOPROXY ${GOPROXY:-direct}
@ -41,7 +41,7 @@ RUN chmod 755 /tmp/local/usr/bin/entrypoint \
/go/src/code.gitea.io/gitea/environment-to-ini
RUN chmod 644 /go/src/code.gitea.io/gitea/contrib/autocompletion/bash_autocomplete
FROM docker.io/library/alpine:3.19
FROM docker.io/library/alpine:3.20
LABEL maintainer="maintainers@gitea.io"
EXPOSE 22 3000

View File

@ -1,5 +1,5 @@
# Build stage
FROM docker.io/library/golang:1.22-alpine3.19 AS build-env
FROM docker.io/library/golang:1.22-alpine3.20 AS build-env
ARG GOPROXY
ENV GOPROXY ${GOPROXY:-direct}
@ -39,7 +39,7 @@ RUN chmod 755 /tmp/local/usr/local/bin/docker-entrypoint.sh \
/go/src/code.gitea.io/gitea/environment-to-ini
RUN chmod 644 /go/src/code.gitea.io/gitea/contrib/autocompletion/bash_autocomplete
FROM docker.io/library/alpine:3.19
FROM docker.io/library/alpine:3.20
LABEL maintainer="maintainers@gitea.io"
EXPOSE 2222 3000

View File

@ -25,10 +25,10 @@ COMMA := ,
XGO_VERSION := go-1.22.x
AIR_PACKAGE ?= github.com/cosmtrek/air@v1
AIR_PACKAGE ?= github.com/air-verse/air@v1
EDITORCONFIG_CHECKER_PACKAGE ?= github.com/editorconfig-checker/editorconfig-checker/cmd/editorconfig-checker@2.7.0
GOFUMPT_PACKAGE ?= mvdan.cc/gofumpt@v0.6.0
GOLANGCI_LINT_PACKAGE ?= github.com/golangci/golangci-lint/cmd/golangci-lint@v1.57.2
GOLANGCI_LINT_PACKAGE ?= github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.0
GXZ_PACKAGE ?= github.com/ulikunitz/xz/cmd/gxz@v0.5.11
MISSPELL_PACKAGE ?= github.com/golangci/misspell/cmd/misspell@v0.5.1
SWAGGER_PACKAGE ?= github.com/go-swagger/go-swagger/cmd/swagger@db51e79a0e37c572d8b59ae0c58bf2bbbbe53285
@ -36,6 +36,7 @@ XGO_PACKAGE ?= src.techknowlogick.com/xgo@latest
GO_LICENSES_PACKAGE ?= github.com/google/go-licenses@v1
GOVULNCHECK_PACKAGE ?= golang.org/x/vuln/cmd/govulncheck@v1
ACTIONLINT_PACKAGE ?= github.com/rhysd/actionlint/cmd/actionlint@v1
GOPLS_PACKAGE ?= golang.org/x/tools/gopls@v0.15.3
DOCKER_IMAGE ?= gitea/gitea
DOCKER_TAG ?= latest
@ -88,7 +89,7 @@ ifneq ($(GITHUB_REF_TYPE),branch)
GITEA_VERSION ?= $(VERSION)
else
ifneq ($(GITHUB_REF_NAME),)
VERSION ?= $(subst release/v,,$(GITHUB_REF_NAME))
VERSION ?= $(subst release/v,,$(GITHUB_REF_NAME))-nightly
else
VERSION ?= main
endif
@ -213,6 +214,7 @@ help:
@echo " - lint-go lint go files"
@echo " - lint-go-fix lint go files and fix issues"
@echo " - lint-go-vet lint go files with vet"
@echo " - lint-go-gopls lint go files with gopls"
@echo " - lint-js lint js files"
@echo " - lint-js-fix lint js files and fix issues"
@echo " - lint-css lint css files"
@ -366,7 +368,7 @@ lint-frontend: lint-js lint-css
lint-frontend-fix: lint-js-fix lint-css-fix
.PHONY: lint-backend
lint-backend: lint-go lint-go-vet lint-editorconfig
lint-backend: lint-go lint-go-vet lint-go-gopls lint-editorconfig
.PHONY: lint-backend-fix
lint-backend-fix: lint-go-fix lint-go-vet lint-editorconfig
@ -424,6 +426,11 @@ lint-go-vet:
@GOOS= GOARCH= $(GO) build code.gitea.io/gitea-vet
@$(GO) vet -vettool=gitea-vet ./...
.PHONY: lint-go-gopls
lint-go-gopls:
@echo "Running gopls check..."
@GO=$(GO) GOPLS_PACKAGE=$(GOPLS_PACKAGE) tools/lint-go-gopls.sh $(GO_SOURCES_NO_BINDATA)
.PHONY: lint-editorconfig
lint-editorconfig:
@$(GO) run $(EDITORCONFIG_CHECKER_PACKAGE) $(EDITORCONFIG_FILES)
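The new `lint-go-gopls` target delegates to `tools/lint-go-gopls.sh`, which is not shown in this diff. Conceptually it amounts to installing the pinned gopls and running `gopls check` over the non-bindata Go sources; a rough sketch under that assumption, not the actual script:

```sh
# Sketch only; the real tools/lint-go-gopls.sh may differ.
go install golang.org/x/tools/gopls@v0.15.3   # same version as GOPLS_PACKAGE above
out=$(gopls check $(find . -name '*.go' -not -name '*bindata.go' -not -path './vendor/*'))
if [ -n "$out" ]; then echo "$out"; exit 1; fi
```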
@ -864,13 +871,14 @@ deps-tools:
$(GO) install $(GO_LICENSES_PACKAGE)
$(GO) install $(GOVULNCHECK_PACKAGE)
$(GO) install $(ACTIONLINT_PACKAGE)
$(GO) install $(GOPLS_PACKAGE)
node_modules: package-lock.json
npm install --no-save
@touch node_modules
.venv: poetry.lock
poetry install --no-root
poetry install
@touch .venv
.PHONY: update
@ -887,7 +895,7 @@ update-js: node-check | node_modules
update-py: node-check | node_modules
npx updates -u -f pyproject.toml
rm -rf .venv poetry.lock
poetry install --no-root
poetry install
@touch .venv
.PHONY: fomantic

View File

@ -26,7 +26,7 @@ This project has been
[forked](https://blog.gitea.com/welcome-to-gitea/) from
[Gogs](https://gogs.io) since November of 2016, but a lot has changed.
For online demonstrations, you can visit [try.gitea.io](https://try.gitea.io).
For online demonstrations, you can visit [demo.gitea.com](https://demo.gitea.com).
For accessing free Gitea service (with a limited number of repositories), you can visit [gitea.com](https://gitea.com/user/login).
@ -56,7 +56,7 @@ More info: https://docs.gitea.com/installation/install-from-source
./gitea web
> [!NOTE]
> If you're interested in using our APIs, we have experimental support with [documentation](https://try.gitea.io/api/swagger).
> If you're interested in using our APIs, we have experimental support with [documentation](https://docs.gitea.com/api).
## Contributing
@ -80,7 +80,7 @@ https://docs.gitea.com/contributing/localization
## Further information
For more information and instructions about how to install Gitea, please look at our [documentation](https://docs.gitea.com/).
If you have questions that are not covered by the documentation, you can get in contact with us on our [Discord server](https://discord.gg/Gitea) or create a post in the [discourse forum](https://discourse.gitea.io/).
If you have questions that are not covered by the documentation, you can get in contact with us on our [Discord server](https://discord.gg/Gitea) or create a post in the [discourse forum](https://forum.gitea.com/).
We maintain a list of Gitea-related projects at [gitea/awesome-gitea](https://gitea.com/gitea/awesome-gitea).

View File

@ -18,7 +18,7 @@
Gitea's primary goal is to create an extremely easy-to-install, fast, self-hosted Git service with a pleasant installation and usage experience. We use Go as the backend language, so all you need is a single generated executable. It is also cross-platform, supporting Linux, macOS and Windows on a wide range of architectures, including x86, amd64, ARM and PowerPC.
If you want to try the online demo, visit [try.gitea.io](https://try.gitea.io/).
If you want to try the online demo and report issues, visit [demo.gitea.com](https://demo.gitea.com/).
If you want to use the free Gitea service (with a limited number of repositories), visit [gitea.com](https://gitea.com/user/login).

File diff suppressed because one or more lines are too long

View File

@ -5,7 +5,9 @@ package cmd
import (
"context"
"errors"
"fmt"
"io/fs"
"strings"
actions_model "code.gitea.io/gitea/models/actions"
@ -40,7 +42,7 @@ var CmdMigrateStorage = &cli.Command{
Name: "storage",
Aliases: []string{"s"},
Value: "",
Usage: "New storage type: local (default) or minio",
Usage: "New storage type: local (default), minio or azureblob",
},
&cli.StringFlag{
Name: "path",
@ -48,6 +50,7 @@ var CmdMigrateStorage = &cli.Command{
Value: "",
Usage: "New storage placement if store is local (leave blank for default)",
},
// Minio Storage special configurations
&cli.StringFlag{
Name: "minio-endpoint",
Value: "",
@ -91,6 +94,37 @@ var CmdMigrateStorage = &cli.Command{
Value: "",
Usage: "Minio checksum algorithm (default/md5)",
},
&cli.StringFlag{
Name: "minio-bucket-lookup-type",
Value: "",
Usage: "Minio bucket lookup type",
},
// Azure Blob Storage special configurations
&cli.StringFlag{
Name: "azureblob-endpoint",
Value: "",
Usage: "Azure Blob storage endpoint",
},
&cli.StringFlag{
Name: "azureblob-account-name",
Value: "",
Usage: "Azure Blob storage account name",
},
&cli.StringFlag{
Name: "azureblob-account-key",
Value: "",
Usage: "Azure Blob storage account key",
},
&cli.StringFlag{
Name: "azureblob-container",
Value: "",
Usage: "Azure Blob storage container",
},
&cli.StringFlag{
Name: "azureblob-base-path",
Value: "",
Usage: "Azure Blob storage base path",
},
},
}
@ -162,8 +196,20 @@ func migrateActionsLog(ctx context.Context, dstStorage storage.ObjectStorage) er
func migrateActionsArtifacts(ctx context.Context, dstStorage storage.ObjectStorage) error {
return db.Iterate(ctx, nil, func(ctx context.Context, artifact *actions_model.ActionArtifact) error {
_, err := storage.Copy(dstStorage, artifact.ArtifactPath, storage.ActionsArtifacts, artifact.ArtifactPath)
return err
if artifact.Status == int64(actions_model.ArtifactStatusExpired) {
return nil
}
_, err := storage.Copy(dstStorage, artifact.StoragePath, storage.ActionsArtifacts, artifact.StoragePath)
if err != nil {
// ignore files that do not exist
if errors.Is(err, fs.ErrNotExist) {
return nil
}
return err
}
return nil
})
}
@ -220,6 +266,19 @@ func runMigrateStorage(ctx *cli.Context) error {
UseSSL: ctx.Bool("minio-use-ssl"),
InsecureSkipVerify: ctx.Bool("minio-insecure-skip-verify"),
ChecksumAlgorithm: ctx.String("minio-checksum-algorithm"),
BucketLookUpType: ctx.String("minio-bucket-lookup-type"),
},
})
case string(setting.AzureBlobStorageType):
dstStorage, err = storage.NewAzureBlobStorage(
stdCtx,
&setting.Storage{
AzureBlobConfig: setting.AzureBlobStorageConfig{
Endpoint: ctx.String("azureblob-endpoint"),
AccountName: ctx.String("azureblob-account-name"),
AccountKey: ctx.String("azureblob-account-key"),
Container: ctx.String("azureblob-container"),
BasePath: ctx.String("azureblob-base-path"),
},
})
default:
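With the new flags registered above, migrating existing data to Azure Blob Storage would look roughly like this (a sketch: values are placeholders, and the `--type` flag is assumed from the existing `gitea migrate-storage` command since it is not shown in this hunk):

```sh
gitea migrate-storage --type attachments --storage azureblob \
  --azureblob-endpoint https://accountname.blob.core.windows.net \
  --azureblob-account-name accountname \
  --azureblob-account-key "$AZURE_BLOB_ACCOUNT_KEY" \
  --azureblob-container gitea \
  --azureblob-base-path attachments/
```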

View File

@ -1334,6 +1334,9 @@ LEVEL = Info
;;
;; Maximum allowed file size in bytes to render CSV files as table. (Set to 0 for no limit).
;MAX_FILE_SIZE = 524288
;;
;; Maximum allowed rows to render CSV files. (Set to 0 for no limit)
;MAX_ROWS = 2500
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@ -1687,6 +1690,16 @@ LEVEL = Info
;; convert \r\n to \n for Sendmail
;SENDMAIL_CONVERT_CRLF = true
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;[mailer.override_header]
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; This is empty by default, use it only if you know what you need it for.
;Reply-To = test@example.com, test2@example.com
;Content-Type = text/html; charset=utf-8
;In-Reply-To =
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;[email.incoming]
@ -1862,7 +1875,7 @@ LEVEL = Info
;STORAGE_TYPE = local
;;
;; Allows the storage driver to redirect to authenticated URLs to serve files directly
;; Currently, only `minio` is supported.
;; Currently, only `minio` and `azureblob` are supported.
;SERVE_DIRECT = false
;;
;; Path for attachments. Defaults to `attachments`. Only available when STORAGE_TYPE is `local`
@ -1872,7 +1885,10 @@ LEVEL = Info
;; Minio endpoint to connect only available when STORAGE_TYPE is `minio`
;MINIO_ENDPOINT = localhost:9000
;;
;; Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`
;; Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`.
;; If not provided and STORAGE_TYPE is `minio`, will search for credentials in known
;; environment variables (MINIO_ACCESS_KEY_ID, AWS_ACCESS_KEY_ID), credentials files
;; (~/.mc/config.json, ~/.aws/credentials), and EC2 instance metadata.
;MINIO_ACCESS_KEY_ID =
;;
;; Minio secretAccessKey to connect only available when STORAGE_TYPE is `minio`
@ -1895,6 +1911,24 @@ LEVEL = Info
;;
;; Minio checksum algorithm: default (for MinIO or AWS S3) or md5 (for Cloudflare or Backblaze)
;MINIO_CHECKSUM_ALGORITHM = default
;;
;; Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio`
;MINIO_BUCKET_LOOKUP_TYPE = auto
;; Azure Blob endpoint to connect only available when STORAGE_TYPE is `azureblob`,
;; e.g. https://accountname.blob.core.windows.net or http://127.0.0.1:10000/devstoreaccount1
;AZURE_BLOB_ENDPOINT =
;;
;; Azure Blob account name to connect only available when STORAGE_TYPE is `azureblob`
;AZURE_BLOB_ACCOUNT_NAME =
;;
;; Azure Blob account key to connect only available when STORAGE_TYPE is `azureblob`
;AZURE_BLOB_ACCOUNT_KEY =
;;
;; Azure Blob container to store the attachments only available when STORAGE_TYPE is `azureblob`
;AZURE_BLOB_CONTAINER = gitea
;;
;; override the azure blob base path if storage type is azureblob
;AZURE_BLOB_BASE_PATH = attachments/
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@ -2033,6 +2067,17 @@ LEVEL = Info
;; or only create new users if UPDATE_EXISTING is set to false
;UPDATE_EXISTING = true
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Cleanup expired actions assets
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;[cron.cleanup_actions]
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;ENABLED = true
;RUN_AT_START = true
;SCHEDULE = @midnight
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Clean-up deleted branches
@ -2443,6 +2488,11 @@ LEVEL = Info
;STORAGE_TYPE = local
;; override the minio base path if storage type is minio
;MINIO_BASE_PATH = packages/
;; override the azure blob base path if storage type is azureblob
;AZURE_BLOB_BASE_PATH = packages/
;; Allows the storage driver to redirect to authenticated URLs to serve files directly
;; Currently, only `minio` and `azureblob` are supported.
;SERVE_DIRECT = false
;;
;; Path for chunked uploads. Defaults to APP_DATA_PATH + `tmp/package-upload`
;CHUNKED_UPLOAD_PATH = tmp/package-upload
@ -2516,6 +2566,8 @@ LEVEL = Info
;;
;; override the minio base path if storage type is minio
;MINIO_BASE_PATH = repo-archive/
;; override the azure blob base path if storage type is azureblob
;AZURE_BLOB_BASE_PATH = repo-archive/
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@ -2537,8 +2589,15 @@ LEVEL = Info
;; Where your lfs files reside, default is data/lfs.
;PATH = data/lfs
;;
;; Allows the storage driver to redirect to authenticated URLs to serve files directly
;; Currently, only `minio` and `azureblob` are supported.
;SERVE_DIRECT = false
;;
;; override the minio base path if storage type is minio
;MINIO_BASE_PATH = lfs/
;;
;; override the azure blob base path if storage type is azureblob
;AZURE_BLOB_BASE_PATH = lfs/
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@ -2553,13 +2612,16 @@ LEVEL = Info
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; customize storage
;[storage.my_minio]
;[storage.minio]
;STORAGE_TYPE = minio
;;
;; Minio endpoint to connect only available when STORAGE_TYPE is `minio`
;MINIO_ENDPOINT = localhost:9000
;;
;; Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`
;; Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`.
;; If not provided and STORAGE_TYPE is `minio`, will search for credentials in known
;; environment variables (MINIO_ACCESS_KEY_ID, AWS_ACCESS_KEY_ID), credentials files
;; (~/.mc/config.json, ~/.aws/credentials), and EC2 instance metadata.
;MINIO_ACCESS_KEY_ID =
;;
;; Minio secretAccessKey to connect only available when STORAGE_TYPE is `minio`
@ -2576,6 +2638,25 @@ LEVEL = Info
;;
;; Minio skip SSL verification available when STORAGE_TYPE is `minio`
;MINIO_INSECURE_SKIP_VERIFY = false
;;
;; Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio`
;MINIO_BUCKET_LOOKUP_TYPE = auto
;[storage.azureblob]
;STORAGE_TYPE = azureblob
;;
;; Azure Blob endpoint to connect only available when STORAGE_TYPE is `azureblob`,
;; e.g. https://accountname.blob.core.windows.net or http://127.0.0.1:10000/devstoreaccount1
;AZURE_BLOB_ENDPOINT =
;;
;; Azure Blob account name to connect only available when STORAGE_TYPE is `azureblob`
;AZURE_BLOB_ACCOUNT_NAME =
;;
;; Azure Blob account key to connect only available when STORAGE_TYPE is `azureblob`
;AZURE_BLOB_ACCOUNT_KEY =
;;
;; Azure Blob container to store the attachments only available when STORAGE_TYPE is `azureblob`
;AZURE_BLOB_CONTAINER = gitea
;[proxy]
;; Enable the proxy, all requests to external via HTTP will be affected

View File

@ -1,8 +1,5 @@
# Gitea: Docs
[![Join the chat at https://img.shields.io/discord/322538954119184384.svg](https://img.shields.io/discord/322538954119184384.svg)](https://discord.gg/Gitea)
[![](https://images.microbadger.com/badges/image/gitea/docs.svg)](http://microbadger.com/images/gitea/docs "Get your own image badge on microbadger.com")
These docs are ingested by our [docs repo](https://gitea.com/gitea/gitea-docusaurus).
## Authors
@ -18,5 +15,5 @@ for the full license text.
## Copyright
```
Copyright (c) 2016 The Gitea Authors <https://gitea.io>
Copyright (c) 2016 The Gitea Authors
```

View File

@ -1,9 +1,5 @@
# Gitea: Docs
[![Build Status](http://drone.gitea.io/api/badges/go-gitea/docs/status.svg)](http://drone.gitea.io/go-gitea/docs)
[![Join the chat at https://img.shields.io/discord/322538954119184384.svg](https://img.shields.io/discord/322538954119184384.svg)](https://discord.gg/Gitea)
[![](https://images.microbadger.com/badges/image/gitea/docs.svg)](http://microbadger.com/images/gitea/docs "Get your own image badge on microbadger.com")
https://gitea.com/gitea/gitea-docusaurus
## About Us
@ -18,5 +14,5 @@ https://gitea.com/gitea/gitea-docusaurus
## Copyright
```
Copyright (c) 2016 The Gitea Authors <https://gitea.io>
Copyright (c) 2016 The Gitea Authors
```

View File

@ -724,11 +724,13 @@ Define allowed algorithms and their minimum key length (use -1 to disable a type
## Mailer (`mailer`)
⚠️ This section is for Gitea 1.18 and later. If you are using Gitea 1.17 or older,
:::warning
This section is for Gitea 1.18 and later. If you are using Gitea 1.17 or older,
please refer to
[Gitea 1.17 app.ini example](https://github.com/go-gitea/gitea/blob/release/v1.17/custom/conf/app.example.ini)
and
[Gitea 1.17 configuration document](https://github.com/go-gitea/gitea/blob/release/v1.17/docs/content/doc/advanced/config-cheat-sheet.en-us.md)
:::
- `ENABLED`: **false**: Enable to use a mail service.
- `PROTOCOL`: **_empty_**: Mail server protocol. One of "smtp", "smtps", "smtp+starttls", "smtp+unix", "sendmail", "dummy". _Before 1.18, this was inferred from a combination of `MAILER_TYPE` and `IS_TLS_ENABLED`._
@ -761,6 +763,21 @@ and
- `SEND_BUFFER_LEN`: **100**: Buffer length of mailing queue. **DEPRECATED** use `LENGTH` in `[queue.mailer]`
- `SEND_AS_PLAIN_TEXT`: **false**: Send mails only in plain text, without HTML alternative.
## Override Email Headers (`mailer.override_header`)
:::warning
This is empty by default, use it only if you know what you need it for.
:::
For example:
```ini
[mailer.override_header]
Reply-To = test@example.com, test2@example.com
Content-Type = text/html; charset=utf-8
In-Reply-To =
```
## Incoming Email (`email.incoming`)
- `ENABLED`: **false**: Enable handling of incoming emails.
@ -828,7 +845,7 @@ and
## Project (`project`)
Default templates for project boards:
Default templates for project board view:
- `PROJECT_BOARD_BASIC_KANBAN_TYPE`: **To Do, In Progress, Done**
- `PROJECT_BOARD_BUG_TRIAGE_TYPE`: **Needs Triage, High Priority, Low Priority, Closed**
@ -843,7 +860,7 @@ Default templates for project boards:
- `SERVE_DIRECT`: **false**: Allows the storage driver to redirect to authenticated URLs to serve files directly. Currently, only Minio/S3 is supported via signed URLs, local does nothing.
- `PATH`: **attachments**: Path to store attachments only available when STORAGE_TYPE is `local`, relative paths will be resolved to `${AppDataPath}/${attachment.PATH}`.
- `MINIO_ENDPOINT`: **localhost:9000**: Minio endpoint to connect only available when STORAGE_TYPE is `minio`
- `MINIO_ACCESS_KEY_ID`: Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`
- `MINIO_ACCESS_KEY_ID`: Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`. If not provided and STORAGE_TYPE is `minio`, will search for credentials in known environment variables (MINIO_ACCESS_KEY_ID, AWS_ACCESS_KEY_ID), credentials files (~/.mc/config.json, ~/.aws/credentials), and EC2 instance metadata.
- `MINIO_SECRET_ACCESS_KEY`: Minio secretAccessKey to connect only available when STORAGE_TYPE is `minio`
- `MINIO_BUCKET`: **gitea**: Minio bucket to store the attachments only available when STORAGE_TYPE is `minio`
- `MINIO_LOCATION`: **us-east-1**: Minio location to create bucket only available when STORAGE_TYPE is `minio`
@ -851,6 +868,7 @@ Default templates for project boards:
- `MINIO_USE_SSL`: **false**: Minio enabled ssl only available when STORAGE_TYPE is `minio`
- `MINIO_INSECURE_SKIP_VERIFY`: **false**: Minio skip SSL verification available when STORAGE_TYPE is `minio`
- `MINIO_CHECKSUM_ALGORITHM`: **default**: Minio checksum algorithm: `default` (for MinIO or AWS S3) or `md5` (for Cloudflare or Backblaze)
- `MINIO_BUCKET_LOOKUP_TYPE`: **auto**: Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio`
## Log (`log`)
@ -974,12 +992,20 @@ Default templates for project boards:
- `SCHEDULE`: **@midnight** : Interval as a duration between each synchronization, it will always attempt synchronization when the instance starts.
- `UPDATE_EXISTING`: **true**: Create new users, update existing user data and disable users that are not in external source anymore (default) or only create new users if UPDATE_EXISTING is set to false.
## Cron - Cleanup Expired Actions Assets (`cron.cleanup_actions`)
#### Cron - Cleanup Expired Actions Assets (`cron.cleanup_actions`)
- `ENABLED`: **true**: Enable cleanup expired actions assets job.
- `RUN_AT_START`: **true**: Run job at start time (if ENABLED).
- `SCHEDULE`: **@midnight** : Cron syntax for the job.
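In `app.ini` terms this mirrors the block added to `custom/conf/app.example.ini` earlier in this diff:

```ini
[cron.cleanup_actions]
ENABLED = true
RUN_AT_START = true
SCHEDULE = @midnight
```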
#### Cron - Cleanup Deleted Branches (`cron.deleted_branches_cleanup`)
- `ENABLED`: **true**: Enable deleted branches cleanup.
- `RUN_AT_START`: **true**: Run job at start time (if ENABLED).
- `NOTICE_ON_SUCCESS`: **false**: Set to true to log a success message.
- `SCHEDULE`: **@midnight**: Cron syntax for scheduling deleted branches cleanup.
- `OLDER_THAN`: **24h**: Branches deleted OLDER_THAN ago will be cleaned up.
### Extended cron tasks (not enabled by default)
#### Cron - Garbage collect all repositories (`cron.git_gc_repos`)
@ -1265,27 +1291,35 @@ is `data/lfs` and the default of `MINIO_BASE_PATH` is `lfs/`.
- `SERVE_DIRECT`: **false**: Allows the storage driver to redirect to authenticated URLs to serve files directly. Currently, only Minio/S3 is supported via signed URLs, local does nothing.
- `PATH`: **./data/lfs**: Where to store LFS files, only available when `STORAGE_TYPE` is `local`. If not set it fall back to deprecated LFS_CONTENT_PATH value in [server] section.
- `MINIO_ENDPOINT`: **localhost:9000**: Minio endpoint to connect only available when `STORAGE_TYPE` is `minio`
- `MINIO_ACCESS_KEY_ID`: Minio accessKeyID to connect only available when `STORAGE_TYPE` is `minio`
- `MINIO_ACCESS_KEY_ID`: Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`. If not provided and STORAGE_TYPE is `minio`, will search for credentials in known environment variables (MINIO_ACCESS_KEY_ID, AWS_ACCESS_KEY_ID), credentials files (~/.mc/config.json, ~/.aws/credentials), and EC2 instance metadata.
- `MINIO_SECRET_ACCESS_KEY`: Minio secretAccessKey to connect only available when `STORAGE_TYPE` is `minio`
- `MINIO_BUCKET`: **gitea**: Minio bucket to store the lfs only available when `STORAGE_TYPE` is `minio`
- `MINIO_LOCATION`: **us-east-1**: Minio location to create bucket only available when `STORAGE_TYPE` is `minio`
- `MINIO_BASE_PATH`: **lfs/**: Minio base path on the bucket only available when `STORAGE_TYPE` is `minio`
- `MINIO_USE_SSL`: **false**: Minio enabled ssl only available when `STORAGE_TYPE` is `minio`
- `MINIO_INSECURE_SKIP_VERIFY`: **false**: Minio skip SSL verification available when STORAGE_TYPE is `minio`
- `MINIO_BUCKET_LOOKUP_TYPE`: **auto**: Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio`
## Storage (`storage`)
Default storage configuration for attachments, lfs, avatars, repo-avatars, repo-archive, packages, actions_log, actions_artifact.
- `STORAGE_TYPE`: **local**: Storage type, `local` for local disk or `minio` for s3 compatible object storage service.
- `STORAGE_TYPE`: **local**: Storage type, `local` for local disk, `minio` for s3 compatible object storage service, `azureblob` for azure blob storage service.
- `SERVE_DIRECT`: **false**: Allows the storage driver to redirect to authenticated URLs to serve files directly. Currently, only Minio/S3 is supported via signed URLs, local does nothing.
- `MINIO_ENDPOINT`: **localhost:9000**: Minio endpoint to connect only available when `STORAGE_TYPE` is `minio`
- `MINIO_ACCESS_KEY_ID`: Minio accessKeyID to connect only available when `STORAGE_TYPE` is `minio`
- `MINIO_ACCESS_KEY_ID`: Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`. If not provided and STORAGE_TYPE is `minio`, will search for credentials in known environment variables (MINIO_ACCESS_KEY_ID, AWS_ACCESS_KEY_ID), credentials files (~/.mc/config.json, ~/.aws/credentials), and EC2 instance metadata.
- `MINIO_SECRET_ACCESS_KEY`: Minio secretAccessKey to connect only available when `STORAGE_TYPE` is `minio`
- `MINIO_BUCKET`: **gitea**: Minio bucket to store the data only available when `STORAGE_TYPE` is `minio`
- `MINIO_LOCATION`: **us-east-1**: Minio location to create bucket only available when `STORAGE_TYPE` is `minio`
- `MINIO_USE_SSL`: **false**: Minio enabled ssl only available when `STORAGE_TYPE` is `minio`
- `MINIO_INSECURE_SKIP_VERIFY`: **false**: Minio skip SSL verification available when STORAGE_TYPE is `minio`
- `MINIO_BUCKET_LOOKUP_TYPE`: **auto**: Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio`
- `AZURE_BLOB_ENDPOINT`: **_empty_**: Azure Blob endpoint to connect only available when STORAGE_TYPE is `azureblob`,
e.g. https://accountname.blob.core.windows.net or http://127.0.0.1:10000/devstoreaccount1
- `AZURE_BLOB_ACCOUNT_NAME`: **_empty_**: Azure Blob account name to connect only available when STORAGE_TYPE is `azureblob`
- `AZURE_BLOB_ACCOUNT_KEY`: **_empty_**: Azure Blob account key to connect only available when STORAGE_TYPE is `azureblob`
- `AZURE_BLOB_CONTAINER`: **gitea**: Azure Blob container to store the data only available when STORAGE_TYPE is `azureblob`
The recommended storage configuration for minio is as follows:
@ -1294,7 +1328,10 @@ The recommended storage configuration for minio like below:
STORAGE_TYPE = minio
; Minio endpoint to connect only available when STORAGE_TYPE is `minio`
MINIO_ENDPOINT = localhost:9000
; Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`
; Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`.
; If not provided and STORAGE_TYPE is `minio`, will search for credentials in known
; environment variables (MINIO_ACCESS_KEY_ID, AWS_ACCESS_KEY_ID), credentials files
; (~/.mc/config.json, ~/.aws/credentials), and EC2 instance metadata.
MINIO_ACCESS_KEY_ID =
; Minio secretAccessKey to connect only available when STORAGE_TYPE is `minio`
MINIO_SECRET_ACCESS_KEY =
@ -1307,6 +1344,8 @@ MINIO_USE_SSL = false
; Minio skip SSL verification available when STORAGE_TYPE is `minio`
MINIO_INSECURE_SKIP_VERIFY = false
SERVE_DIRECT = true
; Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio`
MINIO_BUCKET_LOOKUP_TYPE = auto
```
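By analogy, a minimal sketch of the equivalent `azureblob` configuration (endpoint, account, and key values are placeholders, not recommendations from this changeset):

```ini
[storage]
STORAGE_TYPE = azureblob
SERVE_DIRECT = true
AZURE_BLOB_ENDPOINT = https://accountname.blob.core.windows.net
AZURE_BLOB_ACCOUNT_NAME = accountname
AZURE_BLOB_ACCOUNT_KEY = <base64-encoded-account-key>
AZURE_BLOB_CONTAINER = gitea
```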
By default, every storage has its own default base path, as shown below
@ -1341,7 +1380,10 @@ STORAGE_TYPE = my_minio
STORAGE_TYPE = minio
; Minio endpoint to connect only available when STORAGE_TYPE is `minio`
MINIO_ENDPOINT = localhost:9000
; Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`
; Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`.
; If not provided and STORAGE_TYPE is `minio`, will search for credentials in known
; environment variables (MINIO_ACCESS_KEY_ID, AWS_ACCESS_KEY_ID), credentials files
; (~/.mc/config.json, ~/.aws/credentials), and EC2 instance metadata.
MINIO_ACCESS_KEY_ID =
; Minio secretAccessKey to connect only available when STORAGE_TYPE is `minio`
MINIO_SECRET_ACCESS_KEY =
@ -1353,6 +1395,8 @@ MINIO_LOCATION = us-east-1
MINIO_USE_SSL = false
; Minio skip SSL verification available when STORAGE_TYPE is `minio`
MINIO_INSECURE_SKIP_VERIFY = false
; Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio`
MINIO_BUCKET_LOOKUP_TYPE = auto
```
## Repository Archive Storage (`storage.repo-archive`)
@ -1365,13 +1409,14 @@ is `data/repo-archive` and the default of `MINIO_BASE_PATH` is `repo-archive/`.
- `SERVE_DIRECT`: **false**: Allows the storage driver to redirect to authenticated URLs to serve files directly. Currently, only Minio/S3 is supported via signed URLs, local does nothing.
- `PATH`: **./data/repo-archive**: Where to store archive files, only available when `STORAGE_TYPE` is `local`.
- `MINIO_ENDPOINT`: **localhost:9000**: Minio endpoint to connect only available when `STORAGE_TYPE` is `minio`
- `MINIO_ACCESS_KEY_ID`: Minio accessKeyID to connect only available when `STORAGE_TYPE` is `minio`
- `MINIO_ACCESS_KEY_ID`: Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`. If not provided and STORAGE_TYPE is `minio`, will search for credentials in known environment variables (MINIO_ACCESS_KEY_ID, AWS_ACCESS_KEY_ID), credentials files (~/.mc/config.json, ~/.aws/credentials), and EC2 instance metadata.
- `MINIO_SECRET_ACCESS_KEY`: Minio secretAccessKey to connect only available when `STORAGE_TYPE` is `minio`
- `MINIO_BUCKET`: **gitea**: Minio bucket to store the lfs only available when `STORAGE_TYPE` is `minio`
- `MINIO_LOCATION`: **us-east-1**: Minio location to create bucket only available when `STORAGE_TYPE` is `minio`
- `MINIO_BASE_PATH`: **repo-archive/**: Minio base path on the bucket only available when `STORAGE_TYPE` is `minio`
- `MINIO_USE_SSL`: **false**: Minio enabled ssl only available when `STORAGE_TYPE` is `minio`
- `MINIO_INSECURE_SKIP_VERIFY`: **false**: Minio skip SSL verification available when STORAGE_TYPE is `minio`
- `MINIO_BUCKET_LOOKUP_TYPE`: **auto**: Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio`
## Repository Archives (`repo-archive`)

View File

@ -796,6 +796,7 @@ Gitea 创建以下非唯一队列:
- `MINIO_USE_SSL`: **false**: Enable SSL for Minio, only available when STORAGE_TYPE is `minio`.
- `MINIO_INSECURE_SKIP_VERIFY`: **false**: Skip SSL verification for Minio, only available when STORAGE_TYPE is `minio`.
- `MINIO_CHECKSUM_ALGORITHM`: **default**: Minio checksum algorithm: `default` (for MinIO or AWS S3) or `md5` (for Cloudflare or Backblaze).
- `MINIO_BUCKET_LOOKUP_TYPE`: **auto**: Minio bucket lookup method, defaults to `auto`; set it to `dns` for virtual-host style or `path` for path style, only available when `STORAGE_TYPE` is `minio`.
## Log (`log`)
@ -1201,12 +1202,13 @@ ALLOW_DATA_URI_IMAGES = true
- `MINIO_BASE_PATH`: **lfs/**: Minio base path on the bucket, only available when `STORAGE_TYPE` is `minio`.
- `MINIO_USE_SSL`: **false**: Enable SSL for Minio, only available when `STORAGE_TYPE` is `minio`.
- `MINIO_INSECURE_SKIP_VERIFY`: **false**: Skip SSL verification for Minio, only available when `STORAGE_TYPE` is `minio`.
- `MINIO_BUCKET_LOOKUP_TYPE`: **auto**: Minio bucket lookup method, defaults to `auto`; set it to `dns` for virtual-host style or `path` for path style, only available when `STORAGE_TYPE` is `minio`.
## Storage (`storage`)
Default storage configuration for attachments, lfs, avatars, repo-avatars, repo-archive, packages, actions logs, and actions artifacts.
- `STORAGE_TYPE`: **local**: Storage type, `local` for local disk or `minio` for an S3-compatible object storage service.
- `STORAGE_TYPE`: **local**: Storage type, `local` for local disk, `minio` for an S3-compatible object storage service, or `azureblob` for Azure Blob storage.
- `SERVE_DIRECT`: **false**: Allows the storage driver to redirect to authenticated URLs to serve files directly. Currently, only Minio/S3 is supported via signed URLs; local does nothing.
- `MINIO_ENDPOINT`: **localhost:9000**: Minio endpoint to connect to, only available when `STORAGE_TYPE` is `minio`.
- `MINIO_ACCESS_KEY_ID`: Minio accessKeyID to connect with, only available when `STORAGE_TYPE` is `minio`.
@ -1215,6 +1217,12 @@ ALLOW_DATA_URI_IMAGES = true
- `MINIO_LOCATION`: **us-east-1**: Minio location to create the bucket, only available when `STORAGE_TYPE` is `minio`.
- `MINIO_USE_SSL`: **false**: Enable SSL for Minio, only available when `STORAGE_TYPE` is `minio`.
- `MINIO_INSECURE_SKIP_VERIFY`: **false**: Skip SSL verification for Minio, only available when `STORAGE_TYPE` is `minio`.
- `MINIO_BUCKET_LOOKUP_TYPE`: **auto**: Minio bucket lookup method, defaults to `auto`; set it to `dns` for virtual-host style or `path` for path style, only available when `STORAGE_TYPE` is `minio`.
- `AZURE_BLOB_ENDPOINT`: **_empty_**: Azure Blob endpoint to connect to, only available when `STORAGE_TYPE` is `azureblob`, e.g. https://accountname.blob.core.windows.net or http://127.0.0.1:10000/devstoreaccount1
- `AZURE_BLOB_ACCOUNT_NAME`: **_empty_**: Azure Blob account name, only available when `STORAGE_TYPE` is `azureblob`.
- `AZURE_BLOB_ACCOUNT_KEY`: **_empty_**: Azure Blob account key, only available when `STORAGE_TYPE` is `azureblob`.
- `AZURE_BLOB_CONTAINER`: **gitea**: Azure Blob container to store the data, only available when `STORAGE_TYPE` is `azureblob`.
The recommended minio storage configuration is as follows:
@ -1236,6 +1244,8 @@ MINIO_USE_SSL = false
; Minio skip SSL verification available when STORAGE_TYPE is `minio`
MINIO_INSECURE_SKIP_VERIFY = false
SERVE_DIRECT = true
; Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio`
MINIO_BUCKET_LOOKUP_TYPE = auto
```
By default, every storage has its own default base path, as shown below:
@ -1282,6 +1292,8 @@ MINIO_LOCATION = us-east-1
MINIO_USE_SSL = false
; Minio skip SSL verification available when STORAGE_TYPE is `minio`
MINIO_INSECURE_SKIP_VERIFY = false
; Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio`
MINIO_BUCKET_LOOKUP_TYPE = auto
```
### Repository Archive Storage (`storage.repo-archive`)
@ -1299,6 +1311,7 @@ MINIO_INSECURE_SKIP_VERIFY = false
- `MINIO_BASE_PATH`: **repo-archive/**: Minio base path on the bucket, only available when `STORAGE_TYPE` is `minio`.
- `MINIO_USE_SSL`: **false**: Enable SSL for Minio, only available when `STORAGE_TYPE` is `minio`.
- `MINIO_INSECURE_SKIP_VERIFY`: **false**: Skip SSL verification for Minio, only available when `STORAGE_TYPE` is `minio`.
- `MINIO_BUCKET_LOOKUP_TYPE`: **auto**: Minio bucket lookup method, defaults to `auto`; set it to `dns` for virtual-host style or `path` for path style, only available when `STORAGE_TYPE` is `minio`.
### Repository Archives (`repo-archive`)

View File

@ -38,12 +38,10 @@ FROM gitea/gitea:@version@
COPY custom/app.ini /data/gitea/conf/app.ini
[...]
RUN apk --no-cache add asciidoctor freetype freetype-dev gcc g++ libpng libffi-dev py-pip python3-dev py3-pip py3-pyzmq
RUN apk --no-cache add asciidoctor freetype freetype-dev gcc g++ libpng libffi-dev pandoc python3-dev py3-pyzmq pipx
# install any other package you need for your external renderers
RUN pip3 install --upgrade pip
RUN pip3 install -U setuptools
RUN pip3 install jupyter docutils
RUN pipx install jupyter docutils --include-deps
# add above any other python package you may need to install
```

View File

@ -37,12 +37,10 @@ FROM gitea/gitea:@version@
COPY custom/app.ini /data/gitea/conf/app.ini
[...]
RUN apk --no-cache add asciidoctor freetype freetype-dev gcc g++ libpng libffi-dev py-pip python3-dev py3-pip py3-pyzmq
RUN apk --no-cache add asciidoctor freetype freetype-dev gcc g++ libpng libffi-dev pandoc python3-dev py3-pyzmq pipx
# install any other packages you need for your external renderers
RUN pip3 install --upgrade pip
RUN pip3 install -U setuptools
RUN pip3 install jupyter docutils
RUN pipx install jupyter docutils --include-deps
# add above any other Python packages you may need to install
```

View File

@ -17,15 +17,35 @@ menu:
# Reverse Proxies
## General configuration
1. Set `[server] ROOT_URL = https://git.example.com/` in your `app.ini` file.
2. Make the reverse-proxy pass `https://git.example.com/foo` to `http://gitea:3000/foo`.
3. Make sure the reverse-proxy does not decode the URI. The request `https://git.example.com/a%2Fb` should be passed as `http://gitea:3000/a%2Fb`.
4. Make sure the `Host` and `X-Forwarded-Proto` headers are correctly passed to Gitea so that Gitea sees the real URL being visited.
### Use a sub-path
Usually it's **not recommended** to put Gitea in a sub-path; it's not widely used and may have some issues in rare cases.
To make Gitea work with a sub-path (eg: `https://common.example.com/gitea/`),
there are some extra requirements besides the general configuration above:
1. Use `[server] ROOT_URL = https://common.example.com/gitea/` in your `app.ini` file.
2. Make the reverse-proxy pass `https://common.example.com/gitea/foo` to `http://gitea:3000/foo`.
3. The container registry requires a fixed sub-path `/v2` at the root level which must be configured:
- Make the reverse-proxy pass `https://common.example.com/v2` to `http://gitea:3000/v2`.
- Make sure the URI and headers are also correctly passed (see the general configuration above).
## Nginx
If you want Nginx to serve your Gitea instance, add the following `server` section to the `http` section of `nginx.conf`:
If you want Nginx to serve your Gitea instance, add the following `server` section to the `http` section of `nginx.conf`.
```
Make sure `client_max_body_size` is large enough; otherwise there will be a "413 Request Entity Too Large" error when uploading large files.
```nginx
server {
listen 80;
server_name git.example.com;
...
location / {
client_max_body_size 512M;
proxy_pass http://localhost:3000;
@ -39,37 +59,35 @@ server {
}
```
### Resolving Error: 413 Request Entity Too Large
This error indicates that nginx is configured to restrict the file upload size;
it affects attachment uploading, form posting, package uploading, LFS pushing, etc.
You can fine-tune the `client_max_body_size` option according to the [nginx documentation](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size).
## Nginx with a sub-path
In case you already have a site, and you want Gitea to share the domain name, you can setup Nginx to serve Gitea under a sub-path by adding the following `server` section inside the `http` section of `nginx.conf`:
In case you already have a site, and you want Gitea to share the domain name,
you can set up Nginx to serve Gitea under a sub-path by adding the following `server` section
into the `http` section of `nginx.conf`:
```
```nginx
server {
listen 80;
server_name git.example.com;
# Note: Trailing slash
location /gitea/ {
...
location ~ ^/(gitea|v2)($|/) {
client_max_body_size 512M;
# make nginx use unescaped URI, keep "%2F" as is
# make nginx use unescaped URI, keep "%2F" as-is, remove the "/gitea" sub-path prefix, pass "/v2" as-is.
rewrite ^ $request_uri;
rewrite ^/gitea(/.*) $1 break;
rewrite ^(/gitea)?(/.*) $2 break;
proxy_pass http://127.0.0.1:3000$uri;
# other common HTTP headers, see the "Nginx" config section above
proxy_set_header ...
proxy_set_header Connection $http_connection;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
```
Then you **MUST** set something like `[server] ROOT_URL = http://git.example.com/git/` correctly in your configuration.
Then you **MUST** set something like `[server] ROOT_URL = http://git.example.com/gitea/` correctly in your configuration.
## Nginx and serve static resources directly
@ -93,7 +111,7 @@ or use a cdn for the static files.
Set `[server] STATIC_URL_PREFIX = /_/static` in your configuration.
```apacheconf
```nginx
server {
listen 80;
server_name git.example.com;
@ -112,7 +130,7 @@ server {
Set `[server] STATIC_URL_PREFIX = http://cdn.example.com/gitea` in your configuration.
```apacheconf
```nginx
# application server running Gitea
server {
listen 80;
@ -124,7 +142,7 @@ server {
}
```
```apacheconf
```nginx
# static content delivery server
server {
listen 80;
@ -151,6 +169,7 @@ If you want Apache HTTPD to serve your Gitea instance, you can add the following
ProxyRequests off
AllowEncodedSlashes NoDecode
ProxyPass / http://localhost:3000/ nocanon
RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
</VirtualHost>
```
@ -172,6 +191,8 @@ In case you already have a site, and you want Gitea to share the domain name, yo
AllowEncodedSlashes NoDecode
# Note: no trailing slash after either /git or port
ProxyPass /git http://localhost:3000 nocanon
ProxyPreserveHost On
RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
</VirtualHost>
```
@ -183,7 +204,7 @@ Note: The following Apache HTTPD mods must be enabled: `proxy`, `proxy_http`.
If you want Caddy to serve your Gitea instance, you can add the following server block to your Caddyfile:
```apacheconf
```
git.example.com {
reverse_proxy localhost:3000
}
@ -193,7 +214,7 @@ git.example.com {
In case you already have a site, and you want Gitea to share the domain name, you can setup Caddy to serve Gitea under a sub-path by adding the following to your server block in your Caddyfile:
```apacheconf
```
git.example.com {
route /git/* {
uri strip_prefix /git
@ -371,19 +392,3 @@ gitea:
This config assumes that you are handling HTTPS on the traefik side and using HTTP between Gitea and traefik.
Then you **MUST** set something like `[server] ROOT_URL = http://example.com/gitea/` correctly in your configuration.
## General sub-path configuration
Usually it's not recommended to put Gitea in a sub-path, it's not widely used and may have some issues in rare cases.
If you really need to do so, to make Gitea works with sub-path (eg: `http://example.com/gitea/`), here are the requirements:
1. Set `[server] ROOT_URL = http://example.com/gitea/` in your `app.ini` file.
2. Make the reverse-proxy pass `http://example.com/gitea/foo` to `http://gitea-server:3000/foo`.
3. Make sure the reverse-proxy not decode the URI, the request `http://example.com/gitea/a%2Fb` should be passed as `http://gitea-server:3000/a%2Fb`.
## Docker / Container Registry
The container registry uses a fixed sub-path `/v2` which can't be changed.
Even if you deploy Gitea with a different sub-path, `/v2` will be used by the `docker` client.
Therefore you may need to add an additional route to your reverse proxy configuration.

View File

@ -47,7 +47,7 @@ We recommend [Google HTML/CSS Style Guide](https://google.github.io/styleguide/h
9. Avoid unnecessary `!important` in CSS, add comments to explain why it's necessary if it can't be avoided.
10. Avoid mixing different events in one event listener, prefer to use individual event listeners for every event.
11. Custom event names are recommended to use `ce-` prefix.
12. Prefer using Tailwind CSS which is available via `tw-` prefix, e.g. `tw-relative`. Gitea's helper CSS classes use `gt-` prefix (`gt-word-break`), while Gitea's own private framework-level CSS classes use `g-` prefix (`g-modal-confirm`).
12. Prefer using Tailwind CSS which is available via `tw-` prefix, e.g. `tw-relative`. Gitea's helper CSS classes use `gt-` prefix (`gt-ellipsis`), while Gitea's own private framework-level CSS classes use `g-` prefix (`g-modal-confirm`).
13. Avoid inline scripts & styles as much as possible, it's recommended to put JS code into JS files and use CSS classes. If inline scripts & styles are unavoidable, explain the reason why it can't be avoided.
### Accessibility / ARIA

View File

@ -47,7 +47,7 @@ HTML 页面由[Go HTML Template](https://pkg.go.dev/html/template)渲染。
9. Avoid unnecessary `!important` in CSS; if it can't be avoided, add a comment explaining why it's necessary.
10. Avoid mixing different events in one event listener; prefer individual event listeners for every event.
11. Custom event names are recommended to use the `ce-` prefix.
12. Prefer using Tailwind CSS, which is available via the `tw-` prefix, e.g. `tw-relative`. Gitea's helper CSS classes use the `gt-` prefix (`gt-word-break`), while Gitea's own private framework-level CSS classes use the `g-` prefix (`g-modal-confirm`).
12. Prefer using Tailwind CSS, which is available via the `tw-` prefix, e.g. `tw-relative`. Gitea's helper CSS classes use the `gt-` prefix (`gt-ellipsis`), while Gitea's own private framework-level CSS classes use the `g-` prefix (`g-modal-confirm`).
13. Avoid inline scripts & styles as much as possible; it's recommended to put JS code into JS files and use CSS classes. If inline scripts & styles are unavoidable, explain why they can't be avoided.
### Accessibility / ARIA

View File

@ -117,7 +117,7 @@ curl -v "http://localhost/api/v1/repos/search?limit=1"
API Reference guide is auto-generated by swagger and available on:
`https://gitea.your.host/api/swagger`
or on the
[Gitea demo instance](https://try.gitea.io/api/swagger)
[Gitea instance](https://gitea.com/api/swagger)
The OpenAPI document is at:
`https://gitea.your.host/swagger.v1.json`

View File

@ -45,7 +45,7 @@ To migrate from GitHub to Gitea, you can use Gitea's built-in migration form.
In order to migrate items such as issues, pull requests, etc. you will need to input at least your username.
[Example (requires login)](https://try.gitea.io/repo/migrate)
[Example (requires login)](https://demo.gitea.com/repo/migrate)
To migrate from GitLab to Gitea, you can use this non-affiliated tool:
@ -137,9 +137,9 @@ All Gitea instances have the built-in API and there is no way to disable it comp
You can, however, disable showing its documentation by setting `ENABLE_SWAGGER` to `false` in the `api` section of your `app.ini`.
For more information, refer to Gitea's [API docs](development/api-usage.md).
You can see the latest API (for example) on https://try.gitea.io/api/swagger
You can see the latest API (for example) on https://gitea.com/api/swagger
You can also see an example of the `swagger.json` file at https://try.gitea.io/swagger.v1.json
You can also see an example of the `swagger.json` file at https://gitea.com/swagger.v1.json
## Adjusting your server for public/private use

View File

@ -47,7 +47,7 @@ menu:
In order to migrate items such as issues, pull requests, etc., you will need to input at least your username.
[Example (requires login)](https://try.gitea.io/repo/migrate)
[Example (requires login)](https://demo.gitea.com/repo/migrate)
To migrate from GitLab to Gitea, you can use this non-affiliated tool:
@ -141,9 +141,9 @@ Gitea不提供内置的Pages服务器。您需要一个专用的域名来提供
However, you can disable showing its documentation by setting `ENABLE_SWAGGER` to `false` in the `api` section of your `app.ini`.
For more information, refer to Gitea's [API docs](development/api-usage.md).
You can see the latest API (for example) at https://try.gitea.io/api/swagger
You can see the latest API (for example) at https://gitea.com/api/swagger
You can also see an example of the `swagger.json` file at https://try.gitea.io/swagger.v1.json
You can also see an example of the `swagger.json` file at https://gitea.com/swagger.v1.json
## Adjusting your server for public/private use

View File

@ -19,11 +19,11 @@ menu:
- [Paid Commercial Support](https://about.gitea.com/)
- [Discord](https://discord.gg/Gitea)
- [Discourse Forum](https://discourse.gitea.io/)
- [Forum](https://forum.gitea.com/)
- [Matrix](https://matrix.to/#/#gitea-space:matrix.org)
- NOTE: Most of the Matrix channels are bridged with their counterpart in Discord and may experience some degree of flakiness with the bridge process.
- Chinese Support
- [Discourse Chinese Category](https://discourse.gitea.io/c/5-category/5)
- [Discourse Chinese Category](https://forum.gitea.com/c/5-category/5)
- QQ Group 328432459
# Bug Report
@ -39,7 +39,7 @@ If you found a bug, please [create an issue on GitHub](https://github.com/go-git
- When using systemd, use `journalctl --lines 1000 --unit gitea` to collect logs.
- When using docker, use `docker logs --tail 1000 <gitea-container>` to collect logs.
4. Reproducible steps so that others could reproduce and understand the problem more quickly and easily.
- [try.gitea.io](https://try.gitea.io) could be used to reproduce the problem.
- [demo.gitea.com](https://demo.gitea.com) could be used to reproduce the problem.
5. If you encounter slow/hanging/deadlock problems, please report the stacktrace when the problem occurs.
Go to the "Site Admin" -> "Monitoring" -> "Stacktrace" -> "Download diagnosis report".

View File

@ -19,11 +19,11 @@ menu:
- [Paid Commercial Support](https://about.gitea.com/)
- [Discord](https://discord.gg/Gitea)
- [Discourse Forum](https://discourse.gitea.io/)
- [Forum](https://forum.gitea.com/)
- [Matrix](https://matrix.to/#/#gitea-space:matrix.org)
- NOTE: Most of the Matrix channels are bridged with their counterparts in Discord and may experience some degree of flakiness with the bridge process.
- Chinese Support
- [Discourse Chinese Category](https://discourse.gitea.io/c/5-category/5)
- [Discourse Chinese Category](https://forum.gitea.com/c/5-category/5)
- QQ Group 328432459
# Bug Report
@ -39,7 +39,7 @@ menu:
- When using systemd, use `journalctl --lines 1000 --unit gitea` to collect logs.
- When using Docker, use `docker logs --tail 1000 <gitea-container>` to collect logs.
4. Reproducible steps so that others can reproduce and understand the problem more quickly and easily.
- [try.gitea.io](https://try.gitea.io) can be used to reproduce the problem.
- [demo.gitea.com](https://demo.gitea.com) can be used to reproduce the problem.
5. If you encounter slow/hanging/deadlock problems, please report the stacktrace when the problem occurs.
Go to "Site Admin" -> "Monitoring" -> "Stacktrace" -> "Download diagnosis report".

View File

@ -21,7 +21,7 @@ up a self-hosted Git service.
With Go, this can be done platform-independently across
**all platforms** which Go supports, including Linux, macOS, and Windows,
on x86, amd64, ARM and PowerPC architectures.
You can try it out using [the online demo](https://try.gitea.io/).
You can try it out using [the online demo](https://demo.gitea.com).
## Features
@ -37,7 +37,7 @@ You can try it out using [the online demo](https://try.gitea.io/).
- CI/CD: Gitea Actions supports CI/CD functionality, compatible with GitHub Actions. Users can write workflows in familiar YAML format and reuse a variety of existing Actions plugins. Actions plugins support downloading from any Git website.
- Project Management: Gitea tracks project requirements, features, and bugs through boards and issues. Issues support features like branches, tags, milestones, assignments, time tracking, due dates, dependencies, and more.
- Project Management: Gitea tracks project requirements, features, and bugs through columns and issues. Issues support features like branches, tags, milestones, assignments, time tracking, due dates, dependencies, and more.
- Artifact Repository: Gitea supports over 20 different types of public or private software package management, including Cargo, Chef, Composer, Conan, Conda, Container, Helm, Maven, npm, NuGet, Pub, PyPI, RubyGems, Vagrant, and more.

View File

@ -104,7 +104,7 @@ _Symbols used in table:_
| Comment reactions | ✓ | ✘ | ✓ | ✓ | ✓ | ✘ | ✘ | ✘ |
| Lock Discussion | ✓ | ✘ | ✓ | ✓ | ✓ | ✘ | ✘ | ✘ |
| Batch issue handling | ✓ | ✘ | ✓ | ✓ | ✓ | ✘ | ✘ | ✘ |
| Issue Boards (Kanban) | [/](https://github.com/go-gitea/gitea/issues/14710) | ✘ | ✘ | ✓ | ✓ | ✘ | ✘ | ✘ |
| Projects | [/](https://github.com/go-gitea/gitea/issues/14710) | ✘ | ✘ | ✓ | ✓ | ✘ | ✘ | ✘ |
| Create branch from issue | [](https://github.com/go-gitea/gitea/issues/20226) | ✘ | ✘ | ✓ | ✓ | ✘ | ✘ | ✘ |
| Convert comment to new issue | ✓ | ✘ | ✓ | ✓ | ✓ | ✘ | ✘ | ✘ |
| Issue search | ✓ | ✘ | ✓ | ✓ | ✓ | ✓ | ✘ | ✘ |

View File

@ -5,11 +5,9 @@ slug: "badge"
sidebar_position: 11
toc: false
draft: false
aliases:
- /en-us/badge
menu:
sidebar:
parent: "usage"
parent: "actions"
name: "Badge"
sidebar_position: 11
identifier: "Badge"
@ -27,7 +25,7 @@ It is designed to be compatible with [GitHub Actions workflow badge](https://doc
You can use the following URL to get the badge:
```
https://your-gitea-instance.com/{owner}/{repo}/actions/workflows/{workflow_file}?branch={branch}&event={event}
https://your-gitea-instance.com/{owner}/{repo}/actions/workflows/{workflow_file}/badge.svg?branch={branch}&event={event}
```
- `{owner}`: The owner of the repository.
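For illustration, here is a minimal, hypothetical Go sketch (not part of Gitea; the instance URL, workflow file name and query values are placeholder assumptions) that assembles a badge URL of this shape:
```go
package main

import (
	"fmt"
	"net/url"
)

// badgeURL assembles a workflow badge URL from its parts; all inputs here are
// illustrative placeholders, not values taken from a real instance.
func badgeURL(instance, owner, repo, workflowFile, branch, event string) string {
	u := fmt.Sprintf("%s/%s/%s/actions/workflows/%s/badge.svg",
		instance, url.PathEscape(owner), url.PathEscape(repo), url.PathEscape(workflowFile))
	q := url.Values{}
	if branch != "" {
		q.Set("branch", branch)
	}
	if event != "" {
		q.Set("event", event)
	}
	if len(q) > 0 {
		u += "?" + q.Encode()
	}
	return u
}

func main() {
	fmt.Println(badgeURL("https://your-gitea-instance.com", "owner", "repo", "release.yml", "main", "push"))
}
```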

View File

@ -108,6 +108,10 @@ See [Creating an annotation for an error](https://docs.github.com/en/actions/usi
It's ignored by Gitea Actions now.
### Expressions
For [expressions](https://docs.github.com/en/actions/learn-github-actions/expressions), only [`always()`](https://docs.github.com/en/actions/learn-github-actions/expressions#always) is supported.
## Missing UI features
### Pre and Post steps

View File

@ -108,6 +108,10 @@ Gitea Actions目前不支持此功能。
Gitea Actions目前不支持此功能。
### 表达式
对于 [表达式](https://docs.github.com/en/actions/learn-github-actions/expressions), 当前仅 [`always()`](https://docs.github.com/en/actions/learn-github-actions/expressions#always) 被支持。
## 缺失的UI功能
### 预处理和后处理步骤

View File

@ -5,11 +5,9 @@ slug: "secrets"
sidebar_position: 50
draft: false
toc: false
aliases:
- /en-us/secrets
menu:
sidebar:
parent: "usage"
parent: "actions"
name: "Secrets"
sidebar_position: 50
identifier: "usage-secrets"

View File

@ -5,11 +5,9 @@ slug: "secrets"
sidebar_position: 50
draft: false
toc: false
aliases:
- /zh-cn/secrets
menu:
sidebar:
parent: "usage"
parent: "actions"
name: "密钥管理"
sidebar_position: 50
identifier: "usage-secrets"

View File

@ -236,7 +236,7 @@ configure this, set the fields below:
- Restrict what domains can log in if using a public SMTP host or SMTP host
with multiple domains.
- Example: `gitea.io,mydomain.com,mydomain2.com`
- Example: `gitea.com,mydomain.com,mydomain2.com`
- Force SMTPS

View File

@ -194,7 +194,7 @@ PAM提供了一种机制通过对用户进行PAM认证来自动将其添加
- 如果使用公共 SMTP 主机或有多个域的 SMTP 主机,限制哪些域可以登录
限制哪些域可以登录。
- 示例: `gitea.io,mydomain.com,mydomain2.com`
- 示例: `gitea.com,mydomain.com,mydomain2.com`
- 强制使用 SMTPS
- 默认情况下将使用SMTPS连接到端口465.如果您希望将smtp用于其他端口自行设置

View File

@ -308,7 +308,7 @@ This is an example of an issue config file
blank_issues_enabled: true
contact_links:
- name: Gitea
url: https://gitea.io
url: https://gitea.com
about: Visit the Gitea Website
```

View File

@ -44,6 +44,8 @@ You can use the following variables enclosed in `${}` inside these templates whi
- PullRequestIndex: Pull request's index number
- PullRequestReference: Pull request's reference character with its index number, e.g. #1, !2
- ClosingIssues: returns a string containing all issues which will be closed by this pull request, e.g. `close #1, close #2`
- ReviewedOn: Which pull request this commit belongs to. For example `Reviewed-on: https://gitea.com/foo/bar/pulls/1`
- ReviewedBy: Who approved the pull request before the merge. For example `Reviewed-by: Jane Doe <jane.doe@example.com>`
## Rebase

View File

@ -48,7 +48,7 @@ With different permissions, people could do different things with these units.
| Wiki | View wiki pages. Clone the wiki repository. | Create/Edit wiki pages, push | - |
| ExternalWiki | Link to an external wiki | - | - |
| ExternalTracker | Link to an external issue tracker | - | - |
| Projects | View the boards | Change issues across boards | - |
| Projects | View the columns of projects | Change issues across columns | - |
| Packages | View the packages | Upload/Delete packages | - |
| Actions | View the Actions logs | Approve / Cancel / Restart | - |
| Settings | - | - | Manage the repository |

61
flake.lock generated Normal file
View File

@ -0,0 +1,61 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1717974879,
"narHash": "sha256-GTO3C88+5DX171F/gVS3Qga/hOs/eRMxPFpiHq2t+D8=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "c7b821ba2e1e635ba5a76d299af62821cbcb09f3",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

38
flake.nix Normal file
View File

@ -0,0 +1,38 @@
{
inputs = {
nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
};
outputs =
{ nixpkgs, flake-utils, ... }:
flake-utils.lib.eachDefaultSystem (
system:
let
pkgs = nixpkgs.legacyPackages.${system};
in
{
devShells.default = pkgs.mkShell {
buildInputs = with pkgs; [
# generic
git
git-lfs
gnumake
gnused
gnutar
gzip
# frontend
nodejs_20
# linting
python312
poetry
# backend
go_1_22
gofumpt
];
};
}
);
}

22
go.mod
View File

@ -15,10 +15,12 @@ require (
gitea.com/lunny/dingtalk_webhook v0.0.0-20171025031554-e3534c89ef96
gitea.com/lunny/levelqueue v0.4.2-0.20230414023320-3c0159fe0fe4
github.com/42wim/sshsig v0.0.0-20211121163825-841cf5bbc121
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358
github.com/ProtonMail/go-crypto v1.0.0
github.com/PuerkitoBio/goquery v1.9.1
github.com/alecthomas/chroma/v2 v2.13.0
github.com/alecthomas/chroma/v2 v2.14.0
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb
github.com/blevesearch/bleve/v2 v2.3.10
github.com/buildkite/terminal-to-html/v3 v3.11.0
@ -106,13 +108,13 @@ require (
github.com/yuin/goldmark v1.7.0
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc
github.com/yuin/goldmark-meta v1.1.0
golang.org/x/crypto v0.22.0
golang.org/x/crypto v0.24.0
golang.org/x/image v0.15.0
golang.org/x/net v0.24.0
golang.org/x/net v0.26.0
golang.org/x/oauth2 v0.18.0
golang.org/x/sys v0.19.0
golang.org/x/text v0.14.0
golang.org/x/tools v0.19.0
golang.org/x/sys v0.21.0
golang.org/x/text v0.16.0
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d
google.golang.org/grpc v1.62.1
google.golang.org/protobuf v1.33.0
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
@ -130,6 +132,7 @@ require (
dario.cat/mergo v1.0.0 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
git.sr.ht/~mariusor/go-xsd-duration v0.0.0-20220703122237-02e73435a078 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
github.com/ClickHouse/ch-go v0.61.5 // indirect
github.com/ClickHouse/clickhouse-go/v2 v2.22.0 // indirect
github.com/DataDog/zstd v1.5.5 // indirect
@ -290,8 +293,8 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect
@ -308,6 +311,9 @@ replace github.com/nektos/act => gitea.com/gitea/act v0.259.1
replace github.com/gorilla/feeds => github.com/yardenshoham/feeds v0.0.0-20240110072658-f3d0c21c0bd5
// TODO: This could be removed after https://github.com/mholt/archiver/pull/396 merged
replace github.com/mholt/archiver/v3 => github.com/anchore/archiver/v3 v3.5.2
exclude github.com/gofrs/uuid v3.2.0+incompatible
exclude github.com/gofrs/uuid v4.0.0+incompatible

57
go.sum
View File

@ -38,16 +38,20 @@ github.com/42wim/sshsig v0.0.0-20211121163825-841cf5bbc121 h1:r3qt8PCHnfjOv9PN3H
github.com/42wim/sshsig v0.0.0-20211121163825-841cf5bbc121/go.mod h1:Ock8XgA7pvULhIaHGAk/cDnRfNrF9Jey81nPcc403iU=
github.com/6543/go-version v1.3.1 h1:HvOp+Telns7HWJ2Xo/05YXQSB2bE0WmVgbHqwMPZT4U=
github.com/6543/go-version v1.3.1/go.mod h1:oqFAHCwtLVUTLdhQmVZWYvaHXTdsbB4SY85at64SQEo=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 h1:MyVTgWR8qd/Jw1Le0NZebGBUCLbtak3bJ3z1OlqZBpw=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
@ -78,16 +82,18 @@ github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06
github.com/RoaringBitmap/roaring v0.7.1/go.mod h1:jdT9ykXwHFNdJbEtxePexlFYH9LXucApeS0/+/g+p1I=
github.com/RoaringBitmap/roaring v1.9.0 h1:lwKhr90/j0jVXJyh5X+vQN1VVn77rQFfYnh6RDRGCcE=
github.com/RoaringBitmap/roaring v1.9.0/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
github.com/alecthomas/assert/v2 v2.6.0 h1:o3WJwILtexrEUk3cUVal3oiQY2tfgr/FHWiz/v2n4FU=
github.com/alecthomas/assert/v2 v2.6.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
github.com/alecthomas/assert/v2 v2.7.0 h1:QtqSACNS3tF7oasA8CU6A6sXZSBDqnm7RfpLl9bZqbE=
github.com/alecthomas/assert/v2 v2.7.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
github.com/alecthomas/chroma/v2 v2.2.0/go.mod h1:vf4zrexSH54oEjJ7EdB65tGNHmH3pGZmVkgTP5RHvAs=
github.com/alecthomas/chroma/v2 v2.13.0 h1:VP72+99Fb2zEcYM0MeaWJmV+xQvz5v5cxRHd+ooU1lI=
github.com/alecthomas/chroma/v2 v2.13.0/go.mod h1:BUGjjsD+ndS6eX37YgTchSEG+Jg9Jv1GiZs9sqPqztk=
github.com/alecthomas/chroma/v2 v2.14.0 h1:R3+wzpnUArGcQz7fCETQBzO5n9IMNi13iIs46aU4V9E=
github.com/alecthomas/chroma/v2 v2.14.0/go.mod h1:QolEbTfmUHIMVpBqxeDnNBj2uoeI4EbYP4i6n68SG4I=
github.com/alecthomas/repr v0.0.0-20220113201626-b1b626ac65ae/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8=
github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA=
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/anchore/archiver/v3 v3.5.2 h1:Bjemm2NzuRhmHy3m0lRe5tNoClB9A4zYyDV58PaB6aA=
github.com/anchore/archiver/v3 v3.5.2/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4=
github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
@ -227,6 +233,8 @@ github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55k
github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY=
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
@ -558,8 +566,6 @@ github.com/meilisearch/meilisearch-go v0.26.2 h1:3gTlmiV1dHHumVUhYdJbvh3camiNiyq
github.com/meilisearch/meilisearch-go v0.26.2/go.mod h1:SxuSqDcPBIykjWz1PX+KzsYzArNLSCadQodWs8extS0=
github.com/mholt/acmez v1.2.0 h1:1hhLxSgY5FvH5HCnGUuwbKY2VQVo8IU7rxXKSnZ7F30=
github.com/mholt/acmez v1.2.0/go.mod h1:VT9YwH1xgNX1kmYY89gY8xPJC84BFAisjo8Egigt4kE=
github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo=
github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4=
github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58=
github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs=
github.com/microsoft/go-mssqldb v1.7.0 h1:sgMPW0HA6Ihd37Yx0MzHyKD726C2kY/8KJsQtXHNaAs=
@ -860,8 +866,8 @@ golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2Uz
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f h1:3CW0unweImhOzd5FmYuRsD4Y4oQFKZIjAnKbjV4WIrw=
golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
golang.org/x/image v0.15.0 h1:kOELfmgrmJlw4Cdb7g/QGuB3CvDrXbqEIww/pNtNBm8=
@ -872,8 +878,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -894,8 +900,8 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -906,8 +912,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -945,8 +951,8 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
@ -956,8 +962,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@ -969,8 +975,9 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -985,8 +992,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -54,7 +54,6 @@ type FindTaskOptions struct {
UpdatedBefore timeutil.TimeStamp
StartedBefore timeutil.TimeStamp
RunnerID int64
IDOrderDesc bool
}
func (opts FindTaskOptions) ToConds() builder.Cond {
@ -84,8 +83,5 @@ func (opts FindTaskOptions) ToConds() builder.Cond {
}
func (opts FindTaskOptions) ToOrders() string {
if opts.IDOrderDesc {
return "`id` DESC"
}
return ""
return "`id` DESC"
}

View File

@ -30,7 +30,7 @@ type Statistic struct {
Mirror, Release, AuthSource, Webhook,
Milestone, Label, HookTask,
Team, UpdateTask, Project,
ProjectBoard, Attachment,
ProjectColumn, Attachment,
Branches, Tags, CommitStatus int64
IssueByLabel []IssueByLabelCount
IssueByRepository []IssueByRepositoryCount
@ -115,6 +115,6 @@ func GetStatistic(ctx context.Context) (stats Statistic) {
stats.Counter.Team, _ = e.Count(new(organization.Team))
stats.Counter.Attachment, _ = e.Count(new(repo_model.Attachment))
stats.Counter.Project, _ = e.Count(new(project_model.Project))
stats.Counter.ProjectBoard, _ = e.Count(new(project_model.Board))
stats.Counter.ProjectColumn, _ = e.Count(new(project_model.Column))
return stats
}

View File

@ -215,16 +215,15 @@ func fileTimestampToTime(timestamp int64) time.Time {
return time.UnixMicro(timestamp)
}
func (f *file) loadMetaByPath() (*dbfsMeta, error) {
func (f *file) loadMetaByPath() error {
var fileMeta dbfsMeta
if ok, err := db.GetEngine(f.ctx).Where("full_path = ?", f.fullPath).Get(&fileMeta); err != nil {
return nil, err
return err
} else if ok {
f.metaID = fileMeta.ID
f.blockSize = fileMeta.BlockSize
return &fileMeta, nil
}
return nil, nil
return nil
}
func (f *file) open(flag int) (err error) {
@ -288,10 +287,7 @@ func (f *file) createEmpty() error {
if err != nil {
return err
}
if _, err = f.loadMetaByPath(); err != nil {
return err
}
return nil
return f.loadMetaByPath()
}
func (f *file) truncate() error {
@ -368,8 +364,5 @@ func buildPath(path string) string {
func newDbFile(ctx context.Context, path string) (*file, error) {
path = buildPath(path)
f := &file{ctx: ctx, fullPath: path, blockSize: defaultFileBlockSize}
if _, err := f.loadMetaByPath(); err != nil {
return nil, err
}
return f, nil
return f, f.loadMetaByPath()
}

View File

@ -45,3 +45,39 @@
is_deleted: false
deleted_by_id: 0
deleted_unix: 0
-
id: 5
repo_id: 10
name: 'master'
commit_id: '65f1bf27bc3bf70f64657658635e66094edbcb4d'
commit_message: 'Initial commit'
commit_time: 1489927679
pusher_id: 12
is_deleted: false
deleted_by_id: 0
deleted_unix: 0
-
id: 6
repo_id: 10
name: 'outdated-new-branch'
commit_id: 'cb24c347e328d83c1e0c3c908a6b2c0a2fcb8a3d'
commit_message: 'add'
commit_time: 1489927679
pusher_id: 12
is_deleted: false
deleted_by_id: 0
deleted_unix: 0
-
id: 14
repo_id: 11
name: 'master'
commit_id: '65f1bf27bc3bf70f64657658635e66094edbcb4d'
commit_message: 'Initial commit'
commit_time: 1489927679
pusher_id: 13
is_deleted: false
deleted_by_id: 0
deleted_unix: 0

View File

@ -1,27 +1,35 @@
-
group_id: 1
max_index: 5
-
group_id: 2
max_index: 2
-
group_id: 3
max_index: 2
-
group_id: 10
max_index: 1
-
group_id: 32
max_index: 2
-
group_id: 48
max_index: 1
-
group_id: 42
max_index: 1
-
group_id: 50
max_index: 1
-
group_id: 51
max_index: 1

View File

@ -117,3 +117,15 @@
uid: 40
org_id: 41
is_public: true
-
id: 21
uid: 12
org_id: 25
is_public: true
-
id: 22
uid: 2
org_id: 35
is_public: true

View File

@ -327,7 +327,7 @@
is_archived: false
is_mirror: false
status: 0
is_fork: false
is_fork: true
fork_id: 10
is_template: false
template_id: 0

View File

@ -239,3 +239,25 @@
num_members: 2
includes_all_repositories: false
can_create_org_repo: false
-
id: 23
org_id: 25
lower_name: owners
name: Owners
authorize: 4 # owner
num_repos: 0
num_members: 1
includes_all_repositories: false
can_create_org_repo: true
-
id: 24
org_id: 35
lower_name: team24
name: team24
authorize: 2 # write
num_repos: 0
num_members: 1
includes_all_repositories: true
can_create_org_repo: false

View File

@ -322,3 +322,21 @@
team_id: 22
type: 3
access_mode: 1
-
id: 55
team_id: 18
type: 1 # code
access_mode: 4
-
id: 56
team_id: 23
type: 1 # code
access_mode: 4
-
id: 57
team_id: 24
type: 1 # code
access_mode: 2

View File

@ -147,3 +147,15 @@
org_id: 41
team_id: 22
uid: 39
-
id: 26
org_id: 25
team_id: 23
uid: 12
-
id: 27
org_id: 35
team_id: 24
uid: 2

View File

@ -918,8 +918,8 @@
num_following: 0
num_stars: 0
num_repos: 0
num_teams: 1
num_members: 1
num_teams: 2
num_members: 2
visibility: 0
repo_admin_change_team_access: false
theme: ""
@ -1289,8 +1289,8 @@
num_following: 0
num_stars: 0
num_repos: 0
num_teams: 1
num_members: 1
num_teams: 2
num_members: 2
visibility: 2
repo_admin_change_team_access: false
theme: ""

View File

@ -10,9 +10,11 @@ import (
"code.gitea.io/gitea/models/db"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/unit"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/optional"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
@ -102,8 +104,9 @@ func (err ErrBranchesEqual) Unwrap() error {
// for pagination, keyword search and filtering
type Branch struct {
ID int64
RepoID int64 `xorm:"UNIQUE(s)"`
Name string `xorm:"UNIQUE(s) NOT NULL"` // git's ref-name is case-sensitive internally, however, in some databases (mssql, mysql, by default), it's case-insensitive at the moment
RepoID int64 `xorm:"UNIQUE(s)"`
Repo *repo_model.Repository `xorm:"-"`
Name string `xorm:"UNIQUE(s) NOT NULL"` // git's ref-name is case-sensitive internally, however, in some databases (mssql, mysql, by default), it's case-insensitive at the moment
CommitID string
CommitMessage string `xorm:"TEXT"` // it only stores the message summary (the first line)
PusherID int64
@ -139,6 +142,14 @@ func (b *Branch) LoadPusher(ctx context.Context) (err error) {
return err
}
func (b *Branch) LoadRepo(ctx context.Context) (err error) {
if b.Repo != nil || b.RepoID == 0 {
return nil
}
b.Repo, err = repo_model.GetRepositoryByID(ctx, b.RepoID)
return err
}
func init() {
db.RegisterModel(new(Branch))
db.RegisterModel(new(RenamedBranch))
@ -400,24 +411,111 @@ func RenameBranch(ctx context.Context, repo *repo_model.Repository, from, to str
return committer.Commit()
}
// FindRecentlyPushedNewBranches return at most 2 new branches pushed by the user in 6 hours which has no opened PRs created
// except the indicate branch
func FindRecentlyPushedNewBranches(ctx context.Context, repoID, userID int64, excludeBranchName string) (BranchList, error) {
branches := make(BranchList, 0, 2)
subQuery := builder.Select("head_branch").From("pull_request").
InnerJoin("issue", "issue.id = pull_request.issue_id").
Where(builder.Eq{
"pull_request.head_repo_id": repoID,
"issue.is_closed": false,
})
err := db.GetEngine(ctx).
Where("pusher_id=? AND is_deleted=?", userID, false).
And("name <> ?", excludeBranchName).
And("repo_id = ?", repoID).
And("commit_time >= ?", time.Now().Add(-time.Hour*6).Unix()).
NotIn("name", subQuery).
OrderBy("branch.commit_time DESC").
Limit(2).
Find(&branches)
return branches, err
type FindRecentlyPushedNewBranchesOptions struct {
Repo *repo_model.Repository
BaseRepo *repo_model.Repository
CommitAfterUnix int64
MaxCount int
}
type RecentlyPushedNewBranch struct {
BranchDisplayName string
BranchLink string
BranchCompareURL string
CommitTime timeutil.TimeStamp
}
// FindRecentlyPushedNewBranches returns the doer's recently pushed new branches that have no pull request created for them yet
// if opts.CommitAfterUnix is 0, only branches committed to within the last 2 hours are considered
// if opts.MaxCount is 0, at most the 2 most recently pushed branches are returned
func FindRecentlyPushedNewBranches(ctx context.Context, doer *user_model.User, opts *FindRecentlyPushedNewBranchesOptions) ([]*RecentlyPushedNewBranch, error) {
if doer == nil {
return []*RecentlyPushedNewBranch{}, nil
}
// find all related repo ids
repoOpts := repo_model.SearchRepoOptions{
Actor: doer,
Private: true,
AllPublic: false, // Include also all public repositories of users and public organisations
AllLimited: false, // Include also all public repositories of limited organisations
Fork: optional.Some(true),
ForkFrom: opts.BaseRepo.ID,
Archived: optional.Some(false),
}
repoCond := repo_model.SearchRepositoryCondition(&repoOpts).And(repo_model.AccessibleRepositoryCondition(doer, unit.TypeCode))
if opts.Repo.ID == opts.BaseRepo.ID {
// should also include the base repo's branches
repoCond = repoCond.Or(builder.Eq{"id": opts.BaseRepo.ID})
} else {
// in fork repo, we only detect the fork repo's branch
repoCond = repoCond.And(builder.Eq{"id": opts.Repo.ID})
}
repoIDs := builder.Select("id").From("repository").Where(repoCond)
if opts.CommitAfterUnix == 0 {
opts.CommitAfterUnix = time.Now().Add(-time.Hour * 2).Unix()
}
baseBranch, err := GetBranch(ctx, opts.BaseRepo.ID, opts.BaseRepo.DefaultBranch)
if err != nil {
return nil, err
}
// find all related branches; these branches may already have PRs, which we check below
var branches []*Branch
if err := db.GetEngine(ctx).
Where(builder.And(
builder.Eq{
"pusher_id": doer.ID,
"is_deleted": false,
},
builder.Gte{"commit_time": opts.CommitAfterUnix},
builder.In("repo_id", repoIDs),
// newly created branches have no changes, so skip them
builder.Neq{"commit_id": baseBranch.CommitID},
)).
OrderBy(db.SearchOrderByRecentUpdated.String()).
Find(&branches); err != nil {
return nil, err
}
newBranches := make([]*RecentlyPushedNewBranch, 0, len(branches))
if opts.MaxCount == 0 {
// by default we display 2 recently pushed new branches
opts.MaxCount = 2
}
for _, branch := range branches {
// check whether the branch already has a PR
count, err := db.GetEngine(ctx).Table("pull_request").
// we should not only use branch name here, because if there are branches with same name in other repos,
// we can not detect them correctly
Where(builder.Eq{"head_repo_id": branch.RepoID, "head_branch": branch.Name}).Count()
if err != nil {
return nil, err
}
// if no PR, we add to the result
if count == 0 {
if err := branch.LoadRepo(ctx); err != nil {
return nil, err
}
branchDisplayName := branch.Name
if branch.Repo.ID != opts.BaseRepo.ID && branch.Repo.ID != opts.Repo.ID {
branchDisplayName = fmt.Sprintf("%s:%s", branch.Repo.FullName(), branchDisplayName)
}
newBranches = append(newBranches, &RecentlyPushedNewBranch{
BranchDisplayName: branchDisplayName,
BranchLink: fmt.Sprintf("%s/src/branch/%s", branch.Repo.Link(), util.PathEscapeSegments(branch.Name)),
BranchCompareURL: branch.Repo.ComposeBranchCompareURL(opts.BaseRepo, branch.Name),
CommitTime: branch.CommitTime,
})
}
if len(newBranches) == opts.MaxCount {
break
}
}
return newBranches, nil
}
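A minimal caller-side sketch of the new API (hypothetical helper; it assumes the `code.gitea.io/gitea/models/git`, `models/repo` and `models/user` import paths and that the caller has already resolved `ctx`, the doer and both repositories):
```go
package example

import (
	"context"

	git_model "code.gitea.io/gitea/models/git"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
)

// recentBranchBanners is a hypothetical helper showing how a handler could
// fetch the data for the "recently pushed branch" banner.
func recentBranchBanners(ctx context.Context, doer *user_model.User, repo, baseRepo *repo_model.Repository) ([]*git_model.RecentlyPushedNewBranch, error) {
	return git_model.FindRecentlyPushedNewBranches(ctx, doer, &git_model.FindRecentlyPushedNewBranchesOptions{
		Repo:     repo,     // the repository currently being viewed
		BaseRepo: baseRepo, // the repository a pull request would target
		MaxCount: 2,        // 0 falls back to the default of two entries
	})
}
```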

View File

@ -7,6 +7,7 @@ import (
"context"
"code.gitea.io/gitea/models/db"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/optional"
@ -59,6 +60,24 @@ func (branches BranchList) LoadPusher(ctx context.Context) error {
return nil
}
func (branches BranchList) LoadRepo(ctx context.Context) error {
ids := container.FilterSlice(branches, func(branch *Branch) (int64, bool) {
return branch.RepoID, branch.RepoID > 0 && branch.Repo == nil
})
reposMap := make(map[int64]*repo_model.Repository, len(ids))
if err := db.GetEngine(ctx).In("id", ids).Find(&reposMap); err != nil {
return err
}
for _, branch := range branches {
if branch.RepoID <= 0 || branch.Repo != nil {
continue
}
branch.Repo = reposMap[branch.RepoID]
}
return nil
}
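A rough sketch of the intended batch-loading pattern (hypothetical helper; the `code.gitea.io/gitea/models/git` import path is assumed): one `LoadRepo` call on the list fills `Repo` for every branch in a single query instead of one query per branch.
```go
package example

import (
	"context"
	"fmt"

	git_model "code.gitea.io/gitea/models/git"
)

// logBranchRepos is a hypothetical helper: a single LoadRepo call on the list
// populates branch.Repo for every entry at once.
func logBranchRepos(ctx context.Context, branches git_model.BranchList) error {
	if err := branches.LoadRepo(ctx); err != nil {
		return err
	}
	for _, b := range branches {
		if b.Repo != nil {
			fmt.Printf("branch %s -> %s\n", b.Name, b.Repo.FullName())
		}
	}
	return nil
}
```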
type FindBranchOptions struct {
db.ListOptions
RepoID int64
@ -88,17 +107,13 @@ func (opts FindBranchOptions) ToConds() builder.Cond {
func (opts FindBranchOptions) ToOrders() string {
orderBy := opts.OrderBy
if opts.IsDeletedBranch.ValueOrDefault(true) { // if deleted branch included, put them at the end
if orderBy != "" {
orderBy += ", "
}
orderBy += "is_deleted ASC"
}
if orderBy == "" {
// the commit_time might be the same, so add the "name" to make sure the order is stable
return "commit_time DESC, name ASC"
orderBy = "commit_time DESC, name ASC"
}
if opts.IsDeletedBranch.ValueOrDefault(true) { // if deleted branches are included, sort on is_deleted first so live branches come before deleted ones
orderBy = "is_deleted ASC, " + orderBy
}
return orderBy
}
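A small sketch of what the new ordering produces with default options (hypothetical snippet; it assumes the `code.gitea.io/gitea/models/git` import path):
```go
package example

import (
	"fmt"

	git_model "code.gitea.io/gitea/models/git"
)

// printDefaultBranchOrder shows the ORDER BY produced by the new ToOrders when
// no explicit OrderBy is set and deleted branches are included (the default).
func printDefaultBranchOrder() {
	opts := git_model.FindBranchOptions{}
	fmt.Println(opts.ToOrders()) // is_deleted ASC, commit_time DESC, name ASC
}
```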

View File

@ -27,23 +27,27 @@ func init() {
// LoadAssignees load assignees of this issue.
func (issue *Issue) LoadAssignees(ctx context.Context) (err error) {
if issue.isAssigneeLoaded || len(issue.Assignees) > 0 {
return nil
}
// Reset any preexisting assignees
issue.Assignees = []*user_model.User{}
issue.Assignee = nil
err = db.GetEngine(ctx).Table("`user`").
if err = db.GetEngine(ctx).Table("`user`").
Join("INNER", "issue_assignees", "assignee_id = `user`.id").
Where("issue_assignees.issue_id = ?", issue.ID).
Find(&issue.Assignees)
if err != nil {
Find(&issue.Assignees); err != nil {
return err
}
issue.isAssigneeLoaded = true
// Check if we have at least one assignee and if yes put it in as `Assignee`
if len(issue.Assignees) > 0 {
issue.Assignee = issue.Assignees[0]
}
return err
return nil
}
// GetAssigneeIDsByIssue returns the IDs of users assigned to an issue

View File

@ -52,6 +52,8 @@ func (err ErrCommentNotExist) Unwrap() error {
return util.ErrNotExist
}
var ErrCommentAlreadyChanged = util.NewInvalidArgumentErrorf("the comment is already changed")
// CommentType defines whether a comment is just a simple comment, an action (like close) or a reference.
type CommentType int
@ -100,8 +102,8 @@ const (
CommentTypeMergePull // 28 merge pull request
CommentTypePullRequestPush // 29 push to PR head branch
CommentTypeProject // 30 Project changed
CommentTypeProjectBoard // 31 Project board changed
CommentTypeProject // 30 Project changed
CommentTypeProjectColumn // 31 Project column changed
CommentTypeDismissReview // 32 Dismiss Review
@ -146,7 +148,7 @@ var commentStrings = []string{
"merge_pull",
"pull_push",
"project",
"project_board",
"project_board", // FIXME: the name should be project_column
"dismiss_review",
"change_issue_ref",
"pull_scheduled_merge",
@ -262,6 +264,7 @@ type Comment struct {
Line int64 // - previous line / + proposed line
TreePath string
Content string `xorm:"LONGTEXT"`
ContentVersion int `xorm:"NOT NULL DEFAULT 0"`
RenderedContent template.HTML `xorm:"-"`
// Path represents the 4 lines of code cemented by this comment
@ -1111,7 +1114,7 @@ func UpdateCommentInvalidate(ctx context.Context, c *Comment) error {
}
// UpdateComment updates information of comment.
func UpdateComment(ctx context.Context, c *Comment, doer *user_model.User) error {
func UpdateComment(ctx context.Context, c *Comment, contentVersion int, doer *user_model.User) error {
ctx, committer, err := db.TxContext(ctx)
if err != nil {
return err
@ -1119,9 +1122,15 @@ func UpdateComment(ctx context.Context, c *Comment, doer *user_model.User) error
defer committer.Close()
sess := db.GetEngine(ctx)
if _, err := sess.ID(c.ID).AllCols().Update(c); err != nil {
c.ContentVersion = contentVersion + 1
affected, err := sess.ID(c.ID).AllCols().Where("content_version = ?", contentVersion).Update(c)
if err != nil {
return err
}
if affected == 0 {
return ErrCommentAlreadyChanged
}
if err := c.LoadIssue(ctx); err != nil {
return err
}
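The `content_version` check acts as an optimistic lock. A hedged caller-side sketch (hypothetical helper; the `code.gitea.io/gitea/models/issues` and `models/user` import paths are assumed) of how the version the client last saw is threaded through:
```go
package example

import (
	"context"
	"errors"
	"fmt"

	issues_model "code.gitea.io/gitea/models/issues"
	user_model "code.gitea.io/gitea/models/user"
)

// editComment is a hypothetical caller: it passes the content version the
// client last saw, so a concurrent edit surfaces as ErrCommentAlreadyChanged
// instead of silently overwriting the newer content.
func editComment(ctx context.Context, c *issues_model.Comment, doer *user_model.User, newContent string, seenVersion int) error {
	c.Content = newContent
	if err := issues_model.UpdateComment(ctx, c, seenVersion, doer); err != nil {
		if errors.Is(err, issues_model.ErrCommentAlreadyChanged) {
			// Someone else edited the comment first: ask the user to reload.
			return fmt.Errorf("comment was edited concurrently, please reload: %w", err)
		}
		return err
	}
	return nil
}
```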

View File

@ -113,7 +113,8 @@ func findCodeComments(ctx context.Context, opts FindCommentsOptions, issue *Issu
var err error
if comment.RenderedContent, err = markdown.RenderString(&markup.RenderContext{
Ctx: ctx,
Ctx: ctx,
Repo: issue.Repo,
Links: markup.Links{
Base: issue.Repo.Link(),
},

View File

@ -16,25 +16,25 @@ import (
// CommentList defines a list of comments
type CommentList []*Comment
func (comments CommentList) getPosterIDs() []int64 {
return container.FilterSlice(comments, func(c *Comment) (int64, bool) {
return c.PosterID, c.PosterID > 0
})
}
// LoadPosters loads posters
func (comments CommentList) LoadPosters(ctx context.Context) error {
if len(comments) == 0 {
return nil
}
posterMaps, err := getPosters(ctx, comments.getPosterIDs())
posterIDs := container.FilterSlice(comments, func(c *Comment) (int64, bool) {
return c.PosterID, c.Poster == nil && c.PosterID > 0
})
posterMaps, err := getPostersByIDs(ctx, posterIDs)
if err != nil {
return err
}
for _, comment := range comments {
comment.Poster = getPoster(comment.PosterID, posterMaps)
if comment.Poster == nil {
comment.Poster = getPoster(comment.PosterID, posterMaps)
}
}
return nil
}

View File

@ -94,33 +94,39 @@ func (err ErrIssueWasClosed) Error() string {
return fmt.Sprintf("Issue [%d] %d was already closed", err.ID, err.Index)
}
var ErrIssueAlreadyChanged = util.NewInvalidArgumentErrorf("the issue is already changed")
// Issue represents an issue or pull request of repository.
type Issue struct {
ID int64 `xorm:"pk autoincr"`
RepoID int64 `xorm:"INDEX UNIQUE(repo_index)"`
Repo *repo_model.Repository `xorm:"-"`
Index int64 `xorm:"UNIQUE(repo_index)"` // Index in one repository.
PosterID int64 `xorm:"INDEX"`
Poster *user_model.User `xorm:"-"`
OriginalAuthor string
OriginalAuthorID int64 `xorm:"index"`
Title string `xorm:"name"`
Content string `xorm:"LONGTEXT"`
RenderedContent template.HTML `xorm:"-"`
Labels []*Label `xorm:"-"`
MilestoneID int64 `xorm:"INDEX"`
Milestone *Milestone `xorm:"-"`
Project *project_model.Project `xorm:"-"`
Priority int
AssigneeID int64 `xorm:"-"`
Assignee *user_model.User `xorm:"-"`
IsClosed bool `xorm:"INDEX"`
IsRead bool `xorm:"-"`
IsPull bool `xorm:"INDEX"` // Indicates whether is a pull request or not.
PullRequest *PullRequest `xorm:"-"`
NumComments int
Ref string
PinOrder int `xorm:"DEFAULT 0"`
ID int64 `xorm:"pk autoincr"`
RepoID int64 `xorm:"INDEX UNIQUE(repo_index)"`
Repo *repo_model.Repository `xorm:"-"`
Index int64 `xorm:"UNIQUE(repo_index)"` // Index in one repository.
PosterID int64 `xorm:"INDEX"`
Poster *user_model.User `xorm:"-"`
OriginalAuthor string
OriginalAuthorID int64 `xorm:"index"`
Title string `xorm:"name"`
Content string `xorm:"LONGTEXT"`
RenderedContent template.HTML `xorm:"-"`
ContentVersion int `xorm:"NOT NULL DEFAULT 0"`
Labels []*Label `xorm:"-"`
isLabelsLoaded bool `xorm:"-"`
MilestoneID int64 `xorm:"INDEX"`
Milestone *Milestone `xorm:"-"`
isMilestoneLoaded bool `xorm:"-"`
Project *project_model.Project `xorm:"-"`
Priority int
AssigneeID int64 `xorm:"-"`
Assignee *user_model.User `xorm:"-"`
isAssigneeLoaded bool `xorm:"-"`
IsClosed bool `xorm:"INDEX"`
IsRead bool `xorm:"-"`
IsPull bool `xorm:"INDEX"` // Indicates whether is a pull request or not.
PullRequest *PullRequest `xorm:"-"`
NumComments int
Ref string
PinOrder int `xorm:"DEFAULT 0"`
DeadlineUnix timeutil.TimeStamp `xorm:"INDEX"`
@ -128,11 +134,12 @@ type Issue struct {
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
ClosedUnix timeutil.TimeStamp `xorm:"INDEX"`
Attachments []*repo_model.Attachment `xorm:"-"`
Comments CommentList `xorm:"-"`
Reactions ReactionList `xorm:"-"`
TotalTrackedTime int64 `xorm:"-"`
Assignees []*user_model.User `xorm:"-"`
Attachments []*repo_model.Attachment `xorm:"-"`
isAttachmentsLoaded bool `xorm:"-"`
Comments CommentList `xorm:"-"`
Reactions ReactionList `xorm:"-"`
TotalTrackedTime int64 `xorm:"-"`
Assignees []*user_model.User `xorm:"-"`
// IsLocked limits commenting abilities to users on an issue
// with write access
@ -184,6 +191,19 @@ func (issue *Issue) LoadRepo(ctx context.Context) (err error) {
return nil
}
func (issue *Issue) LoadAttachments(ctx context.Context) (err error) {
if issue.isAttachmentsLoaded || issue.Attachments != nil {
return nil
}
issue.Attachments, err = repo_model.GetAttachmentsByIssueID(ctx, issue.ID)
if err != nil {
return fmt.Errorf("getAttachmentsByIssueID [%d]: %w", issue.ID, err)
}
issue.isAttachmentsLoaded = true
return nil
}
// IsTimetrackerEnabled returns true if the repo enables timetracking
func (issue *Issue) IsTimetrackerEnabled(ctx context.Context) bool {
if err := issue.LoadRepo(ctx); err != nil {
@ -284,11 +304,12 @@ func (issue *Issue) loadReactions(ctx context.Context) (err error) {
// LoadMilestone load milestone of this issue.
func (issue *Issue) LoadMilestone(ctx context.Context) (err error) {
if (issue.Milestone == nil || issue.Milestone.ID != issue.MilestoneID) && issue.MilestoneID > 0 {
if !issue.isMilestoneLoaded && (issue.Milestone == nil || issue.Milestone.ID != issue.MilestoneID) && issue.MilestoneID > 0 {
issue.Milestone, err = GetMilestoneByRepoID(ctx, issue.RepoID, issue.MilestoneID)
if err != nil && !IsErrMilestoneNotExist(err) {
return fmt.Errorf("getMilestoneByRepoID [repo_id: %d, milestone_id: %d]: %w", issue.RepoID, issue.MilestoneID, err)
}
issue.isMilestoneLoaded = true
}
return nil
}
@ -324,11 +345,8 @@ func (issue *Issue) LoadAttributes(ctx context.Context) (err error) {
return err
}
if issue.Attachments == nil {
issue.Attachments, err = repo_model.GetAttachmentsByIssueID(ctx, issue.ID)
if err != nil {
return fmt.Errorf("getAttachmentsByIssueID [%d]: %w", issue.ID, err)
}
if err = issue.LoadAttachments(ctx); err != nil {
return err
}
if err = issue.loadComments(ctx); err != nil {
@ -347,6 +365,13 @@ func (issue *Issue) LoadAttributes(ctx context.Context) (err error) {
return issue.loadReactions(ctx)
}
func (issue *Issue) ResetAttributesLoaded() {
issue.isLabelsLoaded = false
issue.isMilestoneLoaded = false
issue.isAttachmentsLoaded = false
issue.isAssigneeLoaded = false
}
// GetIsRead load the `IsRead` field of the issue
func (issue *Issue) GetIsRead(ctx context.Context, userID int64) error {
issueUser := &IssueUser{IssueID: issue.ID, UID: userID}
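The new `is*Loaded` flags make these loaders idempotent. A rough sketch (hypothetical helper; the `code.gitea.io/gitea/models/issues` import path is assumed) of why defensive reloading is now cheap:
```go
package example

import (
	"context"

	issues_model "code.gitea.io/gitea/models/issues"
)

// prepareIssue is a hypothetical helper: with the new is*Loaded flags the
// Load* methods remember that they already ran, so calling them defensively
// from several code paths no longer issues duplicate queries.
func prepareIssue(ctx context.Context, issue *issues_model.Issue) error {
	if err := issue.LoadAttachments(ctx); err != nil {
		return err
	}
	// Second call is a no-op: isAttachmentsLoaded short-circuits the query.
	return issue.LoadAttachments(ctx)
}
```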

View File

@ -111,6 +111,7 @@ func NewIssueLabel(ctx context.Context, issue *Issue, label *Label, doer *user_m
return err
}
issue.isLabelsLoaded = false
issue.Labels = nil
if err = issue.LoadLabels(ctx); err != nil {
return err
@ -160,6 +161,8 @@ func NewIssueLabels(ctx context.Context, issue *Issue, labels []*Label, doer *us
return err
}
// reload all labels
issue.isLabelsLoaded = false
issue.Labels = nil
if err = issue.LoadLabels(ctx); err != nil {
return err
@ -325,11 +328,12 @@ func FixIssueLabelWithOutsideLabels(ctx context.Context) (int64, error) {
// LoadLabels loads labels
func (issue *Issue) LoadLabels(ctx context.Context) (err error) {
if issue.Labels == nil && issue.ID != 0 {
if !issue.isLabelsLoaded && issue.Labels == nil && issue.ID != 0 {
issue.Labels, err = GetLabelsByIssueID(ctx, issue.ID)
if err != nil {
return fmt.Errorf("getLabelsByIssueID [%d]: %w", issue.ID, err)
}
issue.isLabelsLoaded = true
}
return nil
}

View File

@ -72,29 +72,29 @@ func (issues IssueList) LoadRepositories(ctx context.Context) (repo_model.Reposi
return repo_model.ValuesRepository(repoMaps), nil
}
func (issues IssueList) getPosterIDs() []int64 {
return container.FilterSlice(issues, func(issue *Issue) (int64, bool) {
return issue.PosterID, true
})
}
func (issues IssueList) loadPosters(ctx context.Context) error {
func (issues IssueList) LoadPosters(ctx context.Context) error {
if len(issues) == 0 {
return nil
}
posterMaps, err := getPosters(ctx, issues.getPosterIDs())
posterIDs := container.FilterSlice(issues, func(issue *Issue) (int64, bool) {
return issue.PosterID, issue.Poster == nil && issue.PosterID > 0
})
posterMaps, err := getPostersByIDs(ctx, posterIDs)
if err != nil {
return err
}
for _, issue := range issues {
issue.Poster = getPoster(issue.PosterID, posterMaps)
if issue.Poster == nil {
issue.Poster = getPoster(issue.PosterID, posterMaps)
}
}
return nil
}
func getPosters(ctx context.Context, posterIDs []int64) (map[int64]*user_model.User, error) {
func getPostersByIDs(ctx context.Context, posterIDs []int64) (map[int64]*user_model.User, error) {
posterMaps := make(map[int64]*user_model.User, len(posterIDs))
left := len(posterIDs)
for left > 0 {
@ -136,7 +136,7 @@ func (issues IssueList) getIssueIDs() []int64 {
return ids
}
func (issues IssueList) loadLabels(ctx context.Context) error {
func (issues IssueList) LoadLabels(ctx context.Context) error {
if len(issues) == 0 {
return nil
}
@ -168,7 +168,7 @@ func (issues IssueList) loadLabels(ctx context.Context) error {
err = rows.Scan(&labelIssue)
if err != nil {
if err1 := rows.Close(); err1 != nil {
return fmt.Errorf("IssueList.loadLabels: Close: %w", err1)
return fmt.Errorf("IssueList.LoadLabels: Close: %w", err1)
}
return err
}
@ -177,7 +177,7 @@ func (issues IssueList) loadLabels(ctx context.Context) error {
// When there are no rows left and we try to close it.
// Since that is not relevant for us, we can safely ignore it.
if err1 := rows.Close(); err1 != nil {
return fmt.Errorf("IssueList.loadLabels: Close: %w", err1)
return fmt.Errorf("IssueList.LoadLabels: Close: %w", err1)
}
left -= limit
issueIDs = issueIDs[limit:]
@ -185,6 +185,7 @@ func (issues IssueList) loadLabels(ctx context.Context) error {
for _, issue := range issues {
issue.Labels = issueLabels[issue.ID]
issue.isLabelsLoaded = true
}
return nil
}
@ -195,7 +196,7 @@ func (issues IssueList) getMilestoneIDs() []int64 {
})
}
func (issues IssueList) loadMilestones(ctx context.Context) error {
func (issues IssueList) LoadMilestones(ctx context.Context) error {
milestoneIDs := issues.getMilestoneIDs()
if len(milestoneIDs) == 0 {
return nil
@ -220,6 +221,7 @@ func (issues IssueList) loadMilestones(ctx context.Context) error {
for _, issue := range issues {
issue.Milestone = milestoneMaps[issue.MilestoneID]
issue.isMilestoneLoaded = true
}
return nil
}
@ -263,7 +265,7 @@ func (issues IssueList) LoadProjects(ctx context.Context) error {
return nil
}
func (issues IssueList) loadAssignees(ctx context.Context) error {
func (issues IssueList) LoadAssignees(ctx context.Context) error {
if len(issues) == 0 {
return nil
}
@ -310,6 +312,10 @@ func (issues IssueList) loadAssignees(ctx context.Context) error {
for _, issue := range issues {
issue.Assignees = assignees[issue.ID]
if len(issue.Assignees) > 0 {
issue.Assignee = issue.Assignees[0]
}
issue.isAssigneeLoaded = true
}
return nil
}
@ -413,6 +419,7 @@ func (issues IssueList) LoadAttachments(ctx context.Context) (err error) {
for _, issue := range issues {
issue.Attachments = attachments[issue.ID]
issue.isAttachmentsLoaded = true
}
return nil
}
@ -538,23 +545,23 @@ func (issues IssueList) LoadAttributes(ctx context.Context) error {
return fmt.Errorf("issue.loadAttributes: LoadRepositories: %w", err)
}
if err := issues.loadPosters(ctx); err != nil {
return fmt.Errorf("issue.loadAttributes: loadPosters: %w", err)
if err := issues.LoadPosters(ctx); err != nil {
return fmt.Errorf("issue.loadAttributes: LoadPosters: %w", err)
}
if err := issues.loadLabels(ctx); err != nil {
return fmt.Errorf("issue.loadAttributes: loadLabels: %w", err)
if err := issues.LoadLabels(ctx); err != nil {
return fmt.Errorf("issue.loadAttributes: LoadLabels: %w", err)
}
if err := issues.loadMilestones(ctx); err != nil {
return fmt.Errorf("issue.loadAttributes: loadMilestones: %w", err)
if err := issues.LoadMilestones(ctx); err != nil {
return fmt.Errorf("issue.loadAttributes: LoadMilestones: %w", err)
}
if err := issues.LoadProjects(ctx); err != nil {
return fmt.Errorf("issue.loadAttributes: loadProjects: %w", err)
}
if err := issues.loadAssignees(ctx); err != nil {
if err := issues.LoadAssignees(ctx); err != nil {
return fmt.Errorf("issue.loadAttributes: loadAssignees: %w", err)
}
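With the per-attribute loaders now exported, a caller can load only what it renders instead of the all-in-one `LoadAttributes`. A minimal sketch (hypothetical helper; the `code.gitea.io/gitea/models/issues` import path is assumed):
```go
package example

import (
	"context"

	issues_model "code.gitea.io/gitea/models/issues"
)

// prepareListView is a hypothetical helper for a list page that only renders
// posters and labels, so it skips the heavier LoadAttributes call.
func prepareListView(ctx context.Context, issues issues_model.IssueList) error {
	if err := issues.LoadPosters(ctx); err != nil {
		return err
	}
	return issues.LoadLabels(ctx)
}
```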

View File

@ -37,22 +37,22 @@ func (issue *Issue) projectID(ctx context.Context) int64 {
return ip.ProjectID
}
// ProjectBoardID return project board id if issue was assigned to one
func (issue *Issue) ProjectBoardID(ctx context.Context) int64 {
// ProjectColumnID returns the project column ID if the issue was assigned to one
func (issue *Issue) ProjectColumnID(ctx context.Context) int64 {
var ip project_model.ProjectIssue
has, err := db.GetEngine(ctx).Where("issue_id=?", issue.ID).Get(&ip)
if err != nil || !has {
return 0
}
return ip.ProjectBoardID
return ip.ProjectColumnID
}
// LoadIssuesFromBoard load issues assigned to this board
func LoadIssuesFromBoard(ctx context.Context, b *project_model.Board) (IssueList, error) {
// LoadIssuesFromColumn loads the issues assigned to this column
func LoadIssuesFromColumn(ctx context.Context, b *project_model.Column) (IssueList, error) {
issueList, err := Issues(ctx, &IssuesOptions{
ProjectBoardID: b.ID,
ProjectID: b.ProjectID,
SortType: "project-column-sorting",
ProjectColumnID: b.ID,
ProjectID: b.ProjectID,
SortType: "project-column-sorting",
})
if err != nil {
return nil, err
@ -60,9 +60,9 @@ func LoadIssuesFromBoard(ctx context.Context, b *project_model.Board) (IssueList
if b.Default {
issues, err := Issues(ctx, &IssuesOptions{
ProjectBoardID: db.NoConditionID,
ProjectID: b.ProjectID,
SortType: "project-column-sorting",
ProjectColumnID: db.NoConditionID,
ProjectID: b.ProjectID,
SortType: "project-column-sorting",
})
if err != nil {
return nil, err
@ -77,11 +77,11 @@ func LoadIssuesFromBoard(ctx context.Context, b *project_model.Board) (IssueList
return issueList, nil
}
// LoadIssuesFromBoardList load issues assigned to the boards
func LoadIssuesFromBoardList(ctx context.Context, bs project_model.BoardList) (map[int64]IssueList, error) {
// LoadIssuesFromColumnList loads the issues assigned to the given columns
func LoadIssuesFromColumnList(ctx context.Context, bs project_model.ColumnList) (map[int64]IssueList, error) {
issuesMap := make(map[int64]IssueList, len(bs))
for i := range bs {
il, err := LoadIssuesFromBoard(ctx, bs[i])
il, err := LoadIssuesFromColumn(ctx, bs[i])
if err != nil {
return nil, err
}
@ -110,7 +110,7 @@ func IssueAssignOrRemoveProject(ctx context.Context, issue *Issue, doer *user_mo
return util.NewPermissionDeniedErrorf("issue %d can't be accessed by project %d", issue.ID, newProject.ID)
}
if newColumnID == 0 {
newDefaultColumn, err := newProject.GetDefaultBoard(ctx)
newDefaultColumn, err := newProject.GetDefaultColumn(ctx)
if err != nil {
return err
}
@ -153,10 +153,10 @@ func IssueAssignOrRemoveProject(ctx context.Context, issue *Issue, doer *user_mo
}
newSorting := util.Iif(res.IssueCount > 0, res.MaxSorting+1, 0)
return db.Insert(ctx, &project_model.ProjectIssue{
IssueID: issue.ID,
ProjectID: newProjectID,
ProjectBoardID: newColumnID,
Sorting: newSorting,
IssueID: issue.ID,
ProjectID: newProjectID,
ProjectColumnID: newColumnID,
Sorting: newSorting,
})
})
}

View File

@ -33,7 +33,7 @@ type IssuesOptions struct { //nolint
SubscriberID int64
MilestoneIDs []int64
ProjectID int64
ProjectBoardID int64
ProjectColumnID int64
IsClosed optional.Option[bool]
IsPull optional.Option[bool]
LabelIDs []int64
@ -99,9 +99,9 @@ func applySorts(sess *xorm.Session, sortType string, priorityRepoID int64) {
}
}
func applyLimit(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
func applyLimit(sess *xorm.Session, opts *IssuesOptions) {
if opts.Paginator == nil || opts.Paginator.IsListAll() {
return sess
return
}
start := 0
@ -109,11 +109,9 @@ func applyLimit(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
start = (opts.Paginator.Page - 1) * opts.Paginator.PageSize
}
sess.Limit(opts.Paginator.PageSize, start)
return sess
}
func applyLabelsCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
func applyLabelsCondition(sess *xorm.Session, opts *IssuesOptions) {
if len(opts.LabelIDs) > 0 {
if opts.LabelIDs[0] == 0 {
sess.Where("issue.id NOT IN (SELECT issue_id FROM issue_label)")
@ -136,11 +134,9 @@ func applyLabelsCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session
if len(opts.ExcludedLabelNames) > 0 {
sess.And(builder.NotIn("issue.id", BuildLabelNamesIssueIDsCondition(opts.ExcludedLabelNames)))
}
return sess
}
func applyMilestoneCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
func applyMilestoneCondition(sess *xorm.Session, opts *IssuesOptions) {
if len(opts.MilestoneIDs) == 1 && opts.MilestoneIDs[0] == db.NoConditionID {
sess.And("issue.milestone_id = 0")
} else if len(opts.MilestoneIDs) > 0 {
@ -153,11 +149,9 @@ func applyMilestoneCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Sess
From("milestone").
Where(builder.In("name", opts.IncludeMilestones)))
}
return sess
}
func applyProjectCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
func applyProjectCondition(sess *xorm.Session, opts *IssuesOptions) {
if opts.ProjectID > 0 { // specific project
sess.Join("INNER", "project_issue", "issue.id = project_issue.issue_id").
And("project_issue.project_id=?", opts.ProjectID)
@ -166,21 +160,19 @@ func applyProjectCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Sessio
}
// opts.ProjectID == 0 means all projects,
// do not need to apply any condition
return sess
}
func applyProjectBoardCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
// opts.ProjectBoardID == 0 means all project boards,
func applyProjectColumnCondition(sess *xorm.Session, opts *IssuesOptions) {
// opts.ProjectColumnID == 0 means all project columns,
// do not need to apply any condition
if opts.ProjectBoardID > 0 {
sess.In("issue.id", builder.Select("issue_id").From("project_issue").Where(builder.Eq{"project_board_id": opts.ProjectBoardID}))
} else if opts.ProjectBoardID == db.NoConditionID {
if opts.ProjectColumnID > 0 {
sess.In("issue.id", builder.Select("issue_id").From("project_issue").Where(builder.Eq{"project_board_id": opts.ProjectColumnID}))
} else if opts.ProjectColumnID == db.NoConditionID {
sess.In("issue.id", builder.Select("issue_id").From("project_issue").Where(builder.Eq{"project_board_id": 0}))
}
return sess
}
func applyRepoConditions(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
func applyRepoConditions(sess *xorm.Session, opts *IssuesOptions) {
if len(opts.RepoIDs) == 1 {
opts.RepoCond = builder.Eq{"issue.repo_id": opts.RepoIDs[0]}
} else if len(opts.RepoIDs) > 1 {
@ -195,10 +187,9 @@ func applyRepoConditions(sess *xorm.Session, opts *IssuesOptions) *xorm.Session
if opts.RepoCond != nil {
sess.And(opts.RepoCond)
}
return sess
}
func applyConditions(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
func applyConditions(sess *xorm.Session, opts *IssuesOptions) {
if len(opts.IssueIDs) > 0 {
sess.In("issue.id", opts.IssueIDs)
}
@ -246,7 +237,7 @@ func applyConditions(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
applyProjectCondition(sess, opts)
applyProjectBoardCondition(sess, opts)
applyProjectColumnCondition(sess, opts)
if opts.IsPull.Has() {
sess.And("issue.is_pull=?", opts.IsPull.Value())
@ -261,8 +252,6 @@ func applyConditions(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
if opts.User != nil {
sess.And(issuePullAccessibleRepoCond("issue.repo_id", opts.User.ID, opts.Org, opts.Team, opts.IsPull.Value()))
}
return sess
}
// teamUnitsRepoCond returns the query condition for those repo IDs in the specified org team with the specified units access
@ -339,22 +328,22 @@ func issuePullAccessibleRepoCond(repoIDstr string, userID int64, org *organizati
return cond
}
func applyAssigneeCondition(sess *xorm.Session, assigneeID int64) *xorm.Session {
return sess.Join("INNER", "issue_assignees", "issue.id = issue_assignees.issue_id").
func applyAssigneeCondition(sess *xorm.Session, assigneeID int64) {
sess.Join("INNER", "issue_assignees", "issue.id = issue_assignees.issue_id").
And("issue_assignees.assignee_id = ?", assigneeID)
}
func applyPosterCondition(sess *xorm.Session, posterID int64) *xorm.Session {
return sess.And("issue.poster_id=?", posterID)
func applyPosterCondition(sess *xorm.Session, posterID int64) {
sess.And("issue.poster_id=?", posterID)
}
func applyMentionedCondition(sess *xorm.Session, mentionedID int64) *xorm.Session {
return sess.Join("INNER", "issue_user", "issue.id = issue_user.issue_id").
func applyMentionedCondition(sess *xorm.Session, mentionedID int64) {
sess.Join("INNER", "issue_user", "issue.id = issue_user.issue_id").
And("issue_user.is_mentioned = ?", true).
And("issue_user.uid = ?", mentionedID)
}
func applyReviewRequestedCondition(sess *xorm.Session, reviewRequestedID int64) *xorm.Session {
func applyReviewRequestedCondition(sess *xorm.Session, reviewRequestedID int64) {
existInTeamQuery := builder.Select("team_user.team_id").
From("team_user").
Where(builder.Eq{"team_user.uid": reviewRequestedID})
@ -375,11 +364,11 @@ func applyReviewRequestedCondition(sess *xorm.Session, reviewRequestedID int64)
),
builder.In("review.id", maxReview),
))
return sess.Where("issue.poster_id <> ?", reviewRequestedID).
sess.Where("issue.poster_id <> ?", reviewRequestedID).
And(builder.In("issue.id", subQuery))
}
func applyReviewedCondition(sess *xorm.Session, reviewedID int64) *xorm.Session {
func applyReviewedCondition(sess *xorm.Session, reviewedID int64) {
// Query for pull requests where you are a reviewer or commenter, excluding
// any pull requests already returned by the review requested filter.
notPoster := builder.Neq{"issue.poster_id": reviewedID}
@ -406,11 +395,11 @@ func applyReviewedCondition(sess *xorm.Session, reviewedID int64) *xorm.Session
builder.In("type", CommentTypeComment, CommentTypeCode, CommentTypeReview),
)),
)
return sess.And(notPoster, builder.Or(reviewed, commented))
sess.And(notPoster, builder.Or(reviewed, commented))
}
func applySubscribedCondition(sess *xorm.Session, subscriberID int64) *xorm.Session {
return sess.And(
func applySubscribedCondition(sess *xorm.Session, subscriberID int64) {
sess.And(
builder.
NotIn("issue.id",
builder.Select("issue_id").

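These issue-search helpers now mutate the passed session in place instead of returning it, so callers drop the reassignment. A minimal sketch of the new call pattern, assuming it lives in the same package as the helpers with that file's existing imports (context, code.gitea.io/gitea/models/db); the function name is hypothetical:

func findIssuesSketch(ctx context.Context, opts *IssuesOptions) ([]*Issue, error) {
	sess := db.GetEngine(ctx).Table("issue")
	applyConditions(sess, opts) // mutates sess in place; nothing to reassign anymore
	applyLimit(sess, opts)
	issues := make([]*Issue, 0, 10)
	return issues, sess.Find(&issues)
}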
View File

@ -235,7 +235,7 @@ func UpdateIssueAttachments(ctx context.Context, issueID int64, uuids []string)
}
// ChangeIssueContent changes issue content, as the given user.
func ChangeIssueContent(ctx context.Context, issue *Issue, doer *user_model.User, content string) (err error) {
func ChangeIssueContent(ctx context.Context, issue *Issue, doer *user_model.User, content string, contentVersion int) (err error) {
ctx, committer, err := db.TxContext(ctx)
if err != nil {
return err
@ -254,9 +254,14 @@ func ChangeIssueContent(ctx context.Context, issue *Issue, doer *user_model.User
}
issue.Content = content
issue.ContentVersion = contentVersion + 1
if err = UpdateIssueCols(ctx, issue, "content"); err != nil {
return fmt.Errorf("UpdateIssueCols: %w", err)
affected, err := db.GetEngine(ctx).ID(issue.ID).Cols("content", "content_version").Where("content_version = ?", contentVersion).Update(issue)
if err != nil {
return err
}
if affected == 0 {
return ErrIssueAlreadyChanged
}
if err = SaveIssueContentHistory(ctx, doer.ID, issue.ID, 0,

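The rewritten update above guards the write with a content_version check, so a stale editor gets ErrIssueAlreadyChanged instead of silently overwriting newer content. A hedged sketch of a caller reacting to that conflict, assuming ErrIssueAlreadyChanged is a sentinel error value and that the issues and user model packages are imported under the usual issues_model and user_model aliases along with context, errors and fmt; the function name is hypothetical:

func saveIssueContentSketch(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, newContent string, clientVersion int) error {
	err := issues_model.ChangeIssueContent(ctx, issue, doer, newContent, clientVersion)
	if errors.Is(err, issues_model.ErrIssueAlreadyChanged) {
		// the content was edited concurrently; surface a conflict so the user can reload
		return fmt.Errorf("issue content was changed by someone else, please reload: %w", err)
	}
	return err
}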
View File

@ -159,10 +159,11 @@ type PullRequest struct {
ChangedProtectedFiles []string `xorm:"TEXT JSON"`
IssueID int64 `xorm:"INDEX"`
Issue *Issue `xorm:"-"`
Index int64
RequestedReviewers []*user_model.User `xorm:"-"`
IssueID int64 `xorm:"INDEX"`
Issue *Issue `xorm:"-"`
Index int64
RequestedReviewers []*user_model.User `xorm:"-"`
isRequestedReviewersLoaded bool `xorm:"-"`
HeadRepoID int64 `xorm:"INDEX"`
HeadRepo *repo_model.Repository `xorm:"-"`
@ -289,7 +290,7 @@ func (pr *PullRequest) LoadHeadRepo(ctx context.Context) (err error) {
// LoadRequestedReviewers loads the requested reviewers.
func (pr *PullRequest) LoadRequestedReviewers(ctx context.Context) error {
if len(pr.RequestedReviewers) > 0 {
if pr.isRequestedReviewersLoaded || len(pr.RequestedReviewers) > 0 {
return nil
}
@ -297,10 +298,10 @@ func (pr *PullRequest) LoadRequestedReviewers(ctx context.Context) error {
if err != nil {
return err
}
if err = reviews.LoadReviewers(ctx); err != nil {
return err
}
pr.isRequestedReviewersLoaded = true
for _, review := range reviews {
pr.RequestedReviewers = append(pr.RequestedReviewers, review.Reviewer)
}
@ -430,6 +431,21 @@ func (pr *PullRequest) GetGitHeadBranchRefName() string {
return fmt.Sprintf("%s%s", git.BranchPrefix, pr.HeadBranch)
}
// GetReviewCommentsCount returns the number of review comments made on the diff of a PR review (not including comments on commits or issues in a PR)
func (pr *PullRequest) GetReviewCommentsCount(ctx context.Context) int {
opts := FindCommentsOptions{
Type: CommentTypeReview,
IssueID: pr.IssueID,
}
conds := opts.ToConds()
count, err := db.GetEngine(ctx).Where(conds).Count(new(Comment))
if err != nil {
return 0
}
return int(count)
}
// IsChecking returns true if this pull request is still checking conflict.
func (pr *PullRequest) IsChecking() bool {
return pr.Status == PullRequestStatusChecking

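The new isRequestedReviewersLoaded flag makes LoadRequestedReviewers safe to call repeatedly even when a pull request legitimately has zero requested reviewers, and GetReviewCommentsCount gives a cheap count of diff comments. A small in-package sketch (hypothetical name, no extra imports assumed):

func reviewSummarySketch(ctx context.Context, pr *PullRequest) (requested, diffComments int, err error) {
	if err = pr.LoadRequestedReviewers(ctx); err != nil { // first call queries the reviews
		return 0, 0, err
	}
	_ = pr.LoadRequestedReviewers(ctx) // second call is a no-op thanks to isRequestedReviewersLoaded
	return len(pr.RequestedReviewers), pr.GetReviewCommentsCount(ctx), nil
}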
View File

@ -9,8 +9,10 @@ import (
"code.gitea.io/gitea/models/db"
access_model "code.gitea.io/gitea/models/perm/access"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/unit"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/util"
@ -26,7 +28,7 @@ type PullRequestsOptions struct {
MilestoneID int64
}
func listPullRequestStatement(ctx context.Context, baseRepoID int64, opts *PullRequestsOptions) (*xorm.Session, error) {
func listPullRequestStatement(ctx context.Context, baseRepoID int64, opts *PullRequestsOptions) *xorm.Session {
sess := db.GetEngine(ctx).Where("pull_request.base_repo_id=?", baseRepoID)
sess.Join("INNER", "issue", "pull_request.issue_id = issue.id")
@ -44,7 +46,7 @@ func listPullRequestStatement(ctx context.Context, baseRepoID int64, opts *PullR
sess.And("issue.milestone_id=?", opts.MilestoneID)
}
return sess, nil
return sess
}
// GetUnmergedPullRequestsByHeadInfo returns all pull requests that are open and have not been merged
@ -123,28 +125,20 @@ func GetPullRequestIDsByCheckStatus(ctx context.Context, status PullRequestStatu
}
// PullRequests returns all pull requests for a base Repo by the given conditions
func PullRequests(ctx context.Context, baseRepoID int64, opts *PullRequestsOptions) ([]*PullRequest, int64, error) {
func PullRequests(ctx context.Context, baseRepoID int64, opts *PullRequestsOptions) (PullRequestList, int64, error) {
if opts.Page <= 0 {
opts.Page = 1
}
countSession, err := listPullRequestStatement(ctx, baseRepoID, opts)
if err != nil {
log.Error("listPullRequestStatement: %v", err)
return nil, 0, err
}
countSession := listPullRequestStatement(ctx, baseRepoID, opts)
maxResults, err := countSession.Count(new(PullRequest))
if err != nil {
log.Error("Count PRs: %v", err)
return nil, maxResults, err
}
findSession, err := listPullRequestStatement(ctx, baseRepoID, opts)
findSession := listPullRequestStatement(ctx, baseRepoID, opts)
applySorts(findSession, opts.SortType, 0)
if err != nil {
log.Error("listPullRequestStatement: %v", err)
return nil, maxResults, err
}
findSession = db.SetSessionPagination(findSession, opts)
prs := make([]*PullRequest, 0, opts.PageSize)
return prs, maxResults, findSession.Find(&prs)
@ -153,50 +147,93 @@ func PullRequests(ctx context.Context, baseRepoID int64, opts *PullRequestsOptio
// PullRequestList defines a list of pull requests
type PullRequestList []*PullRequest
func (prs PullRequestList) LoadAttributes(ctx context.Context) error {
if len(prs) == 0 {
return nil
func (prs PullRequestList) getRepositoryIDs() []int64 {
repoIDs := make(container.Set[int64])
for _, pr := range prs {
if pr.BaseRepo == nil && pr.BaseRepoID > 0 {
repoIDs.Add(pr.BaseRepoID)
}
if pr.HeadRepo == nil && pr.HeadRepoID > 0 {
repoIDs.Add(pr.HeadRepoID)
}
}
return repoIDs.Values()
}
// Load issues.
issueIDs := prs.GetIssueIDs()
issues := make([]*Issue, 0, len(issueIDs))
func (prs PullRequestList) LoadRepositories(ctx context.Context) error {
repoIDs := prs.getRepositoryIDs()
reposMap := make(map[int64]*repo_model.Repository, len(repoIDs))
if err := db.GetEngine(ctx).
Where("id > 0").
In("id", issueIDs).
Find(&issues); err != nil {
return fmt.Errorf("find issues: %w", err)
}
set := make(map[int64]*Issue)
for i := range issues {
set[issues[i].ID] = issues[i]
In("id", repoIDs).
Find(&reposMap); err != nil {
return fmt.Errorf("find repos: %w", err)
}
for _, pr := range prs {
pr.Issue = set[pr.IssueID]
/*
Old code:
pr.Issue.PullRequest = pr // panic here means issueIDs and prs are not in sync
It's worth a panic because it's almost impossible for this to happen under normal use.
But in integration testing, an asynchronous task could read a database that has been reset.
So returning an error makes more sense and lets the caller choose to ignore it.
*/
if pr.Issue == nil {
return fmt.Errorf("issues and prs may be not in sync: cannot find issue %v for pr %v: %w", pr.IssueID, pr.ID, util.ErrNotExist)
if pr.BaseRepo == nil {
pr.BaseRepo = reposMap[pr.BaseRepoID]
}
if pr.HeadRepo == nil {
pr.HeadRepo = reposMap[pr.HeadRepoID]
pr.isHeadRepoLoaded = true
}
pr.Issue.PullRequest = pr
}
return nil
}
func (prs PullRequestList) LoadAttributes(ctx context.Context) error {
if _, err := prs.LoadIssues(ctx); err != nil {
return err
}
return nil
}
func (prs PullRequestList) LoadIssues(ctx context.Context) (IssueList, error) {
if len(prs) == 0 {
return nil, nil
}
// Load issues.
issueIDs := prs.GetIssueIDs()
issues := make(map[int64]*Issue, len(issueIDs))
if err := db.GetEngine(ctx).
In("id", issueIDs).
Find(&issues); err != nil {
return nil, fmt.Errorf("find issues: %w", err)
}
issueList := make(IssueList, 0, len(prs))
for _, pr := range prs {
if pr.Issue == nil {
pr.Issue = issues[pr.IssueID]
/*
Old code:
pr.Issue.PullRequest = pr // panic here means issueIDs and prs are not in sync
It's worth a panic because it's almost impossible for this to happen under normal use.
But in integration testing, an asynchronous task could read a database that has been reset.
So returning an error makes more sense and lets the caller choose to ignore it.
*/
if pr.Issue == nil {
return nil, fmt.Errorf("issues and prs may not be in sync: cannot find issue %v for pr %v: %w", pr.IssueID, pr.ID, util.ErrNotExist)
}
}
pr.Issue.PullRequest = pr
if pr.Issue.Repo == nil {
pr.Issue.Repo = pr.BaseRepo
}
issueList = append(issueList, pr.Issue)
}
return issueList, nil
}
// GetIssueIDs returns all issue ids
func (prs PullRequestList) GetIssueIDs() []int64 {
issueIDs := make([]int64, 0, len(prs))
for i := range prs {
issueIDs = append(issueIDs, prs[i].IssueID)
}
return issueIDs
return container.FilterSlice(prs, func(pr *PullRequest) (int64, bool) {
if pr.Issue == nil {
return pr.IssueID, pr.IssueID > 0
}
return 0, false
})
}
// HasMergedPullRequestInRepo returns whether the user(poster) has merged pull-request in the repo

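LoadRepositories and LoadIssues now batch the lookups for a whole page of pull requests, and GetIssueIDs only reports IDs whose issues are still unloaded. A minimal sketch of a caller preparing a listing (hypothetical name, same package assumed):

func preparePullListSketch(ctx context.Context, prs PullRequestList) (IssueList, error) {
	if err := prs.LoadRepositories(ctx); err != nil { // one query for all base and head repos
		return nil, err
	}
	// LoadIssues also wires pr.Issue.PullRequest and falls back to pr.BaseRepo for pr.Issue.Repo
	return prs.LoadIssues(ctx)
}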
View File

@ -155,14 +155,14 @@ func (r *Review) LoadCodeComments(ctx context.Context) (err error) {
if r.CodeComments != nil {
return err
}
if err = r.loadIssue(ctx); err != nil {
if err = r.LoadIssue(ctx); err != nil {
return err
}
r.CodeComments, err = fetchCodeCommentsByReview(ctx, r.Issue, nil, r, false)
return err
}
func (r *Review) loadIssue(ctx context.Context) (err error) {
func (r *Review) LoadIssue(ctx context.Context) (err error) {
if r.Issue != nil {
return err
}
@ -199,7 +199,7 @@ func (r *Review) LoadReviewerTeam(ctx context.Context) (err error) {
// LoadAttributes loads all attributes except CodeComments
func (r *Review) LoadAttributes(ctx context.Context) (err error) {
if err = r.loadIssue(ctx); err != nil {
if err = r.LoadIssue(ctx); err != nil {
return err
}
if err = r.LoadCodeComments(ctx); err != nil {

View File

@ -21,6 +21,7 @@ import (
"code.gitea.io/gitea/models/migrations/v1_20"
"code.gitea.io/gitea/models/migrations/v1_21"
"code.gitea.io/gitea/models/migrations/v1_22"
"code.gitea.io/gitea/models/migrations/v1_23"
"code.gitea.io/gitea/models/migrations/v1_6"
"code.gitea.io/gitea/models/migrations/v1_7"
"code.gitea.io/gitea/models/migrations/v1_8"
@ -587,6 +588,9 @@ var migrations = []Migration{
NewMigration("Drop wrongly created table o_auth2_application", v1_22.DropWronglyCreatedTable),
// Gitea 1.22.0-rc1 ends at 299
// v299 -> v300
NewMigration("Add content version to issue and comment table", v1_23.AddContentVersionToIssueAndComment),
}
// GetCurrentDBVersion returns the current db version

View File

@ -92,7 +92,7 @@ func addObjectFormatNameToRepository(x *xorm.Engine) error {
// Here to catch weird edge-cases where column constraints above are
// not applied by the DB backend
_, err := x.Exec("UPDATE repository set object_format_name = 'sha1' WHERE object_format_name = '' or object_format_name IS NULL")
_, err := x.Exec("UPDATE `repository` set `object_format_name` = 'sha1' WHERE `object_format_name` = '' or `object_format_name` IS NULL")
return err
}

View File

@ -15,7 +15,7 @@ import (
func Test_CheckProjectColumnsConsistency(t *testing.T) {
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(project.Project), new(project.Board))
x, deferable := base.PrepareTestEnv(t, 0, new(project.Project), new(project.Column))
defer deferable()
if x == nil || t.Failed() {
return
@ -23,22 +23,22 @@ func Test_CheckProjectColumnsConsistency(t *testing.T) {
assert.NoError(t, CheckProjectColumnsConsistency(x))
// check if default board was added
var defaultBoard project.Board
has, err := x.Where("project_id=? AND `default` = ?", 1, true).Get(&defaultBoard)
// check if default column was added
var defaultColumn project.Column
has, err := x.Where("project_id=? AND `default` = ?", 1, true).Get(&defaultColumn)
assert.NoError(t, err)
assert.True(t, has)
assert.Equal(t, int64(1), defaultBoard.ProjectID)
assert.True(t, defaultBoard.Default)
assert.Equal(t, int64(1), defaultColumn.ProjectID)
assert.True(t, defaultColumn.Default)
// check if multiple defaults, previous were removed and last will be kept
expectDefaultBoard, err := project.GetBoard(db.DefaultContext, 2)
expectDefaultColumn, err := project.GetColumn(db.DefaultContext, 2)
assert.NoError(t, err)
assert.Equal(t, int64(2), expectDefaultBoard.ProjectID)
assert.False(t, expectDefaultBoard.Default)
assert.Equal(t, int64(2), expectDefaultColumn.ProjectID)
assert.False(t, expectDefaultColumn.Default)
expectNonDefaultBoard, err := project.GetBoard(db.DefaultContext, 3)
expectNonDefaultColumn, err := project.GetColumn(db.DefaultContext, 3)
assert.NoError(t, err)
assert.Equal(t, int64(2), expectNonDefaultBoard.ProjectID)
assert.True(t, expectNonDefaultBoard.Default)
assert.Equal(t, int64(2), expectNonDefaultColumn.ProjectID)
assert.True(t, expectNonDefaultColumn.Default)
}

View File

@ -0,0 +1,18 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package v1_23 //nolint
import "xorm.io/xorm"
func AddContentVersionToIssueAndComment(x *xorm.Engine) error {
type Issue struct {
ContentVersion int `xorm:"NOT NULL DEFAULT 0"`
}
type Comment struct {
ContentVersion int `xorm:"NOT NULL DEFAULT 0"`
}
return x.Sync(new(Comment), new(Issue))
}

View File

@ -81,7 +81,7 @@ func TestUserListIsPublicMember(t *testing.T) {
{3, map[int64]bool{2: true, 4: false, 28: true}},
{6, map[int64]bool{5: true, 28: true}},
{7, map[int64]bool{5: false}},
{25, map[int64]bool{24: true}},
{25, map[int64]bool{12: true, 24: true}},
{22, map[int64]bool{}},
}
for _, v := range tt {
@ -108,8 +108,8 @@ func TestUserListIsUserOrgOwner(t *testing.T) {
{3, map[int64]bool{2: true, 4: false, 28: false}},
{6, map[int64]bool{5: true, 28: false}},
{7, map[int64]bool{5: true}},
{25, map[int64]bool{24: false}}, // ErrTeamNotExist
{22, map[int64]bool{}}, // No member
{25, map[int64]bool{12: true, 24: false}}, // ErrTeamNotExist
{22, map[int64]bool{}}, // No member
}
for _, v := range tt {
t.Run(fmt.Sprintf("IsUserOrgOwnerOfOrgId%d", v.orgid), func(t *testing.T) {

View File

@ -1,389 +0,0 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package project
import (
"context"
"errors"
"fmt"
"regexp"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"xorm.io/builder"
)
type (
// BoardType is used to represent a project board type
BoardType uint8
// CardType is used to represent a project board card type
CardType uint8
// BoardList is a list of all project boards in a repository
BoardList []*Board
)
const (
// BoardTypeNone is a project board type that has no predefined columns
BoardTypeNone BoardType = iota
// BoardTypeBasicKanban is a project board type that has basic predefined columns
BoardTypeBasicKanban
// BoardTypeBugTriage is a project board type that has predefined columns suited to hunting down bugs
BoardTypeBugTriage
)
const (
// CardTypeTextOnly is a project board card type that is text only
CardTypeTextOnly CardType = iota
// CardTypeImagesAndText is a project board card type that has images and text
CardTypeImagesAndText
)
// BoardColorPattern is a regexp which can validate BoardColor
var BoardColorPattern = regexp.MustCompile("^#[0-9a-fA-F]{6}$")
// Board is used to represent boards on a project
type Board struct {
ID int64 `xorm:"pk autoincr"`
Title string
Default bool `xorm:"NOT NULL DEFAULT false"` // issues not assigned to a specific board will be assigned to this board
Sorting int8 `xorm:"NOT NULL DEFAULT 0"`
Color string `xorm:"VARCHAR(7)"`
ProjectID int64 `xorm:"INDEX NOT NULL"`
CreatorID int64 `xorm:"NOT NULL"`
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}
// TableName returns the real table name
func (Board) TableName() string {
return "project_board"
}
// NumIssues returns the number of issues assigned to the board
func (b *Board) NumIssues(ctx context.Context) int {
c, err := db.GetEngine(ctx).Table("project_issue").
Where("project_id=?", b.ProjectID).
And("project_board_id=?", b.ID).
GroupBy("issue_id").
Cols("issue_id").
Count()
if err != nil {
return 0
}
return int(c)
}
func (b *Board) GetIssues(ctx context.Context) ([]*ProjectIssue, error) {
issues := make([]*ProjectIssue, 0, 5)
if err := db.GetEngine(ctx).Where("project_id=?", b.ProjectID).
And("project_board_id=?", b.ID).
OrderBy("sorting, id").
Find(&issues); err != nil {
return nil, err
}
return issues, nil
}
func init() {
db.RegisterModel(new(Board))
}
// IsBoardTypeValid checks if the project board type is valid
func IsBoardTypeValid(p BoardType) bool {
switch p {
case BoardTypeNone, BoardTypeBasicKanban, BoardTypeBugTriage:
return true
default:
return false
}
}
// IsCardTypeValid checks if the project board card type is valid
func IsCardTypeValid(p CardType) bool {
switch p {
case CardTypeTextOnly, CardTypeImagesAndText:
return true
default:
return false
}
}
func createBoardsForProjectsType(ctx context.Context, project *Project) error {
var items []string
switch project.BoardType {
case BoardTypeBugTriage:
items = setting.Project.ProjectBoardBugTriageType
case BoardTypeBasicKanban:
items = setting.Project.ProjectBoardBasicKanbanType
case BoardTypeNone:
fallthrough
default:
return nil
}
board := Board{
CreatedUnix: timeutil.TimeStampNow(),
CreatorID: project.CreatorID,
Title: "Backlog",
ProjectID: project.ID,
Default: true,
}
if err := db.Insert(ctx, board); err != nil {
return err
}
if len(items) == 0 {
return nil
}
boards := make([]Board, 0, len(items))
for _, v := range items {
boards = append(boards, Board{
CreatedUnix: timeutil.TimeStampNow(),
CreatorID: project.CreatorID,
Title: v,
ProjectID: project.ID,
})
}
return db.Insert(ctx, boards)
}
// maxProjectColumns is the maximum number of columns allowed in a project; it should not be bigger than 127
// because sorting is an int8 in the database
const maxProjectColumns = 20
// NewBoard adds a new project board to a given project
func NewBoard(ctx context.Context, board *Board) error {
if len(board.Color) != 0 && !BoardColorPattern.MatchString(board.Color) {
return fmt.Errorf("bad color code: %s", board.Color)
}
res := struct {
MaxSorting int64
ColumnCount int64
}{}
if _, err := db.GetEngine(ctx).Select("max(sorting) as max_sorting, count(*) as column_count").Table("project_board").
Where("project_id=?", board.ProjectID).Get(&res); err != nil {
return err
}
if res.ColumnCount >= maxProjectColumns {
return fmt.Errorf("NewBoard: maximum number of columns reached")
}
board.Sorting = int8(util.Iif(res.ColumnCount > 0, res.MaxSorting+1, 0))
_, err := db.GetEngine(ctx).Insert(board)
return err
}
// DeleteBoardByID removes all issue references to the project board.
func DeleteBoardByID(ctx context.Context, boardID int64) error {
ctx, committer, err := db.TxContext(ctx)
if err != nil {
return err
}
defer committer.Close()
if err := deleteBoardByID(ctx, boardID); err != nil {
return err
}
return committer.Commit()
}
func deleteBoardByID(ctx context.Context, boardID int64) error {
board, err := GetBoard(ctx, boardID)
if err != nil {
if IsErrProjectBoardNotExist(err) {
return nil
}
return err
}
if board.Default {
return fmt.Errorf("deleteBoardByID: cannot delete default board")
}
// move all issues to the default column
project, err := GetProjectByID(ctx, board.ProjectID)
if err != nil {
return err
}
defaultColumn, err := project.GetDefaultBoard(ctx)
if err != nil {
return err
}
if err = board.moveIssuesToAnotherColumn(ctx, defaultColumn); err != nil {
return err
}
if _, err := db.GetEngine(ctx).ID(board.ID).NoAutoCondition().Delete(board); err != nil {
return err
}
return nil
}
func deleteBoardByProjectID(ctx context.Context, projectID int64) error {
_, err := db.GetEngine(ctx).Where("project_id=?", projectID).Delete(&Board{})
return err
}
// GetBoard fetches the board of a project by its ID
func GetBoard(ctx context.Context, boardID int64) (*Board, error) {
board := new(Board)
has, err := db.GetEngine(ctx).ID(boardID).Get(board)
if err != nil {
return nil, err
} else if !has {
return nil, ErrProjectBoardNotExist{BoardID: boardID}
}
return board, nil
}
// UpdateBoard updates a project board
func UpdateBoard(ctx context.Context, board *Board) error {
var fieldToUpdate []string
if board.Sorting != 0 {
fieldToUpdate = append(fieldToUpdate, "sorting")
}
if board.Title != "" {
fieldToUpdate = append(fieldToUpdate, "title")
}
if len(board.Color) != 0 && !BoardColorPattern.MatchString(board.Color) {
return fmt.Errorf("bad color code: %s", board.Color)
}
fieldToUpdate = append(fieldToUpdate, "color")
_, err := db.GetEngine(ctx).ID(board.ID).Cols(fieldToUpdate...).Update(board)
return err
}
// GetBoards fetches all boards related to a project
func (p *Project) GetBoards(ctx context.Context) (BoardList, error) {
boards := make([]*Board, 0, 5)
if err := db.GetEngine(ctx).Where("project_id=?", p.ID).OrderBy("sorting, id").Find(&boards); err != nil {
return nil, err
}
return boards, nil
}
// GetDefaultBoard returns the default board and ensures only one exists
func (p *Project) GetDefaultBoard(ctx context.Context) (*Board, error) {
var board Board
has, err := db.GetEngine(ctx).
Where("project_id=? AND `default` = ?", p.ID, true).
Desc("id").Get(&board)
if err != nil {
return nil, err
}
if has {
return &board, nil
}
// create a default board if none is found
board = Board{
ProjectID: p.ID,
Default: true,
Title: "Uncategorized",
CreatorID: p.CreatorID,
}
if _, err := db.GetEngine(ctx).Insert(&board); err != nil {
return nil, err
}
return &board, nil
}
// SetDefaultBoard marks a board as the default for issues not assigned to one
func SetDefaultBoard(ctx context.Context, projectID, boardID int64) error {
return db.WithTx(ctx, func(ctx context.Context) error {
if _, err := GetBoard(ctx, boardID); err != nil {
return err
}
if _, err := db.GetEngine(ctx).Where(builder.Eq{
"project_id": projectID,
"`default`": true,
}).Cols("`default`").Update(&Board{Default: false}); err != nil {
return err
}
_, err := db.GetEngine(ctx).ID(boardID).
Where(builder.Eq{"project_id": projectID}).
Cols("`default`").Update(&Board{Default: true})
return err
})
}
// UpdateBoardSorting updates project board sorting
func UpdateBoardSorting(ctx context.Context, bs BoardList) error {
return db.WithTx(ctx, func(ctx context.Context) error {
for i := range bs {
if _, err := db.GetEngine(ctx).ID(bs[i].ID).Cols(
"sorting",
).Update(bs[i]); err != nil {
return err
}
}
return nil
})
}
func GetColumnsByIDs(ctx context.Context, projectID int64, columnsIDs []int64) (BoardList, error) {
columns := make([]*Board, 0, 5)
if err := db.GetEngine(ctx).
Where("project_id =?", projectID).
In("id", columnsIDs).
OrderBy("sorting").Find(&columns); err != nil {
return nil, err
}
return columns, nil
}
// MoveColumnsOnProject sorts columns in a project
func MoveColumnsOnProject(ctx context.Context, project *Project, sortedColumnIDs map[int64]int64) error {
return db.WithTx(ctx, func(ctx context.Context) error {
sess := db.GetEngine(ctx)
columnIDs := util.ValuesOfMap(sortedColumnIDs)
movedColumns, err := GetColumnsByIDs(ctx, project.ID, columnIDs)
if err != nil {
return err
}
if len(movedColumns) != len(sortedColumnIDs) {
return errors.New("some columns do not exist")
}
for _, column := range movedColumns {
if column.ProjectID != project.ID {
return fmt.Errorf("column[%d]'s projectID is not equal to project's ID [%d]", column.ProjectID, project.ID)
}
}
for sorting, columnID := range sortedColumnIDs {
if _, err := sess.Exec("UPDATE `project_board` SET sorting=? WHERE id=?", sorting, columnID); err != nil {
return err
}
}
return nil
})
}

models/project/column.go (new file, 359 lines)
View File

@ -0,0 +1,359 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package project
import (
"context"
"errors"
"fmt"
"regexp"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"xorm.io/builder"
)
type (
// CardType is used to represent a project column card type
CardType uint8
// ColumnList is a list of all project columns in a repository
ColumnList []*Column
)
const (
// CardTypeTextOnly is a project column card type that is text only
CardTypeTextOnly CardType = iota
// CardTypeImagesAndText is a project column card type that has images and text
CardTypeImagesAndText
)
// ColumnColorPattern is a regexp witch can validate ColumnColor
var ColumnColorPattern = regexp.MustCompile("^#[0-9a-fA-F]{6}$")
// Column is used to represent column on a project
type Column struct {
ID int64 `xorm:"pk autoincr"`
Title string
Default bool `xorm:"NOT NULL DEFAULT false"` // issues not assigned to a specific column will be assigned to this column
Sorting int8 `xorm:"NOT NULL DEFAULT 0"`
Color string `xorm:"VARCHAR(7)"`
ProjectID int64 `xorm:"INDEX NOT NULL"`
CreatorID int64 `xorm:"NOT NULL"`
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}
// TableName returns the real table name
func (Column) TableName() string {
return "project_board" // TODO: the legacy table name should be project_column
}
// NumIssues returns the number of issues assigned to the column
func (c *Column) NumIssues(ctx context.Context) int {
total, err := db.GetEngine(ctx).Table("project_issue").
Where("project_id=?", c.ProjectID).
And("project_board_id=?", c.ID).
GroupBy("issue_id").
Cols("issue_id").
Count()
if err != nil {
return 0
}
return int(total)
}
func (c *Column) GetIssues(ctx context.Context) ([]*ProjectIssue, error) {
issues := make([]*ProjectIssue, 0, 5)
if err := db.GetEngine(ctx).Where("project_id=?", c.ProjectID).
And("project_board_id=?", c.ID).
OrderBy("sorting, id").
Find(&issues); err != nil {
return nil, err
}
return issues, nil
}
func init() {
db.RegisterModel(new(Column))
}
// IsCardTypeValid checks if the project column card type is valid
func IsCardTypeValid(p CardType) bool {
switch p {
case CardTypeTextOnly, CardTypeImagesAndText:
return true
default:
return false
}
}
func createDefaultColumnsForProject(ctx context.Context, project *Project) error {
var items []string
switch project.TemplateType {
case TemplateTypeBugTriage:
items = setting.Project.ProjectBoardBugTriageType
case TemplateTypeBasicKanban:
items = setting.Project.ProjectBoardBasicKanbanType
case TemplateTypeNone:
fallthrough
default:
return nil
}
return db.WithTx(ctx, func(ctx context.Context) error {
column := Column{
CreatedUnix: timeutil.TimeStampNow(),
CreatorID: project.CreatorID,
Title: "Backlog",
ProjectID: project.ID,
Default: true,
}
if err := db.Insert(ctx, column); err != nil {
return err
}
if len(items) == 0 {
return nil
}
columns := make([]Column, 0, len(items))
for _, v := range items {
columns = append(columns, Column{
CreatedUnix: timeutil.TimeStampNow(),
CreatorID: project.CreatorID,
Title: v,
ProjectID: project.ID,
})
}
return db.Insert(ctx, columns)
})
}
// maxProjectColumns is the maximum number of columns allowed in a project; it should not be bigger than 127
// because sorting is an int8 in the database
const maxProjectColumns = 20
// NewColumn adds a new project column to a given project
func NewColumn(ctx context.Context, column *Column) error {
if len(column.Color) != 0 && !ColumnColorPattern.MatchString(column.Color) {
return fmt.Errorf("bad color code: %s", column.Color)
}
res := struct {
MaxSorting int64
ColumnCount int64
}{}
if _, err := db.GetEngine(ctx).Select("max(sorting) as max_sorting, count(*) as column_count").Table("project_board").
Where("project_id=?", column.ProjectID).Get(&res); err != nil {
return err
}
if res.ColumnCount >= maxProjectColumns {
return fmt.Errorf("NewBoard: maximum number of columns reached")
}
column.Sorting = int8(util.Iif(res.ColumnCount > 0, res.MaxSorting+1, 0))
_, err := db.GetEngine(ctx).Insert(column)
return err
}
// DeleteColumnByID removes all issue references to the project column.
func DeleteColumnByID(ctx context.Context, columnID int64) error {
return db.WithTx(ctx, func(ctx context.Context) error {
return deleteColumnByID(ctx, columnID)
})
}
func deleteColumnByID(ctx context.Context, columnID int64) error {
column, err := GetColumn(ctx, columnID)
if err != nil {
if IsErrProjectColumnNotExist(err) {
return nil
}
return err
}
if column.Default {
return fmt.Errorf("deleteColumnByID: cannot delete default column")
}
// move all issues to the default column
project, err := GetProjectByID(ctx, column.ProjectID)
if err != nil {
return err
}
defaultColumn, err := project.GetDefaultColumn(ctx)
if err != nil {
return err
}
if err = column.moveIssuesToAnotherColumn(ctx, defaultColumn); err != nil {
return err
}
if _, err := db.GetEngine(ctx).ID(column.ID).NoAutoCondition().Delete(column); err != nil {
return err
}
return nil
}
func deleteColumnByProjectID(ctx context.Context, projectID int64) error {
_, err := db.GetEngine(ctx).Where("project_id=?", projectID).Delete(&Column{})
return err
}
// GetColumn fetches the column of a project by its ID
func GetColumn(ctx context.Context, columnID int64) (*Column, error) {
column := new(Column)
has, err := db.GetEngine(ctx).ID(columnID).Get(column)
if err != nil {
return nil, err
} else if !has {
return nil, ErrProjectColumnNotExist{ColumnID: columnID}
}
return column, nil
}
// UpdateColumn updates a project column
func UpdateColumn(ctx context.Context, column *Column) error {
var fieldToUpdate []string
if column.Sorting != 0 {
fieldToUpdate = append(fieldToUpdate, "sorting")
}
if column.Title != "" {
fieldToUpdate = append(fieldToUpdate, "title")
}
if len(column.Color) != 0 && !ColumnColorPattern.MatchString(column.Color) {
return fmt.Errorf("bad color code: %s", column.Color)
}
fieldToUpdate = append(fieldToUpdate, "color")
_, err := db.GetEngine(ctx).ID(column.ID).Cols(fieldToUpdate...).Update(column)
return err
}
// GetColumns fetches all columns related to a project
func (p *Project) GetColumns(ctx context.Context) (ColumnList, error) {
columns := make([]*Column, 0, 5)
if err := db.GetEngine(ctx).Where("project_id=?", p.ID).OrderBy("sorting, id").Find(&columns); err != nil {
return nil, err
}
return columns, nil
}
// GetDefaultColumn returns the default column and ensures only one exists
func (p *Project) GetDefaultColumn(ctx context.Context) (*Column, error) {
var column Column
has, err := db.GetEngine(ctx).
Where("project_id=? AND `default` = ?", p.ID, true).
Desc("id").Get(&column)
if err != nil {
return nil, err
}
if has {
return &column, nil
}
// create a default column if none is found
column = Column{
ProjectID: p.ID,
Default: true,
Title: "Uncategorized",
CreatorID: p.CreatorID,
}
if _, err := db.GetEngine(ctx).Insert(&column); err != nil {
return nil, err
}
return &column, nil
}
// SetDefaultColumn marks a column as the default for issues not assigned to one
func SetDefaultColumn(ctx context.Context, projectID, columnID int64) error {
return db.WithTx(ctx, func(ctx context.Context) error {
if _, err := GetColumn(ctx, columnID); err != nil {
return err
}
if _, err := db.GetEngine(ctx).Where(builder.Eq{
"project_id": projectID,
"`default`": true,
}).Cols("`default`").Update(&Column{Default: false}); err != nil {
return err
}
_, err := db.GetEngine(ctx).ID(columnID).
Where(builder.Eq{"project_id": projectID}).
Cols("`default`").Update(&Column{Default: true})
return err
})
}
// UpdateColumnSorting updates project column sorting
func UpdateColumnSorting(ctx context.Context, cl ColumnList) error {
return db.WithTx(ctx, func(ctx context.Context) error {
for i := range cl {
if _, err := db.GetEngine(ctx).ID(cl[i].ID).Cols(
"sorting",
).Update(cl[i]); err != nil {
return err
}
}
return nil
})
}
func GetColumnsByIDs(ctx context.Context, projectID int64, columnsIDs []int64) (ColumnList, error) {
columns := make([]*Column, 0, 5)
if err := db.GetEngine(ctx).
Where("project_id =?", projectID).
In("id", columnsIDs).
OrderBy("sorting").Find(&columns); err != nil {
return nil, err
}
return columns, nil
}
// MoveColumnsOnProject sorts columns in a project
func MoveColumnsOnProject(ctx context.Context, project *Project, sortedColumnIDs map[int64]int64) error {
return db.WithTx(ctx, func(ctx context.Context) error {
sess := db.GetEngine(ctx)
columnIDs := util.ValuesOfMap(sortedColumnIDs)
movedColumns, err := GetColumnsByIDs(ctx, project.ID, columnIDs)
if err != nil {
return err
}
if len(movedColumns) != len(sortedColumnIDs) {
return errors.New("some columns do not exist")
}
for _, column := range movedColumns {
if column.ProjectID != project.ID {
return fmt.Errorf("column[%d]'s projectID is not equal to project's ID [%d]", column.ProjectID, project.ID)
}
}
for sorting, columnID := range sortedColumnIDs {
if _, err := sess.Exec("UPDATE `project_board` SET sorting=? WHERE id=?", sorting, columnID); err != nil {
return err
}
}
return nil
})
}
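The file above is the renamed column API. A short hedged sketch of creating a column and promoting it to the default, written as a hypothetical in-package caller:

func addDoneColumnSketch(ctx context.Context, p *Project) error {
	col := &Column{Title: "Done", ProjectID: p.ID, CreatorID: p.CreatorID}
	if err := NewColumn(ctx, col); err != nil { // fails once maxProjectColumns (20) is reached
		return err
	}
	// make it the default target for issues that are not assigned to any column
	return SetDefaultColumn(ctx, p.ID, col.ID)
}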

View File

@ -14,48 +14,48 @@ import (
"github.com/stretchr/testify/assert"
)
func TestGetDefaultBoard(t *testing.T) {
func TestGetDefaultColumn(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
projectWithoutDefault, err := GetProjectByID(db.DefaultContext, 5)
assert.NoError(t, err)
// check if default board was added
board, err := projectWithoutDefault.GetDefaultBoard(db.DefaultContext)
// check if default column was added
column, err := projectWithoutDefault.GetDefaultColumn(db.DefaultContext)
assert.NoError(t, err)
assert.Equal(t, int64(5), board.ProjectID)
assert.Equal(t, "Uncategorized", board.Title)
assert.Equal(t, int64(5), column.ProjectID)
assert.Equal(t, "Uncategorized", column.Title)
projectWithMultipleDefaults, err := GetProjectByID(db.DefaultContext, 6)
assert.NoError(t, err)
// check if multiple defaults were removed
board, err = projectWithMultipleDefaults.GetDefaultBoard(db.DefaultContext)
column, err = projectWithMultipleDefaults.GetDefaultColumn(db.DefaultContext)
assert.NoError(t, err)
assert.Equal(t, int64(6), board.ProjectID)
assert.Equal(t, int64(9), board.ID)
assert.Equal(t, int64(6), column.ProjectID)
assert.Equal(t, int64(9), column.ID)
// set 8 as default board
assert.NoError(t, SetDefaultBoard(db.DefaultContext, board.ProjectID, 8))
// set 8 as default column
assert.NoError(t, SetDefaultColumn(db.DefaultContext, column.ProjectID, 8))
// then 9 will become a non-default board
board, err = GetBoard(db.DefaultContext, 9)
// then 9 will become a non-default column
column, err = GetColumn(db.DefaultContext, 9)
assert.NoError(t, err)
assert.Equal(t, int64(6), board.ProjectID)
assert.False(t, board.Default)
assert.Equal(t, int64(6), column.ProjectID)
assert.False(t, column.Default)
}
func Test_moveIssuesToAnotherColumn(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
column1 := unittest.AssertExistsAndLoadBean(t, &Board{ID: 1, ProjectID: 1})
column1 := unittest.AssertExistsAndLoadBean(t, &Column{ID: 1, ProjectID: 1})
issues, err := column1.GetIssues(db.DefaultContext)
assert.NoError(t, err)
assert.Len(t, issues, 1)
assert.EqualValues(t, 1, issues[0].ID)
column2 := unittest.AssertExistsAndLoadBean(t, &Board{ID: 2, ProjectID: 1})
column2 := unittest.AssertExistsAndLoadBean(t, &Column{ID: 2, ProjectID: 1})
issues, err = column2.GetIssues(db.DefaultContext)
assert.NoError(t, err)
assert.Len(t, issues, 1)
@ -81,7 +81,7 @@ func Test_MoveColumnsOnProject(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
project1 := unittest.AssertExistsAndLoadBean(t, &Project{ID: 1})
columns, err := project1.GetBoards(db.DefaultContext)
columns, err := project1.GetColumns(db.DefaultContext)
assert.NoError(t, err)
assert.Len(t, columns, 3)
assert.EqualValues(t, 0, columns[0].Sorting) // even if there is no default sorting, the code should also work
@ -95,7 +95,7 @@ func Test_MoveColumnsOnProject(t *testing.T) {
})
assert.NoError(t, err)
columnsAfter, err := project1.GetBoards(db.DefaultContext)
columnsAfter, err := project1.GetColumns(db.DefaultContext)
assert.NoError(t, err)
assert.Len(t, columnsAfter, 3)
assert.EqualValues(t, columns[1].ID, columnsAfter[0].ID)
@ -103,23 +103,23 @@ func Test_MoveColumnsOnProject(t *testing.T) {
assert.EqualValues(t, columns[0].ID, columnsAfter[2].ID)
}
func Test_NewBoard(t *testing.T) {
func Test_NewColumn(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
project1 := unittest.AssertExistsAndLoadBean(t, &Project{ID: 1})
columns, err := project1.GetBoards(db.DefaultContext)
columns, err := project1.GetColumns(db.DefaultContext)
assert.NoError(t, err)
assert.Len(t, columns, 3)
for i := 0; i < maxProjectColumns-3; i++ {
err := NewBoard(db.DefaultContext, &Board{
Title: fmt.Sprintf("board-%d", i+4),
err := NewColumn(db.DefaultContext, &Column{
Title: fmt.Sprintf("column-%d", i+4),
ProjectID: project1.ID,
})
assert.NoError(t, err)
}
err = NewBoard(db.DefaultContext, &Board{
Title: "board-21",
err = NewColumn(db.DefaultContext, &Column{
Title: "column-21",
ProjectID: project1.ID,
})
assert.Error(t, err)

View File

@ -18,10 +18,10 @@ type ProjectIssue struct { //revive:disable-line:exported
IssueID int64 `xorm:"INDEX"`
ProjectID int64 `xorm:"INDEX"`
// ProjectBoardID should not be zero since 1.22. If it's zero, the issue will not be displayed on UI and it might result in errors.
ProjectBoardID int64 `xorm:"INDEX"`
// ProjectColumnID should not be zero since 1.22. If it's zero, the issue will not be displayed on UI and it might result in errors.
ProjectColumnID int64 `xorm:"'project_board_id' INDEX"`
// the sorting order on the board
// the sorting order on the column
Sorting int64 `xorm:"NOT NULL DEFAULT 0"`
}
@ -76,13 +76,13 @@ func (p *Project) NumOpenIssues(ctx context.Context) int {
return int(c)
}
// MoveIssuesOnProjectBoard moves or keeps issues in a column and sorts them inside that column
func MoveIssuesOnProjectBoard(ctx context.Context, board *Board, sortedIssueIDs map[int64]int64) error {
// MoveIssuesOnProjectColumn moves or keeps issues in a column and sorts them inside that column
func MoveIssuesOnProjectColumn(ctx context.Context, column *Column, sortedIssueIDs map[int64]int64) error {
return db.WithTx(ctx, func(ctx context.Context) error {
sess := db.GetEngine(ctx)
issueIDs := util.ValuesOfMap(sortedIssueIDs)
count, err := sess.Table(new(ProjectIssue)).Where("project_id=?", board.ProjectID).In("issue_id", issueIDs).Count()
count, err := sess.Table(new(ProjectIssue)).Where("project_id=?", column.ProjectID).In("issue_id", issueIDs).Count()
if err != nil {
return err
}
@ -91,7 +91,7 @@ func MoveIssuesOnProjectBoard(ctx context.Context, board *Board, sortedIssueIDs
}
for sorting, issueID := range sortedIssueIDs {
_, err = sess.Exec("UPDATE `project_issue` SET project_board_id=?, sorting=? WHERE issue_id=?", board.ID, sorting, issueID)
_, err = sess.Exec("UPDATE `project_issue` SET project_board_id=?, sorting=? WHERE issue_id=?", column.ID, sorting, issueID)
if err != nil {
return err
}
@ -100,12 +100,12 @@ func MoveIssuesOnProjectBoard(ctx context.Context, board *Board, sortedIssueIDs
})
}
func (b *Board) moveIssuesToAnotherColumn(ctx context.Context, newColumn *Board) error {
if b.ProjectID != newColumn.ProjectID {
func (c *Column) moveIssuesToAnotherColumn(ctx context.Context, newColumn *Column) error {
if c.ProjectID != newColumn.ProjectID {
return fmt.Errorf("columns have to be in the same project")
}
if b.ID == newColumn.ID {
if c.ID == newColumn.ID {
return nil
}
@ -121,7 +121,7 @@ func (b *Board) moveIssuesToAnotherColumn(ctx context.Context, newColumn *Board)
return err
}
issues, err := b.GetIssues(ctx)
issues, err := c.GetIssues(ctx)
if err != nil {
return err
}
@ -132,7 +132,7 @@ func (b *Board) moveIssuesToAnotherColumn(ctx context.Context, newColumn *Board)
nextSorting := util.Iif(res.IssueCount > 0, res.MaxSorting+1, 0)
return db.WithTx(ctx, func(ctx context.Context) error {
for i, issue := range issues {
issue.ProjectBoardID = newColumn.ID
issue.ProjectColumnID = newColumn.ID
issue.Sorting = nextSorting + int64(i)
if _, err := db.GetEngine(ctx).ID(issue.ID).Cols("project_board_id", "sorting").Update(issue); err != nil {
return err

View File

@ -21,13 +21,7 @@ import (
)
type (
// BoardConfig is used to identify the type of board that is being created
BoardConfig struct {
BoardType BoardType
Translation string
}
// CardConfig is used to identify the type of board card that is being used
// CardConfig is used to identify the type of column card that is being used
CardConfig struct {
CardType CardType
Translation string
@ -38,7 +32,7 @@ type (
)
const (
// TypeIndividual is a type of project board that is owned by an individual
// TypeIndividual is a type of project column that is owned by an individual
TypeIndividual Type = iota + 1
// TypeRepository is a project that is tied to a repository
@ -68,39 +62,39 @@ func (err ErrProjectNotExist) Unwrap() error {
return util.ErrNotExist
}
// ErrProjectBoardNotExist represents a "ProjectBoardNotExist" kind of error.
type ErrProjectBoardNotExist struct {
BoardID int64
// ErrProjectColumnNotExist represents a "ErrProjectColumnNotExist" kind of error.
type ErrProjectColumnNotExist struct {
ColumnID int64
}
// IsErrProjectBoardNotExist checks if an error is a ErrProjectBoardNotExist
func IsErrProjectBoardNotExist(err error) bool {
_, ok := err.(ErrProjectBoardNotExist)
// IsErrProjectColumnNotExist checks if an error is a ErrProjectColumnNotExist
func IsErrProjectColumnNotExist(err error) bool {
_, ok := err.(ErrProjectColumnNotExist)
return ok
}
func (err ErrProjectBoardNotExist) Error() string {
return fmt.Sprintf("project board does not exist [id: %d]", err.BoardID)
func (err ErrProjectColumnNotExist) Error() string {
return fmt.Sprintf("project column does not exist [id: %d]", err.ColumnID)
}
func (err ErrProjectBoardNotExist) Unwrap() error {
func (err ErrProjectColumnNotExist) Unwrap() error {
return util.ErrNotExist
}
// Project represents a project board
// Project represents a project
type Project struct {
ID int64 `xorm:"pk autoincr"`
Title string `xorm:"INDEX NOT NULL"`
Description string `xorm:"TEXT"`
OwnerID int64 `xorm:"INDEX"`
Owner *user_model.User `xorm:"-"`
RepoID int64 `xorm:"INDEX"`
Repo *repo_model.Repository `xorm:"-"`
CreatorID int64 `xorm:"NOT NULL"`
IsClosed bool `xorm:"INDEX"`
BoardType BoardType
CardType CardType
Type Type
ID int64 `xorm:"pk autoincr"`
Title string `xorm:"INDEX NOT NULL"`
Description string `xorm:"TEXT"`
OwnerID int64 `xorm:"INDEX"`
Owner *user_model.User `xorm:"-"`
RepoID int64 `xorm:"INDEX"`
Repo *repo_model.Repository `xorm:"-"`
CreatorID int64 `xorm:"NOT NULL"`
IsClosed bool `xorm:"INDEX"`
TemplateType TemplateType `xorm:"'board_type'"` // TODO: rename the column to template_type
CardType CardType
Type Type
RenderedContent template.HTML `xorm:"-"`
@ -172,16 +166,7 @@ func init() {
db.RegisterModel(new(Project))
}
// GetBoardConfig retrieves the types of configurations project boards could have
func GetBoardConfig() []BoardConfig {
return []BoardConfig{
{BoardTypeNone, "repo.projects.type.none"},
{BoardTypeBasicKanban, "repo.projects.type.basic_kanban"},
{BoardTypeBugTriage, "repo.projects.type.bug_triage"},
}
}
// GetCardConfig retrieves the types of configurations project board cards could have
// GetCardConfig retrieves the types of configurations project column cards could have
func GetCardConfig() []CardConfig {
return []CardConfig{
{CardTypeTextOnly, "repo.projects.card_type.text_only"},
@ -251,8 +236,8 @@ func GetSearchOrderByBySortType(sortType string) db.SearchOrderBy {
// NewProject creates a new Project
func NewProject(ctx context.Context, p *Project) error {
if !IsBoardTypeValid(p.BoardType) {
p.BoardType = BoardTypeNone
if !IsTemplateTypeValid(p.TemplateType) {
p.TemplateType = TemplateTypeNone
}
if !IsCardTypeValid(p.CardType) {
@ -263,27 +248,19 @@ func NewProject(ctx context.Context, p *Project) error {
return util.NewInvalidArgumentErrorf("project type is not valid")
}
ctx, committer, err := db.TxContext(ctx)
if err != nil {
return err
}
defer committer.Close()
if err := db.Insert(ctx, p); err != nil {
return err
}
if p.RepoID > 0 {
if _, err := db.Exec(ctx, "UPDATE `repository` SET num_projects = num_projects + 1 WHERE id = ?", p.RepoID); err != nil {
return db.WithTx(ctx, func(ctx context.Context) error {
if err := db.Insert(ctx, p); err != nil {
return err
}
}
if err := createBoardsForProjectsType(ctx, p); err != nil {
return err
}
if p.RepoID > 0 {
if _, err := db.Exec(ctx, "UPDATE `repository` SET num_projects = num_projects + 1 WHERE id = ?", p.RepoID); err != nil {
return err
}
}
return committer.Commit()
return createDefaultColumnsForProject(ctx, p)
})
}
// GetProjectByID returns the projects in a repository
@ -417,7 +394,7 @@ func DeleteProjectByID(ctx context.Context, id int64) error {
return err
}
if err := deleteBoardByProjectID(ctx, id); err != nil {
if err := deleteColumnByProjectID(ctx, id); err != nil {
return err
}

View File

@ -51,13 +51,13 @@ func TestProject(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
project := &Project{
Type: TypeRepository,
BoardType: BoardTypeBasicKanban,
CardType: CardTypeTextOnly,
Title: "New Project",
RepoID: 1,
CreatedUnix: timeutil.TimeStampNow(),
CreatorID: 2,
Type: TypeRepository,
TemplateType: TemplateTypeBasicKanban,
CardType: CardTypeTextOnly,
Title: "New Project",
RepoID: 1,
CreatedUnix: timeutil.TimeStampNow(),
CreatorID: 2,
}
assert.NoError(t, NewProject(db.DefaultContext, project))

View File

@ -0,0 +1,45 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package project
type (
// TemplateType is used to represent a project template type
TemplateType uint8
// TemplateConfig is used to identify the template type of project that is being created
TemplateConfig struct {
TemplateType TemplateType
Translation string
}
)
const (
// TemplateTypeNone is a project template type that has no predefined columns
TemplateTypeNone TemplateType = iota
// TemplateTypeBasicKanban is a project template type that has basic predefined columns
TemplateTypeBasicKanban
// TemplateTypeBugTriage is a project template type that has predefined columns suited to hunting down bugs
TemplateTypeBugTriage
)
// GetTemplateConfigs retrieves the possible template configurations for a project
func GetTemplateConfigs() []TemplateConfig {
return []TemplateConfig{
{TemplateTypeNone, "repo.projects.type.none"},
{TemplateTypeBasicKanban, "repo.projects.type.basic_kanban"},
{TemplateTypeBugTriage, "repo.projects.type.bug_triage"},
}
}
// IsTemplateTypeValid checks if the project template type is valid
func IsTemplateTypeValid(p TemplateType) bool {
switch p {
case TemplateTypeNone, TemplateTypeBasicKanban, TemplateTypeBugTriage:
return true
default:
return false
}
}
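A tiny sketch of how a raw form value might be normalized with the new template helpers (hypothetical function in the same package):

func normalizeTemplateTypeSketch(raw uint8) TemplateType {
	t := TemplateType(raw)
	if !IsTemplateTypeValid(t) {
		return TemplateTypeNone // fall back to the template with no predefined columns
	}
	return t
}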

View File

@ -5,11 +5,14 @@ package repo
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"path"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/timeutil"
@ -188,7 +191,10 @@ func DeleteAttachments(ctx context.Context, attachments []*Attachment, remove bo
if remove {
for i, a := range attachments {
if err := storage.Attachments.Delete(a.RelativePath()); err != nil {
return i, err
if !errors.Is(err, os.ErrNotExist) {
return i, err
}
log.Warn("Attachment file not found when deleting: %s", a.RelativePath())
}
}
}
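The deletion loop above now treats a missing attachment file as already gone instead of failing the whole batch. A self-contained sketch of the same tolerance pattern using only the standard library (the path is illustrative):

package main

import (
	"errors"
	"fmt"
	"os"
)

// removeIfPresent deletes a file but treats "already gone" as success.
func removeIfPresent(path string) error {
	if err := os.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) {
		return err
	}
	return nil
}

func main() {
	fmt.Println(removeIfPresent("/tmp/attachment-that-does-not-exist")) // prints <nil>
}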

View File

@ -84,7 +84,13 @@ func (repo *Repository) relAvatarLink(ctx context.Context) string {
return setting.AppSubURL + "/repo-avatars/" + url.PathEscape(repo.Avatar)
}
// AvatarLink returns the full avatar url with http host. TODO: refactor it to a relative URL, but it is still used in API response at the moment
// AvatarLink returns the full avatar url with http host or the empty string if the repo doesn't have an avatar.
//
// TODO: refactor it to a relative URL, but it is still used in API response at the moment
func (repo *Repository) AvatarLink(ctx context.Context) string {
return httplib.MakeAbsoluteURL(ctx, repo.relAvatarLink(ctx))
relLink := repo.relAvatarLink(ctx)
if relLink != "" {
return httplib.MakeAbsoluteURL(ctx, relLink)
}
return ""
}

View File

@ -472,10 +472,9 @@ func (repo *Repository) MustOwner(ctx context.Context) *user_model.User {
func (repo *Repository) ComposeMetas(ctx context.Context) map[string]string {
if len(repo.RenderingMetas) == 0 {
metas := map[string]string{
"user": repo.OwnerName,
"repo": repo.Name,
"repoPath": repo.RepoPath(),
"mode": "comment",
"user": repo.OwnerName,
"repo": repo.Name,
"mode": "comment",
}
unit, err := repo.GetUnit(ctx, unit.TypeExternalTracker)

View File

@ -175,6 +175,8 @@ type SearchRepoOptions struct {
// True -> include just forks
// False -> include just non-forks
Fork optional.Option[bool]
// If the Fork option is True, you can use this option to limit the result to forks of a specific repo by repo ID.
ForkFrom int64
// None -> include templates AND non-templates
// True -> include just templates
// False -> include just non-templates
@ -514,6 +516,10 @@ func SearchRepositoryCondition(opts *SearchRepoOptions) builder.Cond {
cond = cond.And(builder.Eq{"is_fork": false})
} else {
cond = cond.And(builder.Eq{"is_fork": opts.Fork.Value()})
if opts.ForkFrom > 0 && opts.Fork.Value() {
cond = cond.And(builder.Eq{"fork_id": opts.ForkFrom})
}
}
}
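With the new ForkFrom field, a search can be narrowed to the forks of a single repository. A hedged sketch of building such a condition as a hypothetical helper in the same package, assuming optional.Some from code.gitea.io/gitea/modules/optional constructs the Option value:

func forksOfRepoCondSketch(repoID int64) builder.Cond {
	opts := &SearchRepoOptions{
		Fork:     optional.Some(true), // only forks
		ForkFrom: repoID,              // and only forks of this repository
	}
	return SearchRepositoryCondition(opts)
}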

View File

@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"strings"
"sync/atomic"
"code.gitea.io/gitea/models/perm"
"code.gitea.io/gitea/modules/container"
@ -27,7 +28,7 @@ const (
TypeWiki // 5 Wiki
TypeExternalWiki // 6 ExternalWiki
TypeExternalTracker // 7 ExternalTracker
TypeProjects // 8 Kanban board
TypeProjects // 8 Projects
TypePackages // 9 Packages
TypeActions // 10 Actions
)
@ -106,10 +107,23 @@ var (
TypeExternalTracker,
}
// DisabledRepoUnits contains the units that have been globally disabled
DisabledRepoUnits = []Type{}
disabledRepoUnitsAtomic atomic.Pointer[[]Type] // the units that have been globally disabled
)
// DisabledRepoUnitsGet returns the globally disabled units. It is a quick patch to fix a data race during testing,
// because the queue worker might read the value while a test is mocking it. FIXME: refactor to a cleaner solution later.
func DisabledRepoUnitsGet() []Type {
v := disabledRepoUnitsAtomic.Load()
if v == nil {
return nil
}
return *v
}
func DisabledRepoUnitsSet(v []Type) {
disabledRepoUnitsAtomic.Store(&v)
}
// Get valid set of default repository units from settings
func validateDefaultRepoUnits(defaultUnits, settingDefaultUnits []Type) []Type {
units := defaultUnits
@ -127,7 +141,7 @@ func validateDefaultRepoUnits(defaultUnits, settingDefaultUnits []Type) []Type {
}
// Remove disabled units
for _, disabledUnit := range DisabledRepoUnits {
for _, disabledUnit := range DisabledRepoUnitsGet() {
for i, unit := range units {
if unit == disabledUnit {
units = append(units[:i], units[i+1:]...)
@ -140,11 +154,11 @@ func validateDefaultRepoUnits(defaultUnits, settingDefaultUnits []Type) []Type {
// LoadUnitConfig load units from settings
func LoadUnitConfig() error {
var invalidKeys []string
DisabledRepoUnits, invalidKeys = FindUnitTypes(setting.Repository.DisabledRepoUnits...)
disabledRepoUnits, invalidKeys := FindUnitTypes(setting.Repository.DisabledRepoUnits...)
if len(invalidKeys) > 0 {
log.Warn("Invalid keys in disabled repo units: %s", strings.Join(invalidKeys, ", "))
}
DisabledRepoUnitsSet(disabledRepoUnits)
setDefaultRepoUnits, invalidKeys := FindUnitTypes(setting.Repository.DefaultRepoUnits...)
if len(invalidKeys) > 0 {
@ -167,7 +181,7 @@ func LoadUnitConfig() error {
// UnitGlobalDisabled checks if the unit type is globally disabled
func (u Type) UnitGlobalDisabled() bool {
for _, ud := range DisabledRepoUnits {
for _, ud := range DisabledRepoUnitsGet() {
if u == ud {
return true
}

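Switching from a plain package-level slice to atomic.Pointer removes the read/write race between the queue worker and tests that mock the value. A self-contained sketch of the same pattern (names are illustrative, not Gitea's):

package main

import (
	"fmt"
	"sync/atomic"
)

type Type int

var disabledUnits atomic.Pointer[[]Type]

func DisabledGet() []Type {
	if v := disabledUnits.Load(); v != nil {
		return *v // readers always see a fully formed slice
	}
	return nil
}

func DisabledSet(v []Type) {
	disabledUnits.Store(&v) // writers publish a new slice atomically
}

func main() {
	DisabledSet([]Type{2, 3})
	fmt.Println(DisabledGet()) // [2 3]
}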
View File

@ -14,10 +14,10 @@ import (
func TestLoadUnitConfig(t *testing.T) {
t.Run("regular", func(t *testing.T) {
defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []Type) {
DisabledRepoUnits = disabledRepoUnits
DisabledRepoUnitsSet(disabledRepoUnits)
DefaultRepoUnits = defaultRepoUnits
DefaultForkRepoUnits = defaultForkRepoUnits
}(DisabledRepoUnits, DefaultRepoUnits, DefaultForkRepoUnits)
}(DisabledRepoUnitsGet(), DefaultRepoUnits, DefaultForkRepoUnits)
defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []string) {
setting.Repository.DisabledRepoUnits = disabledRepoUnits
setting.Repository.DefaultRepoUnits = defaultRepoUnits
@ -28,16 +28,16 @@ func TestLoadUnitConfig(t *testing.T) {
setting.Repository.DefaultRepoUnits = []string{"repo.code", "repo.releases", "repo.issues", "repo.pulls"}
setting.Repository.DefaultForkRepoUnits = []string{"repo.releases"}
assert.NoError(t, LoadUnitConfig())
assert.Equal(t, []Type{TypeIssues}, DisabledRepoUnits)
assert.Equal(t, []Type{TypeIssues}, DisabledRepoUnitsGet())
assert.Equal(t, []Type{TypeCode, TypeReleases, TypePullRequests}, DefaultRepoUnits)
assert.Equal(t, []Type{TypeReleases}, DefaultForkRepoUnits)
})
t.Run("invalid", func(t *testing.T) {
defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []Type) {
DisabledRepoUnits = disabledRepoUnits
DisabledRepoUnitsSet(disabledRepoUnits)
DefaultRepoUnits = defaultRepoUnits
DefaultForkRepoUnits = defaultForkRepoUnits
}(DisabledRepoUnits, DefaultRepoUnits, DefaultForkRepoUnits)
}(DisabledRepoUnitsGet(), DefaultRepoUnits, DefaultForkRepoUnits)
defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []string) {
setting.Repository.DisabledRepoUnits = disabledRepoUnits
setting.Repository.DefaultRepoUnits = defaultRepoUnits
@ -48,16 +48,16 @@ func TestLoadUnitConfig(t *testing.T) {
setting.Repository.DefaultRepoUnits = []string{"repo.code", "invalid.2", "repo.releases", "repo.issues", "repo.pulls"}
setting.Repository.DefaultForkRepoUnits = []string{"invalid.3", "repo.releases"}
assert.NoError(t, LoadUnitConfig())
assert.Equal(t, []Type{TypeIssues}, DisabledRepoUnits)
assert.Equal(t, []Type{TypeIssues}, DisabledRepoUnitsGet())
assert.Equal(t, []Type{TypeCode, TypeReleases, TypePullRequests}, DefaultRepoUnits)
assert.Equal(t, []Type{TypeReleases}, DefaultForkRepoUnits)
})
t.Run("duplicate", func(t *testing.T) {
defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []Type) {
DisabledRepoUnits = disabledRepoUnits
DisabledRepoUnitsSet(disabledRepoUnits)
DefaultRepoUnits = defaultRepoUnits
DefaultForkRepoUnits = defaultForkRepoUnits
}(DisabledRepoUnits, DefaultRepoUnits, DefaultForkRepoUnits)
}(DisabledRepoUnitsGet(), DefaultRepoUnits, DefaultForkRepoUnits)
defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []string) {
setting.Repository.DisabledRepoUnits = disabledRepoUnits
setting.Repository.DefaultRepoUnits = defaultRepoUnits
@ -68,16 +68,16 @@ func TestLoadUnitConfig(t *testing.T) {
setting.Repository.DefaultRepoUnits = []string{"repo.code", "repo.releases", "repo.issues", "repo.pulls", "repo.code"}
setting.Repository.DefaultForkRepoUnits = []string{"repo.releases", "repo.releases"}
assert.NoError(t, LoadUnitConfig())
assert.Equal(t, []Type{TypeIssues}, DisabledRepoUnits)
assert.Equal(t, []Type{TypeIssues}, DisabledRepoUnitsGet())
assert.Equal(t, []Type{TypeCode, TypeReleases, TypePullRequests}, DefaultRepoUnits)
assert.Equal(t, []Type{TypeReleases}, DefaultForkRepoUnits)
})
t.Run("empty_default", func(t *testing.T) {
defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []Type) {
DisabledRepoUnits = disabledRepoUnits
DisabledRepoUnitsSet(disabledRepoUnits)
DefaultRepoUnits = defaultRepoUnits
DefaultForkRepoUnits = defaultForkRepoUnits
}(DisabledRepoUnits, DefaultRepoUnits, DefaultForkRepoUnits)
}(DisabledRepoUnitsGet(), DefaultRepoUnits, DefaultForkRepoUnits)
defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []string) {
setting.Repository.DisabledRepoUnits = disabledRepoUnits
setting.Repository.DefaultRepoUnits = defaultRepoUnits
@ -88,7 +88,7 @@ func TestLoadUnitConfig(t *testing.T) {
setting.Repository.DefaultRepoUnits = []string{}
setting.Repository.DefaultForkRepoUnits = []string{"repo.releases", "repo.releases"}
assert.NoError(t, LoadUnitConfig())
assert.Equal(t, []Type{TypeIssues}, DisabledRepoUnits)
assert.Equal(t, []Type{TypeIssues}, DisabledRepoUnitsGet())
assert.ElementsMatch(t, []Type{TypeCode, TypePullRequests, TypeReleases, TypeWiki, TypePackages, TypeProjects, TypeActions}, DefaultRepoUnits)
assert.Equal(t, []Type{TypeReleases}, DefaultForkRepoUnits)
})

View File

@ -10,6 +10,7 @@ import (
"net/mail"
"regexp"
"strings"
"time"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/base"
@ -353,14 +354,12 @@ func ChangeInactivePrimaryEmail(ctx context.Context, uid int64, oldEmailAddr, ne
// VerifyActiveEmailCode verifies active email code when active account
func VerifyActiveEmailCode(ctx context.Context, code, email string) *EmailAddress {
minutes := setting.Service.ActiveCodeLives
if user := GetVerifyUser(ctx, code); user != nil {
// time limit code
prefix := code[:base.TimeLimitCodeLength]
data := fmt.Sprintf("%d%s%s%s%s", user.ID, email, user.LowerName, user.Passwd, user.Rands)
if base.VerifyTimeLimitCode(data, minutes, prefix) {
if base.VerifyTimeLimitCode(time.Now(), data, setting.Service.ActiveCodeLives, prefix) {
emailAddress := &EmailAddress{UID: user.ID, Email: email}
if has, _ := db.GetEngine(ctx).Get(emailAddress); has {
return emailAddress

View File

@ -304,7 +304,7 @@ func (u *User) OrganisationLink() string {
func (u *User) GenerateEmailActivateCode(email string) string {
code := base.CreateTimeLimitCode(
fmt.Sprintf("%d%s%s%s%s", u.ID, email, u.LowerName, u.Passwd, u.Rands),
setting.Service.ActiveCodeLives, nil)
setting.Service.ActiveCodeLives, time.Now(), nil)
// Add tail hex username
code += hex.EncodeToString([]byte(u.LowerName))
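GenerateEmailActivateCode therefore emits a composite string: a 58-character time-limit code (TimeLimitCodeLength = 12 + 6 + 40, defined in modules/base below) followed by the hex-encoded lowercase username. The verifiers above take code[:base.TimeLimitCodeLength] as the time-limit part; a rough sketch of the matching split for the username tail, assuming only the layout visible in this diff (variable names are illustrative):

prefix := code[:base.TimeLimitCodeLength]   // time-limit portion, checked with base.VerifyTimeLimitCode
nameHex := code[base.TimeLimitCodeLength:]  // appended by GenerateEmailActivateCode
nameBytes, _ := hex.DecodeString(nameHex)   // recovers u.LowerName (a decode error means a malformed code)
lowerName := string(nameBytes)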
@ -791,14 +791,11 @@ func GetVerifyUser(ctx context.Context, code string) (user *User) {
// VerifyUserActiveCode verifies active code when active account
func VerifyUserActiveCode(ctx context.Context, code string) (user *User) {
minutes := setting.Service.ActiveCodeLives
if user = GetVerifyUser(ctx, code); user != nil {
// time limit code
prefix := code[:base.TimeLimitCodeLength]
data := fmt.Sprintf("%d%s%s%s%s", user.ID, user.Email, user.LowerName, user.Passwd, user.Rands)
if base.VerifyTimeLimitCode(data, minutes, prefix) {
if base.VerifyTimeLimitCode(time.Now(), data, setting.Service.ActiveCodeLives, prefix) {
return user
}
}
@ -859,6 +856,10 @@ func GetUserByID(ctx context.Context, id int64) (*User, error) {
// GetUserByIDs returns the user objects by given IDs if exists.
func GetUserByIDs(ctx context.Context, ids []int64) ([]*User, error) {
if len(ids) == 0 {
return nil, nil
}
users := make([]*User, 0, len(ids))
err := db.GetEngine(ctx).In("id", ids).
Table("user").

View File

@ -18,7 +18,7 @@ func parseIntParam(value, param, algorithmName, config string, previousErr error
return parsed, previousErr // <- Keep the previous error as this function should still return an error once everything has been checked if any call failed
}
func parseUIntParam(value, param, algorithmName, config string, previousErr error) (uint64, error) {
func parseUIntParam(value, param, algorithmName, config string, previousErr error) (uint64, error) { //nolint:unparam
parsed, err := strconv.ParseUint(value, 10, 64)
if err != nil {
log.Error("invalid integer for %s representation in %s hash spec %s", param, algorithmName, config)

View File

@ -4,12 +4,15 @@
package base
import (
"crypto/hmac"
"crypto/sha1"
"crypto/sha256"
"crypto/subtle"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"hash"
"os"
"path/filepath"
"runtime"
@ -25,13 +28,6 @@ import (
"github.com/dustin/go-humanize"
)
// EncodeSha1 string to sha1 hex value.
func EncodeSha1(str string) string {
h := sha1.New()
_, _ = h.Write([]byte(str))
return hex.EncodeToString(h.Sum(nil))
}
// EncodeSha256 string to sha256 hex value.
func EncodeSha256(str string) string {
h := sha256.New()
@ -62,63 +58,62 @@ func BasicAuthDecode(encoded string) (string, string, error) {
}
// VerifyTimeLimitCode verify time limit code
func VerifyTimeLimitCode(data string, minutes int, code string) bool {
func VerifyTimeLimitCode(now time.Time, data string, minutes int, code string) bool {
if len(code) <= 18 {
return false
}
// split code
start := code[:12]
lives := code[12:18]
if d, err := strconv.ParseInt(lives, 10, 0); err == nil {
minutes = int(d)
}
startTimeStr := code[:12]
aliveTimeStr := code[12:18]
aliveTime, _ := strconv.Atoi(aliveTimeStr) // no need to check err, if anything wrong, the following code check will fail soon
// right active code
retCode := CreateTimeLimitCode(data, minutes, start)
if retCode == code && minutes > 0 {
// check time is expired or not
before, _ := time.ParseInLocation("200601021504", start, time.Local)
now := time.Now()
if before.Add(time.Minute*time.Duration(minutes)).Unix() > now.Unix() {
return true
// check code
retCode := CreateTimeLimitCode(data, aliveTime, startTimeStr, nil)
if subtle.ConstantTimeCompare([]byte(retCode), []byte(code)) != 1 {
retCode = CreateTimeLimitCode(data, aliveTime, startTimeStr, sha1.New()) // TODO: this is only for the support of legacy codes, remove this in/after 1.23
if subtle.ConstantTimeCompare([]byte(retCode), []byte(code)) != 1 {
return false
}
}
return false
// check time is expired or not: startTime <= now && now < startTime + minutes
startTime, _ := time.ParseInLocation("200601021504", startTimeStr, time.Local)
return (startTime.Before(now) || startTime.Equal(now)) && now.Before(startTime.Add(time.Minute*time.Duration(minutes)))
}
// TimeLimitCodeLength default value for time limit code
const TimeLimitCodeLength = 12 + 6 + 40
// CreateTimeLimitCode create a time limit code
// code format: 12 length date time string + 6 minutes string + 40 sha1 encoded string
func CreateTimeLimitCode(data string, minutes int, startInf any) string {
format := "200601021504"
// CreateTimeLimitCode create a time-limited code.
// Format: 12 length date time string + 6 minutes string (not used) + 40 hash string, some other code depends on this fixed length
// If h is nil, then use the default hmac hash.
func CreateTimeLimitCode[T time.Time | string](data string, minutes int, startTimeGeneric T, h hash.Hash) string {
const format = "200601021504"
var start, end time.Time
var startStr, endStr string
if startInf == nil {
// Use now time create code
start = time.Now()
startStr = start.Format(format)
var start time.Time
var startTimeAny any = startTimeGeneric
if t, ok := startTimeAny.(time.Time); ok {
start = t
} else {
// use start string create code
startStr = startInf.(string)
start, _ = time.ParseInLocation(format, startStr, time.Local)
startStr = start.Format(format)
var err error
start, err = time.ParseInLocation(format, startTimeAny.(string), time.Local)
if err != nil {
return "" // return an invalid code because the "parse" failed
}
}
startStr := start.Format(format)
end := start.Add(time.Minute * time.Duration(minutes))
end = start.Add(time.Minute * time.Duration(minutes))
endStr = end.Format(format)
// create sha1 encode string
sh := sha1.New()
_, _ = sh.Write([]byte(fmt.Sprintf("%s%s%s%s%d", data, hex.EncodeToString(setting.GetGeneralTokenSigningSecret()), startStr, endStr, minutes)))
encoded := hex.EncodeToString(sh.Sum(nil))
if h == nil {
h = hmac.New(sha1.New, setting.GetGeneralTokenSigningSecret())
}
_, _ = fmt.Fprintf(h, "%s%s%s%s%d", data, hex.EncodeToString(setting.GetGeneralTokenSigningSecret()), startStr, end.Format(format), minutes)
encoded := hex.EncodeToString(h.Sum(nil))
code := fmt.Sprintf("%s%06d%s", startStr, minutes, encoded)
if len(code) != TimeLimitCodeLength {
panic("there is a hard requirement for the length of time-limited code") // it shouldn't happen
}
return code
}
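Taken together, a code is the 12-digit start time in the "200601021504" layout, a zero-padded 6-digit minute count, and a 40-hex-character HMAC-SHA1 digest keyed with the general token signing secret. A minimal caller-side sketch using only the two functions above (the data string and the 30-minute window are illustrative):

// Issue a code valid for 30 minutes from now; a nil hash selects the default HMAC-SHA1.
code := CreateTimeLimitCode("user-42:someone@example.com", 30, time.Now(), nil)

// Verification recomputes the digest from the same data plus the start time and minute
// count embedded in the code, then checks that "now" falls inside the allowed window.
ok := VerifyTimeLimitCode(time.Now(), "user-42:someone@example.com", 30, code)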

View File

@ -4,20 +4,18 @@
package base
import (
"crypto/sha1"
"fmt"
"os"
"testing"
"time"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/test"
"github.com/stretchr/testify/assert"
)
func TestEncodeSha1(t *testing.T) {
assert.Equal(t,
"8843d7f92416211de9ebb963ff4ce28125932878",
EncodeSha1("foobar"),
)
}
func TestEncodeSha256(t *testing.T) {
assert.Equal(t,
"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2",
@ -46,43 +44,54 @@ func TestBasicAuthDecode(t *testing.T) {
}
func TestVerifyTimeLimitCode(t *testing.T) {
tc := []struct {
data string
minutes int
code string
valid bool
}{{
data: "data",
minutes: 2,
code: testCreateTimeLimitCode(t, "data", 2),
valid: true,
}, {
data: "abc123-ß",
minutes: 1,
code: testCreateTimeLimitCode(t, "abc123-ß", 1),
valid: true,
}, {
data: "data",
minutes: 2,
code: "2021012723240000005928251dac409d2c33a6eb82c63410aaad569bed",
valid: false,
}}
for _, test := range tc {
actualValid := VerifyTimeLimitCode(test.data, test.minutes, test.code)
assert.Equal(t, test.valid, actualValid, "data: '%s' code: '%s' should be valid: %t", test.data, test.code, test.valid)
defer test.MockVariableValue(&setting.InstallLock, true)()
initGeneralSecret := func(secret string) {
setting.InstallLock = true
setting.CfgProvider, _ = setting.NewConfigProviderFromData(fmt.Sprintf(`
[oauth2]
JWT_SECRET = %s
`, secret))
setting.LoadCommonSettings()
}
}
func testCreateTimeLimitCode(t *testing.T, data string, m int) string {
result0 := CreateTimeLimitCode(data, m, nil)
result1 := CreateTimeLimitCode(data, m, time.Now().Format("200601021504"))
result2 := CreateTimeLimitCode(data, m, time.Unix(time.Now().Unix()+int64(time.Minute)*int64(m), 0).Format("200601021504"))
initGeneralSecret("KZb_QLUd4fYVyxetjxC4eZkrBgWM2SndOOWDNtgUUko")
now := time.Now()
assert.Equal(t, result0, result1)
assert.NotEqual(t, result0, result2)
t.Run("TestGenericParameter", func(t *testing.T) {
time2000 := time.Date(2000, 1, 2, 3, 4, 5, 0, time.Local)
assert.Equal(t, "2000010203040000026fa5221b2731b7cf80b1b506f5e39e38c115fee5", CreateTimeLimitCode("test-sha1", 2, time2000, sha1.New()))
assert.Equal(t, "2000010203040000026fa5221b2731b7cf80b1b506f5e39e38c115fee5", CreateTimeLimitCode("test-sha1", 2, "200001020304", sha1.New()))
assert.Equal(t, "2000010203040000024842227a2f87041ff82025199c0187410a9297bf", CreateTimeLimitCode("test-hmac", 2, time2000, nil))
assert.Equal(t, "2000010203040000024842227a2f87041ff82025199c0187410a9297bf", CreateTimeLimitCode("test-hmac", 2, "200001020304", nil))
})
assert.True(t, len(result0) != 0)
return result0
t.Run("TestInvalidCode", func(t *testing.T) {
assert.False(t, VerifyTimeLimitCode(now, "data", 2, ""))
assert.False(t, VerifyTimeLimitCode(now, "data", 2, "invalid code"))
})
t.Run("TestCreateAndVerify", func(t *testing.T) {
code := CreateTimeLimitCode("data", 2, now, nil)
assert.False(t, VerifyTimeLimitCode(now.Add(-time.Minute), "data", 2, code)) // not started yet
assert.True(t, VerifyTimeLimitCode(now, "data", 2, code))
assert.True(t, VerifyTimeLimitCode(now.Add(time.Minute), "data", 2, code))
assert.False(t, VerifyTimeLimitCode(now.Add(time.Minute), "DATA", 2, code)) // invalid data
assert.False(t, VerifyTimeLimitCode(now.Add(2*time.Minute), "data", 2, code)) // expired
})
t.Run("TestDifferentSecret", func(t *testing.T) {
// use another secret to ensure the code is invalid for different secret
verifyDataCode := func(c string) bool {
return VerifyTimeLimitCode(now, "data", 2, c)
}
code1 := CreateTimeLimitCode("data", 2, now, sha1.New())
code2 := CreateTimeLimitCode("data", 2, now, nil)
assert.True(t, verifyDataCode(code1))
assert.True(t, verifyDataCode(code2))
initGeneralSecret("000_QLUd4fYVyxetjxC4eZkrBgWM2SndOOWDNtgUUko")
assert.False(t, verifyDataCode(code1))
assert.False(t, verifyDataCode(code2))
})
}
func TestFileSize(t *testing.T) {

View File

@ -8,6 +8,8 @@ import (
"time"
"code.gitea.io/gitea/modules/setting"
_ "gitea.com/go-chi/cache/memcache" //nolint:depguard // memcache plugin for cache, it is required for config "ADAPTER=memcache"
)
var defaultCache StringCache

View File

@ -4,6 +4,8 @@
package git
import (
"crypto/sha1"
"encoding/hex"
"fmt"
"io"
"os"
@ -128,3 +130,9 @@ func (l *LimitedReaderCloser) Read(p []byte) (n int, err error) {
func (l *LimitedReaderCloser) Close() error {
return l.C.Close()
}
func HashFilePathForWebUI(s string) string {
h := sha1.New()
_, _ = h.Write([]byte(s))
return hex.EncodeToString(h.Sum(nil))
}

modules/git/utils_test.go Normal file
View File

@ -0,0 +1,17 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package git
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestHashFilePathForWebUI(t *testing.T) {
assert.Equal(t,
"8843d7f92416211de9ebb963ff4ce28125932878",
HashFilePathForWebUI("foobar"),
)
}
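The new helper returns the same value the removed base.EncodeSha1 produced for the same input (compare the expected digest for "foobar" here with the deleted TestEncodeSha1 above), so a caller only needs the new package:

// sha1 hex digest of a file path string, used by the web UI per the helper's name.
git.HashFilePathForWebUI("foobar") // "8843d7f92416211de9ebb963ff4ce28125932878"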

Some files were not shown because too many files have changed in this diff.