Compare commits

..

2 Commits

Author SHA1 Message Date
Wenhao Zhu
6a1c47296c
fix: fix the bug where knowledge-base question optimization did not take effect (#4033) 2025-03-07 16:01:01 +08:00
archer
6d4776b3aa
action 2025-03-06 22:56:00 +08:00
1684 changed files with 24372 additions and 80093 deletions


@ -1,25 +0,0 @@
# Build output directories
dist/
build/
.next/
out/
# Dependency directories
node_modules/
# Cache and generated files
coverage/
.coverage/
.nyc_output/
*.log
# Other files excluded from checks
*.min.js
*.config.js
vitest.config.mts
# Specific directories
bin/
scripts/
deploy/
docSite/


@ -1,17 +0,0 @@
{
"root": true,
"parser": "@typescript-eslint/parser",
"plugins": ["@typescript-eslint"],
"extends": ["next/core-web-vitals"],
"rules": {
"react-hooks/rules-of-hooks": 0,
"@typescript-eslint/consistent-type-imports": [
"error",
{
"prefer": "type-imports",
"disallowTypeAnnotations": false
}
]
},
"ignorePatterns": ["node_modules/", "dist/", "build/", "coverage/"]
}
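The ESLint config removed above enforced @typescript-eslint/consistent-type-imports with "prefer": "type-imports", which asks that imports used only in type positions go through `import type`. A minimal TypeScript sketch of what the rule expects, using node:http purely for illustration:

import type { IncomingMessage } from 'node:http'; // type-only usage goes through `import type`
import { createServer } from 'node:http'; // value imports stay regular imports

const server = createServer((req: IncomingMessage, res) => {
  res.end('ok');
});
server.listen(0, () => server.close()); // start and immediately stop; the point is the import style above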

.github/gh-bot.yml (vendored, new file, 30 lines)

@ -0,0 +1,30 @@
version: v1
debug: true
action:
printConfig: false
release:
retry: 15s
actionName: Release
allowOps:
- cuisongliu
bot:
prefix: /
spe: _
allowOps:
- sealos-ci-robot
- sealos-release-robot
email: sealos-ci-robot@sealos.io
username: sealos-ci-robot
repo:
org: false
message:
success: |
🤖 says: Hooray! The action {{.Body}} has been completed successfully. 🎉
format_error: |
🤖 says: ‼️ There is a formatting issue with the action, kindly verify the action's format.
permission_error: |
🤖 says: ‼️ The action doesn't have permission to trigger.
release_error: |
🤖 says: ‼️ Release action failed.
Error details: {{.Error}}


@ -10,13 +10,6 @@ on:
jobs: jobs:
build-fastgpt-docs-images: build-fastgpt-docs-images:
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions:
contents: read
packages: write
attestations: write
id-token: write
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
@ -34,6 +27,7 @@ jobs:
with: with:
# list of Docker images to use as base name for tags # list of Docker images to use as base name for tags
images: | images: |
${{ secrets.DOCKER_HUB_NAME }}/fastgpt-docs
ghcr.io/${{ github.repository_owner }}/fastgpt-docs ghcr.io/${{ github.repository_owner }}/fastgpt-docs
registry.cn-hangzhou.aliyuncs.com/${{ secrets.ALI_HUB_USERNAME }}/fastgpt-docs registry.cn-hangzhou.aliyuncs.com/${{ secrets.ALI_HUB_USERNAME }}/fastgpt-docs
tags: | tags: |
@ -46,12 +40,18 @@ jobs:
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3 uses: docker/setup-buildx-action@v3
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_NAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Login to ghcr.io - name: Login to ghcr.io
uses: docker/login-action@v3 uses: docker/login-action@v3
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ github.actor }} username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GH_PAT }}
- name: Login to Aliyun - name: Login to Aliyun
uses: docker/login-action@v3 uses: docker/login-action@v3
@ -70,10 +70,9 @@ jobs:
labels: ${{ steps.meta.outputs.labels }} labels: ${{ steps.meta.outputs.labels }}
outputs: outputs:
tags: ${{ steps.datetime.outputs.datetime }} tags: ${{ steps.datetime.outputs.datetime }}
update-docs-image: update-docs-image:
needs: build-fastgpt-docs-images needs: build-fastgpt-docs-images
runs-on: ubuntu-24.04 runs-on: ubuntu-20.04
if: github.repository == 'labring/FastGPT' if: github.repository == 'labring/FastGPT'
steps: steps:
- name: Checkout code - name: Checkout code


@ -1,4 +1,4 @@
name: Deploy doc image to cf name: Deploy doc image to vercel
on: on:
workflow_dispatch: workflow_dispatch:
@ -18,12 +18,7 @@ jobs:
url: ${{ steps.vercel-action.outputs.preview-url }} url: ${{ steps.vercel-action.outputs.preview-url }}
# The type of runner that the job will run on # The type of runner that the job will run on
runs-on: ubuntu-24.04 runs-on: ubuntu-22.04
permissions:
contents: write
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
# Job outputs # Job outputs
outputs: outputs:
@ -63,9 +58,20 @@ jobs:
- name: Build - name: Build
run: cd docSite && hugo mod get -u github.com/colinwilson/lotusdocs@6d0568e && hugo -v --minify run: cd docSite && hugo mod get -u github.com/colinwilson/lotusdocs@6d0568e && hugo -v --minify
- name: Deploy to GitHub Pages # Step 5 - Push our generated site to Vercel
uses: peaceiris/actions-gh-pages@v4 - name: Deploy to Vercel
if: github.ref == 'refs/heads/main' uses: amondnet/vercel-action@v25
id: vercel-action
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required
vercel-org-id: ${{ secrets.VERCEL_ORG_ID }} #Required
vercel-project-id: ${{ secrets.VERCEL_PROJECT_ID }} #Required
github-comment: false
vercel-args: '--prod --local-config ../vercel.json' # Optional
working-directory: docSite/public
- name: Deploy to GitHub Pages
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GH_PAT }}
publish_dir: docSite/public publish_dir: docSite/public


@ -10,19 +10,13 @@ on:
jobs: jobs:
# This workflow contains jobs "deploy-production" # This workflow contains jobs "deploy-production"
deploy-preview: deploy-preview:
permissions:
contents: read
packages: write
attestations: write
id-token: write
pull-requests: write
# The environment this job references # The environment this job references
environment: environment:
name: Preview name: Preview
url: ${{ steps.vercel-action.outputs.preview-url }} url: ${{ steps.vercel-action.outputs.preview-url }}
# The type of runner that the job will run on # The type of runner that the job will run on
runs-on: ubuntu-24.04 runs-on: ubuntu-22.04
# Job outputs # Job outputs
outputs: outputs:
@ -38,7 +32,6 @@ jobs:
repository: ${{ github.event.pull_request.head.repo.full_name }} repository: ${{ github.event.pull_request.head.repo.full_name }}
submodules: recursive # Fetch submodules submodules: recursive # Fetch submodules
fetch-depth: 0 # Fetch all history for .GitInfo and .Lastmod fetch-depth: 0 # Fetch all history for .GitInfo and .Lastmod
token: ${{ secrets.GITHUB_TOKEN }}
# Step 2 Detect changes to Docs Content # Step 2 Detect changes to Docs Content
- name: Detect changes in doc content - name: Detect changes in doc content
@ -50,6 +43,10 @@ jobs:
- 'docSite/content/docs/**' - 'docSite/content/docs/**'
base: main base: main
- name: Add cdn for images
run: |
sed -i "s#\](/imgs/#\](https://cdn.jsdelivr.net/gh/yangchuansheng/fastgpt-imgs@main/imgs/#g" $(grep -rl "\](/imgs/" docSite/content/zh-cn/docs)
# Step 3 - Install Hugo (specific version) # Step 3 - Install Hugo (specific version)
- name: Install Hugo - name: Install Hugo
uses: peaceiris/actions-hugo@v2 uses: peaceiris/actions-hugo@v2
@ -61,35 +58,39 @@ jobs:
- name: Build - name: Build
run: cd docSite && hugo mod get -u github.com/colinwilson/lotusdocs@6d0568e && hugo -v --minify run: cd docSite && hugo mod get -u github.com/colinwilson/lotusdocs@6d0568e && hugo -v --minify
# Step 5 - Push our generated site to Cloudflare # Step 5 - Push our generated site to Vercel
- name: Deploy to Cloudflare Pages - name: Deploy to Vercel
id: deploy uses: amondnet/vercel-action@v25
uses: cloudflare/wrangler-action@v3 id: vercel-action
with: with:
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} vercel-org-id: ${{ secrets.VERCEL_ORG_ID }} #Required
command: pages deploy ./docSite/public --project-name=fastgpt-doc vercel-project-id: ${{ secrets.VERCEL_PROJECT_ID }} #Required
packageManager: npm github-comment: false
vercel-args: '--local-config ../vercel.json' # Optional
- name: Create deployment status comment working-directory: docSite/public
if: always() alias-domains: | #Optional
fastgpt-staging.vercel.app
docsOutput:
needs: [deploy-preview]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
- name: Write md
run: |
echo "# 🤖 Generated by deploy action" > report.md
echo "[👀 Visit Preview](${{ needs.deploy-preview.outputs.url }})" >> report.md
cat report.md
- name: Gh Rebot for Sealos
uses: labring/gh-rebot@v0.0.6
if: ${{ (github.event_name == 'pull_request_target') }}
with:
version: v0.0.6
env: env:
JOB_STATUS: ${{ job.status }} GH_TOKEN: '${{ secrets.GH_PAT }}'
PREVIEW_URL: ${{ steps.deploy.outputs.deployment-url }} SEALOS_TYPE: 'pr_comment'
uses: actions/github-script@v6 SEALOS_FILENAME: 'report.md'
with: SEALOS_REPLACE_TAG: 'DEFAULT_REPLACE_DEPLOY'
token: ${{ secrets.GITHUB_TOKEN }}
script: |
const success = process.env.JOB_STATUS === 'success';
const deploymentUrl = `${process.env.PREVIEW_URL}`;
const status = success ? '✅ Success' : '❌ Failed';
console.log(process.env.JOB_STATUS);
const commentBody = `**Deployment Status: ${status}**
${success ? `🔗 Preview URL: ${deploymentUrl}` : ''}`;
await github.rest.issues.createComment({
...context.repo,
issue_number: context.payload.pull_request.number,
body: commentBody
});
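The "Add cdn for images" step above rewrites local /imgs/ markdown image paths to a jsDelivr CDN URL before the Hugo build runs. A minimal TypeScript sketch of the same rewrite, with a made-up sample line:

// Mirrors the sed rewrite: ](/imgs/...  ->  ](https://cdn.jsdelivr.net/gh/yangchuansheng/fastgpt-imgs@main/imgs/...
const CDN_PREFIX = 'https://cdn.jsdelivr.net/gh/yangchuansheng/fastgpt-imgs@main/imgs/';

function rewriteImagePaths(markdown: string): string {
  // replace every "](/imgs/" occurrence with the CDN prefix, leaving other links untouched
  return markdown.split('](/imgs/').join(`](${CDN_PREFIX}`);
}

// Made-up sample line standing in for a doc page under docSite/content/zh-cn/docs
console.log(rewriteImagePaths('![create dataset](/imgs/dataset-create.png)'));
// -> ![create dataset](https://cdn.jsdelivr.net/gh/yangchuansheng/fastgpt-imgs@main/imgs/dataset-create.png)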


@ -1,6 +1,6 @@
name: Sync images name: Sync images
on: on:
pull_request: pull_request_target:
branches: branches:
- main - main
paths: paths:
@ -15,6 +15,13 @@ jobs:
sync: sync:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout
uses: actions/checkout@v3
if: ${{ (github.event_name == 'pull_request_target') }}
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v3
@ -22,7 +29,7 @@ jobs:
uses: BetaHuhn/repo-file-sync-action@v1.21.0 uses: BetaHuhn/repo-file-sync-action@v1.21.0
with: with:
GH_PAT: ${{ secrets.IMG_GH_PAT }} GH_PAT: ${{ secrets.IMG_GH_PAT }}
CONFIG_PATH: .github/doc-sync-image.yml CONFIG_PATH: .github/sync_imgs.yml
ORIGINAL_MESSAGE: true ORIGINAL_MESSAGE: true
SKIP_PR: true SKIP_PR: true
COMMIT_EACH_FILE: false COMMIT_EACH_FILE: false


@ -9,12 +9,7 @@ on:
- 'main' - 'main'
jobs: jobs:
build-fastgpt-images: build-fastgpt-images:
permissions: runs-on: ubuntu-20.04
packages: write
contents: read
attestations: write
id-token: write
runs-on: ubuntu-24.04
if: github.repository != 'labring/FastGPT' if: github.repository != 'labring/FastGPT'
steps: steps:
- name: Checkout - name: Checkout
@ -37,7 +32,7 @@ jobs:
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ github.repository_owner }} username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GH_PAT }}
- name: Set DOCKER_REPO_TAGGED based on branch or tag - name: Set DOCKER_REPO_TAGGED based on branch or tag
run: | run: |
echo "DOCKER_REPO_TAGGED=ghcr.io/${{ github.repository_owner }}/fastgpt:latest" >> $GITHUB_ENV echo "DOCKER_REPO_TAGGED=ghcr.io/${{ github.repository_owner }}/fastgpt:latest" >> $GITHUB_ENV


@ -1,171 +1,258 @@
name: Build FastGPT images name: Build FastGPT images
on: on:
workflow_dispatch: workflow_dispatch:
push: push:
paths: paths:
- "projects/app/**" - 'projects/app/**'
- "packages/**" - 'packages/**'
tags: tags:
- "v*" - 'v*'
jobs: jobs:
build-fastgpt-images: build-fastgpt-images:
permissions: runs-on: ubuntu-20.04
packages: write
contents: read
attestations: write
id-token: write
strategy:
matrix:
sub_routes:
- repo: fastgpt
base_url: ""
- repo: fastgpt-sub-route
base_url: "/fastai"
- repo: fastgpt-sub-route-gchat
base_url: "/gchat"
archs:
- arch: amd64
- arch: arm64
runs-on: ubuntu-24.04-arm
runs-on: ${{ matrix.archs.runs-on || 'ubuntu-24.04' }}
steps: steps:
# install env # install env
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 1 fetch-depth: 1
- name: Install Dependencies
run: |
sudo apt update && sudo apt install -y nodejs npm
- name: Set up QEMU (optional)
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3 uses: docker/setup-buildx-action@v2
with: with:
driver-opts: network=host driver-opts: network=host
- name: Cache Docker layers - name: Cache Docker layers
uses: actions/cache@v4 uses: actions/cache@v4
with: with:
path: /tmp/.buildx-cache path: /tmp/.buildx-cache
key: ${{ runner.os }}-${{ matrix.sub_routes.repo }}-buildx-${{ github.sha }} key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: | restore-keys: |
${{ runner.os }}-${{ matrix.sub_routes.repo }}-buildx- ${{ runner.os }}-buildx-
# login docker # login docker
- name: Login to GitHub Container Registry - name: Login to GitHub Container Registry
uses: docker/login-action@v3 uses: docker/login-action@v2
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ github.repository_owner }} username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GH_PAT }}
- name: Login to Ali Hub - name: Login to Ali Hub
uses: docker/login-action@v3 uses: docker/login-action@v2
with: with:
registry: registry.cn-hangzhou.aliyuncs.com registry: registry.cn-hangzhou.aliyuncs.com
username: ${{ secrets.ALI_HUB_USERNAME }} username: ${{ secrets.ALI_HUB_USERNAME }}
password: ${{ secrets.ALI_HUB_PASSWORD }} password: ${{ secrets.ALI_HUB_PASSWORD }}
- name: Login to Docker Hub - name: Login to Docker Hub
uses: docker/login-action@v3 uses: docker/login-action@v2
with: with:
username: ${{ secrets.DOCKER_HUB_NAME }} username: ${{ secrets.DOCKER_HUB_NAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }} password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Build for ${{ matrix.archs.arch }} # Set tag
id: build
uses: docker/build-push-action@v6
with:
context: .
file: projects/app/Dockerfile
platforms: linux/${{ matrix.archs.arch }}
build-args: |
${{ matrix.sub_routes.base_url && format('base_url={0}', matrix.sub_routes.base_url) || '' }}
labels: |
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.description=${{ matrix.sub_routes.repo }} image
outputs: type=image,"name=ghcr.io/${{ github.repository_owner }}/${{ matrix.sub_routes.repo }},${{ secrets.ALI_IMAGE_NAME }}/${{ matrix.sub_routes.repo }},${{ secrets.DOCKER_IMAGE_NAME }}/${{ matrix.sub_routes.repo }}",push-by-digest=true,push=true
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Export digest
run: |
mkdir -p ${{ runner.temp }}/digests/${{ matrix.sub_routes.repo }}
digest="${{ steps.build.outputs.digest }}"
touch "${{ runner.temp }}/digests/${{ matrix.sub_routes.repo }}/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
with:
name: digests-${{ matrix.sub_routes.repo }}-${{ github.sha }}-${{ matrix.archs.arch }}
path: ${{ runner.temp }}/digests/${{ matrix.sub_routes.repo }}/*
if-no-files-found: error
retention-days: 1
release-fastgpt-images:
permissions:
packages: write
contents: read
attestations: write
id-token: write
needs: build-fastgpt-images
strategy:
matrix:
sub_routes:
- repo: fastgpt
- repo: fastgpt-sub-route
- repo: fastgpt-sub-route-gchat
runs-on: ubuntu-24.04
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Ali Hub
uses: docker/login-action@v3
with:
registry: registry.cn-hangzhou.aliyuncs.com
username: ${{ secrets.ALI_HUB_USERNAME }}
password: ${{ secrets.ALI_HUB_PASSWORD }}
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_NAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Download digests
uses: actions/download-artifact@v4
with:
path: ${{ runner.temp }}/digests
pattern: digests-${{ matrix.sub_routes.repo }}-${{ github.sha }}-*
merge-multiple: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set image name and tag - name: Set image name and tag
run: | run: |
if [[ "${{ github.ref_name }}" == "main" ]]; then if [[ "${{ github.ref_name }}" == "main" ]]; then
echo "Git_Tag=ghcr.io/${{ github.repository_owner }}/${{ matrix.sub_routes.repo }}:latest" >> $GITHUB_ENV echo "Git_Tag=ghcr.io/${{ github.repository_owner }}/fastgpt:latest" >> $GITHUB_ENV
echo "Git_Latest=ghcr.io/${{ github.repository_owner }}/${{ matrix.sub_routes.repo }}:latest" >> $GITHUB_ENV echo "Git_Latest=ghcr.io/${{ github.repository_owner }}/fastgpt:latest" >> $GITHUB_ENV
echo "Ali_Tag=${{ secrets.ALI_IMAGE_NAME }}/${{ matrix.sub_routes.repo }}:latest" >> $GITHUB_ENV echo "Ali_Tag=${{ secrets.ALI_IMAGE_NAME }}/fastgpt:latest" >> $GITHUB_ENV
echo "Ali_Latest=${{ secrets.ALI_IMAGE_NAME }}/${{ matrix.sub_routes.repo }}:latest" >> $GITHUB_ENV echo "Ali_Latest=${{ secrets.ALI_IMAGE_NAME }}/fastgpt:latest" >> $GITHUB_ENV
echo "Docker_Hub_Tag=${{ secrets.DOCKER_IMAGE_NAME }}/${{ matrix.sub_routes.repo }}:latest" >> $GITHUB_ENV echo "Docker_Hub_Tag=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt:latest" >> $GITHUB_ENV
echo "Docker_Hub_Latest=${{ secrets.DOCKER_IMAGE_NAME }}/${{ matrix.sub_routes.repo }}:latest" >> $GITHUB_ENV echo "Docker_Hub_Latest=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt:latest" >> $GITHUB_ENV
else else
echo "Git_Tag=ghcr.io/${{ github.repository_owner }}/${{ matrix.sub_routes.repo }}:${{ github.ref_name }}" >> $GITHUB_ENV echo "Git_Tag=ghcr.io/${{ github.repository_owner }}/fastgpt:${{ github.ref_name }}" >> $GITHUB_ENV
echo "Git_Latest=ghcr.io/${{ github.repository_owner }}/${{ matrix.sub_routes.repo }}:latest" >> $GITHUB_ENV echo "Git_Latest=ghcr.io/${{ github.repository_owner }}/fastgpt:latest" >> $GITHUB_ENV
echo "Ali_Tag=${{ secrets.ALI_IMAGE_NAME }}/${{ matrix.sub_routes.repo }}:${{ github.ref_name }}" >> $GITHUB_ENV echo "Ali_Tag=${{ secrets.ALI_IMAGE_NAME }}/fastgpt:${{ github.ref_name }}" >> $GITHUB_ENV
echo "Ali_Latest=${{ secrets.ALI_IMAGE_NAME }}/${{ matrix.sub_routes.repo }}:latest" >> $GITHUB_ENV echo "Ali_Latest=${{ secrets.ALI_IMAGE_NAME }}/fastgpt:latest" >> $GITHUB_ENV
echo "Docker_Hub_Tag=${{ secrets.DOCKER_IMAGE_NAME }}/${{ matrix.sub_routes.repo }}:${{ github.ref_name }}" >> $GITHUB_ENV echo "Docker_Hub_Tag=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt:${{ github.ref_name }}" >> $GITHUB_ENV
echo "Docker_Hub_Latest=${{ secrets.DOCKER_IMAGE_NAME }}/${{ matrix.sub_routes.repo }}:latest" >> $GITHUB_ENV echo "Docker_Hub_Latest=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt:latest" >> $GITHUB_ENV
fi fi
- name: Create manifest list and push - name: Build and publish image for main branch or tag push event
working-directory: ${{ runner.temp }}/digests env:
DOCKER_REPO_TAGGED: ${{ env.DOCKER_REPO_TAGGED }}
run: | run: |
TAGS="$(echo -e "${Git_Tag}\n${Git_Latest}\n${Ali_Tag}\n${Ali_Latest}\n${Docker_Hub_Tag}\n${Docker_Hub_Latest}")" docker buildx build \
for TAG in $TAGS; do -f projects/app/Dockerfile \
docker buildx imagetools create -t $TAG \ --platform linux/amd64,linux/arm64 \
$(printf 'ghcr.io/${{ github.repository_owner }}/${{ matrix.sub_routes.repo }}@sha256:%s ' *) --label "org.opencontainers.image.source=https://github.com/${{ github.repository_owner }}/FastGPT" \
sleep 5 --label "org.opencontainers.image.description=fastgpt image" \
done --push \
--cache-from=type=local,src=/tmp/.buildx-cache \
--cache-to=type=local,dest=/tmp/.buildx-cache \
-t ${Git_Tag} \
-t ${Git_Latest} \
-t ${Ali_Tag} \
-t ${Ali_Latest} \
-t ${Docker_Hub_Tag} \
-t ${Docker_Hub_Latest} \
.
build-fastgpt-images-sub-route:
runs-on: ubuntu-20.04
steps:
# install env
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 1
- name: Install Dependencies
run: |
sudo apt update && sudo apt install -y nodejs npm
- name: Set up QEMU (optional)
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
driver-opts: network=host
- name: Cache Docker layers
uses: actions/cache@v4
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
# login docker
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GH_PAT }}
- name: Login to Ali Hub
uses: docker/login-action@v2
with:
registry: registry.cn-hangzhou.aliyuncs.com
username: ${{ secrets.ALI_HUB_USERNAME }}
password: ${{ secrets.ALI_HUB_PASSWORD }}
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_HUB_NAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
# Set tag
- name: Set image name and tag
run: |
if [[ "${{ github.ref_name }}" == "main" ]]; then
echo "Git_Tag=ghcr.io/${{ github.repository_owner }}/fastgpt-sub-route:latest" >> $GITHUB_ENV
echo "Git_Latest=ghcr.io/${{ github.repository_owner }}/fastgpt-sub-route:latest" >> $GITHUB_ENV
echo "Ali_Tag=${{ secrets.ALI_IMAGE_NAME }}/fastgpt-sub-route:latest" >> $GITHUB_ENV
echo "Ali_Latest=${{ secrets.ALI_IMAGE_NAME }}/fastgpt-sub-route:latest" >> $GITHUB_ENV
echo "Docker_Hub_Tag=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-sub-route:latest" >> $GITHUB_ENV
echo "Docker_Hub_Latest=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-sub-route:latest" >> $GITHUB_ENV
else
echo "Git_Tag=ghcr.io/${{ github.repository_owner }}/fastgpt-sub-route:${{ github.ref_name }}" >> $GITHUB_ENV
echo "Git_Latest=ghcr.io/${{ github.repository_owner }}/fastgpt-sub-route:latest" >> $GITHUB_ENV
echo "Ali_Tag=${{ secrets.ALI_IMAGE_NAME }}/fastgpt-sub-route:${{ github.ref_name }}" >> $GITHUB_ENV
echo "Ali_Latest=${{ secrets.ALI_IMAGE_NAME }}/fastgpt-sub-route:latest" >> $GITHUB_ENV
echo "Docker_Hub_Tag=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-sub-route:${{ github.ref_name }}" >> $GITHUB_ENV
echo "Docker_Hub_Latest=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-sub-route:latest" >> $GITHUB_ENV
fi
- name: Build and publish image for main branch or tag push event
env:
DOCKER_REPO_TAGGED: ${{ env.DOCKER_REPO_TAGGED }}
run: |
docker buildx build \
-f projects/app/Dockerfile \
--platform linux/amd64,linux/arm64 \
--build-arg base_url=/fastai \
--label "org.opencontainers.image.source=https://github.com/${{ github.repository_owner }}/FastGPT" \
--label "org.opencontainers.image.description=fastgpt image" \
--push \
--cache-from=type=local,src=/tmp/.buildx-cache \
--cache-to=type=local,dest=/tmp/.buildx-cache \
-t ${Git_Tag} \
-t ${Git_Latest} \
-t ${Ali_Tag} \
-t ${Ali_Latest} \
-t ${Docker_Hub_Tag} \
-t ${Docker_Hub_Latest} \
.
build-fastgpt-images-sub-route-gchat:
runs-on: ubuntu-20.04
steps:
# install env
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 1
- name: Install Dependencies
run: |
sudo apt update && sudo apt install -y nodejs npm
- name: Set up QEMU (optional)
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
driver-opts: network=host
- name: Cache Docker layers
uses: actions/cache@v4
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
# login docker
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GH_PAT }}
- name: Login to Ali Hub
uses: docker/login-action@v2
with:
registry: registry.cn-hangzhou.aliyuncs.com
username: ${{ secrets.ALI_HUB_USERNAME }}
password: ${{ secrets.ALI_HUB_PASSWORD }}
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_HUB_NAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
# Set tag
- name: Set image name and tag
run: |
if [[ "${{ github.ref_name }}" == "main" ]]; then
echo "Git_Tag=ghcr.io/${{ github.repository_owner }}/fastgpt-sub-route-gchat:latest" >> $GITHUB_ENV
echo "Git_Latest=ghcr.io/${{ github.repository_owner }}/fastgpt-sub-route-gchat:latest" >> $GITHUB_ENV
echo "Ali_Tag=${{ secrets.ALI_IMAGE_NAME }}/fastgpt-sub-route-gchat:latest" >> $GITHUB_ENV
echo "Ali_Latest=${{ secrets.ALI_IMAGE_NAME }}/fastgpt-sub-route-gchat:latest" >> $GITHUB_ENV
echo "Docker_Hub_Tag=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-sub-route-gchat:latest" >> $GITHUB_ENV
echo "Docker_Hub_Latest=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-sub-route-gchat:latest" >> $GITHUB_ENV
else
echo "Git_Tag=ghcr.io/${{ github.repository_owner }}/fastgpt-sub-route-gchat:${{ github.ref_name }}" >> $GITHUB_ENV
echo "Git_Latest=ghcr.io/${{ github.repository_owner }}/fastgpt-sub-route-gchat:latest" >> $GITHUB_ENV
echo "Ali_Tag=${{ secrets.ALI_IMAGE_NAME }}/fastgpt-sub-route-gchat:${{ github.ref_name }}" >> $GITHUB_ENV
echo "Ali_Latest=${{ secrets.ALI_IMAGE_NAME }}/fastgpt-sub-route-gchat:latest" >> $GITHUB_ENV
echo "Docker_Hub_Tag=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-sub-route-gchat:${{ github.ref_name }}" >> $GITHUB_ENV
echo "Docker_Hub_Latest=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-sub-route-gchat:latest" >> $GITHUB_ENV
fi
- name: Build and publish image for main branch or tag push event
env:
DOCKER_REPO_TAGGED: ${{ env.DOCKER_REPO_TAGGED }}
run: |
docker buildx build \
-f projects/app/Dockerfile \
--platform linux/amd64,linux/arm64 \
--build-arg base_url=/gchat \
--label "org.opencontainers.image.source=https://github.com/${{ github.repository_owner }}/FastGPT" \
--label "org.opencontainers.image.description=fastgpt-sub-route-gchat image" \
--push \
--cache-from=type=local,src=/tmp/.buildx-cache \
--cache-to=type=local,dest=/tmp/.buildx-cache \
-t ${Git_Tag} \
-t ${Git_Latest} \
-t ${Ali_Tag} \
-t ${Ali_Latest} \
-t ${Docker_Hub_Tag} \
-t ${Docker_Hub_Latest} \
.


@ -1,89 +1,70 @@
name: Preview FastGPT images name: Preview FastGPT images
on: on:
pull_request_target: pull_request_target:
paths:
- 'projects/app/**'
- 'packages/**'
workflow_dispatch: workflow_dispatch:
jobs: jobs:
preview-fastgpt-images: preview-fastgpt-images:
permissions: runs-on: ubuntu-20.04
contents: read
packages: write
attestations: write
id-token: write
pull-requests: write
runs-on: ubuntu-24.04
strategy:
matrix:
image: [fastgpt, sandbox, mcp_server]
fail-fast: false # keep building the other images even if one build fails
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v3
with: with:
ref: ${{ github.event.pull_request.head.ref }} ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }} repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0 submodules: recursive # Fetch submodules
token: ${{ secrets.GITHUB_TOKEN }} fetch-depth: 0 # Fetch all history for .GitInfo and .Lastmod
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2 uses: docker/setup-buildx-action@v2
with: with:
driver-opts: network=host driver-opts: network=host
- name: Cache Docker layers - name: Cache Docker layers
uses: actions/cache@v3 uses: actions/cache@v3
with: with:
path: /tmp/.buildx-cache path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}-${{ matrix.image }} key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: | restore-keys: |
${{ runner.os }}-buildx-${{ github.sha }}-
${{ runner.os }}-buildx- ${{ runner.os }}-buildx-
- name: Login to GitHub Container Registry - name: Login to GitHub Container Registry
uses: docker/login-action@v2 uses: docker/login-action@v2
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ github.repository_owner }} username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GH_PAT }}
- name: Set DOCKER_REPO_TAGGED based on branch or tag
- name: Set image config
id: config
run: | run: |
if [[ "${{ matrix.image }}" == "fastgpt" ]]; then echo "DOCKER_REPO_TAGGED=ghcr.io/${{ github.repository_owner }}/fastgpt-pr:${{ github.event.pull_request.head.sha }}" >> $GITHUB_ENV
echo "DOCKERFILE=projects/app/Dockerfile" >> $GITHUB_OUTPUT - name: Build image for PR
echo "DESCRIPTION=fastgpt-pr image" >> $GITHUB_OUTPUT env:
echo "DOCKER_REPO_TAGGED=ghcr.io/${{ github.repository_owner }}/fastgpt-pr:fatsgpt_${{ github.event.pull_request.head.sha }}" >> $GITHUB_OUTPUT DOCKER_REPO_TAGGED: ${{ env.DOCKER_REPO_TAGGED }}
elif [[ "${{ matrix.image }}" == "sandbox" ]]; then
echo "DOCKERFILE=projects/sandbox/Dockerfile" >> $GITHUB_OUTPUT
echo "DESCRIPTION=fastgpt-sandbox-pr image" >> $GITHUB_OUTPUT
echo "DOCKER_REPO_TAGGED=ghcr.io/${{ github.repository_owner }}/fastgpt-pr:fatsgpt_sandbox_${{ github.event.pull_request.head.sha }}" >> $GITHUB_OUTPUT
elif [[ "${{ matrix.image }}" == "mcp_server" ]]; then
echo "DOCKERFILE=projects/mcp_server/Dockerfile" >> $GITHUB_OUTPUT
echo "DESCRIPTION=fastgpt-mcp_server-pr image" >> $GITHUB_OUTPUT
echo "DOCKER_REPO_TAGGED=ghcr.io/${{ github.repository_owner }}/fastgpt-pr:fatsgpt_mcp_server_${{ github.event.pull_request.head.sha }}" >> $GITHUB_OUTPUT
fi
- name: Build ${{ matrix.image }} image for PR
run: | run: |
docker buildx build \ docker buildx build \
-f ${{ steps.config.outputs.DOCKERFILE }} \ -f projects/app/Dockerfile \
--label "org.opencontainers.image.source=https://github.com/${{ github.repository_owner }}/FastGPT" \ --label "org.opencontainers.image.source=https://github.com/${{ github.repository_owner }}/FastGPT" \
--label "org.opencontainers.image.description=${{ steps.config.outputs.DESCRIPTION }}" \ --label "org.opencontainers.image.description=fastgpt-pr image" \
--label "org.opencontainers.image.licenses=Apache" \
--push \ --push \
--cache-from=type=local,src=/tmp/.buildx-cache \ --cache-from=type=local,src=/tmp/.buildx-cache \
--cache-to=type=local,dest=/tmp/.buildx-cache \ --cache-to=type=local,dest=/tmp/.buildx-cache \
-t ${{ steps.config.outputs.DOCKER_REPO_TAGGED }} \ -t ${DOCKER_REPO_TAGGED} \
. .
# Add write md step after build
- name: Write md
run: |
echo "# 🤖 Generated by deploy action" > report.md
echo "📦 Preview Image: \`${DOCKER_REPO_TAGGED}\`" >> report.md
cat report.md
- uses: actions/github-script@v7 - name: Gh Rebot for Sealos
uses: labring/gh-rebot@v0.0.6
if: ${{ (github.event_name == 'pull_request_target') }}
with: with:
github-token: ${{secrets.GITHUB_TOKEN}} version: v0.0.6
script: | env:
github.rest.issues.createComment({ GH_TOKEN: '${{ secrets.GH_PAT }}'
issue_number: context.issue.number, SEALOS_TYPE: 'pr_comment'
owner: context.repo.owner, SEALOS_FILENAME: 'report.md'
repo: context.repo.repo, SEALOS_REPLACE_TAG: 'DEFAULT_REPLACE_DEPLOY'
body: 'Preview ${{ matrix.image }} Image: `${{ steps.config.outputs.DOCKER_REPO_TAGGED }}`'
})


@ -1,32 +0,0 @@
name: 'FastGPT-Test'
on:
pull_request:
workflow_dispatch:
jobs:
test:
runs-on: ubuntu-latest
permissions:
# Required to checkout the code
contents: read
# Required to put a comment into the pull-request
pull-requests: write
steps:
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
- uses: pnpm/action-setup@v4
with:
version: 10
- name: 'Install Deps'
run: pnpm install
- name: 'Test'
run: pnpm run test
- name: 'Report Coverage'
# Set if: always() to also generate the report if tests are failing
# Only works if you set `reportOnFailure: true` in your vite config as specified above
if: always()
uses: davelosert/vitest-coverage-report-action@v2


@ -8,12 +8,7 @@ on:
jobs: jobs:
helm: helm:
permissions: runs-on: ubuntu-20.04
packages: write
contents: read
attestations: write
id-token: write
runs-on: ubuntu-24.04
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
@ -25,7 +20,7 @@ jobs:
run: echo "tag=$(git describe --tags)" >> $GITHUB_OUTPUT run: echo "tag=$(git describe --tags)" >> $GITHUB_OUTPUT
- name: Release Helm - name: Release Helm
run: | run: |
echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io -u ${{ github.repository_owner }} --password-stdin echo ${{ secrets.GH_PAT }} | helm registry login ghcr.io -u ${{ github.repository_owner }} --password-stdin
export APP_VERSION=${{ steps.vars.outputs.tag }} export APP_VERSION=${{ steps.vars.outputs.tag }}
export HELM_VERSION=${{ steps.vars.outputs.tag }} export HELM_VERSION=${{ steps.vars.outputs.tag }}
export HELM_REPO=ghcr.io/${{ github.repository_owner }} export HELM_REPO=ghcr.io/${{ github.repository_owner }}


@ -1,151 +0,0 @@
name: Build fastgpt-mcp-server images
on:
workflow_dispatch:
push:
paths:
- 'projects/mcp_server/**'
tags:
- 'v*'
jobs:
build-fastgpt-mcp_server-images:
permissions:
packages: write
contents: read
attestations: write
id-token: write
strategy:
matrix:
include:
- arch: amd64
- arch: arm64
runs-on: ubuntu-24.04-arm
runs-on: ${{ matrix.runs-on || 'ubuntu-24.04' }}
steps:
# install env
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver-opts: network=host
- name: Cache Docker layers
uses: actions/cache@v4
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-mcp-server-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-mcp_server-buildx-
# login docker
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Ali Hub
uses: docker/login-action@v3
with:
registry: registry.cn-hangzhou.aliyuncs.com
username: ${{ secrets.ALI_HUB_USERNAME }}
password: ${{ secrets.ALI_HUB_PASSWORD }}
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_NAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Build for ${{ matrix.arch }}
id: build
uses: docker/build-push-action@v6
with:
context: .
file: projects/mcp_server/Dockerfile
platforms: linux/${{ matrix.arch }}
labels: |
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.description=fastgpt-mcp_server image
outputs: type=image,"name=ghcr.io/${{ github.repository_owner }}/fastgpt-mcp_server,${{ secrets.ALI_IMAGE_NAME }}/fastgpt-mcp_server,${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-mcp_server",push-by-digest=true,push=true
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Export digest
run: |
mkdir -p ${{ runner.temp }}/digests
digest="${{ steps.build.outputs.digest }}"
touch "${{ runner.temp }}/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
with:
name: digests-fastgpt-mcp_server-${{ github.sha }}-${{ matrix.arch }}
path: ${{ runner.temp }}/digests/*
if-no-files-found: error
retention-days: 1
release-fastgpt-mcp_server-images:
permissions:
packages: write
contents: read
attestations: write
id-token: write
needs: build-fastgpt-mcp_server-images
runs-on: ubuntu-24.04
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Ali Hub
uses: docker/login-action@v3
with:
registry: registry.cn-hangzhou.aliyuncs.com
username: ${{ secrets.ALI_HUB_USERNAME }}
password: ${{ secrets.ALI_HUB_PASSWORD }}
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_NAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Download digests
uses: actions/download-artifact@v4
with:
path: ${{ runner.temp }}/digests
pattern: digests-fastgpt-mcp_server-${{ github.sha }}-*
merge-multiple: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set image name and tag
run: |
if [[ "${{ github.ref_name }}" == "main" ]]; then
echo "Git_Tag=ghcr.io/${{ github.repository_owner }}/fastgpt-mcp_server:latest" >> $GITHUB_ENV
echo "Git_Latest=ghcr.io/${{ github.repository_owner }}/fastgpt-mcp_server:latest" >> $GITHUB_ENV
echo "Ali_Tag=${{ secrets.ALI_IMAGE_NAME }}/fastgpt-mcp_server:latest" >> $GITHUB_ENV
echo "Ali_Latest=${{ secrets.ALI_IMAGE_NAME }}/fastgpt-mcp_server:latest" >> $GITHUB_ENV
echo "Docker_Hub_Tag=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-mcp_server:latest" >> $GITHUB_ENV
echo "Docker_Hub_Latest=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-mcp_server:latest" >> $GITHUB_ENV
else
echo "Git_Tag=ghcr.io/${{ github.repository_owner }}/fastgpt-mcp_server:${{ github.ref_name }}" >> $GITHUB_ENV
echo "Git_Latest=ghcr.io/${{ github.repository_owner }}/fastgpt-mcp_server:latest" >> $GITHUB_ENV
echo "Ali_Tag=${{ secrets.ALI_IMAGE_NAME }}/fastgpt-mcp_server:${{ github.ref_name }}" >> $GITHUB_ENV
echo "Ali_Latest=${{ secrets.ALI_IMAGE_NAME }}/fastgpt-mcp_server:latest" >> $GITHUB_ENV
echo "Docker_Hub_Tag=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-mcp_server:${{ github.ref_name }}" >> $GITHUB_ENV
echo "Docker_Hub_Latest=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-mcp_server:latest" >> $GITHUB_ENV
fi
- name: Create manifest list and push
working-directory: ${{ runner.temp }}/digests
run: |
TAGS="$(echo -e "${Git_Tag}\n${Git_Latest}\n${Ali_Tag}\n${Ali_Latest}\n${Docker_Hub_Tag}\n${Docker_Hub_Latest}")"
for TAG in $TAGS; do
docker buildx imagetools create -t $TAG \
$(printf 'ghcr.io/${{ github.repository_owner }}/fastgpt-mcp_server@sha256:%s ' *)
sleep 5
done


@ -8,120 +8,50 @@ on:
- 'v*' - 'v*'
jobs: jobs:
build-fastgpt-sandbox-images: build-fastgpt-sandbox-images:
permissions: runs-on: ubuntu-20.04
packages: write
contents: read
attestations: write
id-token: write
strategy:
matrix:
include:
- arch: amd64
- arch: arm64
runs-on: ubuntu-24.04-arm
runs-on: ${{ matrix.runs-on || 'ubuntu-24.04' }}
steps: steps:
# install env # install env
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Install Dependencies
run: |
sudo apt update && sudo apt install -y nodejs npm
- name: Set up QEMU (optional)
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3 uses: docker/setup-buildx-action@v2
with: with:
driver-opts: network=host driver-opts: network=host
- name: Cache Docker layers - name: Cache Docker layers
uses: actions/cache@v4 uses: actions/cache@v4
with: with:
path: /tmp/.buildx-cache path: /tmp/.buildx-cache
key: ${{ runner.os }}-sandbox-buildx-${{ github.sha }} key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: | restore-keys: |
${{ runner.os }}-sandbox-buildx- ${{ runner.os }}-buildx-
# login docker # login docker
- name: Login to GitHub Container Registry - name: Login to GitHub Container Registry
uses: docker/login-action@v3 uses: docker/login-action@v2
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ github.repository_owner }} username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GH_PAT }}
- name: Login to Ali Hub - name: Login to Ali Hub
uses: docker/login-action@v3 uses: docker/login-action@v2
with: with:
registry: registry.cn-hangzhou.aliyuncs.com registry: registry.cn-hangzhou.aliyuncs.com
username: ${{ secrets.ALI_HUB_USERNAME }} username: ${{ secrets.ALI_HUB_USERNAME }}
password: ${{ secrets.ALI_HUB_PASSWORD }} password: ${{ secrets.ALI_HUB_PASSWORD }}
- name: Login to Docker Hub - name: Login to Docker Hub
uses: docker/login-action@v3 uses: docker/login-action@v2
with: with:
username: ${{ secrets.DOCKER_HUB_NAME }} username: ${{ secrets.DOCKER_HUB_NAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }} password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Build for ${{ matrix.arch }} # Set tag
id: build
uses: docker/build-push-action@v6
with:
context: .
file: projects/sandbox/Dockerfile
platforms: linux/${{ matrix.arch }}
labels: |
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.description=fastgpt-sandbox image
outputs: type=image,"name=ghcr.io/${{ github.repository_owner }}/fastgpt-sandbox,${{ secrets.ALI_IMAGE_NAME }}/fastgpt-sandbox,${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-sandbox",push-by-digest=true,push=true
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Export digest
run: |
mkdir -p ${{ runner.temp }}/digests
digest="${{ steps.build.outputs.digest }}"
touch "${{ runner.temp }}/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
with:
name: digests-fastgpt-sandbox-${{ github.sha }}-${{ matrix.arch }}
path: ${{ runner.temp }}/digests/*
if-no-files-found: error
retention-days: 1
release-fastgpt-sandbox-images:
permissions:
packages: write
contents: read
attestations: write
id-token: write
needs: build-fastgpt-sandbox-images
runs-on: ubuntu-24.04
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Ali Hub
uses: docker/login-action@v3
with:
registry: registry.cn-hangzhou.aliyuncs.com
username: ${{ secrets.ALI_HUB_USERNAME }}
password: ${{ secrets.ALI_HUB_PASSWORD }}
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_NAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Download digests
uses: actions/download-artifact@v4
with:
path: ${{ runner.temp }}/digests
pattern: digests-fastgpt-sandbox-${{ github.sha }}-*
merge-multiple: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set image name and tag - name: Set image name and tag
run: | run: |
if [[ "${{ github.ref_name }}" == "main" ]]; then if [[ "${{ github.ref_name }}" == "main" ]]; then
@ -140,12 +70,27 @@ jobs:
echo "Docker_Hub_Latest=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-sandbox:latest" >> $GITHUB_ENV echo "Docker_Hub_Latest=${{ secrets.DOCKER_IMAGE_NAME }}/fastgpt-sandbox:latest" >> $GITHUB_ENV
fi fi
- name: Create manifest list and push - name: Build and publish image for main branch or tag push event
working-directory: ${{ runner.temp }}/digests env:
Git_Tag: ${{ env.Git_Tag }}
Git_Latest: ${{ env.Git_Latest }}
Ali_Tag: ${{ env.Ali_Tag }}
Ali_Latest: ${{ env.Ali_Latest }}
Docker_Hub_Tag: ${{ env.Docker_Hub_Tag }}
Docker_Hub_Latest: ${{ env.Docker_Hub_Latest }}
run: | run: |
TAGS="$(echo -e "${Git_Tag}\n${Git_Latest}\n${Ali_Tag}\n${Ali_Latest}\n${Docker_Hub_Tag}\n${Docker_Hub_Latest}")" docker buildx build \
for TAG in $TAGS; do -f projects/sandbox/Dockerfile \
docker buildx imagetools create -t $TAG \ --platform linux/amd64,linux/arm64 \
$(printf 'ghcr.io/${{ github.repository_owner }}/fastgpt-sandbox@sha256:%s ' *) --label "org.opencontainers.image.source=https://github.com/${{ github.repository_owner }}/fastgpt-sandbox" \
sleep 5 --label "org.opencontainers.image.description=fastgpt-sandbox image" \
done --push \
--cache-from=type=local,src=/tmp/.buildx-cache \
--cache-to=type=local,dest=/tmp/.buildx-cache \
-t ${Git_Tag} \
-t ${Git_Latest} \
-t ${Ali_Tag} \
-t ${Ali_Latest} \
-t ${Docker_Hub_Tag} \
-t ${Docker_Hub_Latest} \
.

.gitignore (vendored, 1 line changed)

@ -44,4 +44,3 @@ files/helm/fastgpt/fastgpt-0.1.0.tgz
files/helm/fastgpt/charts/*.tgz files/helm/fastgpt/charts/*.tgz
tmp/ tmp/
coverage


@ -5,6 +5,4 @@ node_modules
docSite/ docSite/
*.md *.md
pnpm-lock.yaml
cl100l_base.ts cl100l_base.ts
dict.json


@ -16,6 +16,16 @@ usageMatchRegex:
# the `{key}` will be placed by a proper keypath matching regex, # the `{key}` will be placed by a proper keypath matching regex,
# you can ignore it and use your own matching rules as well # you can ignore it and use your own matching rules as well
- "[^\\w\\d]t\\(['\"`]({key})['\"`]" - "[^\\w\\d]t\\(['\"`]({key})['\"`]"
- "[^\\w\\d]commonT\\(['\"`]({key})['\"`]"
# supports appT("your.i18n.keys")
- "[^\\w\\d]appT\\(['\"`]({key})['\"`]"
# supports datasetT("your.i18n.keys")
- "[^\\w\\d]datasetT\\(['\"`]({key})['\"`]"
- "[^\\w\\d]fileT\\(['\"`]({key})['\"`]"
- "[^\\w\\d]publishT\\(['\"`]({key})['\"`]"
- "[^\\w\\d]workflowT\\(['\"`]({key})['\"`]"
- "[^\\w\\d]userT\\(['\"`]({key})['\"`]"
- "[^\\w\\d]chatT\\(['\"`]({key})['\"`]"
- "[^\\w\\d]i18nT\\(['\"`]({key})['\"`]" - "[^\\w\\d]i18nT\\(['\"`]({key})['\"`]"
# A RegEx to set a custom scope range. This scope will be used as a prefix when detecting keys # A RegEx to set a custom scope range. This scope will be used as a prefix when detecting keys
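The added usageMatchRegex entries teach i18n-ally to detect keys passed to FastGPT's namespaced translation helpers (commonT, appT, datasetT, chatT, and so on) in addition to plain t() calls. A minimal TypeScript sketch of the call shapes those regexes match, where useI18n and the key strings are made-up placeholders:

// Stand-in i18n factory; only the call shapes matter for the regexes above.
const useI18n = (ns: string) => (key: string) => `${ns}:${key}`;

const appT = useI18n('app');
const datasetT = useI18n('dataset');
const chatT = useI18n('chat');

console.log(appT('app.some_title'));        // picked up by the appT(...) pattern
console.log(datasetT('dataset.some_list')); // picked up by the datasetT(...) pattern
console.log(chatT('chat.some_tip'));        // picked up by the chatT(...) pattern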

.vscode/launch.json (vendored, deleted, 39 lines)

@ -1,39 +0,0 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "Next.js: debug server-side",
"type": "node-terminal",
"request": "launch",
"command": "pnpm run dev",
"cwd": "${workspaceFolder}/projects/app"
},
{
"name": "Next.js: debug client-side",
"type": "chrome",
"request": "launch",
"url": "http://localhost:3000"
},
{
"name": "Next.js: debug client-side (Edge)",
"type": "msedge",
"request": "launch",
"url": "http://localhost:3000"
},
{
"name": "Next.js: debug full stack",
"type": "node-terminal",
"request": "launch",
"command": "pnpm run dev",
"cwd": "${workspaceFolder}/projects/app",
"skipFiles": ["<node_internals>/**"],
"serverReadyAction": {
"action": "debugWithEdge",
"killOnServerStop": true,
"pattern": "- Local:.+(https?://.+)",
"uriFormat": "%s",
"webRoot": "${workspaceFolder}/projects/app"
}
}
]
}


@ -52,16 +52,70 @@
"description": "FastGPT usecontext template" "description": "FastGPT usecontext template"
}, },
"Vitest test case template": { "Jest test template": {
"scope": "typescript", "scope": "typescriptreact",
"prefix": "template_test", "prefix": "jesttest",
"body": [ "body": [
"import { describe, it, expect } from 'vitest';", "import '@/pages/api/__mocks__/base';",
"import { root } from '@/pages/api/__mocks__/db/init';",
"import { getTestRequest } from '@fastgpt/service/test/utils'; ;",
"import { AppErrEnum } from '@fastgpt/global/common/error/code/app';",
"import handler from './demo';",
"", "",
"describe('authType2UsageSource', () => {", "// Import the schema",
" it('Test description', () => {", "import { MongoOutLink } from '@fastgpt/service/support/outLink/schema';",
" expect().toBe();", "",
" });", "beforeAll(async () => {",
" // await MongoOutLink.create({",
" // shareId: 'aaa',",
" // appId: root.appId,",
" // tmbId: root.tmbId,",
" // teamId: root.teamId,",
" // type: 'share',",
" // name: 'aaa'",
" // })",
"});",
"",
"test('Should return a list of outLink', async () => {",
" // Mock request",
" const res = (await handler(",
" ...getTestRequest({",
" query: {",
" appId: root.appId,",
" type: 'share'",
" },",
" user: root",
" })",
" )) as any;",
"",
" expect(res.code).toBe(200);",
" expect(res.data.length).toBe(2);",
"});",
"",
"test('appId is required', async () => {",
" const res = (await handler(",
" ...getTestRequest({",
" query: {",
" type: 'share'",
" },",
" user: root",
" })",
" )) as any;",
" expect(res.code).toBe(500);",
" expect(res.error).toBe(AppErrEnum.unExist);",
"});",
"",
"test('if type is not provided, return nothing', async () => {",
" const res = (await handler(",
" ...getTestRequest({",
" query: {",
" appId: root.appId",
" },",
" user: root",
" })",
" )) as any;",
" expect(res.code).toBe(200);",
" expect(res.data.length).toBe(0);",
"});" "});"
] ]
} }


@ -21,11 +21,13 @@
"i18n-ally.namespace": true, "i18n-ally.namespace": true,
"i18n-ally.pathMatcher": "{locale}/{namespaces}.json", "i18n-ally.pathMatcher": "{locale}/{namespaces}.json",
"i18n-ally.extract.targetPickingStrategy": "most-similar-by-key", "i18n-ally.extract.targetPickingStrategy": "most-similar-by-key",
"i18n-ally.translate.engines": ["deepl","google"], "i18n-ally.translate.engines": ["google"],
"[typescript]": { "[typescript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode" "editor.defaultFormatter": "esbenp.prettier-vscode"
}, },
"markdown.copyFiles.destination": { "markdown.copyFiles.destination": {
"/docSite/content/**/*": "${documentWorkspaceFolder}/docSite/assets/imgs/" "/docSite/content/**/*": "${documentWorkspaceFolder}/docSite/assets/imgs/"
} },
"markdown.copyFiles.overwriteBehavior": "nameIncrementally",
"markdown.copyFiles.transformPath": "const filename = uri.path.split('/').pop(); return `/imgs/${filename}`;"
} }
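The new markdown.copyFiles.transformPath setting keeps only the file name of a pasted image and points the generated link at /imgs/. The same logic as a small TypeScript sketch, with a made-up sample path:

// Same logic as the inline transformPath snippet in settings.json.
function transformPath(uriPath: string): string {
  const filename = uriPath.split('/').pop();
  return `/imgs/${filename}`;
}

console.log(transformPath('/docSite/assets/imgs/screenshot.png')); // -> /imgs/screenshot.png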


@ -10,7 +10,7 @@
<a href="./README_ja.md">日语</a> <a href="./README_ja.md">日语</a>
</p> </p>
FastGPT is an AI Agent building platform that offers out-of-the-box data processing, model invocation and more, and supports visual workflow orchestration via Flow to build complex application scenarios! FastGPT is a knowledge-base question-answering system built on large language models (LLMs) that offers out-of-the-box data processing, model invocation and more. Visual workflow orchestration via Flow enables complex question-answering scenarios!
</div> </div>
@ -114,6 +114,16 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right"> <img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
</a> </a>
## 🏘️ Community chat group
Scan the QR code to join the Feishu topic group:
![](https://oss.laf.run/otnvvf-imgs/fastgpt-feishu1.png)
<a href="#readme">
<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
</a>
## 🏘️ Join us
We are looking for like-minded partners to accelerate FastGPT's development. You can learn about FastGPT's openings via [FastGPT 2025 Recruitment](https://fael3z0zfze.feishu.cn/wiki/P7FOwEmPziVcaYkvVaacnVX1nvg).
@ -123,26 +133,17 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
- [Laf: connect third-party applications in 3 minutes](https://github.com/labring/laf)
- [Sealos: quickly deploy cluster applications](https://github.com/labring/sealos)
- [One API: multi-model management; supports Azure, Wenxin Yiyan (ERNIE Bot), and more](https://github.com/songquanpeng/one-api)
- [TuShan: build an admin dashboard in 5 minutes](https://github.com/msgbyte/tushan)
<a href="#readme"> <a href="#readme">
<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right"> <img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
</a> </a>
## 🌿 Third-party ecosystem
- [PPIO: one-click access to cost-effective open-source model APIs and GPU containers](https://ppinfra.com/user/register?invited_by=VITYVU&utm_source=github_fastgpt)
- [AI Proxy: model aggregation service for mainland China](https://sealos.run/aiproxy/?k=fastgpt-github/)
- [SiliconCloud: an online platform for trying open-source models](https://cloud.siliconflow.cn/i/TR9Ym0c4)
- [COW: personal WeChat / WeCom bot](https://doc.tryfastgpt.ai/docs/use-cases/external-integration/onwechat/)
- [SiliconCloud: an online platform for trying open-source models](https://cloud.siliconflow.cn/i/TR9Ym0c4)
<a href="#readme">
<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
</a>
## 🏘️ Community chat group
Scan the QR code to join the Feishu topic group:
![](https://oss.laf.run/otnvvf-imgs/fastgpt-feishu1.png)
<a href="#readme"> <a href="#readme">
<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right"> <img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">


@ -69,7 +69,7 @@ Project tech stack: NextJs + TS + ChakraUI + MongoDB + PostgreSQL (PG Vector plu
> When using [Sealos](https://sealos.io) services, there is no need to purchase servers or domain names. It supports high concurrency and dynamic scaling, and the database application uses the kubeblocks database, which far exceeds the simple Docker container deployment in terms of IO performance. > When using [Sealos](https://sealos.io) services, there is no need to purchase servers or domain names. It supports high concurrency and dynamic scaling, and the database application uses the kubeblocks database, which far exceeds the simple Docker container deployment in terms of IO performance.
<div align="center"> <div align="center">
[![](https://cdn.jsdelivr.net/gh/labring-actions/templates@main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-fastdeploy%3FtemplateName%3Dfastgpt&uid=fnWRt09fZP) [![](https://cdn.jsdelivr.net/gh/labring-actions/templates@main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-fastdeploy%3FtemplateName%3Dfastgpt)
</div> </div>
Give it a 2-4 minute wait after deployment as it sets up the database. Initially, it might be a too slow since we're using the basic settings. Give it a 2-4 minute wait after deployment as it sets up the database. Initially, it might be a too slow since we're using the basic settings.


@ -94,7 +94,7 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
- **⚡ Deploy**
[![](https://cdn.jsdelivr.net/gh/labring-actions/templates@main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-fastdeploy%3FtemplateName%3Dfastgpt&uid=fnWRt09fZP) [![](https://cdn.jsdelivr.net/gh/labring-actions/templates@main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-fastdeploy%3FtemplateName%3Dfastgpt)
After deployment, wait 2-4 minutes while the database is set up. It may be a little slow at first because the basic settings are used.


@ -100,7 +100,7 @@ services:
exec docker-entrypoint.sh "$$@" & exec docker-entrypoint.sh "$$@" &
# 等待MongoDB服务启动 # 等待MongoDB服务启动
until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')"; do until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')" > /dev/null 2>&1; do
echo "Waiting for MongoDB to start..." echo "Waiting for MongoDB to start..."
sleep 2 sleep 2
done done
@ -110,48 +110,19 @@ services:
# 等待docker-entrypoint.sh脚本执行的MongoDB服务进程 # 等待docker-entrypoint.sh脚本执行的MongoDB服务进程
wait $$! wait $$!
redis:
image: redis:7.2-alpine
container_name: redis
# ports:
# - 6379:6379
networks:
- fastgpt
-    restart: always
-    command: |
-      redis-server --requirepass mypassword --loglevel warning --maxclients 10000 --appendonly yes --save 60 10 --maxmemory 4gb --maxmemory-policy noeviction
-    healthcheck:
-      test: ['CMD', 'redis-cli', '-a', 'mypassword', 'ping']
-      interval: 10s
-      timeout: 3s
-      retries: 3
-      start_period: 30s
-    volumes:
-      - ./redis/data:/data
  # fastgpt
  sandbox:
    container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10-fix2 # 阿里云
+    image: ghcr.io/labring/fastgpt-sandbox:v4.8.23-fix # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.23-fix # 阿里云
    networks:
      - fastgpt
    restart: always
-  fastgpt-mcp-server:
-    container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10-fix2 # 阿里云
-    ports:
-      - 3005:3000
-    networks:
-      - fastgpt
-    restart: always
-    environment:
-      - FASTGPT_ENDPOINT=http://fastgpt:3000
  fastgpt:
    container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10-fix2 # 阿里云
+    image: ghcr.io/labring/fastgpt:v4.8.23-fix # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.23-fix # 阿里云
    ports:
      - 3000:3000
    networks:
@@ -166,13 +137,10 @@ services:
      - FE_DOMAIN=
      # root 密码,用户名为: root。如果需要修改 root 密码,直接修改这个环境变量,并重启即可。
      - DEFAULT_ROOT_PSW=1234
-      # AI Proxy 的地址,如果配了该地址,优先使用
-      - AIPROXY_API_ENDPOINT=http://aiproxy:3000
-      # AI Proxy 的 Admin Token,与 AI Proxy 中的环境变量 ADMIN_KEY
-      - AIPROXY_API_TOKEN=aiproxy
-      # 模型中转地址(如果用了 AI Proxy,下面 2 个就不需要了,旧版 OneAPI 用户,使用下面的变量)
-      # - OPENAI_BASE_URL=http://oneapi:3000/v1
-      # - CHAT_API_KEY=sk-fastgpt
+      # AI模型的API地址哦。务必加 /v1。这里默认填写了OneApi的访问地址。
+      - OPENAI_BASE_URL=http://oneapi:3000/v1
+      # AI模型的API Key。这里默认填写了OneAPI的快速默认key,测试通后务必及时修改
+      - CHAT_API_KEY=sk-fastgpt
      # 数据库最大连接数
      - DB_MAX_LINK=30
      # 登录凭证密钥
@@ -186,8 +154,6 @@ services:
      # zilliz 连接参数
      - MILVUS_ADDRESS=http://milvusStandalone:19530
      - MILVUS_TOKEN=none
-      # Redis 地址
-      - REDIS_URL=redis://default:mypassword@redis:6379
      # sandbox 地址
      - SANDBOX_URL=http://sandbox:3000
      # 日志等级: debug, info, warn, error
@@ -201,58 +167,51 @@ services:
      - ALLOWED_ORIGINS=
      # 是否开启IP限制,默认不开启
      - USE_IP_LIMIT=false
-      # 对话文件过期天数
-      - CHAT_FILE_EXPIRE_TIME=7
    volumes:
      - ./config.json:/app/data/config.json
-  # AI Proxy
-  aiproxy:
-    image: ghcr.io/labring/aiproxy:v0.1.7
-    # image: registry.cn-hangzhou.aliyuncs.com/labring/aiproxy:v0.1.7 # 阿里云
-    container_name: aiproxy
-    restart: unless-stopped
-    depends_on:
-      aiproxy_pg:
-        condition: service_healthy
-    networks:
-      - fastgpt
-    environment:
-      # 对应 fastgpt 里的AIPROXY_API_TOKEN
-      - ADMIN_KEY=aiproxy
-      # 错误日志详情保存时间(小时)
-      - LOG_DETAIL_STORAGE_HOURS=1
-      # 数据库连接地址
-      - SQL_DSN=postgres://postgres:aiproxy@aiproxy_pg:5432/aiproxy
-      # 最大重试次数
-      - RETRY_TIMES=3
-      # 不需要计费
-      - BILLING_ENABLED=false
-      # 不需要严格检测模型
-      - DISABLE_MODEL_CONFIG=true
-    healthcheck:
-      test: ['CMD', 'curl', '-f', 'http://localhost:3000/api/status']
-      interval: 5s
-      timeout: 5s
-      retries: 10
-  aiproxy_pg:
-    image: pgvector/pgvector:0.8.0-pg15 # docker hub
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.8.0-pg15 # 阿里云
-    restart: unless-stopped
-    container_name: aiproxy_pg
-    volumes:
-      - ./aiproxy_pg:/var/lib/postgresql/data
-    networks:
-      - fastgpt
-    environment:
-      TZ: Asia/Shanghai
-      POSTGRES_USER: postgres
-      POSTGRES_DB: aiproxy
-      POSTGRES_PASSWORD: aiproxy
-    healthcheck:
-      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'aiproxy']
-      interval: 5s
-      timeout: 5s
-      retries: 10
+  # oneapi
+  mysql:
+    image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mysql:8.0.36 # 阿里云
+    # image: mysql:8.0.36
+    container_name: mysql
+    restart: always
+    ports:
+      - 3306:3306
+    networks:
+      - fastgpt
+    command: --default-authentication-plugin=mysql_native_password
+    environment:
+      # 默认root密码,仅首次运行有效
+      MYSQL_ROOT_PASSWORD: oneapimmysql
+      MYSQL_DATABASE: oneapi
+    volumes:
+      - ./mysql:/var/lib/mysql
+  oneapi:
+    container_name: oneapi
+    image: ghcr.io/songquanpeng/one-api:v0.6.7
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/one-api:v0.6.6 # 阿里云
+    ports:
+      - 3001:3000
+    depends_on:
+      - mysql
+    networks:
+      - fastgpt
+    restart: always
+    environment:
+      # mysql 连接参数
+      - SQL_DSN=root:oneapimmysql@tcp(mysql:3306)/oneapi
+      # 登录凭证加密密钥
+      - SESSION_SECRET=oneapikey
+      # 内存缓存
+      - MEMORY_CACHE_ENABLED=true
+      # 启动聚合更新,减少数据交互频率
+      - BATCH_UPDATE_ENABLED=true
+      # 聚合更新时长
+      - BATCH_UPDATE_INTERVAL=10
+      # 初始化的 root 密钥(建议部署完后更改,否则容易泄露)
+      - INITIAL_ROOT_TOKEN=fastgpt
+    volumes:
+      - ./oneapi:/data
networks:
  fastgpt:
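Whichever side of this compose diff you deploy, the stack can be brought up and smoke-tested the same way. A minimal sketch, assuming the file is saved as docker-compose.yml in the current directory and the default port mappings are kept:

```bash
# Start the stack in the background
docker compose up -d

# Watch container status and health checks
docker compose ps

# FastGPT is mapped to host port 3000 in both versions of the file
curl -sf http://localhost:3000 > /dev/null && echo "fastgpt is reachable"
```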


@@ -1,234 +0,0 @@
# 数据库的默认账号和密码仅首次运行时设置有效
# 如果修改了账号密码,记得改数据库和项目连接参数,别只改一处~
# 该配置文件只是给快速启动,测试使用。正式使用,记得务必修改账号密码,以及调整合适的知识库参数,共享内存等。
# 如何无法访问 dockerhub 和 git可以用阿里云阿里云没有arm包
version: '3.3'
services:
# vector db
ob:
image: oceanbase/oceanbase-ce:4.3.5-lts # docker hub
# image: quay.io/oceanbase/oceanbase-ce:4.3.5-lts # 镜像
container_name: ob
restart: always
# ports: # 生产环境建议不要暴露
# - 2881:2881
networks:
- fastgpt
environment:
# 这里的配置只有首次运行生效。修改后,重启镜像是不会生效的。需要把持久化数据删除再重启,才有效果
- OB_SYS_PASSWORD=obsyspassword
# 不同于传统数据库OceanBase 数据库的账号包含更多字段,包括用户名、租户名和集群名。经典格式为“用户名@租户名#集群名”
# 比如用mysql客户端连接时根据本文件的默认配置应该指定 “-uroot@tenantname”
- OB_TENANT_NAME=tenantname
- OB_TENANT_PASSWORD=tenantpassword
# MODE分为MINI和NORMAL 后者会最大程度使用主机资源
- MODE=MINI
- OB_SERVER_IP=127.0.0.1
# 更多环境变量配置见oceanbase官方文档 https://www.oceanbase.com/docs/common-oceanbase-database-cn-1000000002013494
volumes:
- ./ob/data:/root/ob
- ./ob/config:/root/.obd/cluster
- ./init.sql:/root/boot/init.d/init.sql
healthcheck:
# obclient -h127.0.0.1 -P2881 -uroot@tenantname -ptenantpassword -e "SELECT 1;"
test:
[
'CMD-SHELL',
'obclient -h$${OB_SERVER_IP} -P2881 -uroot@$${OB_TENANT_NAME} -p$${OB_TENANT_PASSWORD} -e "SELECT 1;"'
]
interval: 30s
timeout: 10s
retries: 1000
start_period: 10s
mongo:
image: mongo:5.0.18 # dockerhub
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mongo:5.0.18 # 阿里云
# image: mongo:4.4.29 # cpu不支持AVX时候使用
container_name: mongo
restart: always
# ports:
# - 27017:27017
networks:
- fastgpt
command: mongod --keyFile /data/mongodb.key --replSet rs0
environment:
- MONGO_INITDB_ROOT_USERNAME=myusername
- MONGO_INITDB_ROOT_PASSWORD=mypassword
volumes:
- ./mongo/data:/data/db
entrypoint:
- bash
- -c
- |
openssl rand -base64 128 > /data/mongodb.key
chmod 400 /data/mongodb.key
chown 999:999 /data/mongodb.key
echo 'const isInited = rs.status().ok === 1
if(!isInited){
rs.initiate({
_id: "rs0",
members: [
{ _id: 0, host: "mongo:27017" }
]
})
}' > /data/initReplicaSet.js
# 启动MongoDB服务
exec docker-entrypoint.sh "$$@" &
# 等待MongoDB服务启动
until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')"; do
echo "Waiting for MongoDB to start..."
sleep 2
done
# 执行初始化副本集的脚本
mongo -u myusername -p mypassword --authenticationDatabase admin /data/initReplicaSet.js
# 等待docker-entrypoint.sh脚本执行的MongoDB服务进程
wait $$!
redis:
image: redis:7.2-alpine
container_name: redis
# ports:
# - 6379:6379
networks:
- fastgpt
restart: always
command: |
redis-server --requirepass mypassword --loglevel warning --maxclients 10000 --appendonly yes --save 60 10 --maxmemory 4gb --maxmemory-policy noeviction
volumes:
- ./redis/data:/data
healthcheck:
test: ['CMD', 'redis-cli', '-a', 'mypassword', 'ping']
interval: 10s
timeout: 3s
retries: 3
start_period: 30s
# fastgpt
sandbox:
container_name: sandbox
image: ghcr.io/labring/fastgpt-sandbox:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10-fix2 # 阿里云
networks:
- fastgpt
restart: always
fastgpt-mcp-server:
container_name: fastgpt-mcp-server
image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10-fix2 # 阿里云
ports:
- 3005:3000
networks:
- fastgpt
restart: always
environment:
- FASTGPT_ENDPOINT=http://fastgpt:3000
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10-fix2 # 阿里云
ports:
- 3000:3000
networks:
- fastgpt
depends_on:
mongo:
condition: service_started
ob:
condition: service_healthy
sandbox:
condition: service_started
restart: always
environment:
# 前端外部可访问的地址,用于自动补全文件资源路径。例如 https:fastgpt.cn不能填 localhost。这个值可以不填不填则发给模型的图片会是一个相对路径而不是全路径模型可能伪造Host。
- FE_DOMAIN=
# root 密码,用户名为: root。如果需要修改 root 密码,直接修改这个环境变量,并重启即可。
- DEFAULT_ROOT_PSW=1234
# # AI Proxy 的地址,如果配了该地址,优先使用
- AIPROXY_API_ENDPOINT=http://aiproxy:3000
# # AI Proxy 的 Admin Token与 AI Proxy 中的环境变量 ADMIN_KEY
- AIPROXY_API_TOKEN=aiproxy
# 数据库最大连接数
- DB_MAX_LINK=30
# 登录凭证密钥
- TOKEN_KEY=any
# root的密钥常用于升级时候的初始化请求
- ROOT_KEY=root_key
# 文件阅读加密
- FILE_TOKEN_KEY=filetoken
# MongoDB 连接参数. 用户名myusername,密码mypassword。
- MONGODB_URI=mongodb://myusername:mypassword@mongo:27017/fastgpt?authSource=admin
# OceanBase 向量库连接参数
- OCEANBASE_URL=mysql://root%40tenantname:tenantpassword@ob:2881/test
# Redis 连接参数
- REDIS_URL=redis://default:mypassword@redis:6379
# sandbox 地址
- SANDBOX_URL=http://sandbox:3000
# 日志等级: debug, info, warn, error
- LOG_LEVEL=info
- STORE_LOG_LEVEL=warn
# 工作流最大运行次数
- WORKFLOW_MAX_RUN_TIMES=1000
# 批量执行节点,最大输入长度
- WORKFLOW_MAX_LOOP_TIMES=100
# 自定义跨域,不配置时,默认都允许跨域(多个域名通过逗号分割)
- ALLOWED_ORIGINS=
# 是否开启IP限制默认不开启
- USE_IP_LIMIT=false
# 对话文件过期天数
- CHAT_FILE_EXPIRE_TIME=7
volumes:
- ./config.json:/app/data/config.json
# AI Proxy
aiproxy:
image: ghcr.io/labring/aiproxy:v0.1.7
# image: registry.cn-hangzhou.aliyuncs.com/labring/aiproxy:v0.1.7 # 阿里云
container_name: aiproxy
restart: unless-stopped
depends_on:
aiproxy_pg:
condition: service_healthy
networks:
- fastgpt
environment:
# 对应 fastgpt 里的AIPROXY_API_TOKEN
- ADMIN_KEY=aiproxy
# 错误日志详情保存时间(小时)
- LOG_DETAIL_STORAGE_HOURS=1
# 数据库连接地址
- SQL_DSN=postgres://postgres:aiproxy@aiproxy_pg:5432/aiproxy
# 最大重试次数
- RETRY_TIMES=3
# 不需要计费
- BILLING_ENABLED=false
# 不需要严格检测模型
- DISABLE_MODEL_CONFIG=true
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:3000/api/status']
interval: 5s
timeout: 5s
retries: 10
aiproxy_pg:
image: pgvector/pgvector:0.8.0-pg15 # docker hub
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.8.0-pg15 # 阿里云
restart: unless-stopped
container_name: aiproxy_pg
volumes:
- ./aiproxy_pg:/var/lib/postgresql/data
networks:
- fastgpt
environment:
TZ: Asia/Shanghai
POSTGRES_USER: postgres
POSTGRES_DB: aiproxy
POSTGRES_PASSWORD: aiproxy
healthcheck:
test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'aiproxy']
interval: 5s
timeout: 5s
retries: 10
networks:
fastgpt:
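The OceanBase comments in the file above explain that an account is addressed as 用户名@租户名#集群名 (user@tenant#cluster), which is also the format its healthcheck uses. A hedged connection example, assuming the 2881 port mapping is uncommented and the default credentials from this file are unchanged:

```bash
# Connect from the host with a stock mysql client, using the user@tenant account format
mysql -h127.0.0.1 -P2881 -uroot@tenantname -ptenantpassword -e "SELECT 1;"

# Or run obclient inside the container, exactly as the healthcheck does
docker exec ob obclient -h127.0.0.1 -P2881 -uroot@tenantname -ptenantpassword -e "SELECT 1;"
```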


@@ -1,2 +0,0 @@
ALTER SYSTEM SET ob_vector_memory_limit_percentage = 30;
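One way to confirm the init.sql statement above took effect is to query the parameter back. This is a sketch under the assumption that OceanBase's SHOW PARAMETERS syntax is available in the bundled 4.3.5 image and that the default credentials are unchanged; verify against the OceanBase docs for your version:

```bash
# List the vector memory limit parameter set by init.sql
docker exec ob obclient -h127.0.0.1 -P2881 -uroot@tenantname -ptenantpassword \
  -e "SHOW PARAMETERS LIKE 'ob_vector_memory_limit_percentage';"
```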


@@ -7,12 +7,12 @@ version: '3.3'
services:
  # db
  pg:
-    image: pgvector/pgvector:0.8.0-pg15 # docker hub
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.8.0-pg15 # 阿里云
+    image: pgvector/pgvector:0.7.0-pg15 # docker hub
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.7.0 # 阿里云
    container_name: pg
    restart: always
-    # ports: # 生产环境建议不要暴露
-    #   - 5432:5432
+    ports: # 生产环境建议不要暴露
+      - 5432:5432
    networks:
      - fastgpt
    environment:
@@ -22,19 +22,14 @@ services:
      - POSTGRES_DB=postgres
    volumes:
      - ./pg/data:/var/lib/postgresql/data
-    healthcheck:
-      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'postgres']
-      interval: 5s
-      timeout: 5s
-      retries: 10
  mongo:
    image: mongo:5.0.18 # dockerhub
    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mongo:5.0.18 # 阿里云
    # image: mongo:4.4.29 # cpu不支持AVX时候使用
    container_name: mongo
    restart: always
-    # ports:
-    #   - 27017:27017
+    ports:
+      - 27017:27017
    networks:
      - fastgpt
    command: mongod --keyFile /data/mongodb.key --replSet rs0
@@ -63,7 +58,7 @@ services:
      exec docker-entrypoint.sh "$$@" &
      # 等待MongoDB服务启动
-      until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')"; do
+      until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')" > /dev/null 2>&1; do
        echo "Waiting for MongoDB to start..."
        sleep 2
      done
@@ -74,48 +69,18 @@ services:
      # 等待docker-entrypoint.sh脚本执行的MongoDB服务进程
      wait $$!
-  redis:
-    image: redis:7.2-alpine
-    container_name: redis
-    # ports:
-    #   - 6379:6379
-    networks:
-      - fastgpt
-    restart: always
-    command: |
-      redis-server --requirepass mypassword --loglevel warning --maxclients 10000 --appendonly yes --save 60 10 --maxmemory 4gb --maxmemory-policy noeviction
-    healthcheck:
-      test: ['CMD', 'redis-cli', '-a', 'mypassword', 'ping']
-      interval: 10s
-      timeout: 3s
-      retries: 3
-      start_period: 30s
-    volumes:
-      - ./redis/data:/data
  # fastgpt
  sandbox:
    container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10-fix2 # 阿里云
+    image: ghcr.io/labring/fastgpt-sandbox:v4.8.23-fix # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.23-fix # 阿里云
    networks:
      - fastgpt
    restart: always
-  fastgpt-mcp-server:
-    container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10-fix2 # 阿里云
-    ports:
-      - 3005:3000
-    networks:
-      - fastgpt
-    restart: always
-    environment:
-      - FASTGPT_ENDPOINT=http://fastgpt:3000
  fastgpt:
    container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10-fix2 # 阿里云
+    image: ghcr.io/labring/fastgpt:v4.8.23-fix # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.23-fix # 阿里云
    ports:
      - 3000:3000
    networks:
@@ -130,10 +95,10 @@ services:
      - FE_DOMAIN=
      # root 密码,用户名为: root。如果需要修改 root 密码,直接修改这个环境变量,并重启即可。
      - DEFAULT_ROOT_PSW=1234
-      # AI Proxy 的地址,如果配了该地址,优先使用
-      - AIPROXY_API_ENDPOINT=http://aiproxy:3000
-      # AI Proxy 的 Admin Token,与 AI Proxy 中的环境变量 ADMIN_KEY
-      - AIPROXY_API_TOKEN=aiproxy
+      # AI模型的API地址哦。务必加 /v1。这里默认填写了OneApi的访问地址。
+      - OPENAI_BASE_URL=http://oneapi:3000/v1
+      # AI模型的API Key。这里默认填写了OneAPI的快速默认key,测试通后务必及时修改
+      - CHAT_API_KEY=sk-fastgpt
      # 数据库最大连接数
      - DB_MAX_LINK=30
      # 登录凭证密钥
@@ -146,8 +111,6 @@ services:
      - MONGODB_URI=mongodb://myusername:mypassword@mongo:27017/fastgpt?authSource=admin
      # pg 连接参数
      - PG_URL=postgresql://username:password@pg:5432/postgres
-      # Redis 连接参数
-      - REDIS_URL=redis://default:mypassword@redis:6379
      # sandbox 地址
      - SANDBOX_URL=http://sandbox:3000
      # 日志等级: debug, info, warn, error
@@ -161,58 +124,51 @@ services:
      - ALLOWED_ORIGINS=
      # 是否开启IP限制,默认不开启
      - USE_IP_LIMIT=false
-      # 对话文件过期天数
-      - CHAT_FILE_EXPIRE_TIME=7
    volumes:
      - ./config.json:/app/data/config.json
-  # AI Proxy
-  aiproxy:
-    image: ghcr.io/labring/aiproxy:v0.1.7
-    # image: registry.cn-hangzhou.aliyuncs.com/labring/aiproxy:v0.1.7 # 阿里云
-    container_name: aiproxy
-    restart: unless-stopped
-    depends_on:
-      aiproxy_pg:
-        condition: service_healthy
-    networks:
-      - fastgpt
-    environment:
-      # 对应 fastgpt 里的AIPROXY_API_TOKEN
-      - ADMIN_KEY=aiproxy
-      # 错误日志详情保存时间(小时)
-      - LOG_DETAIL_STORAGE_HOURS=1
-      # 数据库连接地址
-      - SQL_DSN=postgres://postgres:aiproxy@aiproxy_pg:5432/aiproxy
-      # 最大重试次数
-      - RETRY_TIMES=3
-      # 不需要计费
-      - BILLING_ENABLED=false
-      # 不需要严格检测模型
-      - DISABLE_MODEL_CONFIG=true
-    healthcheck:
-      test: ['CMD', 'curl', '-f', 'http://localhost:3000/api/status']
-      interval: 5s
-      timeout: 5s
-      retries: 10
-  aiproxy_pg:
-    image: pgvector/pgvector:0.8.0-pg15 # docker hub
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.8.0-pg15 # 阿里云
-    restart: unless-stopped
-    container_name: aiproxy_pg
-    volumes:
-      - ./aiproxy_pg:/var/lib/postgresql/data
-    networks:
-      - fastgpt
-    environment:
-      TZ: Asia/Shanghai
-      POSTGRES_USER: postgres
-      POSTGRES_DB: aiproxy
-      POSTGRES_PASSWORD: aiproxy
-    healthcheck:
-      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'aiproxy']
-      interval: 5s
-      timeout: 5s
-      retries: 10
+  # oneapi
+  mysql:
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mysql:8.0.36 # 阿里云
+    image: mysql:8.0.36
+    container_name: mysql
+    restart: always
+    ports:
+      - 3306:3306
+    networks:
+      - fastgpt
+    command: --default-authentication-plugin=mysql_native_password
+    environment:
+      # 默认root密码,仅首次运行有效
+      MYSQL_ROOT_PASSWORD: oneapimmysql
+      MYSQL_DATABASE: oneapi
+    volumes:
+      - ./mysql:/var/lib/mysql
+  oneapi:
+    container_name: oneapi
+    image: ghcr.io/songquanpeng/one-api:v0.6.7
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/one-api:v0.6.6 # 阿里云
+    ports:
+      - 3001:3000
+    depends_on:
+      - mysql
+    networks:
+      - fastgpt
+    restart: always
+    environment:
+      # mysql 连接参数
+      - SQL_DSN=root:oneapimmysql@tcp(mysql:3306)/oneapi
+      # 登录凭证加密密钥
+      - SESSION_SECRET=oneapikey
+      # 内存缓存
+      - MEMORY_CACHE_ENABLED=true
+      # 启动聚合更新,减少数据交互频率
+      - BATCH_UPDATE_ENABLED=true
+      # 聚合更新时长
+      - BATCH_UPDATE_INTERVAL=10
+      # 初始化的 root 密钥(建议部署完后更改,否则容易泄露)
+      - INITIAL_ROOT_TOKEN=fastgpt
+    volumes:
+      - ./oneapi:/data
networks:
  fastgpt:
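The mongo entrypoint script in this file initializes a single-member replica set (rs0) on first start. A small check, assuming the default myusername/mypassword credentials from this file are unchanged:

```bash
# Should print 1 once the replica set has been initiated
docker exec mongo mongo -u myusername -p mypassword --authenticationDatabase admin \
  --quiet --eval "rs.status().ok"
```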

View File

@@ -41,7 +41,7 @@ services:
      exec docker-entrypoint.sh "$$@" &
      # 等待MongoDB服务启动
-      until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')"; do
+      until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')" > /dev/null 2>&1; do
        echo "Waiting for MongoDB to start..."
        sleep 2
      done
@@ -51,47 +51,17 @@ services:
      # 等待docker-entrypoint.sh脚本执行的MongoDB服务进程
      wait $$!
-  redis:
-    image: redis:7.2-alpine
-    container_name: redis
-    # ports:
-    #   - 6379:6379
-    networks:
-      - fastgpt
-    restart: always
-    command: |
-      redis-server --requirepass mypassword --loglevel warning --maxclients 10000 --appendonly yes --save 60 10 --maxmemory 4gb --maxmemory-policy noeviction
-    healthcheck:
-      test: ['CMD', 'redis-cli', '-a', 'mypassword', 'ping']
-      interval: 10s
-      timeout: 3s
-      retries: 3
-      start_period: 30s
-    volumes:
-      - ./redis/data:/data
  sandbox:
    container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10-fix2 # 阿里云
+    image: ghcr.io/labring/fastgpt-sandbox:v4.8.23-fix # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.23-fix # 阿里云
    networks:
      - fastgpt
    restart: always
-  fastgpt-mcp-server:
-    container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10-fix2 # 阿里云
-    ports:
-      - 3005:3000
-    networks:
-      - fastgpt
-    restart: always
-    environment:
-      - FASTGPT_ENDPOINT=http://fastgpt:3000
  fastgpt:
    container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10-fix2 # 阿里云
+    image: ghcr.io/labring/fastgpt:v4.8.23-fix # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.23-fix # 阿里云
    ports:
      - 3000:3000
    networks:
@@ -105,13 +75,10 @@ services:
      - FE_DOMAIN=
      # root 密码,用户名为: root。如果需要修改 root 密码,直接修改这个环境变量,并重启即可。
      - DEFAULT_ROOT_PSW=1234
-      # AI Proxy 的地址,如果配了该地址,优先使用
-      - AIPROXY_API_ENDPOINT=http://aiproxy:3000
-      # AI Proxy 的 Admin Token,与 AI Proxy 中的环境变量 ADMIN_KEY
-      - AIPROXY_API_TOKEN=aiproxy
-      # 模型中转地址(如果用了 AI Proxy,下面 2 个就不需要了,旧版 OneAPI 用户,使用下面的变量)
-      # - OPENAI_BASE_URL=http://oneapi:3000/v1
-      # - CHAT_API_KEY=sk-fastgpt
+      # AI模型的API地址哦。务必加 /v1。这里默认填写了OneApi的访问地址。
+      - OPENAI_BASE_URL=http://oneapi:3000/v1
+      # AI模型的API Key。这里默认填写了OneAPI的快速默认key,测试通后务必及时修改
+      - CHAT_API_KEY=sk-fastgpt
      # 数据库最大连接数
      - DB_MAX_LINK=30
      # 登录凭证密钥
@@ -122,8 +89,6 @@ services:
      - FILE_TOKEN_KEY=filetoken
      # MongoDB 连接参数. 用户名:myusername,密码:mypassword。
      - MONGODB_URI=mongodb://myusername:mypassword@mongo:27017/fastgpt?authSource=admin
-      # Redis 连接参数
-      - REDIS_URI=redis://default:mypassword@redis:6379
      # zilliz 连接参数
      - MILVUS_ADDRESS=zilliz_cloud_address
      - MILVUS_TOKEN=zilliz_cloud_token
@@ -140,58 +105,51 @@ services:
      - ALLOWED_ORIGINS=
      # 是否开启IP限制,默认不开启
      - USE_IP_LIMIT=false
-      # 对话文件过期天数
-      - CHAT_FILE_EXPIRE_TIME=7
    volumes:
      - ./config.json:/app/data/config.json
-  # AI Proxy
-  aiproxy:
-    image: ghcr.io/labring/aiproxy:v0.1.7
-    # image: registry.cn-hangzhou.aliyuncs.com/labring/aiproxy:v0.1.7 # 阿里云
-    container_name: aiproxy
-    restart: unless-stopped
-    depends_on:
-      aiproxy_pg:
-        condition: service_healthy
-    networks:
-      - fastgpt
-    environment:
-      # 对应 fastgpt 里的AIPROXY_API_TOKEN
-      - ADMIN_KEY=aiproxy
-      # 错误日志详情保存时间(小时)
-      - LOG_DETAIL_STORAGE_HOURS=1
-      # 数据库连接地址
-      - SQL_DSN=postgres://postgres:aiproxy@aiproxy_pg:5432/aiproxy
-      # 最大重试次数
-      - RETRY_TIMES=3
-      # 不需要计费
-      - BILLING_ENABLED=false
-      # 不需要严格检测模型
-      - DISABLE_MODEL_CONFIG=true
-    healthcheck:
-      test: ['CMD', 'curl', '-f', 'http://localhost:3000/api/status']
-      interval: 5s
-      timeout: 5s
-      retries: 10
-  aiproxy_pg:
-    image: pgvector/pgvector:0.8.0-pg15 # docker hub
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.8.0-pg15 # 阿里云
-    restart: unless-stopped
-    container_name: aiproxy_pg
-    volumes:
-      - ./aiproxy_pg:/var/lib/postgresql/data
-    networks:
-      - fastgpt
-    environment:
-      TZ: Asia/Shanghai
-      POSTGRES_USER: postgres
-      POSTGRES_DB: aiproxy
-      POSTGRES_PASSWORD: aiproxy
-    healthcheck:
-      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'aiproxy']
-      interval: 5s
-      timeout: 5s
-      retries: 10
+  # oneapi
+  mysql:
+    image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mysql:8.0.36 # 阿里云
+    # image: mysql:8.0.36
+    container_name: mysql
+    restart: always
+    ports:
+      - 3306:3306
+    networks:
+      - fastgpt
+    command: --default-authentication-plugin=mysql_native_password
+    environment:
+      # 默认root密码,仅首次运行有效
+      MYSQL_ROOT_PASSWORD: oneapimmysql
+      MYSQL_DATABASE: oneapi
+    volumes:
+      - ./mysql:/var/lib/mysql
+  oneapi:
+    container_name: oneapi
+    image: ghcr.io/songquanpeng/one-api:v0.6.7
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/one-api:v0.6.6 # 阿里云
+    ports:
+      - 3001:3000
+    depends_on:
+      - mysql
+    networks:
+      - fastgpt
+    restart: always
+    environment:
+      # mysql 连接参数
+      - SQL_DSN=root:oneapimmysql@tcp(mysql:3306)/oneapi
+      # 登录凭证加密密钥
+      - SESSION_SECRET=oneapikey
+      # 内存缓存
+      - MEMORY_CACHE_ENABLED=true
+      # 启动聚合更新,减少数据交互频率
+      - BATCH_UPDATE_ENABLED=true
+      # 聚合更新时长
+      - BATCH_UPDATE_INTERVAL=10
+      # 初始化的 root 密钥(建议部署完后更改,否则容易泄露)
+      - INITIAL_ROOT_TOKEN=fastgpt
+    volumes:
+      - ./oneapi:/data
networks:
  fastgpt:
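The two sides of these compose diffs differ mainly in which model-gateway variables the fastgpt container receives (AIPROXY_* on one side, OPENAI_BASE_URL/CHAT_API_KEY on the other). A quick way to see what the running container actually got, assuming the container name fastgpt from these files:

```bash
# Inspect the model-related environment variables inside the running container
docker exec fastgpt printenv | grep -E 'AIPROXY|OPENAI_BASE_URL|CHAT_API_KEY|MILVUS'
```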


@@ -6,8 +6,7 @@ data:
    "openapiPrefix": "fastgpt",
    "vectorMaxProcess": 15,
    "qaMaxProcess": 15,
-    "vlmMaxProcess": 15,
-    "hnswEfSearch": 100
+    "pgHNSWEfSearch": 100
  },
  "llmModels": [
    {
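Whichever systemEnv keys your FastGPT version expects, a malformed config.json is an easy way to break startup, so it is worth validating the JSON before mounting it. A minimal sketch, assuming jq is installed and the file sits next to the compose file as ./config.json:

```bash
# Fails with a parse error and a non-zero exit code if the JSON is broken
jq . config.json > /dev/null && echo "config.json is valid JSON"
```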

dev.md

@@ -99,10 +99,6 @@ export default staticContent;
- Use the t(namespace:key) format to ensure consistent naming.
- Translation keys should use lowercase letters and underscores, e.g., common.close.
-## audit
-Please fill the OperationLogEventEnum and operationLog/audit function is added to the ts, and on the corresponding position to fill i18n, at the same time to add the location of the log using addOpearationLog function add function
## Build
```sh


@@ -10,5 +10,3 @@ FROM fholzer/nginx-brotli:latest
LABEL org.opencontainers.image.source https://github.com/labring/FastGPT
COPY --from=builder /app/hugo/public /usr/share/nginx/html
-COPY ./docSite/nginx.conf /etc/nginx/conf.d/default.conf
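The docs image above serves the Hugo-built site with nginx-brotli; one side of the diff also copies a custom nginx.conf. A hedged local build-and-run sketch — the Dockerfile path and the exposed port are assumptions, so adjust them to the actual file location and whatever port the nginx config in use listens on:

```bash
# Build from the repository root (Dockerfile path is an assumption)
docker build -t fastgpt-docs -f docSite/Dockerfile .

# Serve locally; nginx defaults to port 80 unless the custom conf overrides it
docker run --rm -p 8080:80 fastgpt-docs
```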

(Binary image files changed; contents not shown.)