* perf: insert mongo dataset data session
* perf: dataset data index
* remove delay
* rename bill schema
* rename bill record
* perf: bill table
* perf: prompt
* perf: sub plan
* change the usage count
* feat: usage bill
* publish usages
* doc
* feat: add team chat (#20)
  * perf: doc
  * feat: add the tag section
    feat: team tag configuration
    feat: add team sync management
    feat: team share page
    feat: finish the team share page
    feat: implement fuzzy search
    style: formatting
    fix: fuzzy matching
    style: style tweaks
    fix: team tag feature fixes
  * fix: authentication
  * merge: merge code
  * fix: reference errors
  * fix: PR issues
  * fix: TS formatting issues
  ---------
  Co-authored-by: archer <545436317@qq.com>
  Co-authored-by: liuxingwan <liuxingwan.lxw@alibaba-inc.com>
* update extra plan
* fix: ts
* format
* perf: bill field
* feat: standard plan
* fix: ts
* feat: personal account page changes (#22)
  * feat: add the tag section
    feat: team tag configuration
    feat: add team sync management
    feat: team share page
    feat: finish the team share page
    feat: implement fuzzy search
    style: formatting
    fix: fuzzy matching
    style: style tweaks
    fix: team tag feature fixes
  * fix: authentication
  * merge: merge code
  * fix: reference errors
  * fix: PR issues
  * fix: TS formatting issues
  * feat: update the personal account page
  ---------
  Co-authored-by: liuxingwan <liuxingwan.lxw@alibaba-inc.com>
* fix: chunk index; error page text
* feat: dataset process integral prediction
* feat: standard plan field
* feat: sub plan limit
* perf: index
* query extension
* perf: share link push app name
* perf: plan point unit
* perf: get sub plan
* perf: account page
---------
Co-authored-by: yst <77910600+yu-and-liu@users.noreply.github.com>
Co-authored-by: liuxingwan <liuxingwan.lxw@alibaba-inc.com>
173 lines · 3.9 KiB · TypeScript
import {
  TrainingModeEnum,
  DatasetCollectionTypeEnum
} from '@fastgpt/global/core/dataset/constants';
import type { CreateDatasetCollectionParams } from '@fastgpt/global/core/dataset/api.d';
import { MongoDatasetCollection } from './schema';
import {
  CollectionWithDatasetType,
  DatasetCollectionSchemaType
} from '@fastgpt/global/core/dataset/type';
import { MongoDatasetTraining } from '../training/schema';
import { MongoDatasetData } from '../data/schema';
import { delImgByRelatedId } from '../../../common/file/image/controller';
import { deleteDatasetDataVector } from '../../../common/vectorStore/controller';
import { delFileByFileIdList } from '../../../common/file/gridfs/controller';
import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
import { ClientSession } from '../../../common/mongo';

export async function createOneCollection({
  teamId,
  tmbId,
  name,
  parentId,
  datasetId,
  type,

  // chunking / training configuration
  trainingType = TrainingModeEnum.chunk,
  chunkSize = 512,
  chunkSplitter,
  qaPrompt,

  // source of the raw content (uploaded file or web link)
  fileId,
  rawLink,

  // fingerprint and size of the raw text
  hashRawText,
  rawTextLength,
  metadata = {},
  session,
  ...props
}: CreateDatasetCollectionParams & {
  teamId: string;
  tmbId: string;
  [key: string]: any;
  session?: ClientSession;
}) {
  const [collection] = await MongoDatasetCollection.create(
    [
      {
        ...props,
        teamId,
        tmbId,
        parentId: parentId || null,
        datasetId,
        name,
        type,

        trainingType,
        chunkSize,
        chunkSplitter,
        qaPrompt,

        fileId,
        rawLink,

        rawTextLength,
        hashRawText,
        metadata
      }
    ],
    { session }
  );

  // a new folder gets a default child collection for manually entered data
  if (type === DatasetCollectionTypeEnum.folder) {
    await createDefaultCollection({
      datasetId,
      parentId: collection._id,
      teamId,
      tmbId,
      session
    });
  }

  return collection;
}
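
// ---------------------------------------------------------------------------
// Usage sketch (illustrative addition, not part of the original module): how a
// caller might create a link-type collection inside a MongoDB transaction so
// the collection insert and any follow-up writes commit or roll back together.
// Model.startSession() and session.withTransaction() are standard mongoose
// APIs; the argument values below are placeholder assumptions.
// ---------------------------------------------------------------------------
export async function exampleCreateLinkCollection() {
  const session = await MongoDatasetCollection.startSession();
  try {
    await session.withTransaction(async () => {
      await createOneCollection({
        teamId: '<teamId>', // placeholder id
        tmbId: '<tmbId>', // placeholder id
        datasetId: '<datasetId>', // placeholder id
        name: 'Example page',
        type: DatasetCollectionTypeEnum.link,
        rawLink: 'https://example.com',
        session
      });
    });
  } finally {
    await session.endSession();
  }
}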

// create the default collection of a folder/dataset.
// The names are UI labels: '手动录入' = "manual input", '手动标注' = "manual annotation".
export function createDefaultCollection({
  name = '手动录入',
  datasetId,
  parentId,
  teamId,
  tmbId,
  session
}: {
  name?: '手动录入' | '手动标注';
  datasetId: string;
  parentId?: string;
  teamId: string;
  tmbId: string;
  session?: ClientSession;
}) {
  return MongoDatasetCollection.create(
    [
      {
        name,
        teamId,
        tmbId,
        datasetId,
        parentId,
        type: DatasetCollectionTypeEnum.virtual,
        trainingType: TrainingModeEnum.chunk,
        chunkSize: 0,
        updateTime: new Date('2099')
      }
    ],
    { session }
  );
}
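
// ---------------------------------------------------------------------------
// Design note (added commentary): the far-future updateTime above appears
// intended to keep the default collection pinned to the top of lists sorted by
// updateTime in descending order. An illustrative listing query; .find/.sort/
// .lean are standard mongoose calls, and the field names match this module's
// schema usage:
// ---------------------------------------------------------------------------
export function exampleListCollections(datasetId: string) {
  return MongoDatasetCollection.find({ datasetId }).sort({ updateTime: -1 }).lean();
}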

/**
 * Delete collections and their related data.
 */
export async function delCollectionAndRelatedSources({
  collections,
  session
}: {
  collections: (CollectionWithDatasetType | DatasetCollectionSchemaType)[];
  session: ClientSession;
}) {
  if (collections.length === 0) return;

  const teamId = collections[0].teamId;

  if (!teamId) return Promise.reject('teamId does not exist');

  const collectionIds = collections.map((item) => String(item._id));
  const fileIdList = collections.map((item) => item?.fileId || '').filter(Boolean);
  const relatedImageIds = collections
    .map((item) => item?.metadata?.relatedImgId || '')
    .filter(Boolean);

  // delete training data
  await MongoDatasetTraining.deleteMany({
    teamId,
    collectionId: { $in: collectionIds }
  });

  // delete dataset.datas
  await MongoDatasetData.deleteMany({ teamId, collectionId: { $in: collectionIds } }, { session });
  // delete related images
  await delImgByRelatedId({
    teamId,
    relateIds: relatedImageIds,
    session
  });
  // delete the collection documents themselves
  await MongoDatasetCollection.deleteMany(
    {
      _id: { $in: collectionIds }
    },
    { session }
  );

  // no session delete: delete files, vector data
  await deleteDatasetDataVector({ teamId, collectionIds });
  await delFileByFileIdList({
    bucketName: BucketNameEnum.dataset,
    fileIdList
  });
}
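
// ---------------------------------------------------------------------------
// Usage sketch (illustrative addition): deleting one collection and all of its
// derived data. The session-scoped deletes roll back together if the
// transaction aborts; vector and GridFS deletes run outside the session, as in
// the function body above. The lookup below stands in for however callers load
// the collection documents, and a cast may be needed depending on how strictly
// the lean() result is typed.
// ---------------------------------------------------------------------------
export async function exampleDeleteCollection(collectionId: string) {
  const session = await MongoDatasetCollection.startSession();
  try {
    await session.withTransaction(async () => {
      const collections = await MongoDatasetCollection.find({ _id: collectionId }, undefined, {
        session
      }).lean();
      await delCollectionAndRelatedSources({ collections, session });
    });
  } finally {
    await session.endSession();
  }
}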