update fetch urls & web search show view
This commit is contained in:
parent
7642cc1410
commit
c8d03bf799
@ -607,7 +607,6 @@ const Chat = forwardRef<ChatRef, ChatProps>((props, ref) => {
|
||||
query: toolArgs.query,
|
||||
scope: scope_folders,
|
||||
})
|
||||
console.log("results", results)
|
||||
let snippets = results.map(({ path, content, metadata }) => {
|
||||
const contentWithLineNumbers = addLineNumbers(content, metadata.startLine)
|
||||
return `<file_block_content location="${path}#L${metadata.startLine}-${metadata.endLine}">\n${contentWithLineNumbers}\n</file_block_content>`
|
||||
|
||||
@ -1,11 +1,8 @@
|
||||
import { ChevronDown, ChevronRight, Globe } from 'lucide-react'
|
||||
import React, { useEffect, useMemo, useRef, useState } from 'react'
|
||||
import { Check, ChevronDown, ChevronRight, Globe, Loader2, X } from 'lucide-react'
|
||||
import React, { useEffect, useRef, useState } from 'react'
|
||||
|
||||
import { useDarkModeContext } from '../../contexts/DarkModeContext'
|
||||
import { ApplyStatus, FetchUrlsContentToolArgs } from '../../types/apply'
|
||||
|
||||
import { MemoizedSyntaxHighlighterWrapper } from './SyntaxHighlighterWrapper'
|
||||
|
||||
export default function MarkdownFetchUrlsContentBlock({
|
||||
applyStatus,
|
||||
onApply,
|
||||
@ -17,14 +14,11 @@ export default function MarkdownFetchUrlsContentBlock({
|
||||
urls: string[],
|
||||
finish: boolean
|
||||
}) {
|
||||
const { isDarkMode } = useDarkModeContext()
|
||||
const containerRef = useRef<HTMLDivElement>(null)
|
||||
const [isOpen, setIsOpen] = useState(true)
|
||||
|
||||
React.useEffect(() => {
|
||||
console.log('finish', finish, applyStatus)
|
||||
if (finish && applyStatus === ApplyStatus.Idle) {
|
||||
console.log('finish auto fetch urls content', urls)
|
||||
onApply({
|
||||
type: 'fetch_urls_content',
|
||||
urls: urls
|
||||
@ -32,48 +26,67 @@ export default function MarkdownFetchUrlsContentBlock({
|
||||
}
|
||||
}, [finish])
|
||||
|
||||
const urlsMarkdownContent = useMemo(() => {
|
||||
return urls.map(url => {
|
||||
return `${url}`
|
||||
}).join('\n\n')
|
||||
}, [urls])
|
||||
|
||||
useEffect(() => {
|
||||
if (containerRef.current) {
|
||||
containerRef.current.scrollTop = containerRef.current.scrollHeight
|
||||
}
|
||||
}, [urlsMarkdownContent])
|
||||
}, [urls])
|
||||
|
||||
return (
|
||||
urlsMarkdownContent && (
|
||||
<div
|
||||
className={`infio-chat-code-block has-filename infio-reasoning-block`}
|
||||
>
|
||||
<div className={'infio-chat-code-block-header'}>
|
||||
<div className={'infio-chat-code-block-header-filename'}>
|
||||
urls.length > 0 && (
|
||||
<div className="infio-chat-code-block has-filename infio-reasoning-block">
|
||||
<div className="infio-chat-code-block-header">
|
||||
<div className="infio-chat-code-block-header-filename">
|
||||
<Globe size={10} className="infio-chat-code-block-header-icon" />
|
||||
Fetch URLs Content
|
||||
</div>
|
||||
<button
|
||||
className="clickable-icon infio-chat-list-dropdown"
|
||||
onClick={() => setIsOpen(!isOpen)}
|
||||
>
|
||||
{isOpen ? <ChevronDown size={16} /> : <ChevronRight size={16} />}
|
||||
</button>
|
||||
<div className="infio-chat-code-block-header-button">
|
||||
<button
|
||||
className="infio-chat-code-block-status-button"
|
||||
disabled={true}
|
||||
>
|
||||
{
|
||||
!finish || applyStatus === ApplyStatus.Idle ? (
|
||||
<>
|
||||
<Loader2 className="spinner" size={14} /> Fetching...
|
||||
</>
|
||||
) : applyStatus === ApplyStatus.Applied ? (
|
||||
<>
|
||||
<Check size={14} /> Done
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
<X size={14} /> Failed
|
||||
</>
|
||||
)}
|
||||
</button>
|
||||
<button
|
||||
className="clickable-icon infio-chat-list-dropdown"
|
||||
onClick={() => setIsOpen(!isOpen)}
|
||||
>
|
||||
{isOpen ? <ChevronDown size={16} /> : <ChevronRight size={16} />}
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div
|
||||
ref={containerRef}
|
||||
className="infio-reasoning-content-wrapper"
|
||||
style={{ display: isOpen ? 'block' : 'none' }}
|
||||
>
|
||||
<MemoizedSyntaxHighlighterWrapper
|
||||
isDarkMode={isDarkMode}
|
||||
language="markdown"
|
||||
hasFilename={true}
|
||||
wrapLines={true}
|
||||
isOpen={isOpen}
|
||||
>
|
||||
{urlsMarkdownContent}
|
||||
</MemoizedSyntaxHighlighterWrapper>
|
||||
<ul className="infio-chat-code-block-url-list">
|
||||
{urls.map((url, index) => (
|
||||
<li key={index}>
|
||||
<a
|
||||
href={url}
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
className="infio-chat-code-block-url-link"
|
||||
>
|
||||
{url}
|
||||
</a>
|
||||
</li>
|
||||
))}
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
|
||||
@ -25,9 +25,7 @@ export default function MarkdownListFilesBlock({
|
||||
}
|
||||
|
||||
React.useEffect(() => {
|
||||
console.log('finish', finish, applyStatus)
|
||||
if (finish && applyStatus === ApplyStatus.Idle) {
|
||||
console.log('finish auto list files', path)
|
||||
onApply({
|
||||
type: 'list_files',
|
||||
filepath: path,
|
||||
|
||||
@ -25,9 +25,7 @@ export default function MarkdownRegexSearchFilesBlock({
|
||||
}
|
||||
|
||||
React.useEffect(() => {
|
||||
console.log('finish', finish, applyStatus)
|
||||
if (finish && applyStatus === ApplyStatus.Idle) {
|
||||
console.log('finish auto regex search files', path)
|
||||
onApply({
|
||||
type: 'regex_search_files',
|
||||
filepath: path,
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
import { Search } from 'lucide-react'
|
||||
import { Check, Loader2, Search, X } from 'lucide-react'
|
||||
import React from 'react'
|
||||
|
||||
import { useSettings } from '../../contexts/SettingsContext'
|
||||
@ -48,6 +48,27 @@ export default function MarkdownWebSearchBlock({
|
||||
<Search size={14} className="infio-chat-code-block-header-icon" />
|
||||
Web search: {query}
|
||||
</div>
|
||||
<div className={'infio-chat-code-block-header-button'}>
|
||||
<button
|
||||
style={{ color: '#008000' }}
|
||||
disabled={true}
|
||||
>
|
||||
{
|
||||
!finish || applyStatus === ApplyStatus.Idle ? (
|
||||
<>
|
||||
<Loader2 className="spinner" size={14} /> Searching...
|
||||
</>
|
||||
) : applyStatus === ApplyStatus.Applied ? (
|
||||
<>
|
||||
<Check size={14} /> Done
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
<X size={14} /> Failed
|
||||
</>
|
||||
)}
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
|
||||
@ -25,9 +25,7 @@ export default function MarkdownSemanticSearchFilesBlock({
|
||||
}
|
||||
|
||||
React.useEffect(() => {
|
||||
console.log('finish', finish, applyStatus)
|
||||
if (finish && applyStatus === ApplyStatus.Idle) {
|
||||
console.log('finish auto semantic search files', path)
|
||||
onApply({
|
||||
type: 'semantic_search_files',
|
||||
filepath: path,
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@ -1,738 +0,0 @@
|
||||
import { NewUnifiedDiffStrategy } from "../new-unified"
|
||||
|
||||
describe("main", () => {
|
||||
let strategy: NewUnifiedDiffStrategy
|
||||
|
||||
beforeEach(() => {
|
||||
strategy = new NewUnifiedDiffStrategy(0.97)
|
||||
})
|
||||
|
||||
describe("constructor", () => {
|
||||
it("should use default confidence threshold when not provided", () => {
|
||||
const defaultStrategy = new NewUnifiedDiffStrategy()
|
||||
expect(defaultStrategy["confidenceThreshold"]).toBe(1)
|
||||
})
|
||||
|
||||
it("should use provided confidence threshold", () => {
|
||||
const customStrategy = new NewUnifiedDiffStrategy(0.85)
|
||||
expect(customStrategy["confidenceThreshold"]).toBe(0.85)
|
||||
})
|
||||
|
||||
it("should enforce minimum confidence threshold", () => {
|
||||
const lowStrategy = new NewUnifiedDiffStrategy(0.7) // Below minimum of 0.8
|
||||
expect(lowStrategy["confidenceThreshold"]).toBe(0.8)
|
||||
})
|
||||
})
|
||||
|
||||
describe("getToolDescription", () => {
|
||||
it("should return tool description with correct cwd", () => {
|
||||
const cwd = "/test/path"
|
||||
const description = strategy.getToolDescription({ cwd })
|
||||
|
||||
expect(description).toContain("apply_diff Tool - Generate Precise Code Changes")
|
||||
expect(description).toContain(cwd)
|
||||
expect(description).toContain("Step-by-Step Instructions")
|
||||
expect(description).toContain("Requirements")
|
||||
expect(description).toContain("Examples")
|
||||
expect(description).toContain("Parameters:")
|
||||
})
|
||||
})
|
||||
|
||||
it("should apply simple diff correctly", async () => {
|
||||
const original = `line1
|
||||
line2
|
||||
line3`
|
||||
|
||||
const diff = `--- a/file.txt
|
||||
+++ b/file.txt
|
||||
@@ ... @@
|
||||
line1
|
||||
+new line
|
||||
line2
|
||||
-line3
|
||||
+modified line3`
|
||||
|
||||
const result = await strategy.applyDiff(original, diff)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(`line1
|
||||
new line
|
||||
line2
|
||||
modified line3`)
|
||||
}
|
||||
})
|
||||
|
||||
it("should handle multiple hunks", async () => {
|
||||
const original = `line1
|
||||
line2
|
||||
line3
|
||||
line4
|
||||
line5`
|
||||
|
||||
const diff = `--- a/file.txt
|
||||
+++ b/file.txt
|
||||
@@ ... @@
|
||||
line1
|
||||
+new line
|
||||
line2
|
||||
-line3
|
||||
+modified line3
|
||||
@@ ... @@
|
||||
line4
|
||||
-line5
|
||||
+modified line5
|
||||
+new line at end`
|
||||
|
||||
const result = await strategy.applyDiff(original, diff)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(`line1
|
||||
new line
|
||||
line2
|
||||
modified line3
|
||||
line4
|
||||
modified line5
|
||||
new line at end`)
|
||||
}
|
||||
})
|
||||
|
||||
it("should handle complex large", async () => {
|
||||
const original = `line1
|
||||
line2
|
||||
line3
|
||||
line4
|
||||
line5
|
||||
line6
|
||||
line7
|
||||
line8
|
||||
line9
|
||||
line10`
|
||||
|
||||
const diff = `--- a/file.txt
|
||||
+++ b/file.txt
|
||||
@@ ... @@
|
||||
line1
|
||||
+header line
|
||||
+another header
|
||||
line2
|
||||
-line3
|
||||
-line4
|
||||
+modified line3
|
||||
+modified line4
|
||||
+extra line
|
||||
@@ ... @@
|
||||
line6
|
||||
+middle section
|
||||
line7
|
||||
-line8
|
||||
+changed line8
|
||||
+bonus line
|
||||
@@ ... @@
|
||||
line9
|
||||
-line10
|
||||
+final line
|
||||
+very last line`
|
||||
|
||||
const result = await strategy.applyDiff(original, diff)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(`line1
|
||||
header line
|
||||
another header
|
||||
line2
|
||||
modified line3
|
||||
modified line4
|
||||
extra line
|
||||
line5
|
||||
line6
|
||||
middle section
|
||||
line7
|
||||
changed line8
|
||||
bonus line
|
||||
line9
|
||||
final line
|
||||
very last line`)
|
||||
}
|
||||
})
|
||||
|
||||
it("should handle indentation changes", async () => {
|
||||
const original = `first line
|
||||
indented line
|
||||
double indented line
|
||||
back to single indent
|
||||
no indent
|
||||
indented again
|
||||
double indent again
|
||||
triple indent
|
||||
back to single
|
||||
last line`
|
||||
|
||||
const diff = `--- original
|
||||
+++ modified
|
||||
@@ ... @@
|
||||
first line
|
||||
indented line
|
||||
+ tab indented line
|
||||
+ new indented line
|
||||
double indented line
|
||||
back to single indent
|
||||
no indent
|
||||
indented again
|
||||
double indent again
|
||||
- triple indent
|
||||
+ hi there mate
|
||||
back to single
|
||||
last line`
|
||||
|
||||
const expected = `first line
|
||||
indented line
|
||||
tab indented line
|
||||
new indented line
|
||||
double indented line
|
||||
back to single indent
|
||||
no indent
|
||||
indented again
|
||||
double indent again
|
||||
hi there mate
|
||||
back to single
|
||||
last line`
|
||||
|
||||
const result = await strategy.applyDiff(original, diff)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(expected)
|
||||
}
|
||||
})
|
||||
|
||||
it("should handle high level edits", async () => {
|
||||
const original = `def factorial(n):
|
||||
if n == 0:
|
||||
return 1
|
||||
else:
|
||||
return n * factorial(n-1)`
|
||||
const diff = `@@ ... @@
|
||||
-def factorial(n):
|
||||
- if n == 0:
|
||||
- return 1
|
||||
- else:
|
||||
- return n * factorial(n-1)
|
||||
+def factorial(number):
|
||||
+ if number == 0:
|
||||
+ return 1
|
||||
+ else:
|
||||
+ return number * factorial(number-1)`
|
||||
|
||||
const expected = `def factorial(number):
|
||||
if number == 0:
|
||||
return 1
|
||||
else:
|
||||
return number * factorial(number-1)`
|
||||
|
||||
const result = await strategy.applyDiff(original, diff)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(expected)
|
||||
}
|
||||
})
|
||||
|
||||
it("it should handle very complex edits", async () => {
|
||||
const original = `//Initialize the array that will hold the primes
|
||||
var primeArray = [];
|
||||
/*Write a function that checks for primeness and
|
||||
pushes those values to t*he array*/
|
||||
function PrimeCheck(candidate){
|
||||
isPrime = true;
|
||||
for(var i = 2; i < candidate && isPrime; i++){
|
||||
if(candidate%i === 0){
|
||||
isPrime = false;
|
||||
} else {
|
||||
isPrime = true;
|
||||
}
|
||||
}
|
||||
if(isPrime){
|
||||
primeArray.push(candidate);
|
||||
}
|
||||
return primeArray;
|
||||
}
|
||||
/*Write the code that runs the above until the
|
||||
l ength of the array equa*ls the number of primes
|
||||
desired*/
|
||||
|
||||
var numPrimes = prompt("How many primes?");
|
||||
|
||||
//Display the finished array of primes
|
||||
|
||||
//for loop starting at 2 as that is the lowest prime number keep going until the array is as long as we requested
|
||||
for (var i = 2; primeArray.length < numPrimes; i++) {
|
||||
PrimeCheck(i); //
|
||||
}
|
||||
console.log(primeArray);
|
||||
`
|
||||
|
||||
const diff = `--- test_diff.js
|
||||
+++ test_diff.js
|
||||
@@ ... @@
|
||||
-//Initialize the array that will hold the primes
|
||||
var primeArray = [];
|
||||
-/*Write a function that checks for primeness and
|
||||
- pushes those values to t*he array*/
|
||||
function PrimeCheck(candidate){
|
||||
isPrime = true;
|
||||
for(var i = 2; i < candidate && isPrime; i++){
|
||||
@@ ... @@
|
||||
return primeArray;
|
||||
}
|
||||
-/*Write the code that runs the above until the
|
||||
- l ength of the array equa*ls the number of primes
|
||||
- desired*/
|
||||
|
||||
var numPrimes = prompt("How many primes?");
|
||||
|
||||
-//Display the finished array of primes
|
||||
-
|
||||
-//for loop starting at 2 as that is the lowest prime number keep going until the array is as long as we requested
|
||||
for (var i = 2; primeArray.length < numPrimes; i++) {
|
||||
- PrimeCheck(i); //
|
||||
+ PrimeCheck(i);
|
||||
}
|
||||
console.log(primeArray);`
|
||||
|
||||
const expected = `var primeArray = [];
|
||||
function PrimeCheck(candidate){
|
||||
isPrime = true;
|
||||
for(var i = 2; i < candidate && isPrime; i++){
|
||||
if(candidate%i === 0){
|
||||
isPrime = false;
|
||||
} else {
|
||||
isPrime = true;
|
||||
}
|
||||
}
|
||||
if(isPrime){
|
||||
primeArray.push(candidate);
|
||||
}
|
||||
return primeArray;
|
||||
}
|
||||
|
||||
var numPrimes = prompt("How many primes?");
|
||||
|
||||
for (var i = 2; primeArray.length < numPrimes; i++) {
|
||||
PrimeCheck(i);
|
||||
}
|
||||
console.log(primeArray);
|
||||
`
|
||||
|
||||
const result = await strategy.applyDiff(original, diff)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(expected)
|
||||
}
|
||||
})
|
||||
|
||||
describe("error handling and edge cases", () => {
|
||||
it("should reject completely invalid diff format", async () => {
|
||||
const original = "line1\nline2\nline3"
|
||||
const invalidDiff = "this is not a diff at all"
|
||||
|
||||
const result = await strategy.applyDiff(original, invalidDiff)
|
||||
expect(result.success).toBe(false)
|
||||
})
|
||||
|
||||
it("should reject diff with invalid hunk format", async () => {
|
||||
const original = "line1\nline2\nline3"
|
||||
const invalidHunkDiff = `--- a/file.txt
|
||||
+++ b/file.txt
|
||||
invalid hunk header
|
||||
line1
|
||||
-line2
|
||||
+new line`
|
||||
|
||||
const result = await strategy.applyDiff(original, invalidHunkDiff)
|
||||
expect(result.success).toBe(false)
|
||||
})
|
||||
|
||||
it("should fail when diff tries to modify non-existent content", async () => {
|
||||
const original = "line1\nline2\nline3"
|
||||
const nonMatchingDiff = `--- a/file.txt
|
||||
+++ b/file.txt
|
||||
@@ ... @@
|
||||
line1
|
||||
-nonexistent line
|
||||
+new line
|
||||
line3`
|
||||
|
||||
const result = await strategy.applyDiff(original, nonMatchingDiff)
|
||||
expect(result.success).toBe(false)
|
||||
})
|
||||
|
||||
it("should handle overlapping hunks", async () => {
|
||||
const original = `line1
|
||||
line2
|
||||
line3
|
||||
line4
|
||||
line5`
|
||||
const overlappingDiff = `--- a/file.txt
|
||||
+++ b/file.txt
|
||||
@@ ... @@
|
||||
line1
|
||||
line2
|
||||
-line3
|
||||
+modified3
|
||||
line4
|
||||
@@ ... @@
|
||||
line2
|
||||
-line3
|
||||
-line4
|
||||
+modified3and4
|
||||
line5`
|
||||
|
||||
const result = await strategy.applyDiff(original, overlappingDiff)
|
||||
expect(result.success).toBe(false)
|
||||
})
|
||||
|
||||
it("should handle empty lines modifications", async () => {
|
||||
const original = `line1
|
||||
|
||||
line3
|
||||
|
||||
line5`
|
||||
const emptyLinesDiff = `--- a/file.txt
|
||||
+++ b/file.txt
|
||||
@@ ... @@
|
||||
line1
|
||||
|
||||
-line3
|
||||
+line3modified
|
||||
|
||||
line5`
|
||||
|
||||
const result = await strategy.applyDiff(original, emptyLinesDiff)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(`line1
|
||||
|
||||
line3modified
|
||||
|
||||
line5`)
|
||||
}
|
||||
})
|
||||
|
||||
it("should handle mixed line endings in diff", async () => {
|
||||
const original = "line1\r\nline2\nline3\r\n"
|
||||
const mixedEndingsDiff = `--- a/file.txt
|
||||
+++ b/file.txt
|
||||
@@ ... @@
|
||||
line1\r
|
||||
-line2
|
||||
+modified2\r
|
||||
line3`
|
||||
|
||||
const result = await strategy.applyDiff(original, mixedEndingsDiff)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe("line1\r\nmodified2\r\nline3\r\n")
|
||||
}
|
||||
})
|
||||
|
||||
it("should handle partial line modifications", async () => {
|
||||
const original = "const value = oldValue + 123;"
|
||||
const partialDiff = `--- a/file.txt
|
||||
+++ b/file.txt
|
||||
@@ ... @@
|
||||
-const value = oldValue + 123;
|
||||
+const value = newValue + 123;`
|
||||
|
||||
const result = await strategy.applyDiff(original, partialDiff)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe("const value = newValue + 123;")
|
||||
}
|
||||
})
|
||||
|
||||
it("should handle slightly malformed but recoverable diff", async () => {
|
||||
const original = "line1\nline2\nline3"
|
||||
// Missing space after --- and +++
|
||||
const slightlyBadDiff = `---a/file.txt
|
||||
+++b/file.txt
|
||||
@@ ... @@
|
||||
line1
|
||||
-line2
|
||||
+new line
|
||||
line3`
|
||||
|
||||
const result = await strategy.applyDiff(original, slightlyBadDiff)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe("line1\nnew line\nline3")
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
describe("similar code sections", () => {
|
||||
it("should correctly modify the right section when similar code exists", async () => {
|
||||
const original = `function add(a, b) {
|
||||
return a + b;
|
||||
}
|
||||
|
||||
function subtract(a, b) {
|
||||
return a - b;
|
||||
}
|
||||
|
||||
function multiply(a, b) {
|
||||
return a + b; // Bug here
|
||||
}`
|
||||
|
||||
const diff = `--- a/math.js
|
||||
+++ b/math.js
|
||||
@@ ... @@
|
||||
function multiply(a, b) {
|
||||
- return a + b; // Bug here
|
||||
+ return a * b;
|
||||
}`
|
||||
|
||||
const result = await strategy.applyDiff(original, diff)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(`function add(a, b) {
|
||||
return a + b;
|
||||
}
|
||||
|
||||
function subtract(a, b) {
|
||||
return a - b;
|
||||
}
|
||||
|
||||
function multiply(a, b) {
|
||||
return a * b;
|
||||
}`)
|
||||
}
|
||||
})
|
||||
|
||||
it("should handle multiple similar sections with correct context", async () => {
|
||||
const original = `if (condition) {
|
||||
doSomething();
|
||||
doSomething();
|
||||
doSomething();
|
||||
}
|
||||
|
||||
if (otherCondition) {
|
||||
doSomething();
|
||||
doSomething();
|
||||
doSomething();
|
||||
}`
|
||||
|
||||
const diff = `--- a/file.js
|
||||
+++ b/file.js
|
||||
@@ ... @@
|
||||
if (otherCondition) {
|
||||
doSomething();
|
||||
- doSomething();
|
||||
+ doSomethingElse();
|
||||
doSomething();
|
||||
}`
|
||||
|
||||
const result = await strategy.applyDiff(original, diff)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(`if (condition) {
|
||||
doSomething();
|
||||
doSomething();
|
||||
doSomething();
|
||||
}
|
||||
|
||||
if (otherCondition) {
|
||||
doSomething();
|
||||
doSomethingElse();
|
||||
doSomething();
|
||||
}`)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
describe("hunk splitting", () => {
|
||||
it("should handle large diffs with multiple non-contiguous changes", async () => {
|
||||
const original = `import { readFile } from 'fs';
|
||||
import { join } from 'path';
|
||||
import { Logger } from './logger';
|
||||
|
||||
const logger = new Logger();
|
||||
|
||||
async function processFile(filePath: string) {
|
||||
try {
|
||||
const data = await readFile(filePath, 'utf8');
|
||||
logger.info('File read successfully');
|
||||
return data;
|
||||
} catch (error) {
|
||||
logger.error('Failed to read file:', error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
function validateInput(input: string): boolean {
|
||||
if (!input) {
|
||||
logger.warn('Empty input received');
|
||||
return false;
|
||||
}
|
||||
return input.length > 0;
|
||||
}
|
||||
|
||||
async function writeOutput(data: string) {
|
||||
logger.info('Processing output');
|
||||
// TODO: Implement output writing
|
||||
return Promise.resolve();
|
||||
}
|
||||
|
||||
function parseConfig(configPath: string) {
|
||||
logger.debug('Reading config from:', configPath);
|
||||
// Basic config parsing
|
||||
return {
|
||||
enabled: true,
|
||||
maxRetries: 3
|
||||
};
|
||||
}
|
||||
|
||||
export {
|
||||
processFile,
|
||||
validateInput,
|
||||
writeOutput,
|
||||
parseConfig
|
||||
};`
|
||||
|
||||
const diff = `--- a/file.ts
|
||||
+++ b/file.ts
|
||||
@@ ... @@
|
||||
-import { readFile } from 'fs';
|
||||
+import { readFile, writeFile } from 'fs';
|
||||
import { join } from 'path';
|
||||
-import { Logger } from './logger';
|
||||
+import { Logger } from './utils/logger';
|
||||
+import { Config } from './types';
|
||||
|
||||
-const logger = new Logger();
|
||||
+const logger = new Logger('FileProcessor');
|
||||
|
||||
async function processFile(filePath: string) {
|
||||
try {
|
||||
const data = await readFile(filePath, 'utf8');
|
||||
- logger.info('File read successfully');
|
||||
+ logger.info(\`File \${filePath} read successfully\`);
|
||||
return data;
|
||||
} catch (error) {
|
||||
- logger.error('Failed to read file:', error);
|
||||
+ logger.error(\`Failed to read file \${filePath}:\`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
function validateInput(input: string): boolean {
|
||||
if (!input) {
|
||||
- logger.warn('Empty input received');
|
||||
+ logger.warn('Validation failed: Empty input received');
|
||||
return false;
|
||||
}
|
||||
- return input.length > 0;
|
||||
+ return input.trim().length > 0;
|
||||
}
|
||||
|
||||
-async function writeOutput(data: string) {
|
||||
- logger.info('Processing output');
|
||||
- // TODO: Implement output writing
|
||||
- return Promise.resolve();
|
||||
+async function writeOutput(data: string, outputPath: string) {
|
||||
+ try {
|
||||
+ await writeFile(outputPath, data, 'utf8');
|
||||
+ logger.info(\`Output written to \${outputPath}\`);
|
||||
+ } catch (error) {
|
||||
+ logger.error(\`Failed to write output to \${outputPath}:\`, error);
|
||||
+ throw error;
|
||||
+ }
|
||||
}
|
||||
|
||||
-function parseConfig(configPath: string) {
|
||||
- logger.debug('Reading config from:', configPath);
|
||||
- // Basic config parsing
|
||||
- return {
|
||||
- enabled: true,
|
||||
- maxRetries: 3
|
||||
- };
|
||||
+async function parseConfig(configPath: string): Promise<Config> {
|
||||
+ try {
|
||||
+ const configData = await readFile(configPath, 'utf8');
|
||||
+ logger.debug(\`Reading config from \${configPath}\`);
|
||||
+ return JSON.parse(configData);
|
||||
+ } catch (error) {
|
||||
+ logger.error(\`Failed to parse config from \${configPath}:\`, error);
|
||||
+ throw error;
|
||||
+ }
|
||||
}
|
||||
|
||||
export {
|
||||
processFile,
|
||||
validateInput,
|
||||
writeOutput,
|
||||
- parseConfig
|
||||
+ parseConfig,
|
||||
+ type Config
|
||||
};`
|
||||
|
||||
const expected = `import { readFile, writeFile } from 'fs';
|
||||
import { join } from 'path';
|
||||
import { Logger } from './utils/logger';
|
||||
import { Config } from './types';
|
||||
|
||||
const logger = new Logger('FileProcessor');
|
||||
|
||||
async function processFile(filePath: string) {
|
||||
try {
|
||||
const data = await readFile(filePath, 'utf8');
|
||||
logger.info(\`File \${filePath} read successfully\`);
|
||||
return data;
|
||||
} catch (error) {
|
||||
logger.error(\`Failed to read file \${filePath}:\`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
function validateInput(input: string): boolean {
|
||||
if (!input) {
|
||||
logger.warn('Validation failed: Empty input received');
|
||||
return false;
|
||||
}
|
||||
return input.trim().length > 0;
|
||||
}
|
||||
|
||||
async function writeOutput(data: string, outputPath: string) {
|
||||
try {
|
||||
await writeFile(outputPath, data, 'utf8');
|
||||
logger.info(\`Output written to \${outputPath}\`);
|
||||
} catch (error) {
|
||||
logger.error(\`Failed to write output to \${outputPath}:\`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
async function parseConfig(configPath: string): Promise<Config> {
|
||||
try {
|
||||
const configData = await readFile(configPath, 'utf8');
|
||||
logger.debug(\`Reading config from \${configPath}\`);
|
||||
return JSON.parse(configData);
|
||||
} catch (error) {
|
||||
logger.error(\`Failed to parse config from \${configPath}:\`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
export {
|
||||
processFile,
|
||||
validateInput,
|
||||
writeOutput,
|
||||
parseConfig,
|
||||
type Config
|
||||
};`
|
||||
|
||||
const result = await strategy.applyDiff(original, diff)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(expected)
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
File diff suppressed because it is too large
Load Diff
@ -1,228 +0,0 @@
|
||||
import { UnifiedDiffStrategy } from "../unified"
|
||||
|
||||
describe("UnifiedDiffStrategy", () => {
|
||||
let strategy: UnifiedDiffStrategy
|
||||
|
||||
beforeEach(() => {
|
||||
strategy = new UnifiedDiffStrategy()
|
||||
})
|
||||
|
||||
describe("getToolDescription", () => {
|
||||
it("should return tool description with correct cwd", () => {
|
||||
const cwd = "/test/path"
|
||||
const description = strategy.getToolDescription({ cwd })
|
||||
|
||||
expect(description).toContain("apply_diff")
|
||||
expect(description).toContain(cwd)
|
||||
expect(description).toContain("Parameters:")
|
||||
expect(description).toContain("Format Requirements:")
|
||||
})
|
||||
})
|
||||
|
||||
describe("applyDiff", () => {
|
||||
it("should successfully apply a function modification diff", async () => {
|
||||
const originalContent = `import { Logger } from '../logger';
|
||||
|
||||
function calculateTotal(items: number[]): number {
|
||||
return items.reduce((sum, item) => {
|
||||
return sum + item;
|
||||
}, 0);
|
||||
}
|
||||
|
||||
export { calculateTotal };`
|
||||
|
||||
const diffContent = `--- src/utils/helper.ts
|
||||
+++ src/utils/helper.ts
|
||||
@@ -1,9 +1,10 @@
|
||||
import { Logger } from '../logger';
|
||||
|
||||
function calculateTotal(items: number[]): number {
|
||||
- return items.reduce((sum, item) => {
|
||||
- return sum + item;
|
||||
+ const total = items.reduce((sum, item) => {
|
||||
+ return sum + item * 1.1; // Add 10% markup
|
||||
}, 0);
|
||||
+ return Math.round(total * 100) / 100; // Round to 2 decimal places
|
||||
}
|
||||
|
||||
export { calculateTotal };`
|
||||
|
||||
const expected = `import { Logger } from '../logger';
|
||||
|
||||
function calculateTotal(items: number[]): number {
|
||||
const total = items.reduce((sum, item) => {
|
||||
return sum + item * 1.1; // Add 10% markup
|
||||
}, 0);
|
||||
return Math.round(total * 100) / 100; // Round to 2 decimal places
|
||||
}
|
||||
|
||||
export { calculateTotal };`
|
||||
|
||||
const result = await strategy.applyDiff(originalContent, diffContent)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(expected)
|
||||
}
|
||||
})
|
||||
|
||||
it("should successfully apply a diff adding a new method", async () => {
|
||||
const originalContent = `class Calculator {
|
||||
add(a: number, b: number): number {
|
||||
return a + b;
|
||||
}
|
||||
}`
|
||||
|
||||
const diffContent = `--- src/Calculator.ts
|
||||
+++ src/Calculator.ts
|
||||
@@ -1,5 +1,9 @@
|
||||
class Calculator {
|
||||
add(a: number, b: number): number {
|
||||
return a + b;
|
||||
}
|
||||
+
|
||||
+ multiply(a: number, b: number): number {
|
||||
+ return a * b;
|
||||
+ }
|
||||
}`
|
||||
|
||||
const expected = `class Calculator {
|
||||
add(a: number, b: number): number {
|
||||
return a + b;
|
||||
}
|
||||
|
||||
multiply(a: number, b: number): number {
|
||||
return a * b;
|
||||
}
|
||||
}`
|
||||
|
||||
const result = await strategy.applyDiff(originalContent, diffContent)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(expected)
|
||||
}
|
||||
})
|
||||
|
||||
it("should successfully apply a diff modifying imports", async () => {
|
||||
const originalContent = `import { useState } from 'react';
|
||||
import { Button } from './components';
|
||||
|
||||
function App() {
|
||||
const [count, setCount] = useState(0);
|
||||
return <Button onClick={() => setCount(count + 1)}>{count}</Button>;
|
||||
}`
|
||||
|
||||
const diffContent = `--- src/App.tsx
|
||||
+++ src/App.tsx
|
||||
@@ -1,7 +1,8 @@
|
||||
-import { useState } from 'react';
|
||||
+import { useState, useEffect } from 'react';
|
||||
import { Button } from './components';
|
||||
|
||||
function App() {
|
||||
const [count, setCount] = useState(0);
|
||||
+ useEffect(() => { document.title = \`Count: \${count}\` }, [count]);
|
||||
return <Button onClick={() => setCount(count + 1)}>{count}</Button>;
|
||||
}`
|
||||
|
||||
const expected = `import { useState, useEffect } from 'react';
|
||||
import { Button } from './components';
|
||||
|
||||
function App() {
|
||||
const [count, setCount] = useState(0);
|
||||
useEffect(() => { document.title = \`Count: \${count}\` }, [count]);
|
||||
return <Button onClick={() => setCount(count + 1)}>{count}</Button>;
|
||||
}`
|
||||
|
||||
const result = await strategy.applyDiff(originalContent, diffContent)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(expected)
|
||||
}
|
||||
})
|
||||
|
||||
it("should successfully apply a diff with multiple hunks", async () => {
|
||||
const originalContent = `import { readFile, writeFile } from 'fs';
|
||||
|
||||
function processFile(path: string) {
|
||||
readFile(path, 'utf8', (err, data) => {
|
||||
if (err) throw err;
|
||||
const processed = data.toUpperCase();
|
||||
writeFile(path, processed, (err) => {
|
||||
if (err) throw err;
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
export { processFile };`
|
||||
|
||||
const diffContent = `--- src/file-processor.ts
|
||||
+++ src/file-processor.ts
|
||||
@@ -1,12 +1,14 @@
|
||||
-import { readFile, writeFile } from 'fs';
|
||||
+import { promises as fs } from 'fs';
|
||||
+import { join } from 'path';
|
||||
|
||||
-function processFile(path: string) {
|
||||
- readFile(path, 'utf8', (err, data) => {
|
||||
- if (err) throw err;
|
||||
+async function processFile(path: string) {
|
||||
+ try {
|
||||
+ const data = await fs.readFile(join(__dirname, path), 'utf8');
|
||||
const processed = data.toUpperCase();
|
||||
- writeFile(path, processed, (err) => {
|
||||
- if (err) throw err;
|
||||
- });
|
||||
- });
|
||||
+ await fs.writeFile(join(__dirname, path), processed);
|
||||
+ } catch (error) {
|
||||
+ console.error('Failed to process file:', error);
|
||||
+ throw error;
|
||||
+ }
|
||||
}
|
||||
|
||||
export { processFile };`
|
||||
|
||||
const expected = `import { promises as fs } from 'fs';
|
||||
import { join } from 'path';
|
||||
|
||||
async function processFile(path: string) {
|
||||
try {
|
||||
const data = await fs.readFile(join(__dirname, path), 'utf8');
|
||||
const processed = data.toUpperCase();
|
||||
await fs.writeFile(join(__dirname, path), processed);
|
||||
} catch (error) {
|
||||
console.error('Failed to process file:', error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
export { processFile };`
|
||||
|
||||
const result = await strategy.applyDiff(originalContent, diffContent)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(expected)
|
||||
}
|
||||
})
|
||||
|
||||
it("should handle empty original content", async () => {
|
||||
const originalContent = ""
|
||||
const diffContent = `--- empty.ts
|
||||
+++ empty.ts
|
||||
@@ -0,0 +1,3 @@
|
||||
+export function greet(name: string): string {
|
||||
+ return \`Hello, \${name}!\`;
|
||||
+}`
|
||||
|
||||
const expected = `export function greet(name: string): string {
|
||||
return \`Hello, \${name}!\`;
|
||||
}\n`
|
||||
|
||||
const result = await strategy.applyDiff(originalContent, diffContent)
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.content).toBe(expected)
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -1,295 +0,0 @@
|
||||
import { applyContextMatching, applyDMP, applyGitFallback } from "../edit-strategies"
|
||||
import { Hunk } from "../types"
|
||||
|
||||
const testCases = [
|
||||
{
|
||||
name: "should return original content if no match is found",
|
||||
hunk: {
|
||||
changes: [
|
||||
{ type: "context", content: "line1" },
|
||||
{ type: "add", content: "line2" },
|
||||
],
|
||||
} as Hunk,
|
||||
content: ["line1", "line3"],
|
||||
matchPosition: -1,
|
||||
expected: {
|
||||
confidence: 0,
|
||||
result: ["line1", "line3"],
|
||||
},
|
||||
expectedResult: "line1\nline3",
|
||||
strategies: ["context", "dmp"],
|
||||
},
|
||||
{
|
||||
name: "should apply a simple add change",
|
||||
hunk: {
|
||||
changes: [
|
||||
{ type: "context", content: "line1" },
|
||||
{ type: "add", content: "line2" },
|
||||
],
|
||||
} as Hunk,
|
||||
content: ["line1", "line3"],
|
||||
matchPosition: 0,
|
||||
expected: {
|
||||
confidence: 1,
|
||||
result: ["line1", "line2", "line3"],
|
||||
},
|
||||
expectedResult: "line1\nline2\nline3",
|
||||
strategies: ["context", "dmp"],
|
||||
},
|
||||
{
|
||||
name: "should apply a simple remove change",
|
||||
hunk: {
|
||||
changes: [
|
||||
{ type: "context", content: "line1" },
|
||||
{ type: "remove", content: "line2" },
|
||||
],
|
||||
} as Hunk,
|
||||
content: ["line1", "line2", "line3"],
|
||||
matchPosition: 0,
|
||||
expected: {
|
||||
confidence: 1,
|
||||
result: ["line1", "line3"],
|
||||
},
|
||||
expectedResult: "line1\nline3",
|
||||
strategies: ["context", "dmp"],
|
||||
},
|
||||
{
|
||||
name: "should apply a simple context change",
|
||||
hunk: {
|
||||
changes: [{ type: "context", content: "line1" }],
|
||||
} as Hunk,
|
||||
content: ["line1", "line2", "line3"],
|
||||
matchPosition: 0,
|
||||
expected: {
|
||||
confidence: 1,
|
||||
result: ["line1", "line2", "line3"],
|
||||
},
|
||||
expectedResult: "line1\nline2\nline3",
|
||||
strategies: ["context", "dmp"],
|
||||
},
|
||||
{
|
||||
name: "should apply a multi-line add change",
|
||||
hunk: {
|
||||
changes: [
|
||||
{ type: "context", content: "line1" },
|
||||
{ type: "add", content: "line2\nline3" },
|
||||
],
|
||||
} as Hunk,
|
||||
content: ["line1", "line4"],
|
||||
matchPosition: 0,
|
||||
expected: {
|
||||
confidence: 1,
|
||||
result: ["line1", "line2\nline3", "line4"],
|
||||
},
|
||||
expectedResult: "line1\nline2\nline3\nline4",
|
||||
strategies: ["context", "dmp"],
|
||||
},
|
||||
{
|
||||
name: "should apply a multi-line remove change",
|
||||
hunk: {
|
||||
changes: [
|
||||
{ type: "context", content: "line1" },
|
||||
{ type: "remove", content: "line2\nline3" },
|
||||
],
|
||||
} as Hunk,
|
||||
content: ["line1", "line2", "line3", "line4"],
|
||||
matchPosition: 0,
|
||||
expected: {
|
||||
confidence: 1,
|
||||
result: ["line1", "line4"],
|
||||
},
|
||||
expectedResult: "line1\nline4",
|
||||
strategies: ["context", "dmp"],
|
||||
},
|
||||
{
|
||||
name: "should apply a multi-line context change",
|
||||
hunk: {
|
||||
changes: [
|
||||
{ type: "context", content: "line1" },
|
||||
{ type: "context", content: "line2\nline3" },
|
||||
],
|
||||
} as Hunk,
|
||||
content: ["line1", "line2", "line3", "line4"],
|
||||
matchPosition: 0,
|
||||
expected: {
|
||||
confidence: 1,
|
||||
result: ["line1", "line2\nline3", "line4"],
|
||||
},
|
||||
expectedResult: "line1\nline2\nline3\nline4",
|
||||
strategies: ["context", "dmp"],
|
||||
},
|
||||
{
|
||||
name: "should apply a change with indentation",
|
||||
hunk: {
|
||||
changes: [
|
||||
{ type: "context", content: " line1" },
|
||||
{ type: "add", content: " line2" },
|
||||
],
|
||||
} as Hunk,
|
||||
content: [" line1", " line3"],
|
||||
matchPosition: 0,
|
||||
expected: {
|
||||
confidence: 1,
|
||||
result: [" line1", " line2", " line3"],
|
||||
},
|
||||
expectedResult: " line1\n line2\n line3",
|
||||
strategies: ["context", "dmp"],
|
||||
},
|
||||
{
|
||||
name: "should apply a change with mixed indentation",
|
||||
hunk: {
|
||||
changes: [
|
||||
{ type: "context", content: "\tline1" },
|
||||
{ type: "add", content: " line2" },
|
||||
],
|
||||
} as Hunk,
|
||||
content: ["\tline1", " line3"],
|
||||
matchPosition: 0,
|
||||
expected: {
|
||||
confidence: 1,
|
||||
result: ["\tline1", " line2", " line3"],
|
||||
},
|
||||
expectedResult: "\tline1\n line2\n line3",
|
||||
strategies: ["context", "dmp"],
|
||||
},
|
||||
{
|
||||
name: "should apply a change with mixed indentation and multi-line",
|
||||
hunk: {
|
||||
changes: [
|
||||
{ type: "context", content: " line1" },
|
||||
{ type: "add", content: "\tline2\n line3" },
|
||||
],
|
||||
} as Hunk,
|
||||
content: [" line1", " line4"],
|
||||
matchPosition: 0,
|
||||
expected: {
|
||||
confidence: 1,
|
||||
result: [" line1", "\tline2\n line3", " line4"],
|
||||
},
|
||||
expectedResult: " line1\n\tline2\n line3\n line4",
|
||||
strategies: ["context", "dmp"],
|
||||
},
|
||||
{
|
||||
name: "should apply a complex change with mixed indentation and multi-line",
|
||||
hunk: {
|
||||
changes: [
|
||||
{ type: "context", content: " line1" },
|
||||
{ type: "remove", content: " line2" },
|
||||
{ type: "add", content: "\tline3\n line4" },
|
||||
{ type: "context", content: " line5" },
|
||||
],
|
||||
} as Hunk,
|
||||
content: [" line1", " line2", " line5", " line6"],
|
||||
matchPosition: 0,
|
||||
expected: {
|
||||
confidence: 1,
|
||||
result: [" line1", "\tline3\n line4", " line5", " line6"],
|
||||
},
|
||||
expectedResult: " line1\n\tline3\n line4\n line5\n line6",
|
||||
strategies: ["context", "dmp"],
|
||||
},
|
||||
{
|
||||
name: "should apply a complex change with mixed indentation and multi-line and context",
|
||||
hunk: {
|
||||
changes: [
|
||||
{ type: "context", content: " line1" },
|
||||
{ type: "remove", content: " line2" },
|
||||
{ type: "add", content: "\tline3\n line4" },
|
||||
{ type: "context", content: " line5" },
|
||||
{ type: "context", content: " line6" },
|
||||
],
|
||||
} as Hunk,
|
||||
content: [" line1", " line2", " line5", " line6", " line7"],
|
||||
matchPosition: 0,
|
||||
expected: {
|
||||
confidence: 1,
|
||||
result: [" line1", "\tline3\n line4", " line5", " line6", " line7"],
|
||||
},
|
||||
expectedResult: " line1\n\tline3\n line4\n line5\n line6\n line7",
|
||||
strategies: ["context", "dmp"],
|
||||
},
|
||||
{
|
||||
name: "should apply a complex change with mixed indentation and multi-line and context and a different match position",
|
||||
hunk: {
|
||||
changes: [
|
||||
{ type: "context", content: " line1" },
|
||||
{ type: "remove", content: " line2" },
|
||||
{ type: "add", content: "\tline3\n line4" },
|
||||
{ type: "context", content: " line5" },
|
||||
{ type: "context", content: " line6" },
|
||||
],
|
||||
} as Hunk,
|
||||
content: [" line0", " line1", " line2", " line5", " line6", " line7"],
|
||||
matchPosition: 1,
|
||||
expected: {
|
||||
confidence: 1,
|
||||
result: [" line0", " line1", "\tline3\n line4", " line5", " line6", " line7"],
|
||||
},
|
||||
expectedResult: " line0\n line1\n\tline3\n line4\n line5\n line6\n line7",
|
||||
strategies: ["context", "dmp"],
|
||||
},
|
||||
]
|
||||
|
||||
describe("applyContextMatching", () => {
|
||||
testCases.forEach(({ name, hunk, content, matchPosition, expected, strategies, expectedResult }) => {
|
||||
if (!strategies?.includes("context")) {
|
||||
return
|
||||
}
|
||||
it(name, () => {
|
||||
const result = applyContextMatching(hunk, content, matchPosition)
|
||||
expect(result.result.join("\n")).toEqual(expectedResult)
|
||||
expect(result.confidence).toBeGreaterThanOrEqual(expected.confidence)
|
||||
expect(result.strategy).toBe("context")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("applyDMP", () => {
|
||||
testCases.forEach(({ name, hunk, content, matchPosition, expected, strategies, expectedResult }) => {
|
||||
if (!strategies?.includes("dmp")) {
|
||||
return
|
||||
}
|
||||
it(name, () => {
|
||||
const result = applyDMP(hunk, content, matchPosition)
|
||||
expect(result.result.join("\n")).toEqual(expectedResult)
|
||||
expect(result.confidence).toBeGreaterThanOrEqual(expected.confidence)
|
||||
expect(result.strategy).toBe("dmp")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("applyGitFallback", () => {
|
||||
it("should successfully apply changes using git operations", async () => {
|
||||
const hunk = {
|
||||
changes: [
|
||||
{ type: "context", content: "line1", indent: "" },
|
||||
{ type: "remove", content: "line2", indent: "" },
|
||||
{ type: "add", content: "new line2", indent: "" },
|
||||
{ type: "context", content: "line3", indent: "" },
|
||||
],
|
||||
} as Hunk
|
||||
|
||||
const content = ["line1", "line2", "line3"]
|
||||
const result = await applyGitFallback(hunk, content)
|
||||
|
||||
expect(result.result.join("\n")).toEqual("line1\nnew line2\nline3")
|
||||
expect(result.confidence).toBe(1)
|
||||
expect(result.strategy).toBe("git-fallback")
|
||||
})
|
||||
|
||||
it("should return original content with 0 confidence when changes cannot be applied", async () => {
|
||||
const hunk = {
|
||||
changes: [
|
||||
{ type: "context", content: "nonexistent", indent: "" },
|
||||
{ type: "add", content: "new line", indent: "" },
|
||||
],
|
||||
} as Hunk
|
||||
|
||||
const content = ["line1", "line2", "line3"]
|
||||
const result = await applyGitFallback(hunk, content)
|
||||
|
||||
expect(result.result).toEqual(content)
|
||||
expect(result.confidence).toBe(0)
|
||||
expect(result.strategy).toBe("git-fallback")
|
||||
})
|
||||
})
|
||||
@ -1,262 +0,0 @@
|
||||
import { findAnchorMatch, findExactMatch, findSimilarityMatch, findLevenshteinMatch } from "../search-strategies"
|
||||
|
||||
type SearchStrategy = (
|
||||
searchStr: string,
|
||||
content: string[],
|
||||
startIndex?: number,
|
||||
) => {
|
||||
index: number
|
||||
confidence: number
|
||||
strategy: string
|
||||
}
|
||||
|
||||
const testCases = [
|
||||
{
|
||||
name: "should return no match if the search string is not found",
|
||||
searchStr: "not found",
|
||||
content: ["line1", "line2", "line3"],
|
||||
expected: { index: -1, confidence: 0 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return a match if the search string is found",
|
||||
searchStr: "line2",
|
||||
content: ["line1", "line2", "line3"],
|
||||
expected: { index: 1, confidence: 1 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return a match with correct index when startIndex is provided",
|
||||
searchStr: "line3",
|
||||
content: ["line1", "line2", "line3", "line4", "line3"],
|
||||
startIndex: 3,
|
||||
expected: { index: 4, confidence: 1 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return a match even if there are more lines in content",
|
||||
searchStr: "line2",
|
||||
content: ["line1", "line2", "line3", "line4", "line5"],
|
||||
expected: { index: 1, confidence: 1 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return a match even if the search string is at the beginning of the content",
|
||||
searchStr: "line1",
|
||||
content: ["line1", "line2", "line3"],
|
||||
expected: { index: 0, confidence: 1 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return a match even if the search string is at the end of the content",
|
||||
searchStr: "line3",
|
||||
content: ["line1", "line2", "line3"],
|
||||
expected: { index: 2, confidence: 1 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return a match for a multi-line search string",
|
||||
searchStr: "line2\nline3",
|
||||
content: ["line1", "line2", "line3", "line4"],
|
||||
expected: { index: 1, confidence: 1 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return no match if a multi-line search string is not found",
|
||||
searchStr: "line2\nline4",
|
||||
content: ["line1", "line2", "line3", "line4"],
|
||||
expected: { index: -1, confidence: 0 },
|
||||
strategies: ["exact", "similarity"],
|
||||
},
|
||||
{
|
||||
name: "should return a match with indentation",
|
||||
searchStr: " line2",
|
||||
content: ["line1", " line2", "line3"],
|
||||
expected: { index: 1, confidence: 1 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return a match with more complex indentation",
|
||||
searchStr: " line3",
|
||||
content: [" line1", " line2", " line3", " line4"],
|
||||
expected: { index: 2, confidence: 1 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return a match with mixed indentation",
|
||||
searchStr: "\tline2",
|
||||
content: [" line1", "\tline2", " line3"],
|
||||
expected: { index: 1, confidence: 1 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return a match with mixed indentation and multi-line",
|
||||
searchStr: " line2\n\tline3",
|
||||
content: ["line1", " line2", "\tline3", " line4"],
|
||||
expected: { index: 1, confidence: 1 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return no match if mixed indentation and multi-line is not found",
|
||||
searchStr: " line2\n line4",
|
||||
content: ["line1", " line2", "\tline3", " line4"],
|
||||
expected: { index: -1, confidence: 0 },
|
||||
strategies: ["exact", "similarity"],
|
||||
},
|
||||
{
|
||||
name: "should return a match with leading and trailing spaces",
|
||||
searchStr: " line2 ",
|
||||
content: ["line1", " line2 ", "line3"],
|
||||
expected: { index: 1, confidence: 1 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return a match with leading and trailing tabs",
|
||||
searchStr: "\tline2\t",
|
||||
content: ["line1", "\tline2\t", "line3"],
|
||||
expected: { index: 1, confidence: 1 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return a match with mixed leading and trailing spaces and tabs",
|
||||
searchStr: " \tline2\t ",
|
||||
content: ["line1", " \tline2\t ", "line3"],
|
||||
expected: { index: 1, confidence: 1 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return a match with mixed leading and trailing spaces and tabs and multi-line",
|
||||
searchStr: " \tline2\t \n line3 ",
|
||||
content: ["line1", " \tline2\t ", " line3 ", "line4"],
|
||||
expected: { index: 1, confidence: 1 },
|
||||
strategies: ["exact", "similarity", "levenshtein"],
|
||||
},
|
||||
{
|
||||
name: "should return no match if mixed leading and trailing spaces and tabs and multi-line is not found",
|
||||
searchStr: " \tline2\t \n line4 ",
|
||||
content: ["line1", " \tline2\t ", " line3 ", "line4"],
|
||||
expected: { index: -1, confidence: 0 },
|
||||
strategies: ["exact", "similarity"],
|
||||
},
|
||||
]
|
||||
|
||||
describe("findExactMatch", () => {
|
||||
testCases.forEach(({ name, searchStr, content, startIndex, expected, strategies }) => {
|
||||
if (!strategies?.includes("exact")) {
|
||||
return
|
||||
}
|
||||
it(name, () => {
|
||||
const result = findExactMatch(searchStr, content, startIndex)
|
||||
expect(result.index).toBe(expected.index)
|
||||
expect(result.confidence).toBeGreaterThanOrEqual(expected.confidence)
|
||||
expect(result.strategy).toMatch(/exact(-overlapping)?/)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("findAnchorMatch", () => {
|
||||
const anchorTestCases = [
|
||||
{
|
||||
name: "should return no match if no anchors are found",
|
||||
searchStr: " \n \n ",
|
||||
content: ["line1", "line2", "line3"],
|
||||
expected: { index: -1, confidence: 0 },
|
||||
},
|
||||
{
|
||||
name: "should return no match if anchor positions cannot be validated",
|
||||
searchStr: "unique line\ncontext line 1\ncontext line 2",
|
||||
content: [
|
||||
"different line 1",
|
||||
"different line 2",
|
||||
"different line 3",
|
||||
"another unique line",
|
||||
"context line 1",
|
||||
"context line 2",
|
||||
],
|
||||
expected: { index: -1, confidence: 0 },
|
||||
},
|
||||
{
|
||||
name: "should return a match if anchor positions can be validated",
|
||||
searchStr: "unique line\ncontext line 1\ncontext line 2",
|
||||
content: ["line1", "line2", "unique line", "context line 1", "context line 2", "line 6"],
|
||||
expected: { index: 2, confidence: 1 },
|
||||
},
|
||||
{
|
||||
name: "should return a match with correct index when startIndex is provided",
|
||||
searchStr: "unique line\ncontext line 1\ncontext line 2",
|
||||
content: ["line1", "line2", "line3", "unique line", "context line 1", "context line 2", "line 7"],
|
||||
startIndex: 3,
|
||||
expected: { index: 3, confidence: 1 },
|
||||
},
|
||||
{
|
||||
name: "should return a match even if there are more lines in content",
|
||||
searchStr: "unique line\ncontext line 1\ncontext line 2",
|
||||
content: [
|
||||
"line1",
|
||||
"line2",
|
||||
"unique line",
|
||||
"context line 1",
|
||||
"context line 2",
|
||||
"line 6",
|
||||
"extra line 1",
|
||||
"extra line 2",
|
||||
],
|
||||
expected: { index: 2, confidence: 1 },
|
||||
},
|
||||
{
|
||||
name: "should return a match even if the anchor is at the beginning of the content",
|
||||
searchStr: "unique line\ncontext line 1\ncontext line 2",
|
||||
content: ["unique line", "context line 1", "context line 2", "line 6"],
|
||||
expected: { index: 0, confidence: 1 },
|
||||
},
|
||||
{
|
||||
name: "should return a match even if the anchor is at the end of the content",
|
||||
searchStr: "unique line\ncontext line 1\ncontext line 2",
|
||||
content: ["line1", "line2", "unique line", "context line 1", "context line 2"],
|
||||
expected: { index: 2, confidence: 1 },
|
||||
},
|
||||
{
|
||||
name: "should return no match if no valid anchor is found",
|
||||
searchStr: "non-unique line\ncontext line 1\ncontext line 2",
|
||||
content: ["line1", "line2", "non-unique line", "context line 1", "context line 2", "non-unique line"],
|
||||
expected: { index: -1, confidence: 0 },
|
||||
},
|
||||
]
|
||||
|
||||
anchorTestCases.forEach(({ name, searchStr, content, startIndex, expected }) => {
|
||||
it(name, () => {
|
||||
const result = findAnchorMatch(searchStr, content, startIndex)
|
||||
expect(result.index).toBe(expected.index)
|
||||
expect(result.confidence).toBeGreaterThanOrEqual(expected.confidence)
|
||||
expect(result.strategy).toBe("anchor")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("findSimilarityMatch", () => {
|
||||
testCases.forEach(({ name, searchStr, content, startIndex, expected, strategies }) => {
|
||||
if (!strategies?.includes("similarity")) {
|
||||
return
|
||||
}
|
||||
it(name, () => {
|
||||
const result = findSimilarityMatch(searchStr, content, startIndex)
|
||||
expect(result.index).toBe(expected.index)
|
||||
expect(result.confidence).toBeGreaterThanOrEqual(expected.confidence)
|
||||
expect(result.strategy).toBe("similarity")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("findLevenshteinMatch", () => {
|
||||
testCases.forEach(({ name, searchStr, content, startIndex, expected, strategies }) => {
|
||||
if (!strategies?.includes("levenshtein")) {
|
||||
return
|
||||
}
|
||||
it(name, () => {
|
||||
const result = findLevenshteinMatch(searchStr, content, startIndex)
|
||||
expect(result.index).toBe(expected.index)
|
||||
expect(result.confidence).toBeGreaterThanOrEqual(expected.confidence)
|
||||
expect(result.strategy).toBe("levenshtein")
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -157,7 +157,7 @@ export async function applyGitFallback(app: App, hunk: Hunk, content: string[]):
	const vaultBasePath = adapter.getBasePath();
	const tmpGitPath = normalizePath(path.join(vaultBasePath, ".tmp_git"));

	console.log("tmpGitPath", tmpGitPath)
	// console.log("tmpGitPath", tmpGitPath)

	try {
		const exists = await adapter.exists(tmpGitPath);
@@ -190,22 +190,22 @@ export async function applyGitFallback(app: App, hunk: Hunk, content: string[]):
		fs.writeFileSync(filePath, originalText)
		await git.add("file.txt")
		const originalCommit = await git.commit("original")
		console.log("Strategy 1 - Original commit:", originalCommit.commit)
		// console.log("Strategy 1 - Original commit:", originalCommit.commit)

		fs.writeFileSync(filePath, searchText)
		await git.add("file.txt")
		const searchCommit1 = await git.commit("search")
		console.log("Strategy 1 - Search commit:", searchCommit1.commit)
		// const searchCommit1 = await git.commit("search")
		// console.log("Strategy 1 - Search commit:", searchCommit1.commit)

		fs.writeFileSync(filePath, replaceText)
		await git.add("file.txt")
		const replaceCommit = await git.commit("replace")
		console.log("Strategy 1 - Replace commit:", replaceCommit.commit)
		// console.log("Strategy 1 - Replace commit:", replaceCommit.commit)

		console.log("Strategy 1 - Attempting checkout of:", originalCommit.commit)
		// console.log("Strategy 1 - Attempting checkout of:", originalCommit.commit)
		await git.raw(["checkout", originalCommit.commit])
		try {
			console.log("Strategy 1 - Attempting cherry-pick of:", replaceCommit.commit)
			// console.log("Strategy 1 - Attempting cherry-pick of:", replaceCommit.commit)
			await git.raw(["cherry-pick", "--minimal", replaceCommit.commit])

			const newText = fs.readFileSync(filePath, "utf-8")
@@ -231,23 +231,23 @@ export async function applyGitFallback(app: App, hunk: Hunk, content: string[]):
		await git.add("file.txt")
		const searchCommit = await git.commit("search")
		const searchHash = searchCommit.commit.replace(/^HEAD /, "")
		console.log("Strategy 2 - Search commit:", searchHash)
		// console.log("Strategy 2 - Search commit:", searchHash)

		fs.writeFileSync(filePath, replaceText)
		await git.add("file.txt")
		const replaceCommit = await git.commit("replace")
		const replaceHash = replaceCommit.commit.replace(/^HEAD /, "")
		console.log("Strategy 2 - Replace commit:", replaceHash)
		// console.log("Strategy 2 - Replace commit:", replaceHash)

		console.log("Strategy 2 - Attempting checkout of:", searchHash)
		// console.log("Strategy 2 - Attempting checkout of:", searchHash)
		await git.raw(["checkout", searchHash])
		fs.writeFileSync(filePath, originalText)
		await git.add("file.txt")
		const originalCommit2 = await git.commit("original")
		console.log("Strategy 2 - Original commit:", originalCommit2.commit)
		// const originalCommit2 = await git.commit("original")
		// console.log("Strategy 2 - Original commit:", originalCommit2.commit)

		try {
			console.log("Strategy 2 - Attempting cherry-pick of:", replaceHash)
			// console.log("Strategy 2 - Attempting cherry-pick of:", replaceHash)
			await git.raw(["cherry-pick", "--minimal", replaceHash])

			const newText = fs.readFileSync(filePath, "utf-8")
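The hunks above only silence logging, but the surrounding flow is easier to follow with the core trick spelled out: commit three states of a scratch file (original, search, replace), check out the original commit, then cherry-pick the replace commit so git's merge machinery carries the search-to-replace edit onto the original text. A condensed, hypothetical sketch of that Strategy 1 flow, assuming the temporary repo is already initialized (not the actual function; cleanup and Strategy 2 omitted):

// Hypothetical condensation of Strategy 1; the simple-git calls mirror the hunk above.
import * as fs from "fs"
import * as path from "path"
import simpleGit from "simple-git"

async function applySearchReplaceViaGit(
	repoDir: string,
	originalText: string,
	searchText: string,
	replaceText: string,
): Promise<string> {
	const git = simpleGit(repoDir)
	const filePath = path.join(repoDir, "file.txt")

	// Commit the three states of the file.
	fs.writeFileSync(filePath, originalText)
	await git.add("file.txt")
	const originalCommit = await git.commit("original")

	fs.writeFileSync(filePath, searchText)
	await git.add("file.txt")
	await git.commit("search")

	fs.writeFileSync(filePath, replaceText)
	await git.add("file.txt")
	const replaceCommit = await git.commit("replace")

	// Rewind to the original text, then replay the search -> replace diff on top of it.
	await git.raw(["checkout", originalCommit.commit])
	await git.raw(["cherry-pick", "--minimal", replaceCommit.commit])

	return fs.readFileSync(filePath, "utf-8")
}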
@@ -287,7 +287,7 @@ export async function applyEdit(
): Promise<EditResult> {
	// Don't attempt regular edits if confidence is too low
	if (confidence < confidenceThreshold) {
		console.log(
		console.warn(
			`Search confidence (${confidence}) below minimum threshold (${confidenceThreshold}), trying git fallback...`,
		)
		return applyGitFallback(app, hunk, content)

@@ -27,7 +27,6 @@ export class NewUnifiedDiffStrategy implements DiffStrategy {
	private parseUnifiedDiff(diff: string): Diff {
		const MAX_CONTEXT_LINES = 6 // Number of context lines to keep before/after changes
		const lines = diff.split("\n")
		// console.log("lines: ", lines)
		const hunks: Hunk[] = []
		let currentHunk: Hunk | null = null

@@ -269,7 +268,7 @@ Your diff here
			strategy,
		} = findBestMatch(contextStr, result, 0, this.confidenceThreshold)
		if (confidence < this.confidenceThreshold) {
			console.log("Full hunk application failed, trying sub-hunks strategy")
			console.warn("Full hunk application failed, trying sub-hunks strategy")
			// Try splitting the hunk into smaller hunks
			const subHunks = this.splitHunk(hunk)
			let subHunkSuccess = true

@@ -199,16 +199,12 @@ export function findExactMatch(
	startIndex: number = 0,
	confidenceThreshold: number = 0.97,
): SearchResult {
	// console.log("searchStr: ", searchStr)
	// console.log("content: ", content)
	const searchLines = searchStr.split("\n")
	const windows = createOverlappingWindows(content.slice(startIndex), searchLines.length)
	const matches: (SearchResult & { windowIndex: number })[] = []

	windows.forEach((windowData, windowIndex) => {
		const windowStr = windowData.window.join("\n")
		// console.log("searchStr: ", searchStr)
		// console.log("windowStr:", windowStr)
		const exactMatch = windowStr.indexOf(searchStr)

		if (exactMatch !== -1) {
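findExactMatch above leans on createOverlappingWindows, which this commit never shows. One plausible shape for that helper, inferred only from how the hunk consumes it (windowData.window is a string[] that gets joined with "\n" and searched); treat it as a hypothetical sketch, not the project's implementation:

// Hypothetical sketch; the window size and overlap policy are assumptions.
interface ContentWindow {
	window: string[] // a run of consecutive lines from the content
	startIndex: number // offset of the first line of this window in the original content
}

function createOverlappingWindows(lines: string[], windowSize: number): ContentWindow[] {
	const windows: ContentWindow[] = []
	const step = Math.max(1, Math.floor(windowSize / 2)) // ~50% overlap between neighbouring windows
	for (let start = 0; start < lines.length; start += step) {
		windows.push({ window: lines.slice(start, start + windowSize), startIndex: start })
		if (start + windowSize >= lines.length) break
	}
	return windows
}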
@@ -404,18 +400,10 @@ export function findBestMatch(

	for (const strategy of strategies) {
		const result = strategy(searchStr, content, startIndex, confidenceThreshold)
		if (searchStr === "由于年久失修,街区路面坑洼不平,污水横流,垃圾遍地,甚至可见弹痕血迹。") {
			console.log("findBestMatch result: ", strategy.name, result)
		}
		if (result.confidence > bestResult.confidence) {
			bestResult = result
		}
	}
	// if (bestResult.confidence < 0.97) {
	// console.log("searchStr: ", searchStr)
	// console.log("content: ", content)
	// console.log("findBestMatch result: ", bestResult)
	// }

	return bestResult
}

@@ -60,7 +60,6 @@ export class GeminiProvider implements BaseLLMProvider {
		: undefined

	try {
		console.log(request)
		const model = this.client.getGenerativeModel({
			model: request.model,
			generationConfig: {

@@ -1,136 +0,0 @@
import { Anthropic } from "@anthropic-ai/sdk"
import * as diff from "diff"
import * as path from "path"

export const formatResponse = {
	toolDenied: () => `The user denied this operation.`,

	toolDeniedWithFeedback: (feedback?: string) =>
		`The user denied this operation and provided the following feedback:\n<feedback>\n${feedback}\n</feedback>`,

	toolApprovedWithFeedback: (feedback?: string) =>
		`The user approved this operation and provided the following context:\n<feedback>\n${feedback}\n</feedback>`,

	toolError: (error?: string) => `The tool execution failed with the following error:\n<error>\n${error}\n</error>`,

	noToolsUsed: () =>
		`[ERROR] You did not use a tool in your previous response! Please retry with a tool use.

${toolUseInstructionsReminder}

# Next Steps

If you have completed the user's task, use the attempt_completion tool.
If you require additional information from the user, use the ask_followup_question tool.
Otherwise, if you have not completed the task and do not need additional information, then proceed with the next step of the task.
(This is an automated message, so do not respond to it conversationally.)`,

	tooManyMistakes: (feedback?: string) =>
		`You seem to be having trouble proceeding. The user has provided the following feedback to help guide you:\n<feedback>\n${feedback}\n</feedback>`,

	missingToolParameterError: (paramName: string) =>
		`Missing value for required parameter '${paramName}'. Please retry with complete response.\n\n${toolUseInstructionsReminder}`,

	invalidMcpToolArgumentError: (serverName: string, toolName: string) =>
		`Invalid JSON argument used with ${serverName} for ${toolName}. Please retry with a properly formatted JSON argument.`,

	toolResult: (
		text: string,
		images?: string[],
	): string | Array<Anthropic.TextBlockParam | Anthropic.ImageBlockParam> => {
		if (images && images.length > 0) {
			const textBlock: Anthropic.TextBlockParam = { type: "text", text }
			const imageBlocks: Anthropic.ImageBlockParam[] = formatImagesIntoBlocks(images)
			// Placing images after text leads to better results
			return [textBlock, ...imageBlocks]
		} else {
			return text
		}
	},

	imageBlocks: (images?: string[]): Anthropic.ImageBlockParam[] => {
		return formatImagesIntoBlocks(images)
	},

	formatFilesList: (absolutePath: string, files: string[], didHitLimit: boolean): string => {
		const sorted = files
			.map((file) => {
				// convert absolute path to relative path
				const relativePath = path.relative(absolutePath, file).toPosix()
				return file.endsWith("/") ? relativePath + "/" : relativePath
			})
			// Sort so files are listed under their respective directories to make it clear what files are children of what directories. Since we build file list top down, even if file list is truncated it will show directories that cline can then explore further.
			.sort((a, b) => {
				const aParts = a.split("/") // only works if we use toPosix first
				const bParts = b.split("/")
				for (let i = 0; i < Math.min(aParts.length, bParts.length); i++) {
					if (aParts[i] !== bParts[i]) {
						// If one is a directory and the other isn't at this level, sort the directory first
						if (i + 1 === aParts.length && i + 1 < bParts.length) {
							return -1
						}
						if (i + 1 === bParts.length && i + 1 < aParts.length) {
							return 1
						}
						// Otherwise, sort alphabetically
						return aParts[i].localeCompare(bParts[i], undefined, { numeric: true, sensitivity: "base" })
					}
				}
				// If all parts are the same up to the length of the shorter path,
				// the shorter one comes first
				return aParts.length - bParts.length
			})
		if (didHitLimit) {
			return `${sorted.join(
				"\n",
			)}\n\n(File list truncated. Use list_files on specific subdirectories if you need to explore further.)`
		} else if (sorted.length === 0 || (sorted.length === 1 && sorted[0] === "")) {
			return "No files found."
		} else {
			return sorted.join("\n")
		}
	},

	createPrettyPatch: (filename = "file", oldStr?: string, newStr?: string) => {
		// strings cannot be undefined or diff throws exception
		const patch = diff.createPatch(filename.toPosix(), oldStr || "", newStr || "")
		const lines = patch.split("\n")
		const prettyPatchLines = lines.slice(4)
		return prettyPatchLines.join("\n")
	},
}

// to avoid circular dependency
const formatImagesIntoBlocks = (images?: string[]): Anthropic.ImageBlockParam[] => {
	return images
		? images.map((dataUrl) => {
			// data:image/png;base64,base64string
			const [rest, base64] = dataUrl.split(",")
			const mimeType = rest.split(":")[1].split(";")[0]
			return {
				type: "image",
				source: { type: "base64", media_type: mimeType, data: base64 },
			} as Anthropic.ImageBlockParam
		})
		: []
}

const toolUseInstructionsReminder = `# Reminder: Instructions for Tool Use

Tool uses are formatted using XML-style tags. The tool name is enclosed in opening and closing tags, and each parameter is similarly enclosed within its own set of tags. Here's the structure:

<tool_name>
<parameter1_name>value1</parameter1_name>
<parameter2_name>value2</parameter2_name>
...
</tool_name>

For example:

<attempt_completion>
<result>
I have completed the task...
</result>
</attempt_completion>

Always adhere to this format for all tool uses to ensure proper parsing and execution.`
@@ -1,52 +0,0 @@
import { ToolArgs } from "./types"

export function getBrowserActionDescription(args: ToolArgs): string | undefined {
	if (!args.supportsComputerUse) {
		return undefined
	}
	return `## browser_action
Description: Request to interact with a Puppeteer-controlled browser. Use this tool for research, information gathering, citation verification, or content reference when writing. Every action, except \`close\`, will be responded to with a screenshot of the browser's current state, along with any new console logs. You may only perform one browser action per message, and wait for the user's response including a screenshot and logs to determine the next action.
- The sequence of actions **must always start with** launching the browser at a URL, and **must always end with** closing the browser. If you need to visit a new URL that is not possible to navigate to from the current webpage, you must first close the browser, then launch again at the new URL.
- While the browser is active, only the \`browser_action\` tool can be used. No other tools should be called during this time. You may proceed to use other tools only after closing the browser. For example if you need to save research findings to a document, you must close the browser, then use other tools to write the information to files.
- The browser window has a resolution of **${args.browserViewportSize}** pixels. When performing any click actions, ensure the coordinates are within this resolution range.
- Before clicking on any elements such as icons, links, or buttons, you must consult the provided screenshot of the page to determine the coordinates of the element. The click should be targeted at the **center of the element**, not on its edges.
Parameters:
- action: (required) The action to perform. The available actions are:
    * launch: Launch a new Puppeteer-controlled browser instance at the specified URL. This **must always be the first action**.
        - Use with the \`url\` parameter to provide the URL.
        - Ensure the URL is valid and includes the appropriate protocol (e.g. https://en.wikipedia.org/wiki/Writing, https://scholar.google.com, etc.)
    * click: Click at a specific x,y coordinate.
        - Use with the \`coordinate\` parameter to specify the location.
        - Always click in the center of an element (icon, button, link, etc.) based on coordinates derived from a screenshot.
    * type: Type a string of text on the keyboard. You might use this after clicking on a text field to input text.
        - Use with the \`text\` parameter to provide the string to type.
    * scroll_down: Scroll down the page by one page height.
    * scroll_up: Scroll up the page by one page height.
    * close: Close the Puppeteer-controlled browser instance. This **must always be the final browser action**.
        - Example: \`<action>close</action>\`
- url: (optional) Use this for providing the URL for the \`launch\` action.
    * Example: <url>https://en.wikipedia.org/wiki/Writing</url>
- coordinate: (optional) The X and Y coordinates for the \`click\` action. Coordinates should be within the **${args.browserViewportSize}** resolution.
    * Example: <coordinate>450,300</coordinate>
- text: (optional) Use this for providing the text for the \`type\` action.
    * Example: <text>academic writing research</text>
Usage:
<browser_action>
<action>Action to perform (e.g., launch, click, type, scroll_down, scroll_up, close)</action>
<url>URL to launch the browser at (optional)</url>
<coordinate>x,y coordinates (optional)</coordinate>
<text>Text to type (optional)</text>
</browser_action>

Example: Requesting to launch a browser at a research resource
<browser_action>
<action>launch</action>
<url>https://scholar.google.com</url>
</browser_action>

Example: Requesting to type a search query
<browser_action>
<action>type</action>
<text>academic writing styles comparison</text>
</browser_action>`
}
@@ -1,17 +0,0 @@
import { ToolArgs } from "./types"

export function getExecuteCommandDescription(args: ToolArgs): string | undefined {
	return `## execute_command
Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Commands will be executed in the current working directory: ${args.cwd}
Parameters:
- command: (required) The CLI command to execute. This should be valid for the current operating system. Ensure the command is properly formatted and does not contain any harmful instructions.
Usage:
<execute_command>
<command>Your command here</command>
</execute_command>

Example: Requesting to convert a markdown file to PDF using pandoc
<execute_command>
<command>pandoc document.md -o document.pdf</command>
</execute_command>`
}
@@ -5,8 +5,6 @@ import { McpHub } from "../../mcp/McpHub"
import { getAccessMcpResourceDescription } from "./access-mcp-resource"
import { getAskFollowupQuestionDescription } from "./ask-followup-question"
import { getAttemptCompletionDescription } from "./attempt-completion"
import { getBrowserActionDescription } from "./browser-action"
import { getExecuteCommandDescription } from "./execute-command"
import { getFetchUrlsContentDescription } from "./fetch-url-content"
import { getInsertContentDescription } from "./insert-content"
import { getListFilesDescription } from "./list-files"
@@ -22,7 +20,6 @@ import { getWriteToFileDescription } from "./write-to-file"

// Map of tool names to their description functions
const toolDescriptionMap: Record<string, (args: ToolArgs) => string | undefined> = {
	execute_command: (args) => getExecuteCommandDescription(args),
	read_file: (args) => getReadFileDescription(args),
	write_to_file: (args) => getWriteToFileDescription(args),
	search_files: (args) => getSearchFilesDescription(args),
@@ -41,7 +38,7 @@ const toolDescriptionMap: Record<string, (args: ToolArgs) => string | undefined>
export function getToolDescriptionsForMode(
	mode: Mode,
	cwd: string,
	searchTool: string,
	searchTool: string,
	supportsComputerUse: boolean,
	diffStrategy?: DiffStrategy,
	browserViewportSize?: string,
@@ -95,8 +92,6 @@ export function getToolDescriptionsForMode(

// Export individual description functions for backward compatibility
export {
	getAccessMcpResourceDescription, getAskFollowupQuestionDescription,
	getAttemptCompletionDescription, getBrowserActionDescription, getExecuteCommandDescription, getInsertContentDescription,
	getListFilesDescription, getReadFileDescription, getSearchAndReplaceDescription, getSearchFilesDescription, getSwitchModeDescription, getUseMcpToolDescription, getWriteToFileDescription
	getAccessMcpResourceDescription, getAskFollowupQuestionDescription, getAttemptCompletionDescription, getInsertContentDescription, getListFilesDescription, getReadFileDescription, getSearchAndReplaceDescription, getSearchFilesDescription, getSwitchModeDescription, getUseMcpToolDescription, getWriteToFileDescription
}

@@ -65,34 +65,33 @@ export class DBManager {
	// })
	// }

	private async loadExistingDatabase() {
		try {
			const databaseFileExists = await this.app.vault.adapter.exists(
				this.dbPath,
			)
			if (!databaseFileExists) {
				return null
			}
			const fileBuffer = await this.app.vault.adapter.readBinary(this.dbPath)
			const fileBlob = new Blob([fileBuffer], { type: 'application/x-gzip' })
			const { fsBundle, wasmModule, vectorExtensionBundlePath } =
				await this.loadPGliteResources()
			this.db = await PGlite.create({
				loadDataDir: fileBlob,
				fsBundle: fsBundle,
				wasmModule: wasmModule,
				extensions: {
					vector: vectorExtensionBundlePath,
					live
				},
			})
			// return drizzle(this.pgClient)
		} catch (error) {
			console.error('Error loading database:', error)
			console.log(this.dbPath)
			return null
		}
	}
	// private async loadExistingDatabase() {
	// try {
	// const databaseFileExists = await this.app.vault.adapter.exists(
	// this.dbPath,
	// )
	// if (!databaseFileExists) {
	// return null
	// }
	// const fileBuffer = await this.app.vault.adapter.readBinary(this.dbPath)
	// const fileBlob = new Blob([fileBuffer], { type: 'application/x-gzip' })
	// const { fsBundle, wasmModule, vectorExtensionBundlePath } =
	// await this.loadPGliteResources()
	// this.db = await PGlite.create({
	// loadDataDir: fileBlob,
	// fsBundle: fsBundle,
	// wasmModule: wasmModule,
	// extensions: {
	// vector: vectorExtensionBundlePath,
	// live
	// },
	// })
	// // return drizzle(this.pgClient)
	// } catch (error) {
	// console.error('Error loading database:', error)
	// return null
	// }
	// }

	// private async migrateDatabase(): Promise<void> {
	// if (!this.db) {
@@ -115,7 +114,7 @@ export class DBManager {
	// }

	async save(): Promise<void> {
		console.log("need remove")
		console.warn("need remove")
	}

	async cleanup() {

@@ -162,10 +162,6 @@ export class VectorRepository {
		}
	}

	const queryVectorLength = `SELECT count(1) FROM "${tableName}"`;
	const queryVectorLengthResult = await this.db.query(queryVectorLength)
	console.log('queryVectorLengthResult, ', queryVectorLengthResult)

	const query = `
		SELECT
			id, path, mtime, content, metadata,

@@ -550,7 +550,7 @@ export function parseMsgBlocks(
				urls = parsedUrls
			}
		} catch (error) {
			console.error('Failed to parse URLs JSON', error)
			// console.error('Failed to parse URLs JSON', error)
		}
	}
}

@@ -32,7 +32,7 @@ function cosineSimilarity(vecA: number[], vecB: number[]): number {
async function serperSearch(query: string, serperApiKey: string, serperSearchEngine: string): Promise<SearchResult[]> {
	return new Promise((resolve, reject) => {
		const url = `${SERPER_BASE_URL}?q=${encodeURIComponent(query)}&engine=${serperSearchEngine}&api_key=${serperApiKey}&num=20`;

		console.log("serper search url: ", url)
		https.get(url, (res: any) => {
			let data = '';

@@ -67,6 +67,7 @@ async function serperSearch(query: string, serperApiKey: string, serperSearchEng
				}
			});
		}).on('error', (error: Error) => {
			console.error("serper search error: ", error)
			reject(error);
		});
	});
@@ -195,8 +196,8 @@ export async function fetchUrlContent(url: string, apiKey: string): Promise<stri
export async function webSearch(
	query: string,
	serperApiKey: string,
	jinaApiKey: string,
	serperSearchEngine: string,
	jinaApiKey: string,
	ragEngine: RAGEngine
): Promise<string> {
	try {

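The last hunk above inserts serperSearchEngine between serperApiKey and jinaApiKey in webSearch's signature, so existing call sites need the new argument order. A hypothetical call under the new signature (the keys and engine name are placeholders; ragEngine is assumed to be an already-constructed RAGEngine):

// Hypothetical call site illustrating the new parameter order only.
async function runSearch(ragEngine: RAGEngine): Promise<void> {
	const summary = await webSearch(
		"obsidian plugin api", // query
		"<serper-api-key>",    // serperApiKey
		"google",              // serperSearchEngine (placeholder engine name)
		"<jina-api-key>",      // jinaApiKey
		ragEngine,
	)
	console.log(summary)
}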
36 styles.css
@@ -1835,3 +1835,39 @@ button.infio-chat-input-model-select {
	height: 100%;
	width: 100%;
}

.infio-chat-code-block-url-list {
	list-style: none;
	padding: 0;
	margin: 0;
}

.infio-chat-code-block-url-list li {
	padding: 8px 16px;
	border-bottom: 1px solid var(--background-modifier-border);
}

.infio-chat-code-block-url-list li:last-child {
	border-bottom: none;
}

.infio-chat-code-block-url-link {
	color: var(--text-accent);
	text-decoration: none;
	word-break: break-all;
}

.infio-chat-code-block-url-link:hover {
	text-decoration: underline;
}

.infio-chat-code-block-status-button {
	color: #008000;
	background: none;
	border: none;
	padding: 4px 8px;
	cursor: default;
	display: flex;
	align-items: center;
	gap: 4px;
}