# adding autoedits support (#5845)
## Context
This PR enables a basic setup for `auto-edits`. The goal is to let us check model quality by manually triggering `auto-edits` and doing a vibe check. The actual integration of next edits should be done with the existing autocomplete infra, so this PR is meant to live behind a feature flag, only for internal testing and model iteration.

1. The PR takes the 4 lines surrounding the cursor and then proposes a diff for those 4 lines (a rough sketch of this windowing idea follows the list).
2. The current implementation uses a fine-tuned gpt-4o-mini model:
`ft:gpt-4o-mini-2024-07-18:sourcegraph-production::AFXNjNiC`

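The rewrite window in point 1 is presumably derived from the `codeToRewritePrefixLines` / `codeToRewriteSuffixLines` limits in the settings below. A rough TypeScript sketch of the idea, where the helper name and boundary handling are illustrative rather than code from this PR:

```ts
import * as vscode from 'vscode'

// Illustrative only: pick the few lines around the cursor that the model is
// asked to rewrite, clamped to the document bounds.
function getCodeToRewriteRange(
    document: vscode.TextDocument,
    position: vscode.Position,
    prefixLines: number,
    suffixLines: number
): vscode.Range {
    const startLine = Math.max(0, position.line - prefixLines)
    const endLine = Math.min(document.lineCount - 1, position.line + suffixLines)
    return new vscode.Range(startLine, 0, endLine, document.lineAt(endLine).text.length)
}
```
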
## Steps to run the autoedits (in debug mode):
1. Set the following setting in the VS Code config:
```
"cody.experimental.autoedits": {
    "provider": "openai",
    "model": "ft:gpt-4o-mini-2024-07-18:sourcegraph-production::AGgXey7l",
    "apiKey": "<openai_token>",
    "tokenLimit": {
      "prefixTokens": 2500,
      "suffixTokens": 2500,
      "maxPrefixLinesInArea": 12,
      "maxSuffixLinesInArea": 5,
      "codeToRewritePrefixLines": 2, 
      "codeToRewriteSuffixLines": 3,
      "contextSpecificTokenLimit": {
        "recent-view-port": 1000,
        "diagnostics": 1000,
        "recent-copy": 1000,
        "jaccard-similarity": 1000,
        "recent-edits": 1000
      }
    }
  },
```
2. Press `ctrl+shift+enter` to trigger the auto-edit suggestion.
3. The `Cody by Sourcegraph` debug console will show the proposed diff.
4. The suggestion is also shown in the UI as ghost text; press `tab` to accept the changes and `escape` to reject them.


## Test plan
Updated CI checks and manual testing. Please see the demo below.


https://github.com/user-attachments/assets/7c1cf50d-c1fa-48bc-a71e-364e184404ce

---------

Co-authored-by: Beatrix <[email protected]>
hitesh-1997 and abeatrix authored Oct 16, 2024
1 parent f84a318 commit 9b9f64c
Showing 26 changed files with 1,289 additions and 85 deletions.
18 changes: 18 additions & 0 deletions lib/shared/src/configuration.ts
@@ -21,6 +21,23 @@ export interface AuthCredentials {
    tokenSource?: TokenSource | undefined
}

export interface AutoEditsTokenLimit {
    prefixTokens: number
    suffixTokens: number
    maxPrefixLinesInArea: number
    maxSuffixLinesInArea: number
    codeToRewritePrefixLines: number
    codeToRewriteSuffixLines: number
    contextSpecificTokenLimit: Record<string, number>
}

export interface AutoEditsModelConfig {
    provider: string
    model: string
    apiKey: string
    tokenLimit: AutoEditsTokenLimit
}

export interface NetConfiguration {
    mode?: string | undefined | null
    proxy?: {
@@ -62,6 +79,7 @@ interface RawClientConfiguration {

    experimentalTracing: boolean
    experimentalSupercompletions: boolean
    experimentalAutoedits: AutoEditsModelConfig | undefined
    experimentalCommitMessage: boolean
    experimentalNoodle: boolean
    experimentalMinionAnthropicKey: string | undefined
4 changes: 4 additions & 0 deletions lib/shared/src/prompt/prompt-string.ts
@@ -254,6 +254,10 @@ export class PromptString {
        return internal_createPromptString(document.getText(range), [document.uri])
    }

    public static fromStructuredGitDiff(uri: vscode.Uri, diff: string) {
        return internal_createPromptString(diff, [uri])
    }

    public static fromGitDiff(uri: vscode.Uri, oldContent: string, newContent: string) {
        const diff = createGitDiff(displayPath(uri), oldContent, newContent)
        return internal_createPromptString(diff, [uri])
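
For reference, a hedged usage sketch of the new helper next to the existing one: `fromGitDiff` computes the diff from old and new file contents, while `fromStructuredGitDiff` wraps a diff string that was already computed elsewhere. The function and variable names below are illustrative, not code from this diff:

```ts
import { PromptString } from '@sourcegraph/cody-shared'
import type * as vscode from 'vscode'

// Sketch only: both helpers return a PromptString that carries the source URI as a reference.
function wrapDiffs(uri: vscode.Uri, oldContent: string, newContent: string, existingDiffText: string) {
    const computed = PromptString.fromGitDiff(uri, oldContent, newContent) // builds the diff itself
    const precomputed = PromptString.fromStructuredGitDiff(uri, existingDiffText) // trusts a ready-made diff
    return { computed, precomputed }
}
```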
15 changes: 15 additions & 0 deletions vscode/package.json
@@ -729,6 +729,21 @@
            "args": ["previous"],
            "key": "shift+ctrl+up",
            "when": "cody.activated && !editorReadonly && cody.hasActionableSupercompletion"
        },
        {
            "command": "cody.supersuggest.accept",
            "key": "tab",
            "when": "editorTextFocus && cody.activated && cody.supersuggest.active"
        },
        {
            "command": "cody.supersuggest.dismiss",
            "key": "escape",
            "when": "editorTextFocus && cody.activated && cody.supersuggest.active"
        },
        {
            "command": "cody.experimental.suggest",
            "key": "ctrl+shift+enter",
            "when": "cody.activated"
        }
    ],
    "submenus": [
110 changes: 110 additions & 0 deletions vscode/src/autoedits/autoedits-provider.ts
@@ -0,0 +1,110 @@
import {
    type AutoEditsTokenLimit,
    type DocumentContext,
    logDebug,
    tokensToChars,
} from '@sourcegraph/cody-shared'
import { Observable } from 'observable-fns'
import * as vscode from 'vscode'
import { ContextMixer } from '../completions/context/context-mixer'
import { DefaultContextStrategyFactory } from '../completions/context/context-strategy'
import { getCurrentDocContext } from '../completions/get-current-doc-context'
import { getConfiguration } from '../configuration'
import type { PromptProvider } from './prompt-provider'
import { DeepSeekPromptProvider } from './providers/deepseek'
import { OpenAIPromptProvider } from './providers/openai'
import { AutoEditsRenderer } from './renderer'

const AUTOEDITS_CONTEXT_STRATEGY = 'auto-edits'

export interface AutoEditsProviderOptions {
    document: vscode.TextDocument
    position: vscode.Position
}

export class AutoeditsProvider implements vscode.Disposable {
    private disposables: vscode.Disposable[] = []
    private contextMixer: ContextMixer = new ContextMixer({
        strategyFactory: new DefaultContextStrategyFactory(Observable.of(AUTOEDITS_CONTEXT_STRATEGY)),
        dataCollectionEnabled: false,
    })
    private autoEditsTokenLimit: AutoEditsTokenLimit | undefined
    private provider: PromptProvider | undefined
    private model: string | undefined
    private apiKey: string | undefined
    private renderer: AutoEditsRenderer = new AutoEditsRenderer()

    constructor() {
        const config = getConfiguration().experimentalAutoedits
        if (config === undefined) {
            logDebug('AutoEdits', 'No Configuration found in the settings')
            return
        }
        this.initizlizePromptProvider(config.provider)
        this.autoEditsTokenLimit = config.tokenLimit as AutoEditsTokenLimit
        this.model = config.model
        this.apiKey = config.apiKey
        this.disposables.push(
            this.contextMixer,
            this.renderer,
            vscode.commands.registerCommand('cody.experimental.suggest', () => this.getAutoedit())
        )
    }

    private initizlizePromptProvider(provider: string) {
        if (provider === 'openai') {
            this.provider = new OpenAIPromptProvider()
        } else if (provider === 'deepseek') {
            this.provider = new DeepSeekPromptProvider()
        } else {
            logDebug('AutoEdits', `provider ${provider} not supported`)
        }
    }

    public getAutoedit() {
        this.predictAutoeditAtDocAndPosition({
            document: vscode.window.activeTextEditor!.document,
            position: vscode.window.activeTextEditor!.selection.active,
        })
    }

    public async predictAutoeditAtDocAndPosition(options: AutoEditsProviderOptions) {
        if (!this.provider || !this.autoEditsTokenLimit || !this.model || !this.apiKey) {
            logDebug('AutoEdits', 'No Provider or Token Limit found in the settings')
            return
        }
        const start = Date.now()
        const docContext = this.getDocContext(options.document, options.position)
        const { context } = await this.contextMixer.getContext({
            document: options.document,
            position: options.position,
            docContext: docContext,
            maxChars: 100000,
        })
        const { codeToReplace, promptResponse: prompt } = this.provider.getPrompt(
            docContext,
            options.document,
            context,
            this.autoEditsTokenLimit
        )
        const response = await this.provider.getModelResponse(this.model, this.apiKey, prompt)
        const timeToResponse = Date.now() - start
        logDebug('AutoEdits: (Time LLM Query):', timeToResponse.toString())
        await this.renderer.render(options, codeToReplace, response)
    }

    private getDocContext(document: vscode.TextDocument, position: vscode.Position): DocumentContext {
        return getCurrentDocContext({
            document,
            position,
            maxPrefixLength: tokensToChars(this.autoEditsTokenLimit?.prefixTokens ?? 0),
            maxSuffixLength: tokensToChars(this.autoEditsTokenLimit?.suffixTokens ?? 0),
        })
    }

    public dispose() {
        for (const disposable of this.disposables) {
            disposable.dispose()
        }
    }
}
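
For orientation, a minimal sketch of how this provider could be wired up during extension activation. The registration function and call site are assumptions rather than part of this diff; the constructor above already no-ops when `cody.experimental.autoedits` is unset and registers the `cody.experimental.suggest` command itself:

```ts
import * as vscode from 'vscode'
import { AutoeditsProvider } from './autoedits/autoedits-provider'

// Hypothetical wiring (not from this commit): construct the provider once and
// let the extension dispose it on deactivation.
export function registerAutoedits(context: vscode.ExtensionContext): void {
    context.subscriptions.push(new AutoeditsProvider())
}
```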
50 changes: 50 additions & 0 deletions vscode/src/autoedits/prompt-provider.ts
@@ -0,0 +1,50 @@
import type { AutoEditsTokenLimit, PromptString } from '@sourcegraph/cody-shared'
import type * as vscode from 'vscode'
import type {
    AutocompleteContextSnippet,
    DocumentContext,
} from '../../../lib/shared/src/completions/types'
import type * as utils from './prompt-utils'
export type CompletionsPrompt = PromptString
export type ChatPrompt = {
    role: 'system' | 'user' | 'assistant'
    content: PromptString
}[]
export type PromptProviderResponse = CompletionsPrompt | ChatPrompt

export interface PromptResponseData {
    codeToReplace: utils.CodeToReplaceData
    promptResponse: PromptProviderResponse
}

export interface PromptProvider {
    getPrompt(
        docContext: DocumentContext,
        document: vscode.TextDocument,
        context: AutocompleteContextSnippet[],
        tokenBudget: AutoEditsTokenLimit
    ): PromptResponseData

    postProcessResponse(completion: string | null): string

    getModelResponse(model: string, apiKey: string, prompt: PromptProviderResponse): Promise<string>
}

export async function getModelResponse(url: string, body: string, apiKey: string): Promise<any> {
    const response = await fetch(url, {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            Authorization: `Bearer ${apiKey}`,
        },
        body: body,
    })
    if (response.status !== 200) {
        const errorText = await response.text()
        throw new Error(`HTTP error! status: ${response.status}, message: ${errorText}`)
    }
    const data = await response.json()
    return data
}

// ################################################################################################################
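
As a hedged illustration of how a concrete `PromptProvider` might use the exported `getModelResponse` helper: the endpoint, request body, and response shape below follow OpenAI's public chat-completions API and are assumptions, not code taken from this diff:

```ts
import { type ChatPrompt, getModelResponse } from './prompt-provider'

// Sketch only: send a ChatPrompt to OpenAI's chat-completions endpoint and
// return the generated text from the first choice. PromptStrings are stringified
// here so the body is plain JSON.
async function queryOpenAIChat(model: string, apiKey: string, prompt: ChatPrompt): Promise<string> {
    const body = JSON.stringify({
        model,
        messages: prompt.map(m => ({ role: m.role, content: m.content.toString() })),
        temperature: 0,
    })
    const data = await getModelResponse('https://api.openai.com/v1/chat/completions', body, apiKey)
    return data.choices[0].message.content
}
```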
