v1 of user expertise
PrashamTrivedi committed Aug 12, 2024
1 parent 205cdb1 commit d7ef82e
Showing 5 changed files with 76 additions and 45 deletions.
40 changes: 21 additions & 19 deletions commands/analyse.mts
@@ -13,6 +13,7 @@ import {handler as setExpertiseHandler} from './setExpertise.mjs'




// Export these functions for testing
export {
analyseDirectoryStructure,
@@ -78,6 +79,7 @@ export async function handler(argv: Arguments) {
}
const config = readConfig()
const rootDir = config.ANALYSIS_DIR
+ const userExpertise = JSON.stringify(config.userExpertise)

const projectName = argv.path as string

@@ -101,7 +103,7 @@ export async function handler(argv: Arguments) {


const {directoryInferrence, directoryStructureWithContent} = await analyseDirectoryStructure(path, isVerbose, isRoot,
- dirToWriteAnalysis, useOpenAi, isProjectRoot, ignore, llmInterface)
+ dirToWriteAnalysis, useOpenAi, isProjectRoot, ignore, llmInterface, userExpertise)

if (isVerbose) {
console.log({project: argv.path, directoryInferrence})
@@ -113,11 +115,11 @@ export async function handler(argv: Arguments) {
// console.log(`Analysing ${chalk.redBright(projectName)}'s file structure to getting started.`)
if (!directoryInferrence.isMonorepo) {

- await inferDependenciesAndWriteAnalysis(sourceCodePath, directoryInferrence, useOpenAi, allowStreaming, isVerbose, dirToWriteAnalysis, isProjectRoot, llmInterface)
+ await inferDependenciesAndWriteAnalysis(sourceCodePath, directoryInferrence, useOpenAi, allowStreaming, isVerbose, dirToWriteAnalysis, isProjectRoot, llmInterface, userExpertise)
const directoryStructureWithoutLockFile = await getDirectoryWithoutLockfile(directoryInferrence, directoryStructureWithContent, isVerbose)

await analyzeCode(directoryStructureWithoutLockFile, useOpenAi, allowStreaming, isVerbose,
- dirToWriteAnalysis, isProjectRoot, llmInterface)
+ dirToWriteAnalysis, isProjectRoot, llmInterface, userExpertise)
} else {

if (isVerbose) {
@@ -132,14 +134,14 @@ export async function handler(argv: Arguments) {
try {

const {directoryInferrence, directoryStructureWithContent} = await analyseDirectoryStructure(sourceCodePath, isVerbose,
- false, analysisRootDir, useOpenAi, isProjectRoot, ignore, llmInterface)
+ false, analysisRootDir, useOpenAi, isProjectRoot, ignore, llmInterface, userExpertise)

await inferDependenciesAndWriteAnalysis(sourceCodePath, directoryInferrence, useOpenAi, allowStreaming,
- isVerbose, analysisRootDir, isProjectRoot, llmInterface)
+ isVerbose, analysisRootDir, isProjectRoot, llmInterface, userExpertise)

const directoryStructureWithoutLockFile = await getDirectoryWithoutLockfile(directory, directoryStructureWithContent, isVerbose)
await analyzeCode(directoryStructureWithoutLockFile, useOpenAi, allowStreaming, isVerbose, analysisRootDir,
- isProjectRoot, llmInterface)
+ isProjectRoot, llmInterface, userExpertise)
} catch (error) {
const errorAnalysisSkipped = `Error analysing ${directory}: Moving on to next directory...`
console.error(errorAnalysisSkipped)
@@ -163,7 +165,7 @@ export async function handler(argv: Arguments) {

async function analyseDirectoryStructure(path: string, isVerbose: boolean | undefined,
isRoot: boolean, projectName: string, useOpenAi: boolean, isProjectRoot: boolean | undefined,
- ignore: string[], llm: LlmInterface) {
+ ignore: string[], llm: LlmInterface, userExpertise?: string) {
const spinner = ora('Analyzing the directory structure...').start()
const directoryStructureWithContent = await getDirStructure(path, ignore, isVerbose)

@@ -190,7 +192,7 @@ async function analyseDirectoryStructure(path: string, isVerbose: boolean | unde
}
spinner.text = "Analyzing the project directory for codebase shape..."

- const directoryInferrenceResponse = await llm.inferProjectDirectory(JSON.stringify(directoryStructure), useOpenAi, false, isVerbose)
+ const directoryInferrenceResponse = await llm.inferProjectDirectory(JSON.stringify(directoryStructure), useOpenAi, false, isVerbose, userExpertise)
const directoryInferrence = JSON.parse(directoryInferrenceResponse ?? "")

if (isVerbose) {
@@ -205,30 +207,30 @@ async function analyseDirectoryStructure(path: string, isVerbose: boolean | unde
}

async function analyzeCode(directoryStructureWithoutLockFile: FileNode, useOpenAi: boolean, allowStreaming: boolean,
- isVerbose: boolean, projectName: string, isProjectRoot: boolean, llmInterface: LlmInterface) {
+ isVerbose: boolean, projectName: string, isProjectRoot: boolean, llmInterface: LlmInterface, userExpertise?: string) {

await analyzeAndWriteCodeInference(directoryStructureWithoutLockFile, useOpenAi, allowStreaming,
- isVerbose, projectName, isProjectRoot, llmInterface)
+ isVerbose, projectName, isProjectRoot, llmInterface, userExpertise)
}

async function analyzeAndWriteCodeInference(directoryStructureWithoutLockFile: FileNode,
useOpenAi: boolean, allowStreaming: boolean, isVerbose: boolean,
- projectName: string, isProjectRoot: boolean, llmInterface: LlmInterface) {
+ projectName: string, isProjectRoot: boolean, llmInterface: LlmInterface, userExpertise?: string) {
let codeInferrenceResponse: string | undefined = await analyzeCodebase(directoryStructureWithoutLockFile, useOpenAi
- , allowStreaming, isVerbose, llmInterface)
+ , allowStreaming, isVerbose, llmInterface, userExpertise)
const interestingCodeResponse: string | undefined = await analyseInterestingCode(directoryStructureWithoutLockFile, useOpenAi,
- allowStreaming, isVerbose, llmInterface)
+ allowStreaming, isVerbose, llmInterface, userExpertise)
// Concatenate the code inferrence and interesting code
codeInferrenceResponse += interestingCodeResponse
writeAnalysis(projectName, "codeInferrence", codeInferrenceResponse, false, isProjectRoot)
}

async function analyseInterestingCode(directoryStructureWithoutLockFile: FileNode,
- useOpenAi: boolean, allowStreaming: boolean, isVerbose: boolean, llmInterface: LlmInterface) {
+ useOpenAi: boolean, allowStreaming: boolean, isVerbose: boolean, llmInterface: LlmInterface, userExpertise?: string) {
const spinner = ora('Analysing interesting code').start()
try {

- const interestingCode = await llmInterface.inferInterestingCode(JSON.stringify(directoryStructureWithoutLockFile), useOpenAi, allowStreaming, isVerbose)
+ const interestingCode = await llmInterface.inferInterestingCode(JSON.stringify(directoryStructureWithoutLockFile), useOpenAi, allowStreaming, isVerbose, userExpertise)
let interestingCodeResponse = ""
if (allowStreaming) {
spinner.stop().clear()
@@ -254,10 +256,10 @@ async function analyseInterestingCode(directoryStructureWithoutLockFileNod
}

async function analyzeCodebase(directoryStructureWithoutLockFile: FileNode, useOpenAi: boolean,
- allowStreaming: boolean, isVerbose: boolean, llmInterface: LlmInterface) {
+ allowStreaming: boolean, isVerbose: boolean, llmInterface: LlmInterface, userExpertise?: string) {
const spinner = ora('Reading Codebase and inferring code...').start()
try {
- const codeInferrence = await llmInterface.inferCode(JSON.stringify(directoryStructureWithoutLockFile), useOpenAi, allowStreaming, isVerbose)
+ const codeInferrence = await llmInterface.inferCode(JSON.stringify(directoryStructureWithoutLockFile), useOpenAi, allowStreaming, isVerbose, userExpertise)
let codeInferrenceResponse = ""
if (allowStreaming) {
spinner.stop().clear()
@@ -287,7 +289,7 @@ async function analyzeCodebase(directoryStructureWithoutLockFile: FileNode, useO

async function inferDependenciesAndWriteAnalysis(sourceCodePath: string, directoryInferrence: any,
useOpenAi: boolean, allowStreaming: boolean, isVerbose: boolean,
- projectName: string, isProjectRoot: boolean, llmInterface: LlmInterface) {
+ projectName: string, isProjectRoot: boolean, llmInterface: LlmInterface, userExpertise?: string) {
const spinner = ora('Inferring dependencies...').start()
if (isVerbose) {
console.log({sourceCodePath, projectName, directoryInferrence})
@@ -297,7 +299,7 @@ async function inferDependenciesAndWriteAnalysis(sourceCodePath: string, directo
return
}
const depenencyFile = fs.readFileSync(`${sourceCodePath}/${directoryInferrence.dependenciesFile}`, 'utf-8')
- const dependencyInferrence = await llmInterface.inferDependency(depenencyFile, directoryInferrence.workflow, useOpenAi, allowStreaming, isVerbose)
+ const dependencyInferrence = await llmInterface.inferDependency(depenencyFile, directoryInferrence.workflow, useOpenAi, allowStreaming, isVerbose, userExpertise)

let dependencyInferrenceResponse = ""
if (allowStreaming) {
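Taken together, the analyse.mts changes thread a single optional string from the config into every LLM call. The sketch below condenses that flow; it is illustrative only — the Llm type and the Config shape are stand-ins for the project's LlmInterface and for whatever readConfig actually returns, and the helper chain is collapsed into one call.

```typescript
// Illustrative sketch of the userExpertise threading introduced in analyse.mts.
// Llm and Config are assumed stand-ins; they are not the project's real types.

type Llm = {
    inferProjectDirectory(dir: string, useOpenAi?: boolean, isStreaming?: boolean,
        isVerbose?: boolean, userExpertise?: string): Promise<string | undefined>
}

interface Config {
    ANALYSIS_DIR: string
    userExpertise?: Record<string, string>   // assumed shape, e.g. {typescript: "advanced"}
}

async function analyseSketch(llm: Llm, config: Config, directoryStructure: object, isVerbose: boolean) {
    // Serialized once in the handler, then forwarded as the final optional argument
    // of every analyse/infer helper, exactly as in the diff above.
    const userExpertise = JSON.stringify(config.userExpertise)
    return llm.inferProjectDirectory(JSON.stringify(directoryStructure), true, false, isVerbose, userExpertise)
}
```

Because the parameter is optional all the way down, existing callers that never set an expertise profile keep working unchanged.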
4 changes: 2 additions & 2 deletions commands/setExpertise.mts
@@ -2,7 +2,7 @@
import {readConfig, writeConfig} from '../utils.mjs'
import chalk from 'chalk'
import {confirm, select} from '@inquirer/prompts'
- export const command = 'set-expertise'
+ export const command = 'setExpertise'

export const describe = 'Set the user expertise level for various programming languages and frameworks'

@@ -87,4 +87,4 @@ export async function handler() {

export const usage = '$0 <cmd>'

- export const aliases = ['expertise', 'skill-level', 'h', 'help']
+ export const aliases = ['expertise', 'skillLevel', 'profile', 'h', 'help']
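setExpertise persists the chosen levels through writeConfig, and the analyse handler above reads them back as config.userExpertise. A minimal sketch of that round trip follows; the key name userExpertise comes from the handler diff, but the exact shape of the stored map and the NarratorConfig type are assumptions, since utils.mjs is not part of this commit.

```typescript
// Hypothetical config round trip between setExpertise and analyse (shape assumed).
interface NarratorConfig {
    ANALYSIS_DIR?: string
    userExpertise?: Record<string, string>   // e.g. {typescript: "advanced", react: "beginner"}
}

// What setExpertise conceptually does once the @inquirer/prompts flow finishes.
function withExpertise(config: NarratorConfig, language: string, level: string): NarratorConfig {
    return {...config, userExpertise: {...config.userExpertise, [language]: level}}
}

// What the analyse handler does with the stored map (see the handler diff above).
function serializeExpertise(config: NarratorConfig): string {
    return JSON.stringify(config.userExpertise)
}
```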
4 changes: 3 additions & 1 deletion directoryProcessor.mts
@@ -22,7 +22,9 @@ export async function getDirStructure(dirPath: string, otherIgnorePaths: string[

pathsToIgnore.push(...gitIgnore)
}
- console.log({otherIgnorePaths})
+ if (verbose) {
+     console.log({otherIgnorePaths})
+ }
if (otherIgnorePaths.length > 0) {
pathsToIgnore.push(...otherIgnorePaths)
}
58 changes: 37 additions & 21 deletions openai.mts
@@ -24,12 +24,12 @@ interface Tool {
}

export interface LlmInterface {
- inferProjectDirectory(projectDirectory: string, useOpenAi?: boolean, isStreaming?: boolean, isVerbose?: boolean): Promise<string | undefined>
- inferDependency(dependencyFile: string, workflow: string, useOpenAi?: boolean, isStreaming?: boolean, isVerbose?: boolean): Promise<string | undefined | Stream<ChatCompletionChunk>>
- inferCode(code: string, useOpenAi?: boolean, isStreaming?: boolean, isVerbose?: boolean): Promise<string | undefined | Stream<ChatCompletionChunk>>
- inferInterestingCode(code: string, useOpenAi?: boolean, isStreaming?: boolean, isVerbose?: boolean): Promise<string | undefined | Stream<ChatCompletionChunk>>
- generateReadme(directoryStructure: string, dependencyInference: string, codeInference: string, useOpenAi?: boolean, isStreaming?: boolean, isVerbose?: boolean): Promise<string | undefined | Stream<ChatCompletionChunk>>
- generateMonorepoReadme(monorepoInferrenceInfo: string, useOpenAi?: boolean, isStreaming?: boolean, isVerbose?: boolean): Promise<string | undefined | Stream<ChatCompletionChunk>>
+ inferProjectDirectory(projectDirectory: string, useOpenAi?: boolean, isStreaming?: boolean, isVerbose?: boolean, userExpertise?: string): Promise<string | undefined>
+ inferDependency(dependencyFile: string, workflow: string, useOpenAi?: boolean, isStreaming?: boolean, isVerbose?: boolean, userExpertise?: string): Promise<string | undefined | Stream<ChatCompletionChunk>>
+ inferCode(code: string, useOpenAi?: boolean, isStreaming?: boolean, isVerbose?: boolean, userExpertise?: string): Promise<string | undefined | Stream<ChatCompletionChunk>>
+ inferInterestingCode(code: string, useOpenAi?: boolean, isStreaming?: boolean, isVerbose?: boolean, userExpertise?: string): Promise<string | undefined | Stream<ChatCompletionChunk>>
+ generateReadme(directoryStructure: string, dependencyInference: string, codeInference: string, useOpenAi?: boolean, isStreaming?: boolean, isVerbose?: boolean, userExpertise?: string): Promise<string | undefined | Stream<ChatCompletionChunk>>
+ generateMonorepoReadme(monorepoInferrenceInfo: string, useOpenAi?: boolean, isStreaming?: boolean, isVerbose?: boolean, userExpertise?: string): Promise<string | undefined | Stream<ChatCompletionChunk>>
listModels(isVerbose?: boolean): Promise<string[]>
}

@@ -54,16 +54,20 @@ export class OpenAIInferrence implements LlmInterface {
return tokens.length
}

- private createPrompt(systemPrompt: string, userPrompt: string, isVerbose: boolean): ChatCompletionMessageParam[] {
+ private createPrompt(systemPrompt: string, userPrompt: string, isVerbose: boolean, userExpertise?: string): ChatCompletionMessageParam[] {
+     let finalSystemPrompt = systemPrompt;
+     if (userExpertise) {
+         finalSystemPrompt += `\n<Expertise>${JSON.stringify(userExpertise)}</Expertise>`;
+     }
const compatibilityMessage: ChatCompletionMessageParam[] = [{
role: "system",
- content: systemPrompt
+ content: finalSystemPrompt
}, {
role: "user",
content: userPrompt
}]
if (isVerbose) {
- console.log(`System Prompt: ${systemPrompt}`)
+ console.log(`System Prompt: ${finalSystemPrompt}`)
console.log(`User Prompt: ${userPrompt}`)
}
return compatibilityMessage
@@ -163,15 +167,17 @@ export class OpenAIInferrence implements LlmInterface {
projectDirectory: string,
useOpenAi: boolean = true,
isStreaming: boolean = false,
- isVerbose: boolean = false
+ isVerbose: boolean = false,
+ userExpertise?: string
): Promise<string | undefined> {
const openai = this.getOpenAiClient(useOpenAi)
const model = await this.getModel(useOpenAi)

const compatibilityMessage = this.createPrompt(
`${prompts.commonSystemPrompt.prompt}\n${prompts.rootUnderstanding.prompt}`,
`<FileStructure>${JSON.stringify(projectDirectory)}</FileStructure>`,
- isVerbose
+ isVerbose,
+ userExpertise
)

await this.calculateTokensAndCheckLimit(compatibilityMessage, model, isVerbose)
@@ -197,15 +203,17 @@ export class OpenAIInferrence implements LlmInterface {
workflow: string,
useOpenAi: boolean = true,
isStreaming: boolean = false,
- isVerbose: boolean = false
+ isVerbose: boolean = false,
+ userExpertise?: string
): Promise<string | undefined | Stream<ChatCompletionChunk>> {
// @ts-expect-error Exclude streaming from coverage
const openai = this.getOpenAiClient(useOpenAi, isVerbose)
const model = await this.getModel(useOpenAi)
const compatibilityMessage = this.createPrompt(
`${prompts.commonSystemPrompt.prompt}\n${prompts.dependencyUnderstanding.prompt}`,
`<DependencyFile>${JSON.stringify(dependencyFile)}</DependencyFile>\n<Workflow>${workflow}</Workflow>`,
- isVerbose
+ isVerbose,
+ userExpertise
)

await this.calculateTokensAndCheckLimit(compatibilityMessage, model, isVerbose)
@@ -217,15 +225,17 @@ export class OpenAIInferrence implements LlmInterface {
code: string,
useOpenAi: boolean = true,
isStreaming: boolean = false,
- isVerbose: boolean = false
+ isVerbose: boolean = false,
+ userExpertise?: string
): Promise<string | undefined | Stream<ChatCompletionChunk>> {
// @ts-expect-error Exclude streaming from coverage
const openai = this.getOpenAiClient(useOpenAi, isVerbose)
const model = await this.getModel(useOpenAi)
const compatibilityMessage = this.createPrompt(
`${prompts.commonSystemPrompt.prompt}\n${prompts.codeUnderstanding.prompt}`,
`<Code>${JSON.stringify(code)}</Code>`,
- isVerbose
+ isVerbose,
+ userExpertise
)
await this.calculateTokensAndCheckLimit(compatibilityMessage, model, isVerbose)

@@ -236,15 +246,17 @@ export class OpenAIInferrence implements LlmInterface {
code: string,
useOpenAi: boolean = true,
isStreaming: boolean = false,
- isVerbose: boolean = false
+ isVerbose: boolean = false,
+ userExpertise?: string
): Promise<string | undefined | Stream<ChatCompletionChunk>> {
// @ts-expect-error Exclude streaming from coverage
const openai = this.getOpenAiClient(useOpenAi, isVerbose)
const model = await this.getModel(useOpenAi)
const compatibilityMessage = this.createPrompt(
prompts.interestingCodeParts.prompt,
`<Code>${JSON.stringify(code)}</Code>`,
- isVerbose
+ isVerbose,
+ userExpertise
)
await this.calculateTokensAndCheckLimit(compatibilityMessage, model, isVerbose)
return this.callApiAndReturnResult(openai, model, compatibilityMessage, isStreaming, isVerbose)
@@ -256,15 +268,17 @@ export class OpenAIInferrence implements LlmInterface {
codeInference: string,
useOpenAi: boolean = true,
isStreaming: boolean = false,
- isVerbose: boolean = false
+ isVerbose: boolean = false,
+ userExpertise?: string
): Promise<string | undefined | Stream<ChatCompletionChunk>> {
// @ts-expect-error Exclude streaming from coverage
const openai = this.getOpenAiClient(useOpenAi, isVerbose)
const model = await this.getModel(useOpenAi)
const compatibilityMessage = this.createPrompt(
prompts.readmePrompt.prompt,
`<DirectoryStructure>${JSON.stringify(directoryStructure)}</DirectoryStructure>\n<DependencyInferrence>${JSON.stringify(dependencyInference)}</DependencyInferrence>\n<CodeInferrence>${JSON.stringify(codeInference)}</CodeInferrence>`,
- isVerbose
+ isVerbose,
+ userExpertise
)
await this.calculateTokensAndCheckLimit(compatibilityMessage, model, isVerbose)

@@ -275,15 +289,17 @@ export class OpenAIInferrence implements LlmInterface {
monorepoInferrenceInfo: string,
useOpenAi: boolean = true,
isStreaming: boolean = false,
- isVerbose: boolean = false
+ isVerbose: boolean = false,
+ userExpertise?: string
): Promise<string | undefined | Stream<ChatCompletionChunk>> {
// @ts-expect-error Exclude streaming from coverage
const openai = this.getOpenAiClient(useOpenAi, isVerbose)
const model = await this.getModel(useOpenAi)
const compatibilityMessage = this.createPrompt(
prompts.consolidatedInferrenceForMonoRepo.prompt,
`<MonoRepoInferrence>${JSON.stringify(monorepoInferrenceInfo)}</MonoRepoInferrence>`,
- isVerbose
+ isVerbose,
+ userExpertise
)
await this.calculateTokensAndCheckLimit(compatibilityMessage, model, isVerbose)

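The openai.mts changes all reduce to one mechanism: every inference method now accepts an optional userExpertise string and forwards it to createPrompt, which appends it to the system prompt inside an <Expertise> tag. The stand-alone sketch below mirrors that assembly with a local ChatMessage type in place of the SDK's ChatCompletionMessageParam; it is a sketch of the mechanism, not the class itself.

```typescript
// Sketch of the system-prompt assembly added to createPrompt (local types, SDK omitted).
type ChatMessage = {role: "system" | "user"; content: string}

function buildMessages(systemPrompt: string, userPrompt: string, userExpertise?: string): ChatMessage[] {
    let finalSystemPrompt = systemPrompt
    if (userExpertise) {
        // The caller already passes a JSON.stringify'd value, so this second stringify
        // leaves an escaped JSON string inside the tag — mirroring the diff as written.
        finalSystemPrompt += `\n<Expertise>${JSON.stringify(userExpertise)}</Expertise>`
    }
    return [
        {role: "system", content: finalSystemPrompt},
        {role: "user", content: userPrompt},
    ]
}

// With userExpertise = '{"typescript":"advanced"}' the system message ends with
// <Expertise>"{\"typescript\":\"advanced\"}"</Expertise>
```

Because only the system message changes, the expertise hint reaches every downstream inference (directory, dependency, code, readme) without altering any user-facing prompt content.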
(The diff for the fifth changed file did not load on this page.)