Skip to content

Commit

Permalink
Adding tests that check for behaviors of expertise command
Browse files Browse the repository at this point in the history
  • Loading branch information
PrashamTrivedi committed Aug 12, 2024
1 parent 809a237 commit 1d53aae
Show file tree
Hide file tree
Showing 4 changed files with 213 additions and 1 deletion.
41 changes: 40 additions & 1 deletion _tests_/getDirStructure.test.mts
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@ import yargs from 'yargs'
import {hideBin} from "yargs/helpers"
import {command, describe as commandDescribe, builder, handler} from '../commands/getDirStructure.mjs'
import * as directoryProcessor from '../directoryProcessor.mjs'
import * as utils from '../utils.mjs'
import * as inquirer from '@inquirer/prompts'
import * as setExpertise from '../commands/setExpertise.mjs'

const yargsSetup = yargs(hideBin(process.argv))

Expand All @@ -26,10 +29,16 @@ describe("Get Directory Structure Command Tests", () => {
beforeEach(() => {
  // Register module mocks so every spec starts from a fully-stubbed environment.
  vi.mock('fs')
  vi.mock('../directoryProcessor.mjs')
  vi.mock('../utils.mjs')
  vi.mock('@inquirer/prompts')
  vi.mock('../commands/setExpertise.mjs')
  // Silence console output so test logs stay readable.
  vi.spyOn(console, 'log').mockImplementation(() => { })
  vi.spyOn(console, 'error').mockImplementation(() => { })

  // Default stubs: a directory-structure result, a config that already has
  // userExpertise set (individual specs override this to exercise the prompt
  // path), a declined confirmation, and a no-op expertise handler.
  // Fix: the original registered the getDirStructure stub twice; once suffices.
  ;(directoryProcessor.getDirStructure as Mock).mockResolvedValue(mockFileStructure)
  vi.mocked(utils.readConfig).mockReturnValue({ANALYSIS_DIR: '/test', userExpertise: 'intermediate'} as any)
  vi.mocked(inquirer.confirm).mockResolvedValue(false)
  vi.mocked(setExpertise.handler).mockResolvedValue(undefined)
})

afterEach(() => {
Expand Down Expand Up @@ -142,5 +151,35 @@ describe("Get Directory Structure Command Tests", () => {
// expect(consoleSpy).toHaveBeenCalledWith("Error: Project path is required")
// })

it('should prompt for user expertise if not set', async () => {
  // A config without a userExpertise entry should trigger the prompt path.
  vi.mocked(utils.readConfig).mockReturnValue({ANALYSIS_DIR: '/test'} as any)
  vi.mocked(inquirer.confirm).mockResolvedValue(true)

  const cli = yargsSetup.command({command, describe: commandDescribe, builder, handler})
  const runCommand = () =>
    new Promise((resolve) => {
      cli.parse(`dirStructure ${mockProjectPath}`, (_err: any, parsed: unknown) => resolve(parsed))
    })
  await runCommand()

  // The command offers to set expertise and delegates to the setExpertise handler.
  expect(inquirer.confirm).toHaveBeenCalledWith({
    message: "Would you like to set your expertise now?",
    default: true
  })
  expect(setExpertise.handler).toHaveBeenCalled()
})

it('should not prompt for user expertise if already set', async () => {
  // userExpertise already present in config → no confirmation flow expected.
  vi.mocked(utils.readConfig).mockReturnValue({ANALYSIS_DIR: '/test', userExpertise: 'intermediate'} as any)

  const cli = yargsSetup.command({command, describe: commandDescribe, builder, handler})
  await new Promise<unknown>((resolve) => {
    cli.parse(`dirStructure ${mockProjectPath}`, (_err: any, parsed: unknown) => resolve(parsed))
  })

  expect(inquirer.confirm).not.toHaveBeenCalled()
  expect(setExpertise.handler).not.toHaveBeenCalled()
})
})
32 changes: 32 additions & 0 deletions _tests_/openai.test.mts
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,38 @@ describe('OpenAIInferrence', () => {
vi.resetAllMocks()
})

describe('Prompt Generation', () => {
  // createPrompt is private on OpenAIInferrence, so specs reach it via an `any` cast.
  it('should generate a prompt without user expertise', () => {
    const messages = (openAIInferrence as any).createPrompt('System prompt', 'User prompt', false)
    expect(messages).toEqual([
      {role: 'system', content: 'System prompt'},
      {role: 'user', content: 'User prompt'}
    ])
  })

  it('should generate a prompt with user expertise', () => {
    // The expertise string is appended to the system message inside an <Expertise> tag.
    const messages = (openAIInferrence as any).createPrompt('System prompt', 'User prompt', false, 'Expert')
    expect(messages).toEqual([
      {role: 'system', content: 'System prompt\n<Expertise>"Expert"</Expertise>'},
      {role: 'user', content: 'User prompt'}
    ])
  })

  it('should log prompts when verbose is true', () => {
    const logSpy = vi.spyOn(console, 'log')
    ;(openAIInferrence as any).createPrompt('System prompt', 'User prompt', true)
    expect(logSpy).toHaveBeenCalledWith('System Prompt: System prompt')
    expect(logSpy).toHaveBeenCalledWith('User Prompt: User prompt')
  })
})

describe('inferProjectDirectory', () => {
it('should return a valid inference for a project directory', async () => {
const mockResponse = {
Expand Down
58 changes: 58 additions & 0 deletions _tests_/prepareReport.test.mts
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,20 @@ import {handler} from '../commands/prepareReport.mjs'
import * as utils from '../utils.mjs'
import OpenAIInferrence from '../openai.mjs'
import ora from 'ora'
import * as inquirer from '@inquirer/prompts'
import * as setExpertise from '../commands/setExpertise.mjs'

vi.mock('../utils.mjs')
vi.mock('../openai.mjs')
vi.mock('ora')
vi.mock('@inquirer/prompts')
vi.mock('../commands/setExpertise.mjs')

describe('prepareReport command', () => {
beforeEach(() => {
  // Reset first, then re-install the defaults every spec relies on:
  // a declined expertise prompt and a no-op setExpertise handler.
  vi.resetAllMocks()
  const confirmMock = vi.mocked(inquirer.confirm)
  const expertiseHandlerMock = vi.mocked(setExpertise.handler)
  confirmMock.mockResolvedValue(false)
  expertiseHandlerMock.mockResolvedValue(undefined)
})

it('should fail if no analysis is found', async () => {
Expand Down Expand Up @@ -151,4 +157,56 @@ describe('prepareReport command', () => {
codeInference: 'mock code inference',
})
})

it('should prompt for user expertise if not set', async () => {
  // Config lacks userExpertise, so the command should offer to set it.
  vi.mocked(utils.readConfig).mockReturnValue({ANALYSIS_DIR: 'test-dir'})
  vi.mocked(utils.getAnalysis).mockReturnValue({
    directoryStructure: 'mock directory structure',
    dependencyInference: 'mock dependency inference',
    codeInferrence: 'mock code inference',
  })
  const generateReadme = vi.fn().mockResolvedValue('Generated README content')
  vi.mocked(OpenAIInferrence).mockImplementation(() => ({generateReadme} as any))
  const spinnerStub = {start: vi.fn().mockReturnThis(), stopAndPersist: vi.fn()}
  vi.mocked(ora).mockReturnValue(spinnerStub as any)
  vi.mocked(inquirer.confirm).mockResolvedValue(true)

  await handler({path: 'test-project', verbose: false, streaming: false} as any)

  expect(inquirer.confirm).toHaveBeenCalledWith({
    message: "Would you like to set your expertise now?",
    default: true
  })
  expect(setExpertise.handler).toHaveBeenCalled()
})

it('should not prompt for user expertise if already set', async () => {
  // userExpertise is present, so no confirmation prompt should appear.
  vi.mocked(utils.readConfig).mockReturnValue({ANALYSIS_DIR: 'test-dir', userExpertise: 'intermediate'})
  vi.mocked(utils.getAnalysis).mockReturnValue({
    directoryStructure: 'mock directory structure',
    dependencyInference: 'mock dependency inference',
    codeInferrence: 'mock code inference',
  })
  const readmeStub = vi.fn().mockResolvedValue('Generated README content')
  vi.mocked(OpenAIInferrence).mockImplementation(() => ({generateReadme: readmeStub} as any))
  vi.mocked(ora).mockReturnValue({
    start: vi.fn().mockReturnThis(),
    stopAndPersist: vi.fn(),
  } as any)

  await handler({path: 'test-project', verbose: false, streaming: false} as any)

  expect(inquirer.confirm).not.toHaveBeenCalled()
  expect(setExpertise.handler).not.toHaveBeenCalled()
})
})
83 changes: 83 additions & 0 deletions _tests_/setExpertise.test.mts
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
import { describe, it, expect, vi, beforeEach } from 'vitest'
import { handler } from '../commands/setExpertise.mjs'
import * as utils from '../utils.mjs'
import * as inquirer from '@inquirer/prompts'
import chalk from 'chalk'

vi.mock('../utils.mjs')
vi.mock('@inquirer/prompts')
vi.mock('chalk', () => ({
default: {
blue: vi.fn((str) => str),
yellow: vi.fn((str) => str),
green: vi.fn((str) => str),
red: vi.fn((str) => str),
},
}))

// Specs for the `setExpertise` interactive questionnaire command.
describe('setExpertise command', () => {
  beforeEach(() => {
    // Every spec starts with clean mocks, an empty saved config, and a
    // writeConfig stub that resolves successfully.
    vi.resetAllMocks()
    vi.mocked(utils.readConfig).mockReturnValue({})
    vi.mocked(utils.writeConfig).mockResolvedValue(undefined)
  })

  it('should set expertise levels for languages and frameworks', async () => {
    // Queue one confirm answer per language/framework prompt, in the exact
    // order the handler asks them. NOTE(review): this ordering is coupled to
    // the unseen handler implementation — verify against setExpertise.mjs if
    // the question list ever changes.
    vi.mocked(inquirer.confirm)
      .mockResolvedValueOnce(true) // JavaScript
      .mockResolvedValueOnce(true) // React
      .mockResolvedValueOnce(false) // Angular
      .mockResolvedValueOnce(false) // Vue.js
      .mockResolvedValueOnce(false) // Express
      .mockResolvedValueOnce(false) // Node.js
      .mockResolvedValueOnce(false) // Python

    // Level selections are only expected for the two confirmed topics.
    vi.mocked(inquirer.select)
      .mockResolvedValueOnce('intermediate') // JavaScript
      .mockResolvedValueOnce('expert') // React

    await handler()

    // Only the confirmed topics end up in the persisted config.
    expect(utils.writeConfig).toHaveBeenCalledWith({
      userExpertise: {
        JavaScript: 'intermediate',
        React: 'expert',
      },
    })

    expect(chalk.green).toHaveBeenCalledWith('Expertise levels have been successfully set and saved!')
  })

  it('should handle when user has no experience in any language', async () => {
    // Declining every prompt still persists an (empty) expertise map.
    vi.mocked(inquirer.confirm).mockResolvedValue(false)

    await handler()

    expect(utils.writeConfig).toHaveBeenCalledWith({ userExpertise: {} })
    expect(chalk.green).toHaveBeenCalledWith('Expertise levels have been successfully set and saved!')
  })

  it('should handle errors during the process', async () => {
    // A throwing readConfig must be caught and reported via console.error,
    // not propagated out of handler().
    const mockError = new Error('Test error')
    vi.mocked(utils.readConfig).mockImplementation(() => { throw mockError })

    const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {})

    await handler()

    expect(consoleErrorSpy).toHaveBeenCalledWith(
      'Error setting user expertise levels:',
      mockError
    )
  })

  it('should display welcome messages', async () => {
    // The intro copy is printed regardless of the answers given.
    vi.mocked(inquirer.confirm).mockResolvedValue(false)
    const consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {})

    await handler()

    expect(consoleLogSpy).toHaveBeenCalledWith('Welcome to the expertise assessment questionnaire!')
    expect(consoleLogSpy).toHaveBeenCalledWith('Please answer the following questions about your programming expertise.')
  })
})

0 comments on commit 1d53aae

Please sign in to comment.