From 767590151728baa77f2b3297a2b802c32121c8c2 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 1 Dec 2017 10:18:47 -0800 Subject: [PATCH 001/103] Basic tokenizer --- .vscode/launch.json | 11 +- .vscode/tasks.json | 6 +- gulpfile.js | 89 +++++++++++--- package.json | 9 +- src/client/language/characterStream.ts | 134 +++++++++++++++++++++ src/client/language/definitions.ts | 81 +++++++++++++ src/client/language/textIterator.ts | 53 ++++++++ src/client/language/textRangeCollection.ts | 103 ++++++++++++++++ src/client/language/tokenizer.ts | 119 ++++++++++++++++++ src/client/providers/completionProvider.ts | 11 +- 10 files changed, 584 insertions(+), 32 deletions(-) create mode 100644 src/client/language/characterStream.ts create mode 100644 src/client/language/definitions.ts create mode 100644 src/client/language/textIterator.ts create mode 100644 src/client/language/textRangeCollection.ts create mode 100644 src/client/language/tokenizer.ts diff --git a/.vscode/launch.json b/.vscode/launch.json index 3fbf982ae0b3..a69c3396ff4e 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -15,7 +15,7 @@ "outFiles": [ "${workspaceFolder}/out/**/*.js" ], - "preLaunchTask": "compile" + "preLaunchTask": "Compile" }, { "name": "Launch Extension as debugServer", // https://code.visualstudio.com/docs/extensions/example-debuggers @@ -30,7 +30,8 @@ "outFiles": [ "${workspaceFolder}/out/client/**/*.js" ], - "cwd": "${workspaceFolder}" + "cwd": "${workspaceFolder}", + "preLaunchTask": "Compile" }, { "name": "Launch Tests", @@ -47,7 +48,7 @@ "outFiles": [ "${workspaceFolder}/out/**/*.js" ], - "preLaunchTask": "compile" + "preLaunchTask": "Compile" }, { "name": "Launch Multiroot Tests", @@ -64,7 +65,7 @@ "outFiles": [ "${workspaceFolder}/out/**/*.js" ], - "preLaunchTask": "compile" + "preLaunchTask": "Compile" } ], "compounds": [ @@ -76,4 +77,4 @@ ] } ] -} +} \ No newline at end of file diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 155b94220ae6..ccf99a2c6f20 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -13,7 +13,7 @@ "script": "compile", "isBackground": true, "problemMatcher": [ - "$tsc", + "$tsc-watch", { "base": "$tslint5", "fileLocation": "relative" @@ -36,7 +36,7 @@ "panel": "shared" }, "problemMatcher": [ - "$tsc", + "$tsc-watch", { "base": "$tslint5", "fileLocation": "relative" @@ -72,4 +72,4 @@ ] } ] -} +} \ No newline at end of file diff --git a/gulpfile.js b/gulpfile.js index 868a7281cc41..6c3f7819d003 100644 --- a/gulpfile.js +++ b/gulpfile.js @@ -22,7 +22,7 @@ const colors = require('colors/safe'); * named according to the checks performed on them. Each subset contains * the following one, as described in mathematical notation: * - * all ⊃ eol ⊇ indentation ⊃ copyright ⊃ typescript + * all ⊃ eol ⊇ indentation ⊃ typescript */ const all = [ @@ -115,12 +115,12 @@ const hygiene = (some, options) => { .toString('utf8') .split(/\r\n|\r|\n/) .forEach((line, i) => { - if (/^\s*$/.test(line)) { + if (/^\s*$/.test(line) || /^\S+.*$/.test(line)) { // Empty or whitespace lines are OK. } else if (/^(\s\s\s\s)+.*/.test(line)) { // Good indent. 
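                    // Tab-indented lines are reported as errors by the next branch.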
} else if (/^[\t]+.*/.test(line)) { - console.error(file.relative + '(' + (i + 1) + ',1): Bad whitespace indentation'); + console.error(file.relative + '(' + (i + 1) + ',1): Bad whitespace indentation (use 4 spaces instead of tabs or other)'); errorCount++; } }); @@ -137,7 +137,7 @@ const hygiene = (some, options) => { tsfmt: true }).then(result => { if (result.error) { - console.error(result.message); + console.error(result.message.trim()); errorCount++; } cb(null, file); @@ -147,14 +147,14 @@ const hygiene = (some, options) => { }); }); + const program = require('tslint').Linter.createProgram("./tsconfig.json"); + const linter = new tslint.Linter(options, program); const tsl = es.through(function (file) { const configuration = tslint.Configuration.findConfiguration(null, '.'); const options = { formatter: 'json' }; const contents = file.contents.toString('utf8'); - const program = require('tslint').Linter.createProgram("./tsconfig.json"); - const linter = new tslint.Linter(options, program); linter.lint(file.relative, contents, configuration.results); const result = linter.getResult(); if (result.failureCount > 0 || result.errorCount > 0) { @@ -206,22 +206,16 @@ const hygiene = (some, options) => { .pipe(filter(f => !f.stat.isDirectory())) .pipe(filter(eolFilter)) .pipe(options.skipEOL ? es.through() : eol) - .pipe(filter(indentationFilter)); - - if (!options.skipIndentationCheck) { - result = result - .pipe(indentation); - } + .pipe(filter(indentationFilter)) + .pipe(indentation); // Type script checks. let typescript = result - .pipe(filter(tslintFilter)); + .pipe(filter(tslintFilter)) + .pipe(formatting); - if (!options.skipFormatCheck) { - typescript = typescript - .pipe(formatting); - } - typescript = typescript.pipe(tsl) + typescript = typescript + .pipe(tsl) .pipe(tscFilesTracker) .pipe(tsc()); @@ -244,16 +238,32 @@ gulp.task('hygiene-staged', () => run({ mode: 'changes' })); gulp.task('hygiene-watch', ['hygiene-staged', 'hygiene-watch-runner']); gulp.task('hygiene-watch-runner', function () { + /** + * @type {Deferred} + */ + let runPromise; + return watch(all, { events: ['add', 'change'] }, function (event) { + // Damn bounce does not work, do our own checks. const start = new Date(); + if (runPromise && !runPromise.completed) { + console.log(`[${start.toLocaleTimeString()}] Already running`); + return; + } console.log(`[${start.toLocaleTimeString()}] Starting '${colors.cyan('hygiene-watch-runner')}'...`); + + runPromise = new Deferred(); // Skip indentation and formatting checks to speed up linting. - return run({ mode: 'watch', skipFormatCheck: true, skipIndentationCheck: true }) + run({ mode: 'watch', skipFormatCheck: true, skipIndentationCheck: true }) .then(() => { const end = new Date(); const time = (end.getTime() - start.getTime()) / 1000; console.log(`[${end.toLocaleTimeString()}] Finished '${colors.cyan('hygiene-watch-runner')}' after ${time} seconds`); - }); + runPromise.resolve(); + }) + .catch(runPromise.reject.bind); + + return runPromise.promise; }); }); @@ -402,7 +412,46 @@ function getModifiedFiles() { }); }); } + // this allows us to run hygiene as a git pre-commit hook. if (require.main === module) { run({ exitOnError: true, mode: 'staged' }); } + +class Deferred { + constructor(scope) { + this.scope = scope; + this._resolved = false; + this._rejected = false; + + this._promise = new Promise((resolve, reject) => { + this._resolve = resolve; + this._reject = reject; + }); + } + resolve(value) { + this._resolve.apply(this.scope ? 
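        // Invoke the saved resolver, using the optional scope as 'this'.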
this.scope : this, arguments); + this._resolved = true; + } + /** + * Rejects the promise + * @param {any} reason + * @memberof Deferred + */ + reject(reason) { + this._reject.apply(this.scope ? this.scope : this, arguments); + this._rejected = true; + } + get promise() { + return this._promise; + } + get resolved() { + return this._resolved === true; + } + get rejected() { + return this._rejected === true; + } + get completed() { + return this._rejected || this._resolved; + } +} \ No newline at end of file diff --git a/package.json b/package.json index 0155540cb629..17145aa47ed6 100644 --- a/package.json +++ b/package.json @@ -1529,7 +1529,7 @@ "fuzzy": "^0.1.3", "get-port": "^3.2.0", "iconv-lite": "^0.4.19", - "inversify": "^4.5.2", + "inversify": "^4.5.1", "line-by-line": "^0.1.5", "lodash": "^4.17.4", "minimatch": "^3.0.3", @@ -1583,5 +1583,10 @@ "typescript": "^2.5.2", "typescript-formatter": "^6.0.0", "vscode": "^1.1.5" + }, + "__metadata": { + "id": "f1f59ae4-9318-4f3c-a9b5-81b2eaa5f8a5", + "publisherDisplayName": "Microsoft", + "publisherId": "998b010b-e2af-44a5-a6cd-0b5fd3b9b6f8" } -} +} \ No newline at end of file diff --git a/src/client/language/characterStream.ts b/src/client/language/characterStream.ts new file mode 100644 index 000000000000..fbb65f903644 --- /dev/null +++ b/src/client/language/characterStream.ts @@ -0,0 +1,134 @@ +'use strict'; + +// tslint:disable-next-line:import-name +import Char from 'typescript-char'; +import { ICharacterStream, ITextIterator } from './definitions'; +import { TextIterator } from './textIterator'; + +export class CharacterStream implements ICharacterStream { + private text: ITextIterator; + private pos: number; + private curChar: number; + private endOfStream: boolean; + + constructor(text: string | ITextIterator) { + const iter = text as ITextIterator; + const s = text as string; + + this.text = iter !== null ? iter : new TextIterator(s); + this.pos = 0; + this.curChar = text.length > 0 ? text.charCodeAt(0) : 0; + this.endOfStream = text.length === 0; + } + + public getText(): string { + return this.text.getText(); + } + + public get position(): number { + return this.pos; + } + + public set position(value: number) { + this.pos = value; + this.checkBounds(); + } + + public get currentChar(): number { + return this.curChar; + } + + public get nextChar(): number { + return this.position + 1 < this.text.length ? this.text.charCodeAt(this.position + 1) : 0; + } + + public get prevChar(): number { + return this.position - 1 >= 0 ? this.text.charCodeAt(this.position - 1) : 0; + } + + public isEndOfStream(): boolean { + return this.endOfStream; + } + + public lookAhead(offset: number): number { + const pos = this.position + offset; + return pos < 0 || pos >= this.text.length ? 
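            // Out-of-range offsets read as 0 (NUL) rather than throwing.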
0 : this.text.charCodeAt(pos); + } + + public advance(offset: number) { + this.position += offset; + } + + public moveNext(): boolean { + if (this.pos < this.text.length - 1) { + // Most common case, no need to check bounds extensively + this.pos += 1; + this.curChar = this.text.charCodeAt(this.pos); + return true; + } + this.advance(1); + return !this.endOfStream; + } + + public isAtWhiteSpace(): boolean { + return this.curChar <= Char.Space || this.curChar === 0x200B; + } + + public isAtLineBreak(): boolean { + return this.curChar === Char.CarriageReturn || this.curChar === Char.DataLineEscape; + } + + public skipLineBreak(): void { + if (this.curChar === Char.CarriageReturn) { + this.moveNext(); + if (this.currentChar === Char.LineFeed) { + this.moveNext(); + } + } else if (this.curChar === Char.LineFeed) { + this.moveNext(); + } + } + + public skipWhitespace(): void { + while (!this.endOfStream && this.isAtWhiteSpace()) { + this.moveNext(); + } + } + + public skipToEol(): void { + while (!this.endOfStream && !this.isAtLineBreak()) { + this.moveNext(); + } + } + + public skipToWhitespace(): void { + while (!this.endOfStream && !this.isAtWhiteSpace()) { + this.moveNext(); + } + } + + public isAtString(): boolean { + return this.curChar === 0x22 || this.curChar === 0x27; + } + + public charCodeAt(index: number): number { + return this.text.charCodeAt(index); + } + + public get length(): number { + return this.text.length; + } + + private checkBounds(): void { + if (this.pos < 0) { + this.pos = 0; + } + + this.endOfStream = this.pos >= this.text.length; + if (this.endOfStream) { + this.pos = this.text.length; + } + + this.curChar = this.endOfStream ? 0 : this.text.charCodeAt(this.pos); + } +} diff --git a/src/client/language/definitions.ts b/src/client/language/definitions.ts new file mode 100644 index 000000000000..a4f7a22b4da7 --- /dev/null +++ b/src/client/language/definitions.ts @@ -0,0 +1,81 @@ +'use strict'; + +export interface ITextRange { + readonly start: number; + readonly end: number; + readonly length: number; + contains(position: number): boolean; +} + +export class TextRange implements ITextRange { + public readonly start: number; + public readonly length: number; + + constructor(start: number, length: number) { + if (start < 0) { + throw new Error('start must be non-negative'); + } + if (length < 0) { + throw new Error('length must be non-negative'); + } + this.start = start; + this.length = length; + } + + public static fromBounds(start: number, end: number) { + return new TextRange(start, end - start); + } + + public get end(): number { + return this.start + this.length; + } + + public contains(position: number): boolean { + return position >= this.start && position < this.end; + } +} + +export interface ITextRangeCollection extends ITextRange { + count: number; + getItemAt(index: number): T; + getItemAtPosition(position: number): number; + getItemContaining(position: number): number; +} + +export interface ITextIterator { + readonly length: number; + charCodeAt(index: number): number; + getText(): string; +} + +export interface ICharacterStream extends ITextIterator { + position: number; + readonly currentChar: number; + readonly nextChar: number; + readonly prevChar: number; + getText(): string; + isEndOfStream(): boolean; + lookAhead(offset: number): number; + advance(offset: number); + moveNext(): boolean; + isAtWhiteSpace(): boolean; + isAtLineBreak(): boolean; + isAtString(): boolean; + skipLineBreak(): void; + skipWhitespace(): void; + skipToEol(): void; + 
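    // Advances to the next whitespace character, skipping non-whitespace (the inverse of skipWhitespace).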
skipToWhitespace(): void; +} + +export enum TokenType { + String +} + +export interface IToken extends ITextRange { + readonly type: TokenType; +} + +export interface ITokenizer { + Tokenize(text: string): ITextRangeCollection; + Tokenize(text: string, start: number, length: number): ITextRangeCollection; +} diff --git a/src/client/language/textIterator.ts b/src/client/language/textIterator.ts new file mode 100644 index 000000000000..8af0e1caefda --- /dev/null +++ b/src/client/language/textIterator.ts @@ -0,0 +1,53 @@ +'use strict'; + +import { Position, Range, TextDocument } from 'vscode'; +import { ITextIterator } from './definitions'; + +export class TextIterator implements ITextIterator { + private text: string; + + constructor(text: string) { + this.text = text; + } + + public charCodeAt(index: number): number { + if (index >= 0 && index < this.length) { + return this.text.charCodeAt[index]; + } + return 0; + } + + public get length(): number { + return this.text.length; + } + + public getText(): string { + return this.text; + } +} + +export class DocumentTextIterator implements ITextIterator { + public readonly length: number; + + private document: TextDocument; + + constructor(document: TextDocument) { + this.document = document; + + const lastIndex = this.document.lineCount - 1; + const lastLine = this.document.lineAt(lastIndex); + const end = new Position(lastIndex, lastLine.range.end.character); + this.length = this.document.offsetAt(end); + } + + public charCodeAt(index: number): number { + const position = this.document.positionAt(index); + return this.document + .getText(new Range(position, position.translate(0, 1))) + .charCodeAt(position.character); + } + + public getText(): string { + return this.document.getText(); + } +} diff --git a/src/client/language/textRangeCollection.ts b/src/client/language/textRangeCollection.ts new file mode 100644 index 000000000000..5448445c3092 --- /dev/null +++ b/src/client/language/textRangeCollection.ts @@ -0,0 +1,103 @@ +'use strict'; + +import { ITextRange, ITextRangeCollection } from './definitions'; + +export class TextRangeCollection implements ITextRangeCollection { + private items: T[]; + + constructor(items: T[]) { + this.items = items; + } + + public get start(): number { + return this.items.length > 0 ? this.items[0].start : 0; + } + + public get end(): number { + return this.items.length > 0 ? 
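            // End of the last item, or 0 for an empty collection.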
this.items[this.items.length - 1].end : 0; + } + + public get length(): number { + return this.end - this.start; + } + + public get count(): number { + return this.items.length; + } + + public contains(position: number) { + return position >= this.start && position < this.end; + } + + public getItemAt(index: number): T { + if (index < 0 || index >= this.items.length) { + throw new Error('index is out of range'); + } + return this.items[index] as T; + } + + public getItemAtPosition(position: number): number { + if (this.count === 0) { + return -1; + } + if (position < this.start) { + return -1; + } + if (position >= this.end) { + return -1; + } + + let min = 0; + let max = this.count - 1; + + while (min <= max) { + const mid = min + (max - min) / 2; + const item = this.items[mid]; + + if (item.start === position) { + return mid; + } + + if (position < item.start) { + max = mid - 1; + } else { + min = mid + 1; + } + } + return -1; + } + + public getItemContaining(position: number): number { + if (this.count === 0) { + return -1; + } + if (position < this.start) { + return -1; + } + if (position > this.end) { + return -1; + } + + let min = 0; + let max = this.count - 1; + + while (min <= max) { + const mid = min + (max - min) / 2; + const item = this[mid]; + + if (item.Contains(position)) { + return mid; + } + if (mid < this.count - 1 && item.end <= position && position < this.items[mid + 1].start) { + return -1; + } + + if (position < item.Start) { + max = mid - 1; + } else { + min = mid + 1; + } + } + return -1; + } +} diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts new file mode 100644 index 000000000000..25af381dfc26 --- /dev/null +++ b/src/client/language/tokenizer.ts @@ -0,0 +1,119 @@ +'use strict'; + +import Char from 'typescript-char'; +import { CharacterStream } from './characterStream'; +import { ICharacterStream, ITextRangeCollection, IToken, ITokenizer, TextRange, TokenType } from './definitions'; +import { TextRangeCollection } from './textRangeCollection'; + +enum QuoteType { + None, + Single, + Double, + TripleSingle, + TripleDouble +} + +class Token extends TextRange implements IToken { + public readonly type: TokenType; + + constructor(type: TokenType, start: number, length: number) { + super(start, length); + this.type = type; + } +} + +export class Tokenizer implements ITokenizer { + private cs: ICharacterStream; + private tokens: IToken[] = []; + + public Tokenize(text: string): ITextRangeCollection; + public Tokenize(text: string, start: number, length: number): ITextRangeCollection; + + public Tokenize(text: string, start?: number, length?: number): ITextRangeCollection { + if (start === undefined) { + start = 0; + } else if (start < 0 || start >= text.length) { + throw new Error('Invalid range start'); + } + + if (length === undefined) { + length = text.length; + } else if (length < 0 || start + length >= text.length) { + throw new Error('Invalid range length'); + } + + this.cs = new CharacterStream(text); + this.cs.position = start; + + const end = start + length; + while (!this.cs.isEndOfStream()) { + this.AddNextToken(); + if (this.cs.position >= end) { + break; + } + } + return new TextRangeCollection(this.tokens); + } + + private AddNextToken(): void { + this.cs.skipWhitespace(); + if (this.cs.isEndOfStream()) { + return; + } + + if (!this.handleCharacter()) { + this.cs.moveNext(); + } + } + + private handleCharacter(): boolean { + const quoteType = this.getQuoteType(); + if (quoteType !== QuoteType.None) { + 
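            // Consume the whole string literal (single-, double-, or triple-quoted) as one token.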
this.handleString(quoteType); + return true; + } + return false; + } + + private getQuoteType(): QuoteType { + if (this.cs.currentChar === Char.SingleQuote) { + return this.cs.nextChar === Char.SingleQuote && this.cs.lookAhead(2) === Char.SingleQuote + ? QuoteType.TripleSingle + : QuoteType.Single; + } + if (this.cs.currentChar === Char.DoubleQuote) { + return this.cs.nextChar === Char.DoubleQuote && this.cs.lookAhead(2) === Char.DoubleQuote + ? QuoteType.TripleDouble + : QuoteType.Double; + } + return QuoteType.None; + } + + private handleString(quoteType: QuoteType): void { + const start = this.cs.position; + if (quoteType === QuoteType.Single || quoteType === QuoteType.Double) { + this.cs.moveNext(); + this.skipToSingleEndQuote(quoteType === QuoteType.Single + ? Char.SingleQuote + : Char.DoubleQuote); + } else { + this.cs.advance(3); + this.skipToTripleEndQuote(quoteType === QuoteType.TripleSingle + ? Char.SingleQuote + : Char.DoubleQuote); + } + this.tokens.push(new Token(TokenType.String, start, this.cs.position - start)); + } + + private skipToSingleEndQuote(quote: number): void { + while (!this.cs.isEndOfStream() && this.cs.currentChar !== quote) { + this.cs.moveNext(); + } + } + + private skipToTripleEndQuote(quote: number): void { + while (!this.cs.isEndOfStream() && (this.cs.currentChar !== quote || this.cs.nextChar !== quote || this.cs.lookAhead(2) !== quote)) { + this.cs.moveNext(); + } + } +} diff --git a/src/client/providers/completionProvider.ts b/src/client/providers/completionProvider.ts index b453d19a7f19..020fb71daffc 100644 --- a/src/client/providers/completionProvider.ts +++ b/src/client/providers/completionProvider.ts @@ -1,8 +1,9 @@ 'use strict'; import * as vscode from 'vscode'; -import { ProviderResult, SnippetString, Uri } from 'vscode'; +import { Position, ProviderResult, SnippetString, Uri } from 'vscode'; import { PythonSettings } from '../common/configSettings'; +import { Tokenizer } from '../language/tokenizer'; import { JediFactory } from '../languageServices/jediProxyFactory'; import { captureTelemetry } from '../telemetry'; import { COMPLETION } from '../telemetry/constants'; @@ -47,7 +48,7 @@ export class PythonCompletionItemProvider implements vscode.CompletionItemProvid return Promise.resolve([]); } // If starts with a """ (possible doc string), then return - if (lineText.trim().startsWith('"""')) { + if (this.isPositionInsideString(document, position)) { return Promise.resolve([]); } const type = proxy.CommandType.Completions; @@ -66,4 +67,10 @@ export class PythonCompletionItemProvider implements vscode.CompletionItemProvid return PythonCompletionItemProvider.parseData(data, document.uri); }); } + + private isPositionInsideString(document: vscode.TextDocument, position: vscode.Position): boolean { + const text = document.getText(new vscode.Range(new Position(0, 0), position)); + const t = new Tokenizer(); + return t.Tokenize(text).getItemContaining(document.offsetAt(position)) >= 0; + } } From eb4266980d1308fbae7a0017a9da673b788a9e45 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 1 Dec 2017 12:19:00 -0800 Subject: [PATCH 002/103] Fixed property names --- src/client/language/textRangeCollection.ts | 6 +++--- src/client/language/tokenizer.ts | 3 +++ src/client/providers/completionProvider.ts | 3 ++- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/client/language/textRangeCollection.ts b/src/client/language/textRangeCollection.ts index 5448445c3092..0464dc945382 100644 --- a/src/client/language/textRangeCollection.ts +++ 
b/src/client/language/textRangeCollection.ts @@ -83,16 +83,16 @@ export class TextRangeCollection implements ITextRangeColl while (min <= max) { const mid = min + (max - min) / 2; - const item = this[mid]; + const item = this.items[mid]; - if (item.Contains(position)) { + if (item.contains(position)) { return mid; } if (mid < this.count - 1 && item.end <= position && position < this.items[mid + 1].start) { return -1; } - if (position < item.Start) { + if (position < item.start) { max = mid - 1; } else { min = mid + 1; diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index 25af381dfc26..5cb0d4e3e474 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -1,5 +1,6 @@ 'use strict'; +// tslint:disable-next-line:import-name import Char from 'typescript-char'; import { CharacterStream } from './characterStream'; import { ICharacterStream, ITextRangeCollection, IToken, ITokenizer, TextRange, TokenType } from './definitions'; @@ -109,11 +110,13 @@ export class Tokenizer implements ITokenizer { while (!this.cs.isEndOfStream() && this.cs.currentChar !== quote) { this.cs.moveNext(); } + this.cs.moveNext(); } private skipToTripleEndQuote(quote: number): void { while (!this.cs.isEndOfStream() && (this.cs.currentChar !== quote || this.cs.nextChar !== quote || this.cs.lookAhead(2) !== quote)) { this.cs.moveNext(); } + this.cs.advance(3); } } diff --git a/src/client/providers/completionProvider.ts b/src/client/providers/completionProvider.ts index 020fb71daffc..b097b1633a9e 100644 --- a/src/client/providers/completionProvider.ts +++ b/src/client/providers/completionProvider.ts @@ -69,7 +69,8 @@ export class PythonCompletionItemProvider implements vscode.CompletionItemProvid } private isPositionInsideString(document: vscode.TextDocument, position: vscode.Position): boolean { - const text = document.getText(new vscode.Range(new Position(0, 0), position)); + const tokenizeTo = position.translate(1, 0); + const text = document.getText(new vscode.Range(new Position(0, 0), tokenizeTo)); const t = new Tokenizer(); return t.Tokenize(text).getItemContaining(document.offsetAt(position)) >= 0; } From 275697428d0b725083b2054b24e9aaa08b0db7b1 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 1 Dec 2017 15:10:20 -0800 Subject: [PATCH 003/103] Tests, round I --- src/client/language/characterStream.ts | 63 ++++++------- src/client/language/definitions.ts | 2 + src/client/language/textIterator.ts | 4 +- src/test/.vscode/settings.json | 3 +- src/test/index.ts | 3 +- src/test/language/characterStream.test.ts | 108 ++++++++++++++++++++++ src/test/language/textIterator.test.ts | 24 +++++ src/test/language/textRange.test.ts | 52 +++++++++++ 8 files changed, 222 insertions(+), 37 deletions(-) create mode 100644 src/test/language/characterStream.test.ts create mode 100644 src/test/language/textIterator.test.ts create mode 100644 src/test/language/textRange.test.ts diff --git a/src/client/language/characterStream.ts b/src/client/language/characterStream.ts index fbb65f903644..e90ef7f2e6b7 100644 --- a/src/client/language/characterStream.ts +++ b/src/client/language/characterStream.ts @@ -7,18 +7,15 @@ import { TextIterator } from './textIterator'; export class CharacterStream implements ICharacterStream { private text: ITextIterator; - private pos: number; - private curChar: number; - private endOfStream: boolean; + private _position: number; + private _currentChar: number; + private _isEndOfStream: boolean; constructor(text: string | ITextIterator) { - 
const iter = text as ITextIterator; - const s = text as string; - - this.text = iter !== null ? iter : new TextIterator(s); - this.pos = 0; - this.curChar = text.length > 0 ? text.charCodeAt(0) : 0; - this.endOfStream = text.length === 0; + this.text = typeof text === 'string' ? new TextIterator(text) : text; + this._position = 0; + this._currentChar = text.length > 0 ? text.charCodeAt(0) : 0; + this._isEndOfStream = text.length === 0; } public getText(): string { @@ -26,16 +23,16 @@ export class CharacterStream implements ICharacterStream { } public get position(): number { - return this.pos; + return this._position; } public set position(value: number) { - this.pos = value; + this._position = value; this.checkBounds(); } public get currentChar(): number { - return this.curChar; + return this._currentChar; } public get nextChar(): number { @@ -47,11 +44,11 @@ export class CharacterStream implements ICharacterStream { } public isEndOfStream(): boolean { - return this.endOfStream; + return this._isEndOfStream; } public lookAhead(offset: number): number { - const pos = this.position + offset; + const pos = this._position + offset; return pos < 0 || pos >= this.text.length ? 0 : this.text.charCodeAt(pos); } @@ -60,55 +57,55 @@ export class CharacterStream implements ICharacterStream { } public moveNext(): boolean { - if (this.pos < this.text.length - 1) { + if (this._position < this.text.length - 1) { // Most common case, no need to check bounds extensively - this.pos += 1; - this.curChar = this.text.charCodeAt(this.pos); + this._position += 1; + this._currentChar = this.text.charCodeAt(this._position); return true; } this.advance(1); - return !this.endOfStream; + return !this.isEndOfStream(); } public isAtWhiteSpace(): boolean { - return this.curChar <= Char.Space || this.curChar === 0x200B; + return this.currentChar <= Char.Space || this.currentChar === 0x200B; // Unicode whitespace } public isAtLineBreak(): boolean { - return this.curChar === Char.CarriageReturn || this.curChar === Char.DataLineEscape; + return this.currentChar === Char.CarriageReturn || this.currentChar === Char.LineFeed; } public skipLineBreak(): void { - if (this.curChar === Char.CarriageReturn) { + if (this._currentChar === Char.CarriageReturn) { this.moveNext(); if (this.currentChar === Char.LineFeed) { this.moveNext(); } - } else if (this.curChar === Char.LineFeed) { + } else if (this._currentChar === Char.LineFeed) { this.moveNext(); } } public skipWhitespace(): void { - while (!this.endOfStream && this.isAtWhiteSpace()) { + while (!this.isEndOfStream() && this.isAtWhiteSpace()) { this.moveNext(); } } public skipToEol(): void { - while (!this.endOfStream && !this.isAtLineBreak()) { + while (!this.isEndOfStream() && !this.isAtLineBreak()) { this.moveNext(); } } public skipToWhitespace(): void { - while (!this.endOfStream && !this.isAtWhiteSpace()) { + while (!this.isEndOfStream() && !this.isAtWhiteSpace()) { this.moveNext(); } } public isAtString(): boolean { - return this.curChar === 0x22 || this.curChar === 0x27; + return this.currentChar === Char.SingleQuote || this.currentChar === Char.DoubleQuote; } public charCodeAt(index: number): number { @@ -120,15 +117,15 @@ export class CharacterStream implements ICharacterStream { } private checkBounds(): void { - if (this.pos < 0) { - this.pos = 0; + if (this._position < 0) { + this._position = 0; } - this.endOfStream = this.pos >= this.text.length; - if (this.endOfStream) { - this.pos = this.text.length; + this._isEndOfStream = this._position >= this.text.length; + if 
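        // Clamp the position once it runs past the end of the text.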
(this._isEndOfStream) { + this._position = this.text.length; } - this.curChar = this.endOfStream ? 0 : this.text.charCodeAt(this.pos); + this._currentChar = this._isEndOfStream ? 0 : this.text.charCodeAt(this._position); } } diff --git a/src/client/language/definitions.ts b/src/client/language/definitions.ts index a4f7a22b4da7..3e965b0b91bf 100644 --- a/src/client/language/definitions.ts +++ b/src/client/language/definitions.ts @@ -8,6 +8,8 @@ export interface ITextRange { } export class TextRange implements ITextRange { + public static readonly empty = TextRange.fromBounds(0, 0); + public readonly start: number; public readonly length: number; diff --git a/src/client/language/textIterator.ts b/src/client/language/textIterator.ts index 8af0e1caefda..927078da3939 100644 --- a/src/client/language/textIterator.ts +++ b/src/client/language/textIterator.ts @@ -11,8 +11,8 @@ export class TextIterator implements ITextIterator { } public charCodeAt(index: number): number { - if (index >= 0 && index < this.length) { - return this.text.charCodeAt[index]; + if (index >= 0 && index < this.text.length) { + return this.text.charCodeAt(index); } return 0; } diff --git a/src/test/.vscode/settings.json b/src/test/.vscode/settings.json index 2218e2cecd87..12cde5b9dc53 100644 --- a/src/test/.vscode/settings.json +++ b/src/test/.vscode/settings.json @@ -21,5 +21,6 @@ "python.linting.pydocstyleEnabled": false, "python.linting.pylamaEnabled": false, "python.linting.mypyEnabled": false, - "python.formatting.provider": "yapf" + "python.formatting.provider": "yapf", + "python.pythonPath": "python" } \ No newline at end of file diff --git a/src/test/index.ts b/src/test/index.ts index 0202b4e2dc43..acce5db2392a 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -8,6 +8,7 @@ testRunner.configure({ ui: 'tdd', useColors: true, timeout: 25000, - retries: 3 + retries: 3, + grep: 'Language.CharacterStream' } as {}); module.exports = testRunner; diff --git a/src/test/language/characterStream.test.ts b/src/test/language/characterStream.test.ts new file mode 100644 index 000000000000..d1c003d6b2d9 --- /dev/null +++ b/src/test/language/characterStream.test.ts @@ -0,0 +1,108 @@ +import * as assert from 'assert'; +// tslint:disable-next-line:import-name +import Char from 'typescript-char'; +import { CharacterStream } from '../../client/language/characterStream'; +import { ICharacterStream, TextRange } from '../../client/language/definitions'; +import { TextIterator } from '../../client/language/textIterator'; + +// tslint:disable-next-line:max-func-body-length +suite('Language.CharacterStream', () => { + test('Iteration (string)', async () => { + const content = 'some text'; + const cs = new CharacterStream(content); + testIteration(cs, content); + }); + test('Iteration (iterator)', async () => { + const content = 'some text'; + const cs = new CharacterStream(new TextIterator(content)); + testIteration(cs, content); + }); + test('Positioning', async () => { + const content = 'some text'; + const cs = new CharacterStream(content); + assert.equal(cs.position, 0); + cs.advance(1); + assert.equal(cs.position, 1); + cs.advance(1); + assert.equal(cs.position, 2); + cs.advance(2); + assert.equal(cs.position, 4); + cs.advance(-3); + assert.equal(cs.position, 1); + cs.advance(-3); + assert.equal(cs.position, 0); + cs.advance(100); + assert.equal(cs.position, content.length); + }); + test('Characters', async () => { + const content = 'some \ttext "" \' \' \n text \r\n more text'; + const cs = new CharacterStream(content); + for 
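        // Check every character-level accessor at each position of the stream.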
(let i = 0; i < content.length; i += 1) { + assert.equal(cs.currentChar, content.charCodeAt(i)); + + assert.equal(cs.nextChar, i < content.length - 1 ? content.charCodeAt(i + 1) : 0); + assert.equal(cs.prevChar, i > 0 ? content.charCodeAt(i - 1) : 0); + + assert.equal(cs.lookAhead(2), i < content.length - 2 ? content.charCodeAt(i + 2) : 0); + assert.equal(cs.lookAhead(-2), i > 1 ? content.charCodeAt(i - 2) : 0); + + const ch = content.charCodeAt(i); + const isLineBreak = ch === Char.LineFeed || ch === Char.CarriageReturn; + assert.equal(cs.isAtWhiteSpace(), ch === Char.Tab || ch === Char.Space || isLineBreak); + assert.equal(cs.isAtLineBreak(), isLineBreak); + assert.equal(cs.isAtString(), ch === Char.SingleQuote || ch === Char.DoubleQuote); + + cs.moveNext(); + } + }); + test('Skip', async () => { + const content = 'some \ttext "" \' \' \n text \r\n more text'; + const cs = new CharacterStream(content); + + cs.skipWhitespace(); + assert.equal(cs.position, 0); + + cs.skipToWhitespace(); + assert.equal(cs.position, 4); + + cs.skipToWhitespace(); + assert.equal(cs.position, 4); + + cs.skipWhitespace(); + assert.equal(cs.position, 6); + + cs.skipLineBreak(); + assert.equal(cs.position, 6); + + cs.skipToEol(); + assert.equal(cs.position, 18); + + cs.skipLineBreak(); + assert.equal(cs.position, 19); + }); +}); + +function testIteration(cs: ICharacterStream, content: string) { + assert.equal(cs.position, 0); + assert.equal(cs.length, content.length); + assert.equal(cs.isEndOfStream(), false); + + for (let i = -2; i < content.length + 2; i += 1) { + const ch = cs.charCodeAt(i); + if (i < 0 || i >= content.length) { + assert.equal(ch, 0); + } else { + assert.equal(ch, content.charCodeAt(i)); + } + } + + for (let i = 0; i < content.length; i += 1) { + assert.equal(cs.isEndOfStream(), false); + assert.equal(cs.position, i); + assert.equal(cs.currentChar, content.charCodeAt(i)); + cs.moveNext(); + } + + assert.equal(cs.isEndOfStream(), true); + assert.equal(cs.position, content.length); +} diff --git a/src/test/language/textIterator.test.ts b/src/test/language/textIterator.test.ts new file mode 100644 index 000000000000..dddfe3478ec5 --- /dev/null +++ b/src/test/language/textIterator.test.ts @@ -0,0 +1,24 @@ +import * as assert from 'assert'; +import { TextIterator } from '../../client/language/textIterator'; + +// tslint:disable-next-line:max-func-body-length +suite('Language.TextIterator', () => { + test('Construction', async () => { + const content = 'some text'; + const ti = new TextIterator(content); + assert.equal(ti.length, content.length); + assert.equal(ti.getText(), content); + }); + test('Iteration', async () => { + const content = 'some text'; + const ti = new TextIterator(content); + for (let i = -2; i < content.length + 2; i += 1) { + const ch = ti.charCodeAt(i); + if (i < 0 || i >= content.length) { + assert.equal(ch, 0); + } else { + assert.equal(ch, content.charCodeAt(i)); + } + } + }); +}); diff --git a/src/test/language/textRange.test.ts b/src/test/language/textRange.test.ts new file mode 100644 index 000000000000..4e0da8feb06c --- /dev/null +++ b/src/test/language/textRange.test.ts @@ -0,0 +1,52 @@ +import * as assert from 'assert'; +import { TextRange } from '../../client/language/definitions'; + +// tslint:disable-next-line:max-func-body-length +suite('Language.TextRange', () => { + test('Empty static', async () => { + const e = TextRange.empty; + assert.equal(e.start, 0); + assert.equal(e.end, 0); + assert.equal(e.length, 0); + }); + test('Construction', async () => { + let r = 
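        // start 10 with length 20 gives end 30.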
new TextRange(10, 20); + assert.equal(r.start, 10); + assert.equal(r.end, 30); + assert.equal(r.length, 20); + r = new TextRange(10, 0); + assert.equal(r.start, 10); + assert.equal(r.end, 10); + assert.equal(r.length, 0); + }); + test('From bounds', async () => { + let r = TextRange.fromBounds(7, 9); + assert.equal(r.start, 7); + assert.equal(r.end, 9); + assert.equal(r.length, 2); + + r = TextRange.fromBounds(5, 5); + assert.equal(r.start, 5); + assert.equal(r.end, 5); + assert.equal(r.length, 0); + }); + test('Contains', async () => { + const r = TextRange.fromBounds(7, 9); + assert.equal(r.contains(-1), false); + assert.equal(r.contains(6), false); + assert.equal(r.contains(7), true); + assert.equal(r.contains(8), true); + assert.equal(r.contains(9), false); + assert.equal(r.contains(10), false); + }); + test('Exceptions', async () => { + assert.throws( + () => { const e = new TextRange(0, -1); }, + Error + ); + assert.throws( + () => { const e = TextRange.fromBounds(3, 1); }, + Error + ); + }); +}); From c2c1ced6cf64be60ca808c269291af191df3b3b2 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 1 Dec 2017 16:07:28 -0800 Subject: [PATCH 004/103] Tests, round II --- src/client/language/definitions.ts | 3 +- src/client/language/tokenizer.ts | 13 +++++ src/client/providers/completionProvider.ts | 15 +++--- src/test/index.ts | 2 +- src/test/language/textRangeCollection.test.ts | 53 +++++++++++++++++++ src/test/language/tokenizer.test.ts | 39 ++++++++++++++ 6 files changed, 115 insertions(+), 10 deletions(-) create mode 100644 src/test/language/textRangeCollection.test.ts create mode 100644 src/test/language/tokenizer.test.ts diff --git a/src/client/language/definitions.ts b/src/client/language/definitions.ts index 3e965b0b91bf..d001adccbd88 100644 --- a/src/client/language/definitions.ts +++ b/src/client/language/definitions.ts @@ -70,7 +70,8 @@ export interface ICharacterStream extends ITextIterator { } export enum TokenType { - String + String, + Comment } export interface IToken extends ITextRange { diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index 5cb0d4e3e474..6d63f4168414 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -73,9 +73,22 @@ export class Tokenizer implements ITokenizer { this.handleString(quoteType); return true; } + switch (this.cs.currentChar) { + case Char.Hash: + this.handleComment(); + break; + default: + break; + } return false; } + private handleComment(): void { + const start = this.cs.position; + this.cs.skipToEol(); + this.tokens.push(new Token(TokenType.Comment, start, this.cs.position - start)); + } + private getQuoteType(): QuoteType { if (this.cs.currentChar === Char.SingleQuote) { return this.cs.nextChar === Char.SingleQuote && this.cs.lookAhead(2) === Char.SingleQuote diff --git a/src/client/providers/completionProvider.ts b/src/client/providers/completionProvider.ts index b097b1633a9e..10c930348e33 100644 --- a/src/client/providers/completionProvider.ts +++ b/src/client/providers/completionProvider.ts @@ -3,6 +3,7 @@ import * as vscode from 'vscode'; import { Position, ProviderResult, SnippetString, Uri } from 'vscode'; import { PythonSettings } from '../common/configSettings'; +import { TokenType } from '../language/definitions'; import { Tokenizer } from '../language/tokenizer'; import { JediFactory } from '../languageServices/jediProxyFactory'; import { captureTelemetry } from '../telemetry'; @@ -43,12 +44,8 @@ export class PythonCompletionItemProvider implements 
vscode.CompletionItemProvid if (lineText.match(/^\s*\/\//)) { return Promise.resolve([]); } - // If starts with a comment, then return - if (lineText.trim().startsWith('#')) { - return Promise.resolve([]); - } - // If starts with a """ (possible doc string), then return - if (this.isPositionInsideString(document, position)) { + // Suppress completion inside string and comments + if (this.isPositionInsideStringOrComment(document, position)) { return Promise.resolve([]); } const type = proxy.CommandType.Completions; @@ -68,10 +65,12 @@ export class PythonCompletionItemProvider implements vscode.CompletionItemProvid }); } - private isPositionInsideString(document: vscode.TextDocument, position: vscode.Position): boolean { + private isPositionInsideStringOrComment(document: vscode.TextDocument, position: vscode.Position): boolean { const tokenizeTo = position.translate(1, 0); const text = document.getText(new vscode.Range(new Position(0, 0), tokenizeTo)); const t = new Tokenizer(); - return t.Tokenize(text).getItemContaining(document.offsetAt(position)) >= 0; + const tokens = t.Tokenize(text); + const index = tokens.getItemContaining(document.offsetAt(position)); + return index >= 0 && (tokens[index].TokenType === TokenType.String || tokens[index].TokenType === TokenType.Comment); } } diff --git a/src/test/index.ts b/src/test/index.ts index acce5db2392a..6480c37439b6 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -9,6 +9,6 @@ testRunner.configure({ useColors: true, timeout: 25000, retries: 3, - grep: 'Language.CharacterStream' + grep: 'Language.Tokenizer' } as {}); module.exports = testRunner; diff --git a/src/test/language/textRangeCollection.test.ts b/src/test/language/textRangeCollection.test.ts new file mode 100644 index 000000000000..5b5dfad8c8b6 --- /dev/null +++ b/src/test/language/textRangeCollection.test.ts @@ -0,0 +1,53 @@ +import * as assert from 'assert'; +import { TextRange } from '../../client/language/definitions'; +import { TextRangeCollection } from '../../client/language/textRangeCollection'; + +// tslint:disable-next-line:max-func-body-length +suite('Language.TextRangeCollection', () => { + test('Empty', async () => { + const items: TextRange[] = []; + const c = new TextRangeCollection(items); + assert.equal(c.start, 0); + assert.equal(c.end, 0); + assert.equal(c.length, 0); + assert.equal(c.count, 0); + }); + test('Basic', async () => { + const items: TextRange[] = []; + items.push(new TextRange(2, 1)); + items.push(new TextRange(4, 2)); + const c = new TextRangeCollection(items); + assert.equal(c.start, 2); + assert.equal(c.end, 6); + assert.equal(c.length, 4); + assert.equal(c.count, 2); + + assert.equal(c.getItemAt(0).start, 2); + assert.equal(c.getItemAt(0).length, 1); + + assert.equal(c.getItemAt(1).start, 4); + assert.equal(c.getItemAt(1).length, 2); + }); + test('Contains position', async () => { + const items: TextRange[] = []; + items.push(new TextRange(2, 1)); + items.push(new TextRange(4, 2)); + const c = new TextRangeCollection(items); + const results = [-1, -1, 0, -1, 1, 1, -1]; + for (let i = 0; i < results.length; i += 1) { + const index = c.getItemContaining(i); + assert.equal(index, results[i]); + } + }); + test('Item at position', async () => { + const items: TextRange[] = []; + items.push(new TextRange(2, 1)); + items.push(new TextRange(4, 2)); + const c = new TextRangeCollection(items); + const results = [-1, -1, 0, -1, 1, 1, -1]; + for (let i = 0; i < results.length; i += 1) { + const index = c.getItemAtPosition(i); + assert.equal(index, 
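            // -1 means no range contains this position.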
results[i]); + } + }); +}); diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts new file mode 100644 index 000000000000..29874f75c26a --- /dev/null +++ b/src/test/language/tokenizer.test.ts @@ -0,0 +1,39 @@ +import * as assert from 'assert'; +import { TextRange, TokenType } from '../../client/language/definitions'; +import { TextRangeCollection } from '../../client/language/textRangeCollection'; +import { Tokenizer } from '../../client/language/tokenizer'; + +// tslint:disable-next-line:max-func-body-length +suite('Language.Tokenizer', () => { + test('Empty', async () => { + const t = new Tokenizer(); + const tokens = t.Tokenize(''); + assert.equal(tokens instanceof TextRangeCollection, true); + assert.equal(tokens.count, 0); + assert.equal(tokens.length, 0); + }); + test('Strings', async () => { + const t = new Tokenizer(); + const tokens = t.Tokenize(' "string" """line1\n#line2"""\t\'un#closed'); + assert.equal(tokens.count, 3); + + const ranges = [1, 8, 10, 18, 29, 10]; + for (let i = 0; i < ranges.length / 2; i += 2) { + assert.equal(tokens.getItemAt(i).start, ranges[i]); + assert.equal(tokens.getItemAt(i).length, ranges[i + 1]); + assert.equal(tokens.getItemAt(i).type, TokenType.String); + } + }); + test('Comments', async () => { + const t = new Tokenizer(); + const tokens = t.Tokenize(' #co"""mment1\n\t\n#comm\'ent2 '); + assert.equal(tokens.count, 2); + + const ranges = [1, 12, 15, 11]; + for (let i = 0; i < ranges.length / 2; i += 2) { + assert.equal(tokens.getItemAt(i).start, ranges[i]); + assert.equal(tokens.getItemAt(i).length, ranges[i + 1]); + assert.equal(tokens.getItemAt(i).type, TokenType.Comment); + } + }); +}); From 14864a580d0affdc9f8fda616fe20e2dbdbb2df9 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Sun, 3 Dec 2017 16:42:46 -0800 Subject: [PATCH 005/103] tokenizer test --- src/test/language/tokenizer.test.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts index 29874f75c26a..1ce0165e4241 100644 --- a/src/test/language/tokenizer.test.ts +++ b/src/test/language/tokenizer.test.ts @@ -18,9 +18,9 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.count, 3); const ranges = [1, 8, 10, 18, 29, 10]; - for (let i = 0; i < ranges.length / 2; i += 2) { - assert.equal(tokens.getItemAt(i).start, ranges[i]); - assert.equal(tokens.getItemAt(i).length, ranges[i + 1]); + for (let i = 0; i < tokens.count; i += 1) { + assert.equal(tokens.getItemAt(i).start, ranges[2 * i]); + assert.equal(tokens.getItemAt(i).length, ranges[2 * i + 1]); assert.equal(tokens.getItemAt(i).type, TokenType.String); } }); From 0ed51d64ee215356c7e5665c3b642c386e997f2d Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Sun, 3 Dec 2017 16:46:19 -0800 Subject: [PATCH 006/103] Remove temorary change --- src/test/index.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/test/index.ts b/src/test/index.ts index 6480c37439b6..0202b4e2dc43 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -8,7 +8,6 @@ testRunner.configure({ ui: 'tdd', useColors: true, timeout: 25000, - retries: 3, - grep: 'Language.Tokenizer' + retries: 3 } as {}); module.exports = testRunner; From 51b544ca9df3515857f6d07411b7bf36bbb415d0 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Sun, 3 Dec 2017 17:08:35 -0800 Subject: [PATCH 007/103] Fix merge issue --- package.json | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/package.json b/package.json 
index 17145aa47ed6..e910867b6620 100644 --- a/package.json +++ b/package.json @@ -1529,7 +1529,7 @@ "fuzzy": "^0.1.3", "get-port": "^3.2.0", "iconv-lite": "^0.4.19", - "inversify": "^4.5.1", + "inversify": "^4.5.2", "line-by-line": "^0.1.5", "lodash": "^4.17.4", "minimatch": "^3.0.3", @@ -1583,10 +1583,5 @@ "typescript": "^2.5.2", "typescript-formatter": "^6.0.0", "vscode": "^1.1.5" - }, - "__metadata": { - "id": "f1f59ae4-9318-4f3c-a9b5-81b2eaa5f8a5", - "publisherDisplayName": "Microsoft", - "publisherId": "998b010b-e2af-44a5-a6cd-0b5fd3b9b6f8" } } \ No newline at end of file From 3cd11e649febd2cc750f9c8bd238a7f9e0a222d5 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Sun, 3 Dec 2017 17:18:09 -0800 Subject: [PATCH 008/103] Merge conflict --- .vscode/settings.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 1884ba9a0255..de5f2d58fde1 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -17,6 +17,5 @@ "python.linting.enabled": false, "python.formatting.formatOnSave": false, "python.unitTest.promptToConfigure": false, - "python.workspaceSymbols.enabled": false, - "python.formatting.provider": "yapf" + "python.workspaceSymbols.enabled": false } From 82e0ad16d200cfdb2ba4be1270305a882015ac9b Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Sun, 3 Dec 2017 17:25:50 -0800 Subject: [PATCH 009/103] Merge conflict --- .vscode/settings.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index de5f2d58fde1..1884ba9a0255 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -17,5 +17,6 @@ "python.linting.enabled": false, "python.formatting.formatOnSave": false, "python.unitTest.promptToConfigure": false, - "python.workspaceSymbols.enabled": false + "python.workspaceSymbols.enabled": false, + "python.formatting.provider": "yapf" } From 9295c1ab90d1a328351b771d35e5c99103d32ecb Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Mon, 4 Dec 2017 11:23:18 -0800 Subject: [PATCH 010/103] Completion test --- package-lock.json | 5 +++ package.json | 3 +- src/test/autocomplete/base.test.ts | 53 +++++++++++++++++++---- src/test/pythonFiles/autocomp/suppress.py | 6 +++ 4 files changed, 58 insertions(+), 9 deletions(-) create mode 100644 src/test/pythonFiles/autocomp/suppress.py diff --git a/package-lock.json b/package-lock.json index 10e608b1634d..50cf5f0c7820 100644 --- a/package-lock.json +++ b/package-lock.json @@ -5457,6 +5457,11 @@ "integrity": "sha1-PFtv1/beCRQmkCfwPAlGdY92c6Q=", "dev": true }, + "typescript-char": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/typescript-char/-/typescript-char-0.0.0.tgz", + "integrity": "sha1-VY/tpzfHZaYQtzfu+7F3Xum8jas=" + }, "typescript-formatter": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/typescript-formatter/-/typescript-formatter-6.1.0.tgz", diff --git a/package.json b/package.json index e910867b6620..23dc208e4ccf 100644 --- a/package.json +++ b/package.json @@ -1539,6 +1539,7 @@ "semver": "^5.4.1", "tmp": "0.0.29", "tree-kill": "^1.1.0", + "typescript-char": "^0.0.0", "uint64be": "^1.0.1", "untildify": "^3.0.2", "vscode-debugadapter": "^1.0.1", @@ -1584,4 +1585,4 @@ "typescript-formatter": "^6.0.0", "vscode": "^1.1.5" } -} \ No newline at end of file +} diff --git a/src/test/autocomplete/base.test.ts b/src/test/autocomplete/base.test.ts index a5398b7beb43..59a7e51c14f4 100644 --- a/src/test/autocomplete/base.test.ts +++ 
b/src/test/autocomplete/base.test.ts @@ -1,19 +1,18 @@ // Note: This example test is leveraging the Mocha test framework. // Please refer to their documentation on https://mochajs.org/ for help. - // The module 'assert' provides assertion methods from node import * as assert from 'assert'; import { EOL } from 'os'; +import * as path from 'path'; // You can import and use all API from the 'vscode' module // as well as import your extension to test it import * as vscode from 'vscode'; -import * as path from 'path'; import * as settings from '../../client/common/configSettings'; -import { initialize, closeActiveWindows, initializeTest } from '../initialize'; -import { execPythonFile } from '../../client/common/utils'; import { PythonSettings } from '../../client/common/configSettings'; +import { execPythonFile } from '../../client/common/utils'; import { rootWorkspaceUri } from '../common'; +import { closeActiveWindows, initialize, initializeTest } from '../initialize'; const autoCompPath = path.join(__dirname, '..', '..', '..', 'src', 'test', 'pythonFiles', 'autocomp'); const fileOne = path.join(autoCompPath, 'one.py'); @@ -23,7 +22,9 @@ const fileLambda = path.join(autoCompPath, 'lamb.py'); const fileDecorator = path.join(autoCompPath, 'deco.py'); const fileEncoding = path.join(autoCompPath, 'four.py'); const fileEncodingUsed = path.join(autoCompPath, 'five.py'); +const fileSuppress = path.join(autoCompPath, 'suppress.py'); +// tslint:disable-next-line:max-func-body-length suite('Autocomplete', () => { let isPython3: Promise; suiteSetup(async () => { @@ -31,9 +32,9 @@ suite('Autocomplete', () => { const version = await execPythonFile(rootWorkspaceUri, PythonSettings.getInstance(rootWorkspaceUri).pythonPath, ['--version'], __dirname, true); isPython3 = Promise.resolve(version.indexOf('3.') >= 0); }); - setup(() => initializeTest()); - suiteTeardown(() => closeActiveWindows()); - teardown(() => closeActiveWindows()); + setup(initializeTest); + suiteTeardown(closeActiveWindows); + teardown(closeActiveWindows); test('For "sys."', done => { let textEditor: vscode.TextEditor; @@ -115,7 +116,7 @@ suite('Autocomplete', () => { const position = new vscode.Position(10, 9); const list = await vscode.commands.executeCommand('vscode.executeCompletionItemProvider', textDocument.uri, position); assert.notEqual(list.items.filter(item => item.label === 'sleep').length, 0, 'sleep not found'); - assert.notEqual(list.items.filter(item => item.documentation.toString().startsWith("Delay execution for a given number of seconds. The argument may be")).length, 0, 'Documentation incorrect'); + assert.notEqual(list.items.filter(item => item.documentation.toString().startsWith('Delay execution for a given number of seconds. 
The argument may be')).length, 0, 'Documentation incorrect'); }); test('For custom class', done => { @@ -173,4 +174,40 @@ suite('Autocomplete', () => { assert.equal(list.items.filter(item => item.label === 'showMessage')[0].documentation, documentation, 'showMessage unicode documentation is incorrect'); }).then(done, done); }); + + // https://github.com/Microsoft/vscode-python/issues/110 + test('Suppress in strings/comments', done => { + let textEditor: vscode.TextEditor; + let textDocument: vscode.TextDocument; + const positions = [ + new vscode.Position(0, 1), // false + new vscode.Position(0, 9), // true + new vscode.Position(0, 12), // false + new vscode.Position(1, 1), // false + new vscode.Position(1, 3), // false + new vscode.Position(2, 7), // false + new vscode.Position(3, 0), // false + new vscode.Position(4, 2), // false + new vscode.Position(4, 8), // false + new vscode.Position(5, 4) // false + ]; + const expected = [ + false, true, false, false, false, false, false, false, false, false + ]; + vscode.workspace.openTextDocument(fileSuppress).then(document => { + textDocument = document; + return vscode.window.showTextDocument(textDocument); + }).then(editor => { + assert(vscode.window.activeTextEditor, 'No active editor'); + textEditor = editor; + for (let i = 0; i < positions.length; i += 1) { + vscode.commands.executeCommand('vscode.executeCompletionItemProvider', + textDocument.uri, positions[i]).then(list => { + const result = list.items.filter(item => item.label === 'abs').length; + assert.equal(result > 0, expected[i], + `Expected ${expected[i]} at position ${positions[i].line}:${positions[i].character} but got ${result}`); + }); + } + }).then(done, done); + }); }); diff --git a/src/test/pythonFiles/autocomp/suppress.py b/src/test/pythonFiles/autocomp/suppress.py new file mode 100644 index 000000000000..9f74959ef14b --- /dev/null +++ b/src/test/pythonFiles/autocomp/suppress.py @@ -0,0 +1,6 @@ +"string" #comment +""" +content +""" +#comment +'un#closed From 06eb1a56049bdbcb9db71c550d9cbd869a4fce63 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Mon, 4 Dec 2017 11:26:07 -0800 Subject: [PATCH 011/103] Fix last line --- .vscode/launch.json | 2 +- .vscode/tasks.json | 2 +- gulpfile.js | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index a69c3396ff4e..51590d047be8 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -77,4 +77,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/.vscode/tasks.json b/.vscode/tasks.json index ccf99a2c6f20..8a17b7da905f 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -72,4 +72,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/gulpfile.js b/gulpfile.js index 6c3f7819d003..ecf4dd1d5bca 100644 --- a/gulpfile.js +++ b/gulpfile.js @@ -454,4 +454,4 @@ class Deferred { get completed() { return this._rejected || this._resolved; } -} \ No newline at end of file +} From e9db8e0de99936daef098000181c44db517c0d8c Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Mon, 4 Dec 2017 12:47:05 -0800 Subject: [PATCH 012/103] Fix javascript math --- src/client/language/textRangeCollection.ts | 4 ++-- src/test/index.ts | 3 ++- src/test/language/textRangeCollection.test.ts | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/client/language/textRangeCollection.ts b/src/client/language/textRangeCollection.ts index 0464dc945382..d09becea902f 100644 --- a/src/client/language/textRangeCollection.ts +++ b/src/client/language/textRangeCollection.ts @@ 
-51,7 +51,7 @@ export class TextRangeCollection implements ITextRangeColl let max = this.count - 1; while (min <= max) { - const mid = min + (max - min) / 2; + const mid = Math.floor(min + (max - min) / 2); const item = this.items[mid]; if (item.start === position) { @@ -82,7 +82,7 @@ export class TextRangeCollection implements ITextRangeColl let max = this.count - 1; while (min <= max) { - const mid = min + (max - min) / 2; + const mid = Math.floor(min + (max - min) / 2); const item = this.items[mid]; if (item.contains(position)) { diff --git a/src/test/index.ts b/src/test/index.ts index 0202b4e2dc43..9eb083463586 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -8,6 +8,7 @@ testRunner.configure({ ui: 'tdd', useColors: true, timeout: 25000, - retries: 3 + retries: 3, + grep: "Language.TextRangeCollection" } as {}); module.exports = testRunner; diff --git a/src/test/language/textRangeCollection.test.ts b/src/test/language/textRangeCollection.test.ts index 5b5dfad8c8b6..44666dd811ce 100644 --- a/src/test/language/textRangeCollection.test.ts +++ b/src/test/language/textRangeCollection.test.ts @@ -44,7 +44,7 @@ suite('Language.TextRangeCollection', () => { items.push(new TextRange(2, 1)); items.push(new TextRange(4, 2)); const c = new TextRangeCollection(items); - const results = [-1, -1, 0, -1, 1, 1, -1]; + const results = [-1, -1, 0, -1, 1, -1, -1]; for (let i = 0; i < results.length; i += 1) { const index = c.getItemAtPosition(i); assert.equal(index, results[i]); From d8ab041f3f8fb459a37bd3adc10de82fadfd3c7b Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 5 Dec 2017 10:07:55 -0800 Subject: [PATCH 013/103] Make test await for results --- src/test/autocomplete/base.test.ts | 27 +++++++++------------------ src/test/index.ts | 3 +-- 2 files changed, 10 insertions(+), 20 deletions(-) diff --git a/src/test/autocomplete/base.test.ts b/src/test/autocomplete/base.test.ts index 59a7e51c14f4..e4b8b854570d 100644 --- a/src/test/autocomplete/base.test.ts +++ b/src/test/autocomplete/base.test.ts @@ -176,9 +176,7 @@ suite('Autocomplete', () => { }); // https://github.com/Microsoft/vscode-python/issues/110 - test('Suppress in strings/comments', done => { - let textEditor: vscode.TextEditor; - let textDocument: vscode.TextDocument; + test('Suppress in strings/comments', async () => { const positions = [ new vscode.Position(0, 1), // false new vscode.Position(0, 9), // true @@ -194,20 +192,13 @@ suite('Autocomplete', () => { const expected = [ false, true, false, false, false, false, false, false, false, false ]; - vscode.workspace.openTextDocument(fileSuppress).then(document => { - textDocument = document; - return vscode.window.showTextDocument(textDocument); - }).then(editor => { - assert(vscode.window.activeTextEditor, 'No active editor'); - textEditor = editor; - for (let i = 0; i < positions.length; i += 1) { - vscode.commands.executeCommand('vscode.executeCompletionItemProvider', - textDocument.uri, positions[i]).then(list => { - const result = list.items.filter(item => item.label === 'abs').length; - assert.equal(result > 0, expected[i], - `Expected ${expected[i]} at position ${positions[i].line}:${positions[i].character} but got ${result}`); - }); - } - }).then(done, done); + const textDocument = await vscode.workspace.openTextDocument(fileSuppress); + await vscode.window.showTextDocument(textDocument); + for (let i = 0; i < positions.length; i += 1) { + const list = await vscode.commands.executeCommand('vscode.executeCompletionItemProvider', textDocument.uri, positions[i]); + 
const result = list.items.filter(item => item.label === 'abs').length; + assert.equal(result > 0, expected[i], + `Expected ${expected[i]} at position ${positions[i].line}:${positions[i].character} but got ${result}`); + } }); }); diff --git a/src/test/index.ts b/src/test/index.ts index 9eb083463586..0202b4e2dc43 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -8,7 +8,6 @@ testRunner.configure({ ui: 'tdd', useColors: true, timeout: 25000, - retries: 3, - grep: "Language.TextRangeCollection" + retries: 3 } as {}); module.exports = testRunner; From db75cd00e892d8721c92df14b7b77ca92d5c2a0a Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 5 Dec 2017 10:22:37 -0800 Subject: [PATCH 014/103] Add license headers --- src/client/language/characterStream.ts | 2 ++ src/test/autocomplete/base.test.ts | 3 +++ src/test/language/characterStream.test.ts | 4 ++++ src/test/language/textIterator.test.ts | 4 ++++ src/test/language/textRange.test.ts | 4 ++++ src/test/language/textRangeCollection.test.ts | 4 ++++ src/test/language/tokenizer.test.ts | 4 ++++ 7 files changed, 25 insertions(+) diff --git a/src/client/language/characterStream.ts b/src/client/language/characterStream.ts index e90ef7f2e6b7..a4da08659a9d 100644 --- a/src/client/language/characterStream.ts +++ b/src/client/language/characterStream.ts @@ -1,3 +1,5 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 'use strict'; // tslint:disable-next-line:import-name diff --git a/src/test/autocomplete/base.test.ts b/src/test/autocomplete/base.test.ts index e4b8b854570d..1873f86f0776 100644 --- a/src/test/autocomplete/base.test.ts +++ b/src/test/autocomplete/base.test.ts @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + // Note: This example test is leveraging the Mocha test framework. // Please refer to their documentation on https://mochajs.org/ for help. diff --git a/src/test/language/characterStream.test.ts b/src/test/language/characterStream.test.ts index d1c003d6b2d9..165fdde51ae9 100644 --- a/src/test/language/characterStream.test.ts +++ b/src/test/language/characterStream.test.ts @@ -1,3 +1,7 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +'use strict'; + import * as assert from 'assert'; // tslint:disable-next-line:import-name import Char from 'typescript-char'; diff --git a/src/test/language/textIterator.test.ts b/src/test/language/textIterator.test.ts index dddfe3478ec5..34daa81534cd 100644 --- a/src/test/language/textIterator.test.ts +++ b/src/test/language/textIterator.test.ts @@ -1,3 +1,7 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +'use strict'; + import * as assert from 'assert'; import { TextIterator } from '../../client/language/textIterator'; diff --git a/src/test/language/textRange.test.ts b/src/test/language/textRange.test.ts index 4e0da8feb06c..fecf287032a0 100644 --- a/src/test/language/textRange.test.ts +++ b/src/test/language/textRange.test.ts @@ -1,3 +1,7 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
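[Note on PATCH 012 above: JavaScript's `/` is always floating-point division, so the textbook integer midpoint silently produces fractional array indices. A minimal illustration in plain TypeScript, not code from this series:]

```ts
// Why PATCH 012 wraps the midpoint in Math.floor: without it the binary
// search can probe a fractional index, and items[1.5] is simply undefined.
const items = [10, 20, 30, 40];
const min = 0;
const max = items.length - 1;                   // 3

const badMid = min + (max - min) / 2;           // 1.5, not a valid index
const mid = Math.floor(min + (max - min) / 2);  // 1

console.log(items[badMid]);  // undefined: the search never finds its target
console.log(items[mid]);     // 20
```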
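[PATCH 013 above is worth pausing on: the original 'Suppress in strings/comments' test fired its completion requests with un-awaited .then() callbacks inside a loop, so done() could be called before any assertion ran and failures were silently lost. A condensed sketch of the two shapes, assuming only the documented vscode API (vscode.CompletionList is the declared result of the executeCompletionItemProvider command):]

```ts
import * as assert from 'assert';
import * as vscode from 'vscode';

// Before (simplified): the .then() callbacks are not chained into the test's
// promise, so mocha can report success before a single assertion executes.
function verifyAllBroken(uri: vscode.Uri, positions: vscode.Position[], expected: boolean[]) {
    for (let i = 0; i < positions.length; i += 1) {
        vscode.commands.executeCommand<vscode.CompletionList>(
            'vscode.executeCompletionItemProvider', uri, positions[i])
            .then(list => assert.equal(list!.items.length > 0, expected[i]));
    }
}

// After: each request is awaited, so every assertion runs inside the test
// and a failure rejects the test's own promise.
async function verifyAll(uri: vscode.Uri, positions: vscode.Position[], expected: boolean[]): Promise<void> {
    for (let i = 0; i < positions.length; i += 1) {
        const list = await vscode.commands.executeCommand<vscode.CompletionList>(
            'vscode.executeCompletionItemProvider', uri, positions[i]);
        assert.equal(list!.items.length > 0, expected[i]);
    }
}
```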
+'use strict';
+
 import * as assert from 'assert';
 import { TextRange } from '../../client/language/definitions';
 
diff --git a/src/test/language/textRangeCollection.test.ts b/src/test/language/textRangeCollection.test.ts
index 44666dd811ce..5c56c3f139c7 100644
--- a/src/test/language/textRangeCollection.test.ts
+++ b/src/test/language/textRangeCollection.test.ts
@@ -1,3 +1,7 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+'use strict';
+
 import * as assert from 'assert';
 import { TextRange } from '../../client/language/definitions';
 import { TextRangeCollection } from '../../client/language/textRangeCollection';
diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts
index 1ce0165e4241..139441aeea81 100644
--- a/src/test/language/tokenizer.test.ts
+++ b/src/test/language/tokenizer.test.ts
@@ -1,3 +1,7 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+'use strict';
+
 import * as assert from 'assert';
 import { TextRange, TokenType } from '../../client/language/definitions';
 import { TextRangeCollection } from '../../client/language/textRangeCollection';
 import { Tokenizer } from '../../client/language/tokenizer';

From 9ab2c47a609629570fc387e3b3697cd60e8397e4 Mon Sep 17 00:00:00 2001
From: MikhailArkhipov
Date: Tue, 5 Dec 2017 10:35:58 -0800
Subject: [PATCH 015/103] Rename definitions to types

---
 src/client/language/characterStream.ts           | 2 +-
 src/client/language/textIterator.ts              | 2 +-
 src/client/language/textRangeCollection.ts       | 2 +-
 src/client/language/tokenizer.ts                 | 2 +-
 src/client/language/{definitions.ts => types.ts} | 0
 src/client/providers/completionProvider.ts       | 2 +-
 src/test/language/characterStream.test.ts        | 2 +-
 src/test/language/textRange.test.ts              | 2 +-
 src/test/language/textRangeCollection.test.ts    | 2 +-
 src/test/language/tokenizer.test.ts              | 2 +-
 10 files changed, 9 insertions(+), 9 deletions(-)
 rename src/client/language/{definitions.ts => types.ts} (100%)

diff --git a/src/client/language/characterStream.ts b/src/client/language/characterStream.ts
index a4da08659a9d..a95af7ede457 100644
--- a/src/client/language/characterStream.ts
+++ b/src/client/language/characterStream.ts
@@ -4,8 +4,8 @@
 
 // tslint:disable-next-line:import-name
 import Char from 'typescript-char';
-import { ICharacterStream, ITextIterator } from './definitions';
 import { TextIterator } from './textIterator';
+import { ICharacterStream, ITextIterator } from './types';
 
 export class CharacterStream implements ICharacterStream {
     private text: ITextIterator;
diff --git a/src/client/language/textIterator.ts b/src/client/language/textIterator.ts
index 927078da3939..3984dfbe3458 100644
--- a/src/client/language/textIterator.ts
+++ b/src/client/language/textIterator.ts
@@ -1,7 +1,7 @@
 'use strict';
 
 import { Position, Range, TextDocument } from 'vscode';
-import { ITextIterator } from './definitions';
+import { ITextIterator } from './types';
 
 export class TextIterator implements ITextIterator {
     private text: string;
diff --git a/src/client/language/textRangeCollection.ts b/src/client/language/textRangeCollection.ts
index d09becea902f..47983f8fe0cb 100644
--- a/src/client/language/textRangeCollection.ts
+++ b/src/client/language/textRangeCollection.ts
@@ -1,6 +1,6 @@
 'use strict';
 
-import { ITextRange, ITextRangeCollection } from './definitions';
+import { ITextRange, ITextRangeCollection } from './types';
 
 export class TextRangeCollection<T extends ITextRange> implements ITextRangeCollection<T> {
     private items: T[];
diff --git a/src/client/language/tokenizer.ts
b/src/client/language/tokenizer.ts index 6d63f4168414..8b122da7c346 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -3,8 +3,8 @@ // tslint:disable-next-line:import-name import Char from 'typescript-char'; import { CharacterStream } from './characterStream'; -import { ICharacterStream, ITextRangeCollection, IToken, ITokenizer, TextRange, TokenType } from './definitions'; import { TextRangeCollection } from './textRangeCollection'; +import { ICharacterStream, ITextRangeCollection, IToken, ITokenizer, TextRange, TokenType } from './types'; enum QuoteType { None, diff --git a/src/client/language/definitions.ts b/src/client/language/types.ts similarity index 100% rename from src/client/language/definitions.ts rename to src/client/language/types.ts diff --git a/src/client/providers/completionProvider.ts b/src/client/providers/completionProvider.ts index 10c930348e33..b6d62c340dd7 100644 --- a/src/client/providers/completionProvider.ts +++ b/src/client/providers/completionProvider.ts @@ -3,8 +3,8 @@ import * as vscode from 'vscode'; import { Position, ProviderResult, SnippetString, Uri } from 'vscode'; import { PythonSettings } from '../common/configSettings'; -import { TokenType } from '../language/definitions'; import { Tokenizer } from '../language/tokenizer'; +import { TokenType } from '../language/types'; import { JediFactory } from '../languageServices/jediProxyFactory'; import { captureTelemetry } from '../telemetry'; import { COMPLETION } from '../telemetry/constants'; diff --git a/src/test/language/characterStream.test.ts b/src/test/language/characterStream.test.ts index 165fdde51ae9..63ea71f01746 100644 --- a/src/test/language/characterStream.test.ts +++ b/src/test/language/characterStream.test.ts @@ -6,8 +6,8 @@ import * as assert from 'assert'; // tslint:disable-next-line:import-name import Char from 'typescript-char'; import { CharacterStream } from '../../client/language/characterStream'; -import { ICharacterStream, TextRange } from '../../client/language/definitions'; import { TextIterator } from '../../client/language/textIterator'; +import { ICharacterStream, TextRange } from '../../client/language/types'; // tslint:disable-next-line:max-func-body-length suite('Language.CharacterStream', () => { diff --git a/src/test/language/textRange.test.ts b/src/test/language/textRange.test.ts index fecf287032a0..02cad753c16f 100644 --- a/src/test/language/textRange.test.ts +++ b/src/test/language/textRange.test.ts @@ -3,7 +3,7 @@ 'use strict'; import * as assert from 'assert'; -import { TextRange } from '../../client/language/definitions'; +import { TextRange } from '../../client/language/types'; // tslint:disable-next-line:max-func-body-length suite('Language.TextRange', () => { diff --git a/src/test/language/textRangeCollection.test.ts b/src/test/language/textRangeCollection.test.ts index 5c56c3f139c7..32522e63c778 100644 --- a/src/test/language/textRangeCollection.test.ts +++ b/src/test/language/textRangeCollection.test.ts @@ -3,8 +3,8 @@ 'use strict'; import * as assert from 'assert'; -import { TextRange } from '../../client/language/definitions'; import { TextRangeCollection } from '../../client/language/textRangeCollection'; +import { TextRange } from '../../client/language/types'; // tslint:disable-next-line:max-func-body-length suite('Language.TextRangeCollection', () => { diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts index 139441aeea81..7642b88acfaa 100644 --- a/src/test/language/tokenizer.test.ts +++ 
b/src/test/language/tokenizer.test.ts @@ -3,9 +3,9 @@ 'use strict'; import * as assert from 'assert'; -import { TextRange, TokenType } from '../../client/language/definitions'; import { TextRangeCollection } from '../../client/language/textRangeCollection'; import { Tokenizer } from '../../client/language/tokenizer'; +import { TextRange, TokenType } from '../../client/language/types'; // tslint:disable-next-line:max-func-body-length suite('Language.Tokenizer', () => { From d587485696c15d2a5dec35fdc9f317dfce3b9a39 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 5 Dec 2017 12:02:27 -0800 Subject: [PATCH 016/103] License headers --- src/client/language/textIterator.ts | 2 ++ src/client/language/textRangeCollection.ts | 2 ++ src/client/language/tokenizer.ts | 2 ++ src/client/language/types.ts | 2 ++ 4 files changed, 8 insertions(+) diff --git a/src/client/language/textIterator.ts b/src/client/language/textIterator.ts index 3984dfbe3458..d5eda4783e2c 100644 --- a/src/client/language/textIterator.ts +++ b/src/client/language/textIterator.ts @@ -1,3 +1,5 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 'use strict'; import { Position, Range, TextDocument } from 'vscode'; diff --git a/src/client/language/textRangeCollection.ts b/src/client/language/textRangeCollection.ts index 47983f8fe0cb..8ce5a744c9a6 100644 --- a/src/client/language/textRangeCollection.ts +++ b/src/client/language/textRangeCollection.ts @@ -1,3 +1,5 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 'use strict'; import { ITextRange, ITextRangeCollection } from './types'; diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index 8b122da7c346..60d9fadc7e2e 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -1,3 +1,5 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 'use strict'; // tslint:disable-next-line:import-name diff --git a/src/client/language/types.ts b/src/client/language/types.ts index d001adccbd88..121ee682c085 100644 --- a/src/client/language/types.ts +++ b/src/client/language/types.ts @@ -1,3 +1,5 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
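[The modules being renamed and relicensed here exist to serve the behavior tested back in PATCH 010/013: completions are suppressed when the caret sits inside a string or comment. The real check tokenizes the document up to the caret; the toy scanner below is purely illustrative (its names are invented for this note and it deliberately ignores triple-quoted strings), but it shows the idea and reproduces the expectations for the first line of suppress.py:]

```ts
// Toy model of the suppression check, not the extension's tokenizer.
enum TokenKind { Code, Comment, String }

function kindAt(text: string, offset: number): TokenKind {
    let kind = TokenKind.Code;
    let quote = '';
    for (let i = 0; i < offset && i < text.length; i += 1) {
        const ch = text[i];
        if (kind === TokenKind.Code) {
            if (ch === '#') {
                kind = TokenKind.Comment;
            } else if (ch === '\'' || ch === '"') {
                kind = TokenKind.String;
                quote = ch;
            }
        } else if (kind === TokenKind.Comment && ch === '\n') {
            kind = TokenKind.Code;
        } else if (kind === TokenKind.String && ch === quote) {
            kind = TokenKind.Code;
        }
    }
    return kind;
}

// '"string" #comment' (line 0 of suppress.py):
// kindAt(line, 1)  === TokenKind.String   -> suppress (expected: false)
// kindAt(line, 9)  === TokenKind.Code     -> allow    (expected: true)
// kindAt(line, 12) === TokenKind.Comment  -> suppress (expected: false)
```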
'use strict'; export interface ITextRange { From 1ac4932c51f35f95aaf4067b40155d5b18ad3f49 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 11 Dec 2017 14:57:37 -0800 Subject: [PATCH 017/103] Fix typo in completion details (typo) --- src/client/providers/itemInfoSource.ts | 2 +- src/test/index.ts | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/client/providers/itemInfoSource.ts b/src/client/providers/itemInfoSource.ts index 9dc906e23580..152bf10cc48f 100644 --- a/src/client/providers/itemInfoSource.ts +++ b/src/client/providers/itemInfoSource.ts @@ -117,7 +117,7 @@ export class ItemInfoSource { } const descriptionWithHighlightedCode = this.highlightCode(dnd[1]); - const tooltip = new vscode.MarkdownString(['y```python', signature, '```', descriptionWithHighlightedCode].join(EOL)); + const tooltip = new vscode.MarkdownString(['```python', signature, '```', descriptionWithHighlightedCode].join(EOL)); infos.push(new LanguageItemInfo(tooltip, dnd[0], new vscode.MarkdownString(dnd[1]))); const key = signature + lines.join(''); diff --git a/src/test/index.ts b/src/test/index.ts index eebaa9b59c8d..4d3b12a351ca 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -12,8 +12,7 @@ const options: MochaSetupOptions & { retries: number } = { ui: 'tdd', useColors: true, timeout: 25000, - retries: 3, - grep: 'Autocomplete' + retries: 3 }; testRunner.configure(options); module.exports = testRunner; From 2aa5a6c32b0e1c8df853f65acb7ceb27bd519f78 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 11 Dec 2017 17:06:59 -0800 Subject: [PATCH 018/103] Fix hover test --- src/client/providers/itemInfoSource.ts | 2 +- src/test/definitions/hover.test.ts | 70 ++++++++++++++------------ 2 files changed, 40 insertions(+), 32 deletions(-) diff --git a/src/client/providers/itemInfoSource.ts b/src/client/providers/itemInfoSource.ts index 152bf10cc48f..3cb471959bac 100644 --- a/src/client/providers/itemInfoSource.ts +++ b/src/client/providers/itemInfoSource.ts @@ -116,7 +116,7 @@ export class ItemInfoSource { lines.shift(); } - const descriptionWithHighlightedCode = this.highlightCode(dnd[1]); + const descriptionWithHighlightedCode = this.highlightCode(lines.join(EOL)); const tooltip = new vscode.MarkdownString(['```python', signature, '```', descriptionWithHighlightedCode].join(EOL)); infos.push(new LanguageItemInfo(tooltip, dnd[0], new vscode.MarkdownString(dnd[1]))); diff --git a/src/test/definitions/hover.test.ts b/src/test/definitions/hover.test.ts index 07f06b8843ca..adc7832b80fc 100644 --- a/src/test/definitions/hover.test.ts +++ b/src/test/definitions/hover.test.ts @@ -1,15 +1,14 @@ // Note: This example test is leveraging the Mocha test framework. // Please refer to their documentation on https://mochajs.org/ for help. 
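[Both itemInfoSource.ts fixes in this stretch touch the same few characters of output: PATCH 017 removes the stray 'y' in front of the fence, and PATCH 018 highlights the remaining docstring lines rather than the raw description. For orientation, a sketch that mirrors the fixed join (fence is built with repeat() only so this snippet nests cleanly in this note; the real code embeds the backticks literally):]

```ts
import { EOL } from 'os';
import * as vscode from 'vscode';

const fence = '`'.repeat(3);

// The signature rides inside a fenced python block; the already highlighted
// description follows on its own lines, exactly as in the fixed code above.
function buildTooltip(signature: string, descriptionWithHighlightedCode: string): vscode.MarkdownString {
    return new vscode.MarkdownString([`${fence}python`, signature, fence, descriptionWithHighlightedCode].join(EOL));
}
```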
- // The module 'assert' provides assertion methods from node import * as assert from 'assert'; import { EOL } from 'os'; // You can import and use all API from the 'vscode' module // as well as import your extension to test it -import * as vscode from 'vscode'; import * as path from 'path'; -import { initialize, closeActiveWindows, initializeTest } from '../initialize'; +import * as vscode from 'vscode'; +import { closeActiveWindows, initialize, initializeTest } from '../initialize'; import { normalizeMarkedString } from '../textUtils'; const autoCompPath = path.join(__dirname, '..', '..', '..', 'src', 'test', 'pythonFiles', 'autocomp'); @@ -21,11 +20,12 @@ const fileEncodingUsed = path.join(autoCompPath, 'five.py'); const fileHover = path.join(autoCompPath, 'hoverTest.py'); const fileStringFormat = path.join(hoverPath, 'stringFormat.py'); +// tslint:disable-next-line:max-func-body-length suite('Hover Definition', () => { - suiteSetup(() => initialize()); - setup(() => initializeTest()); - suiteTeardown(() => closeActiveWindows()); - teardown(() => closeActiveWindows()); + suiteSetup(initialize); + setup(initializeTest); + suiteTeardown(closeActiveWindows); + teardown(closeActiveWindows); test('Method', done => { let textEditor: vscode.TextEditor; @@ -43,6 +43,7 @@ suite('Hover Definition', () => { assert.equal(`${def[0].range.start.line},${def[0].range.start.character}`, '30,4', 'Start position is incorrect'); assert.equal(`${def[0].range.end.line},${def[0].range.end.character}`, '30,11', 'End position is incorrect'); assert.equal(def[0].contents.length, 1, 'Invalid content items'); + // tslint:disable-next-line:prefer-template const expectedContent = '```python' + EOL + 'def method1()' + EOL + '```' + EOL + 'This is method1'; assert.equal(normalizeMarkedString(def[0].contents[0]), expectedContent, 'function signature incorrect'); }).then(done, done); @@ -63,6 +64,7 @@ suite('Hover Definition', () => { assert.equal(def.length, 1, 'Definition length is incorrect'); assert.equal(`${def[0].range.start.line},${def[0].range.start.character}`, '1,9', 'Start position is incorrect'); assert.equal(`${def[0].range.end.line},${def[0].range.end.character}`, '1,12', 'End position is incorrect'); + // tslint:disable-next-line:prefer-template assert.equal(normalizeMarkedString(def[0].contents[0]), '```python' + EOL + 'def fun()' + EOL + '```' + EOL + 'This is fun', 'Invalid conents'); }).then(done, done); }); @@ -82,6 +84,7 @@ suite('Hover Definition', () => { assert.equal(def.length, 1, 'Definition length is incorrect'); assert.equal(`${def[0].range.start.line},${def[0].range.start.character}`, '25,4', 'Start position is incorrect'); assert.equal(`${def[0].range.end.line},${def[0].range.end.character}`, '25,7', 'End position is incorrect'); + // tslint:disable-next-line:prefer-template assert.equal(normalizeMarkedString(def[0].contents[0]), '```python' + EOL + 'def bar()' + EOL + '```' + EOL + '说明 - keep this line, it works' + EOL + 'delete following line, it works' + EOL + '如果存在需要等待审批或正在执行的任务,将不刷新页面', 'Invalid conents'); @@ -103,6 +106,7 @@ suite('Hover Definition', () => { assert.equal(def.length, 1, 'Definition length is incorrect'); assert.equal(`${def[0].range.start.line},${def[0].range.start.character}`, '1,5', 'Start position is incorrect'); assert.equal(`${def[0].range.end.line},${def[0].range.end.character}`, '1,16', 'End position is incorrect'); + // tslint:disable-next-line:prefer-template assert.equal(normalizeMarkedString(def[0].contents[0]), '```python' + EOL + 'def showMessage()' + EOL 
+ '```' + EOL + @@ -158,19 +162,20 @@ suite('Hover Definition', () => { assert.equal(def.length, 1, 'Definition length is incorrect'); assert.equal(`${def[0].range.start.line},${def[0].range.start.character}`, '11,12', 'Start position is incorrect'); assert.equal(`${def[0].range.end.line},${def[0].range.end.character}`, '11,18', 'End position is incorrect'); - let documentation = "```python" + EOL + - "class Random(x=None)" + EOL + - "```" + EOL + - "Random number generator base class used by bound module functions." + EOL + - "" + EOL + - "Used to instantiate instances of Random to get generators that don't" + EOL + - "share state." + EOL + - "" + EOL + - "Class Random can also be subclassed if you want to use a different basic" + EOL + - "generator of your own devising: in that case, override the following" + EOL + EOL + - "`methods` random(), seed(), getstate(), and setstate()." + EOL + EOL + - "Optionally, implement a getrandbits() method so that randrange()" + EOL + - "can cover arbitrarily large ranges."; + // tslint:disable-next-line:prefer-template + const documentation = '```python' + EOL + + 'class Random(x=None)' + EOL + + '```' + EOL + + 'Random number generator base class used by bound module functions.' + EOL + + '' + EOL + + 'Used to instantiate instances of Random to get generators that don\'t' + EOL + + 'share state.' + EOL + + '' + EOL + + 'Class Random can also be subclassed if you want to use a different basic' + EOL + + 'generator of your own devising: in that case, override the following' + EOL + EOL + + '`methods` random(), seed(), getstate(), and setstate().' + EOL + EOL + + 'Optionally, implement a getrandbits() method so that randrange()' + EOL + + 'can cover arbitrarily large ranges.'; assert.equal(normalizeMarkedString(def[0].contents[0]), documentation, 'Invalid conents'); }).then(done, done); @@ -191,6 +196,7 @@ suite('Hover Definition', () => { assert.equal(def.length, 1, 'Definition length is incorrect'); assert.equal(`${def[0].range.start.line},${def[0].range.start.character}`, '12,5', 'Start position is incorrect'); assert.equal(`${def[0].range.end.line},${def[0].range.end.character}`, '12,12', 'End position is incorrect'); + // tslint:disable-next-line:prefer-template assert.equal(normalizeMarkedString(def[0].contents[0]), '```python' + EOL + 'def randint(a, b)' + EOL + '```' + EOL + @@ -213,6 +219,7 @@ suite('Hover Definition', () => { assert.equal(def.length, 1, 'Definition length is incorrect'); assert.equal(`${def[0].range.start.line},${def[0].range.start.character}`, '8,11', 'Start position is incorrect'); assert.equal(`${def[0].range.end.line},${def[0].range.end.character}`, '8,15', 'End position is incorrect'); + // tslint:disable-next-line:prefer-template assert.equal(normalizeMarkedString(def[0].contents[0]), '```python' + EOL + 'def acos(x)' + EOL + '```' + EOL + @@ -235,6 +242,7 @@ suite('Hover Definition', () => { assert.equal(def.length, 1, 'Definition length is incorrect'); assert.equal(`${def[0].range.start.line},${def[0].range.start.character}`, '14,9', 'Start position is incorrect'); assert.equal(`${def[0].range.end.line},${def[0].range.end.character}`, '14,15', 'End position is incorrect'); + // tslint:disable-next-line:prefer-template assert.equal(normalizeMarkedString(def[0].contents[0]), '```python' + EOL + 'class Thread(group=None, target=None, name=None, args=(), kwargs=None, verbose=None)' + EOL + '```' + EOL + @@ -262,14 +270,14 @@ suite('Hover Definition', () => { assert.equal(def.length, 1, 'Definition length is incorrect'); 
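[Note how every expected string in these hover tests is stitched together with os.EOL instead of a hard-coded '\n'. That is presumably deliberate: the text under comparison comes back with platform line endings, so a literal '\n' would pass on Linux and macOS but fail on Windows. The pattern in isolation (fence again assembled with repeat() so the snippet nests here):]

```ts
import { EOL } from 'os';

// EOL is '\n' on Linux/macOS and '\r\n' on Windows; joining with it keeps the
// expected text in step with whatever the hover provider emits locally.
const fence = '`'.repeat(3);
const expectedTooltip = [`${fence}python`, 'def method1()', fence, 'This is method1'].join(EOL);
```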
assert.equal(def[0].contents.length, 1, 'Only expected one result'); const contents = normalizeMarkedString(def[0].contents[0]); - if (contents.indexOf("```python") === -1) { - assert.fail(contents, "", "First line is incorrect", "compare"); + if (contents.indexOf('```python') === -1) { + assert.fail(contents, '', 'First line is incorrect', 'compare'); } - if (contents.indexOf("Random number generator base class used by bound module functions.") === -1) { - assert.fail(contents, "", "'Random number generator' message missing", "compare"); + if (contents.indexOf('Random number generator base class used by bound module functions.') === -1) { + assert.fail(contents, '', '\'Random number generator\' message missing', 'compare'); } - if (contents.indexOf("Class Random can also be subclassed if you want to use a different basic") === -1) { - assert.fail(contents, "", "'Class Random message' missing", "compare"); + if (contents.indexOf('Class Random can also be subclassed if you want to use a different basic') === -1) { + assert.fail(contents, '', '\'Class Random message\' missing', 'compare'); } }).then(done, done); }); @@ -282,12 +290,12 @@ suite('Hover Definition', () => { assert.equal(def.length, 1, 'Definition length is incorrect'); assert.equal(def[0].contents.length, 1, 'Only expected one result'); const contents = normalizeMarkedString(def[0].contents[0]); - if (contents.indexOf("def capitalize") === -1) { - assert.fail(contents, "", "'def capitalize' is missing", "compare"); + if (contents.indexOf('def capitalize') === -1) { + assert.fail(contents, '', '\'def capitalize\' is missing', 'compare'); } - if (contents.indexOf("Return a capitalized version of S") === -1 && - contents.indexOf("Return a copy of the string S with only its first character") === -1) { - assert.fail(contents, "", "'Return a capitalized version of S/Return a copy of the string S with only its first character' message missing", "compare"); + if (contents.indexOf('Return a capitalized version of S') === -1 && + contents.indexOf('Return a copy of the string S with only its first character') === -1) { + assert.fail(contents, '', '\'Return a capitalized version of S/Return a copy of the string S with only its first character\' message missing', 'compare'); } }); }); From 560d2af430b6188a78b086b51d4d2d669f685c61 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 12 Dec 2017 16:10:28 -0800 Subject: [PATCH 019/103] Russian translations --- package.nls.ru.json | 50 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 package.nls.ru.json diff --git a/package.nls.ru.json b/package.nls.ru.json new file mode 100644 index 000000000000..4b082501ca78 --- /dev/null +++ b/package.nls.ru.json @@ -0,0 +1,50 @@ +{ + "python.command.python.sortImports.title": "Отсортировать Imports", + "python.command.python.startREPL.title": "Открыть REPL", + "python.command.python.buildWorkspaceSymbols.title": "Собрать символы рабочего пространства", + "python.command.python.runtests.title": "Запустить все тесты", + "python.command.python.debugtests.title": "Запустить все тесты под отладчиком", + "python.command.python.execInTerminal.title": "Выполнить файл в консоли", + "python.command.python.setInterpreter.title": "Выбрать интерпретатор", + "python.command.python.updateSparkLibrary.title": "Обновить библиотеки PySpark", + "python.command.python.refactorExtractVariable.title": "Создать переменную", + "python.command.python.refactorExtractMethod.title": "Создать метод", + 
"python.command.python.viewTestOutput.title": "Показать вывод теста", + "python.command.python.selectAndRunTestMethod.title": "Запусть тестовый метод...", + "python.command.python.selectAndDebugTestMethod.title": "Отладить тестовый метод...", + "python.command.python.selectAndRunTestFile.title": "Запустить тестовый файл...", + "python.command.python.runCurrentTestFile.title": "Запустить текущий тестовый файл", + "python.command.python.runFailedTests.title": "Запустить непрошедшие тесты", + "python.command.python.execSelectionInTerminal.title": "Выполнить выбранный текст или текущую строку в консоли", + "python.command.python.execSelectionInDjangoShell.title": "Выполнить выбранный текст или текущую строку в оболочке Django", + "python.command.jupyter.runSelectionLine.title": "Выполнить выбранный текст или текущую строку", + "python.command.jupyter.execCurrentCell.title": "Выполнить ячейку", + "python.command.jupyter.execCurrentCellAndAdvance.title": "Выполнить ячейку и перейти к следующей", + "python.command.jupyter.gotToPreviousCell.title": "Перейти к предыдущей ячейке", + "python.command.jupyter.gotToNextCell.title": "Перейти к следующей ячейке", + "python.command.python.goToPythonObject.title": "Перейти к объекту Python", + "python.snippet.launch.standard.label": "Python", + "python.snippet.launch.standard.description": "Отладить программу Python со стандартным выводом", + "python.snippet.launch.pyspark.label": "Python: PySpark", + "python.snippet.launch.pyspark.description": "Отладка PySpark", + "python.snippet.launch.module.label": "Python: Модуль", + "python.snippet.launch.module.description": "Отладка модуля", + "python.snippet.launch.terminal.label": "Python: Интегрированная консоль", + "python.snippet.launch.terminal.description": "Отладка программы Python в интегрированной консоли", + "python.snippet.launch.externalTerminal.label": "Python: Внешний терминал", + "python.snippet.launch.externalTerminal.description": "Отладка программы Python во внешней консоли", + "python.snippet.launch.django.label": "Python: Django", + "python.snippet.launch.django.description": "Отладка приложения Django", + "python.snippet.launch.flask.label": "Python: Flask (0.11.x или новее)", + "python.snippet.launch.flask.description": "Отладка приложения Flask", + "python.snippet.launch.flaskOld.label": "Python: Flask (0.10.x или старее)", + "python.snippet.launch.flaskOld.description": "Отладка приложения Flask (старый стиль)", + "python.snippet.launch.pyramid.label": "Python: Приложение Pyramid", + "python.snippet.launch.pyramid.description": "Отладка приложения Pyramid", + "python.snippet.launch.watson.label": "Python: Приложение Watson", + "python.snippet.launch.watson.description": "Отладка приложения Watson", + "python.snippet.launch.attach.label": "Python: Подключить отладчик", + "python.snippet.launch.attach.description": "Подключить отладчик для удаленной отладки", + "python.snippet.launch.scrapy.label": "Python: Scrapy", + "python.snippet.launch.scrapy.description": "Scrapy в интергрированной консоли" +} From 31aa087843c9bb84ca29c322110828909256ee15 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 13 Dec 2017 10:32:54 -0800 Subject: [PATCH 020/103] Update to better translation --- package.nls.ru.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/package.nls.ru.json b/package.nls.ru.json index 4b082501ca78..1ab5894793da 100644 --- a/package.nls.ru.json +++ b/package.nls.ru.json @@ -7,8 +7,8 @@ "python.command.python.execInTerminal.title": "Выполнить файл в консоли", 
"python.command.python.setInterpreter.title": "Выбрать интерпретатор", "python.command.python.updateSparkLibrary.title": "Обновить библиотеки PySpark", - "python.command.python.refactorExtractVariable.title": "Создать переменную", - "python.command.python.refactorExtractMethod.title": "Создать метод", + "python.command.python.refactorExtractVariable.title": "Извлечь в переменную", + "python.command.python.refactorExtractMethod.title": "Извлечь в метод", "python.command.python.viewTestOutput.title": "Показать вывод теста", "python.command.python.selectAndRunTestMethod.title": "Запусть тестовый метод...", "python.command.python.selectAndDebugTestMethod.title": "Отладить тестовый метод...", From 593ae0558c05398ca818ad033f06a4d10587ec2c Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 13 Dec 2017 10:34:13 -0800 Subject: [PATCH 021/103] Fix typo --- package.nls.ru.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.nls.ru.json b/package.nls.ru.json index 1ab5894793da..e0e22abbc06d 100644 --- a/package.nls.ru.json +++ b/package.nls.ru.json @@ -46,5 +46,5 @@ "python.snippet.launch.attach.label": "Python: Подключить отладчик", "python.snippet.launch.attach.description": "Подключить отладчик для удаленной отладки", "python.snippet.launch.scrapy.label": "Python: Scrapy", - "python.snippet.launch.scrapy.description": "Scrapy в интергрированной консоли" + "python.snippet.launch.scrapy.description": "Scrapy в интегрированной консоли" } From e6d69bb7a88eacae85b09a9ebab0026c707fb239 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 13 Dec 2017 14:45:16 -0800 Subject: [PATCH 022/103] #70 How to get all parameter info when filling in a function param list --- src/client/providers/signatureProvider.ts | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/client/providers/signatureProvider.ts b/src/client/providers/signatureProvider.ts index bf6480a4d3a2..af2ba64d9692 100644 --- a/src/client/providers/signatureProvider.ts +++ b/src/client/providers/signatureProvider.ts @@ -1,5 +1,6 @@ 'use strict'; +import { EOL } from 'os'; import * as vscode from 'vscode'; import { CancellationToken, Position, SignatureHelp, TextDocument } from 'vscode'; import { JediFactory } from '../languageServices/jediProxyFactory'; @@ -55,8 +56,13 @@ export class PythonSignatureProvider implements vscode.SignatureHelpProvider { signature.activeParameter = def.paramindex; // Don't display the documentation, as vs code doesn't format the docmentation. // i.e. line feeds are not respected, long content is stripped. + const docLines = def.docstring.splitLines(); + const label = docLines[0].trim(); + const documentation = docLines.length > 1 ? docLines.filter((line, index) => index > 0).join(EOL) : ''; + const sig = { - label: def.description, + label: label, + documentation: documentation, parameters: [] }; sig.parameters = def.params.map(arg => { @@ -65,7 +71,7 @@ export class PythonSignatureProvider implements vscode.SignatureHelpProvider { } return { documentation: arg.docstring.length > 0 ? arg.docstring : arg.description, - label: arg.description.length > 0 ? arg.description : arg.name + label: arg.name }; }); signature.signatures.push(sig); @@ -85,7 +91,7 @@ export class PythonSignatureProvider implements vscode.SignatureHelpProvider { source: document.getText() }; return this.jediFactory.getJediProxyHandler(document.uri).sendCommand(cmd, token).then(data => { - return PythonSignatureProvider.parseData(data); + return data ? 
PythonSignatureProvider.parseData(data) : undefined; }); } } From b5a23d3f24b5e55edff0838376a962c029d7a4cc Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 13 Dec 2017 16:03:31 -0800 Subject: [PATCH 023/103] Fix #70 How to get all parameter info when filling in a function param list --- src/test/pythonFiles/signature/one.py | 6 ++ src/test/pythonFiles/signature/two.py | 1 + src/test/signature/signature.test.ts | 95 +++++++++++++++++++++++++++ 3 files changed, 102 insertions(+) create mode 100644 src/test/pythonFiles/signature/one.py create mode 100644 src/test/pythonFiles/signature/two.py create mode 100644 src/test/signature/signature.test.ts diff --git a/src/test/pythonFiles/signature/one.py b/src/test/pythonFiles/signature/one.py new file mode 100644 index 000000000000..baa4045489e7 --- /dev/null +++ b/src/test/pythonFiles/signature/one.py @@ -0,0 +1,6 @@ +class Person: + def __init__(self, name, age = 23): + self.name = name + self.age = age + +p1 = Person('Bob', ) diff --git a/src/test/pythonFiles/signature/two.py b/src/test/pythonFiles/signature/two.py new file mode 100644 index 000000000000..beaa970c7eb5 --- /dev/null +++ b/src/test/pythonFiles/signature/two.py @@ -0,0 +1 @@ +pow(c, 1, diff --git a/src/test/signature/signature.test.ts b/src/test/signature/signature.test.ts new file mode 100644 index 000000000000..d3578b62b8b3 --- /dev/null +++ b/src/test/signature/signature.test.ts @@ -0,0 +1,95 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +'use strict'; + +import * as assert from 'assert'; +import { EOL } from 'os'; +import * as path from 'path'; +import * as vscode from 'vscode'; +import { PythonSettings } from '../../client/common/configSettings'; +import { execPythonFile } from '../../client/common/utils'; +import { rootWorkspaceUri } from '../common'; +import { closeActiveWindows, initialize, initializeTest } from '../initialize'; + +const autoCompPath = path.join(__dirname, '..', '..', '..', 'src', 'test', 'pythonFiles', 'signature'); +const fileOne = path.join(autoCompPath, 'one.py'); +const fileTwo = path.join(autoCompPath, 'two.py'); + +class SignatureHelpResult { + constructor( + public line: number, + public index: number, + public signaturesCount: number, + public activeParameter: number, + public parameterName: string | null) { } +} + +// tslint:disable-next-line:max-func-body-length +suite('Signatures', () => { + suiteSetup(async () => { + await initialize(); + const version = await execPythonFile(rootWorkspaceUri, PythonSettings.getInstance(rootWorkspaceUri).pythonPath, ['--version'], __dirname, true); + }); + setup(initializeTest); + suiteTeardown(closeActiveWindows); + teardown(closeActiveWindows); + + test('For ctor', async () => { + const expected = [ + new SignatureHelpResult(5, 11, 0, 0, null), + new SignatureHelpResult(5, 12, 1, 0, 'name'), + new SignatureHelpResult(5, 13, 1, 0, 'name'), + new SignatureHelpResult(5, 14, 1, 0, 'name'), + new SignatureHelpResult(5, 15, 1, 0, 'name'), + new SignatureHelpResult(5, 16, 1, 0, 'name'), + new SignatureHelpResult(5, 17, 1, 0, 'name'), + new SignatureHelpResult(5, 18, 1, 1, 'age'), + new SignatureHelpResult(5, 19, 1, 1, 'age'), + new SignatureHelpResult(5, 20, 0, 0, null) + ]; + + const document = await openDocument(fileOne); + for (const e of expected) { + await checkSignature(e, document!.uri); + } + }); + + test('For intrinsic', async () => { + const expected = [ + new SignatureHelpResult(0, 0, 0, 0, null), + new SignatureHelpResult(0, 1, 0, 0, null), + 
new SignatureHelpResult(0, 2, 0, 0, null), + new SignatureHelpResult(0, 3, 0, 0, null), + new SignatureHelpResult(0, 4, 1, 0, 'x'), + new SignatureHelpResult(0, 5, 1, 0, 'x'), + new SignatureHelpResult(0, 6, 1, 1, 'y'), + new SignatureHelpResult(0, 7, 1, 1, 'y'), + new SignatureHelpResult(0, 8, 1, 1, 'y'), + new SignatureHelpResult(0, 9, 1, 2, 'z'), + new SignatureHelpResult(0, 10, 1, 2, 'z'), + new SignatureHelpResult(1, 0, 1, 2, 'z') + ]; + + const document = await openDocument(fileTwo); + for (const e of expected) { + await checkSignature(e, document!.uri); + } + }); +}); + +async function openDocument(documentPath: string): Promise { + const document = await vscode.workspace.openTextDocument(documentPath); + await vscode.window.showTextDocument(document!); + return document; +} + +async function checkSignature(expected: SignatureHelpResult, uri: vscode.Uri) { + const position = new vscode.Position(expected.line, expected.index); + const actual = await vscode.commands.executeCommand('vscode.executeSignatureHelpProvider', uri, position); + assert.equal(actual!.signatures.length, expected.signaturesCount); + if (expected.signaturesCount > 0) { + assert.equal(actual!.activeParameter, expected.activeParameter); + const parameter = actual!.signatures[0].parameters[expected.activeParameter]; + assert.equal(parameter.label, expected.parameterName); + } +} From cd200f7913a959c472ebfe1e194edc584f4f9249 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 13 Dec 2017 16:05:04 -0800 Subject: [PATCH 024/103] Clean up --- src/test/signature/signature.test.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/test/signature/signature.test.ts b/src/test/signature/signature.test.ts index d3578b62b8b3..0295a49352f1 100644 --- a/src/test/signature/signature.test.ts +++ b/src/test/signature/signature.test.ts @@ -3,7 +3,6 @@ 'use strict'; import * as assert from 'assert'; -import { EOL } from 'os'; import * as path from 'path'; import * as vscode from 'vscode'; import { PythonSettings } from '../../client/common/configSettings'; @@ -28,7 +27,6 @@ class SignatureHelpResult { suite('Signatures', () => { suiteSetup(async () => { await initialize(); - const version = await execPythonFile(rootWorkspaceUri, PythonSettings.getInstance(rootWorkspaceUri).pythonPath, ['--version'], __dirname, true); }); setup(initializeTest); suiteTeardown(closeActiveWindows); From 7c33228d2dd127f485ac7afde0c2776f9e293411 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 13 Dec 2017 16:05:48 -0800 Subject: [PATCH 025/103] Clean imports --- src/test/signature/signature.test.ts | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/test/signature/signature.test.ts b/src/test/signature/signature.test.ts index 0295a49352f1..6cac87049e45 100644 --- a/src/test/signature/signature.test.ts +++ b/src/test/signature/signature.test.ts @@ -5,9 +5,6 @@ import * as assert from 'assert'; import * as path from 'path'; import * as vscode from 'vscode'; -import { PythonSettings } from '../../client/common/configSettings'; -import { execPythonFile } from '../../client/common/utils'; -import { rootWorkspaceUri } from '../common'; import { closeActiveWindows, initialize, initializeTest } from '../initialize'; const autoCompPath = path.join(__dirname, '..', '..', '..', 'src', 'test', 'pythonFiles', 'signature'); From c4a6b90d7d0e47c3dc3259a5001abd3d1fea4ab5 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 14 Dec 2017 09:39:20 -0800 Subject: [PATCH 026/103] CR feedback --- src/client/providers/signatureProvider.ts | 10 
+++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/client/providers/signatureProvider.ts b/src/client/providers/signatureProvider.ts index af2ba64d9692..d86afae0df7e 100644 --- a/src/client/providers/signatureProvider.ts +++ b/src/client/providers/signatureProvider.ts @@ -57,12 +57,12 @@ export class PythonSignatureProvider implements vscode.SignatureHelpProvider { // Don't display the documentation, as vs code doesn't format the docmentation. // i.e. line feeds are not respected, long content is stripped. const docLines = def.docstring.splitLines(); - const label = docLines[0].trim(); - const documentation = docLines.length > 1 ? docLines.filter((line, index) => index > 0).join(EOL) : ''; + const label = docLines.shift().trim(); + const documentation = docLines.join(EOL).trim(); const sig = { - label: label, - documentation: documentation, + label, + documentation, parameters: [] }; sig.parameters = def.params.map(arg => { @@ -91,7 +91,7 @@ export class PythonSignatureProvider implements vscode.SignatureHelpProvider { source: document.getText() }; return this.jediFactory.getJediProxyHandler(document.uri).sendCommand(cmd, token).then(data => { - return data ? PythonSignatureProvider.parseData(data) : undefined; + return data ? PythonSignatureProvider.parseData(data) : new SignatureHelp(); }); } } From f85b848a82c4e522831f060bd7b2c241ca8e5f0b Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 14 Dec 2017 14:55:13 -0800 Subject: [PATCH 027/103] Trim whitespace for test stability --- src/client/providers/signatureProvider.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/providers/signatureProvider.ts b/src/client/providers/signatureProvider.ts index d86afae0df7e..d6f24484cbb7 100644 --- a/src/client/providers/signatureProvider.ts +++ b/src/client/providers/signatureProvider.ts @@ -71,7 +71,7 @@ export class PythonSignatureProvider implements vscode.SignatureHelpProvider { } return { documentation: arg.docstring.length > 0 ? 
arg.docstring : arg.description, - label: arg.name + label: arg.name.trim() }; }); signature.signatures.push(sig); From 37c210ba5fa5f2cfe3cdace9e749ffbe1cb2365d Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 14 Dec 2017 16:53:04 -0800 Subject: [PATCH 028/103] More tests --- pythonFiles/completion.py | 5 --- src/client/providers/signatureProvider.ts | 18 ++++++-- src/test/pythonFiles/signature/three.py | 1 + src/test/pythonFiles/signature/two.py | 2 +- src/test/signature/signature.test.ts | 53 ++++++++++++++++------- 5 files changed, 53 insertions(+), 26 deletions(-) create mode 100644 src/test/pythonFiles/signature/three.py diff --git a/pythonFiles/completion.py b/pythonFiles/completion.py index f072349d0999..ce798d246bab 100644 --- a/pythonFiles/completion.py +++ b/pythonFiles/completion.py @@ -6,7 +6,6 @@ import traceback import platform -WORD_RE = re.compile(r'\w') jediPreview = False class RedirectStdout(object): @@ -111,8 +110,6 @@ def _get_call_signatures(self, script): continue if param.name == 'self' and pos == 0: continue - if WORD_RE.match(param.name) is None: - continue try: name, value = param.description.split('=') except ValueError: @@ -155,8 +152,6 @@ def _get_call_signatures_with_args(self, script): continue if param.name == 'self' and pos == 0: continue - if WORD_RE.match(param.name) is None: - continue try: name, value = param.description.split('=') except ValueError: diff --git a/src/client/providers/signatureProvider.ts b/src/client/providers/signatureProvider.ts index d6f24484cbb7..43caa67cd9cf 100644 --- a/src/client/providers/signatureProvider.ts +++ b/src/client/providers/signatureProvider.ts @@ -54,11 +54,21 @@ export class PythonSignatureProvider implements vscode.SignatureHelpProvider { data.definitions.forEach(def => { signature.activeParameter = def.paramindex; - // Don't display the documentation, as vs code doesn't format the docmentation. + // Don't display the documentation, as vs code doesn't format the documentation. // i.e. line feeds are not respected, long content is stripped. 
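[PATCH 022 and the CR feedback in PATCH 026 settle on a convention the rest of the series leans on: Jedi returns the call signature as the first docstring line (PATCH 028 below even verifies that with startsWith), so the provider peels that line off as the signature label and keeps the remainder as documentation. A stand-alone sketch, with split(/\r?\n/) standing in for the project's splitLines() string helper:]

```ts
import { EOL } from 'os';

// First docstring line becomes the SignatureInformation label,
// everything after it becomes the documentation body.
function splitDocstring(docstring: string): { label: string; documentation: string } {
    const lines = docstring.split(/\r?\n/);      // stand-in for String.splitLines()
    const label = (lines.shift() || '').trim();
    return { label, documentation: lines.join(EOL).trim() };
}

// splitDocstring('range(stop) -> range object\nReturns an iterable ...')
//   -> { label: 'range(stop) -> range object',
//        documentation: 'Returns an iterable ...' }
```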
- const docLines = def.docstring.splitLines(); - const label = docLines.shift().trim(); - const documentation = docLines.join(EOL).trim(); + + // Some functions do not come with parameter docs + let label: string; + let documentation: string; + + if (def.params && def.params.length > 0) { + const docLines = def.docstring.splitLines(); + label = docLines.shift().trim(); + documentation = docLines.join(EOL).trim(); + } else { + label = ''; + documentation = def.docstring; + } const sig = { label, diff --git a/src/test/pythonFiles/signature/three.py b/src/test/pythonFiles/signature/three.py new file mode 100644 index 000000000000..fe666b9ff4c8 --- /dev/null +++ b/src/test/pythonFiles/signature/three.py @@ -0,0 +1 @@ +print(a, b, z) diff --git a/src/test/pythonFiles/signature/two.py b/src/test/pythonFiles/signature/two.py index beaa970c7eb5..ae7a551707b8 100644 --- a/src/test/pythonFiles/signature/two.py +++ b/src/test/pythonFiles/signature/two.py @@ -1 +1 @@ -pow(c, 1, +range(c, 1, diff --git a/src/test/signature/signature.test.ts b/src/test/signature/signature.test.ts index 6cac87049e45..5fa42aee2ea0 100644 --- a/src/test/signature/signature.test.ts +++ b/src/test/signature/signature.test.ts @@ -10,6 +10,7 @@ import { closeActiveWindows, initialize, initializeTest } from '../initialize'; const autoCompPath = path.join(__dirname, '..', '..', '..', 'src', 'test', 'pythonFiles', 'signature'); const fileOne = path.join(autoCompPath, 'one.py'); const fileTwo = path.join(autoCompPath, 'two.py'); +const fileThree = path.join(autoCompPath, 'three.py'); class SignatureHelpResult { constructor( @@ -44,8 +45,8 @@ suite('Signatures', () => { ]; const document = await openDocument(fileOne); - for (const e of expected) { - await checkSignature(e, document!.uri); + for (let i = 0; i < expected.length; i += 1) { + await checkSignature(expected[i], document!.uri, i); } }); @@ -55,19 +56,39 @@ suite('Signatures', () => { new SignatureHelpResult(0, 1, 0, 0, null), new SignatureHelpResult(0, 2, 0, 0, null), new SignatureHelpResult(0, 3, 0, 0, null), - new SignatureHelpResult(0, 4, 1, 0, 'x'), - new SignatureHelpResult(0, 5, 1, 0, 'x'), - new SignatureHelpResult(0, 6, 1, 1, 'y'), - new SignatureHelpResult(0, 7, 1, 1, 'y'), - new SignatureHelpResult(0, 8, 1, 1, 'y'), - new SignatureHelpResult(0, 9, 1, 2, 'z'), - new SignatureHelpResult(0, 10, 1, 2, 'z'), - new SignatureHelpResult(1, 0, 1, 2, 'z') + new SignatureHelpResult(0, 4, 0, 0, null), + new SignatureHelpResult(0, 5, 0, 0, null), + new SignatureHelpResult(0, 6, 1, 0, 'start'), + new SignatureHelpResult(0, 7, 1, 0, 'start'), + new SignatureHelpResult(0, 8, 1, 1, 'stop'), + new SignatureHelpResult(0, 9, 1, 1, 'stop'), + new SignatureHelpResult(0, 10, 1, 1, 'stop'), + new SignatureHelpResult(0, 11, 1, 2, 'step'), + new SignatureHelpResult(1, 0, 1, 2, 'step') ]; const document = await openDocument(fileTwo); - for (const e of expected) { - await checkSignature(e, document!.uri); + for (let i = 0; i < expected.length; i += 1) { + await checkSignature(expected[i], document!.uri, i); + } + }); + + test('For ellipsis', async () => { + const expected = [ + new SignatureHelpResult(0, 4, 0, 0, null), + new SignatureHelpResult(0, 5, 0, 0, null), + new SignatureHelpResult(0, 6, 1, 0, 'value'), + new SignatureHelpResult(0, 7, 1, 0, 'value'), + new SignatureHelpResult(0, 8, 1, 1, '...'), + new SignatureHelpResult(0, 9, 1, 1, '...'), + new SignatureHelpResult(0, 10, 1, 1, '...'), + new SignatureHelpResult(0, 11, 1, 2, 'sep'), + new SignatureHelpResult(0, 12, 1, 2, 'sep') + 
]; + + const document = await openDocument(fileThree); + for (let i = 0; i < expected.length; i += 1) { + await checkSignature(expected[i], document!.uri, i); } }); }); @@ -78,13 +99,13 @@ async function openDocument(documentPath: string): Promise('vscode.executeSignatureHelpProvider', uri, position); - assert.equal(actual!.signatures.length, expected.signaturesCount); + assert.equal(actual!.signatures.length, expected.signaturesCount, `Signature count does not match, case ${caseIndex}`); if (expected.signaturesCount > 0) { - assert.equal(actual!.activeParameter, expected.activeParameter); + assert.equal(actual!.activeParameter, expected.activeParameter, `Parameter index does not match, case ${caseIndex}`); const parameter = actual!.signatures[0].parameters[expected.activeParameter]; - assert.equal(parameter.label, expected.parameterName); + assert.equal(parameter.label, expected.parameterName, `Parameter name is incorrect, case ${caseIndex}`); } } From 61a56504912b81b163eb1de4547032f243d0920f Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 14 Dec 2017 17:02:09 -0800 Subject: [PATCH 029/103] Better handle no-parameters documentation --- src/client/providers/signatureProvider.ts | 26 +++++++++++++---------- src/test/index.ts | 3 ++- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/src/client/providers/signatureProvider.ts b/src/client/providers/signatureProvider.ts index 43caa67cd9cf..12dad261c39b 100644 --- a/src/client/providers/signatureProvider.ts +++ b/src/client/providers/signatureProvider.ts @@ -60,13 +60,14 @@ export class PythonSignatureProvider implements vscode.SignatureHelpProvider { // Some functions do not come with parameter docs let label: string; let documentation: string; + const validParamInfo = def.params && def.params.length > 0 && def.docstring.startsWith(`${def.name}(`); - if (def.params && def.params.length > 0) { + if (validParamInfo) { const docLines = def.docstring.splitLines(); label = docLines.shift().trim(); documentation = docLines.join(EOL).trim(); } else { - label = ''; + label = def.description; documentation = def.docstring; } @@ -75,15 +76,18 @@ export class PythonSignatureProvider implements vscode.SignatureHelpProvider { documentation, parameters: [] }; - sig.parameters = def.params.map(arg => { - if (arg.docstring.length === 0) { - arg.docstring = extractParamDocString(arg.name, def.docstring); - } - return { - documentation: arg.docstring.length > 0 ? arg.docstring : arg.description, - label: arg.name.trim() - }; - }); + + if (validParamInfo) { + sig.parameters = def.params.map(arg => { + if (arg.docstring.length === 0) { + arg.docstring = extractParamDocString(arg.name, def.docstring); + } + return { + documentation: arg.docstring.length > 0 ? 
arg.docstring : arg.description, + label: arg.name.trim() + }; + }); + } signature.signatures.push(sig); }); return signature; diff --git a/src/test/index.ts b/src/test/index.ts index 4d3b12a351ca..3a5cdd7b0602 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -12,7 +12,8 @@ const options: MochaSetupOptions & { retries: number } = { ui: 'tdd', useColors: true, timeout: 25000, - retries: 3 + retries: 3, + grep: 'Signatures' }; testRunner.configure(options); module.exports = testRunner; From a10305e115dff47eaf5a00764709a004076341c7 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Fri, 15 Dec 2017 09:40:02 -0800 Subject: [PATCH 030/103] Better handle ellipsis and Python3 --- src/client/common/utils.ts | 4 +- .../signature/{two.py => basicSig.py} | 1 + .../signature/{one.py => classCtor.py} | 0 src/test/pythonFiles/signature/ellipsis.py | 1 + src/test/pythonFiles/signature/noSigPy3.py | 1 + src/test/pythonFiles/signature/three.py | 1 - src/test/signature/signature.test.ts | 37 ++++++++++++++----- 7 files changed, 33 insertions(+), 12 deletions(-) rename src/test/pythonFiles/signature/{two.py => basicSig.py} (92%) rename src/test/pythonFiles/signature/{one.py => classCtor.py} (100%) create mode 100644 src/test/pythonFiles/signature/ellipsis.py create mode 100644 src/test/pythonFiles/signature/noSigPy3.py delete mode 100644 src/test/pythonFiles/signature/three.py diff --git a/src/client/common/utils.ts b/src/client/common/utils.ts index 34cefb118342..25e9a720ca01 100644 --- a/src/client/common/utils.ts +++ b/src/client/common/utils.ts @@ -340,8 +340,8 @@ export function getSubDirectories(rootDir: string): Promise { subDirs.push(fullPath); } } - catch (ex) { - } + // tslint:disable-next-line:no-empty + catch (ex) {} }); resolve(subDirs); }); diff --git a/src/test/pythonFiles/signature/two.py b/src/test/pythonFiles/signature/basicSig.py similarity index 92% rename from src/test/pythonFiles/signature/two.py rename to src/test/pythonFiles/signature/basicSig.py index ae7a551707b8..66ad4cbd0483 100644 --- a/src/test/pythonFiles/signature/two.py +++ b/src/test/pythonFiles/signature/basicSig.py @@ -1 +1,2 @@ range(c, 1, + diff --git a/src/test/pythonFiles/signature/one.py b/src/test/pythonFiles/signature/classCtor.py similarity index 100% rename from src/test/pythonFiles/signature/one.py rename to src/test/pythonFiles/signature/classCtor.py diff --git a/src/test/pythonFiles/signature/ellipsis.py b/src/test/pythonFiles/signature/ellipsis.py new file mode 100644 index 000000000000..c34faa6d231a --- /dev/null +++ b/src/test/pythonFiles/signature/ellipsis.py @@ -0,0 +1 @@ +print(a, b, c) diff --git a/src/test/pythonFiles/signature/noSigPy3.py b/src/test/pythonFiles/signature/noSigPy3.py new file mode 100644 index 000000000000..3d814698b7fe --- /dev/null +++ b/src/test/pythonFiles/signature/noSigPy3.py @@ -0,0 +1 @@ +pow() diff --git a/src/test/pythonFiles/signature/three.py b/src/test/pythonFiles/signature/three.py deleted file mode 100644 index fe666b9ff4c8..000000000000 --- a/src/test/pythonFiles/signature/three.py +++ /dev/null @@ -1 +0,0 @@ -print(a, b, z) diff --git a/src/test/signature/signature.test.ts b/src/test/signature/signature.test.ts index 5fa42aee2ea0..b42ecd115b16 100644 --- a/src/test/signature/signature.test.ts +++ b/src/test/signature/signature.test.ts @@ -5,12 +5,12 @@ import * as assert from 'assert'; import * as path from 'path'; import * as vscode from 'vscode'; +import { PythonSettings } from '../../client/common/configSettings'; +import { execPythonFile } from 
'../../client/common/utils'; +import { rootWorkspaceUri } from '../common'; import { closeActiveWindows, initialize, initializeTest } from '../initialize'; const autoCompPath = path.join(__dirname, '..', '..', '..', 'src', 'test', 'pythonFiles', 'signature'); -const fileOne = path.join(autoCompPath, 'one.py'); -const fileTwo = path.join(autoCompPath, 'two.py'); -const fileThree = path.join(autoCompPath, 'three.py'); class SignatureHelpResult { constructor( @@ -23,8 +23,11 @@ class SignatureHelpResult { // tslint:disable-next-line:max-func-body-length suite('Signatures', () => { + let isPython3: Promise; suiteSetup(async () => { await initialize(); + const version = await execPythonFile(rootWorkspaceUri, PythonSettings.getInstance(rootWorkspaceUri).pythonPath, ['--version'], __dirname, true); + isPython3 = Promise.resolve(version.indexOf('3.') >= 0); }); setup(initializeTest); suiteTeardown(closeActiveWindows); @@ -44,7 +47,7 @@ suite('Signatures', () => { new SignatureHelpResult(5, 20, 0, 0, null) ]; - const document = await openDocument(fileOne); + const document = await openDocument(path.join(autoCompPath, 'classCtor.py')); for (let i = 0; i < expected.length; i += 1) { await checkSignature(expected[i], document!.uri, i); } @@ -67,15 +70,17 @@ suite('Signatures', () => { new SignatureHelpResult(1, 0, 1, 2, 'step') ]; - const document = await openDocument(fileTwo); + const document = await openDocument(path.join(autoCompPath, 'basicSig.py')); for (let i = 0; i < expected.length; i += 1) { await checkSignature(expected[i], document!.uri, i); } }); test('For ellipsis', async () => { + if (!await isPython3) { + return; + } const expected = [ - new SignatureHelpResult(0, 4, 0, 0, null), new SignatureHelpResult(0, 5, 0, 0, null), new SignatureHelpResult(0, 6, 1, 0, 'value'), new SignatureHelpResult(0, 7, 1, 0, 'value'), @@ -86,11 +91,23 @@ suite('Signatures', () => { new SignatureHelpResult(0, 12, 1, 2, 'sep') ]; - const document = await openDocument(fileThree); + const document = await openDocument(path.join(autoCompPath, 'ellipsis.py')); for (let i = 0; i < expected.length; i += 1) { await checkSignature(expected[i], document!.uri, i); } }); + + test('For pow', async () => { + let expected: SignatureHelpResult; + if (await isPython3) { + expected = new SignatureHelpResult(0, 4, 1, 0, null); + } else { + expected = new SignatureHelpResult(0, 4, 1, 0, 'x'); + } + + const document = await openDocument(path.join(autoCompPath, 'noSigPy3.py')); + await checkSignature(expected, document!.uri, 0); + }); }); async function openDocument(documentPath: string): Promise { @@ -105,7 +122,9 @@ async function checkSignature(expected: SignatureHelpResult, uri: vscode.Uri, ca assert.equal(actual!.signatures.length, expected.signaturesCount, `Signature count does not match, case ${caseIndex}`); if (expected.signaturesCount > 0) { assert.equal(actual!.activeParameter, expected.activeParameter, `Parameter index does not match, case ${caseIndex}`); - const parameter = actual!.signatures[0].parameters[expected.activeParameter]; - assert.equal(parameter.label, expected.parameterName, `Parameter name is incorrect, case ${caseIndex}`); + if (expected.parameterName) { + const parameter = actual!.signatures[0].parameters[expected.activeParameter]; + assert.equal(parameter.label, expected.parameterName, `Parameter name is incorrect, case ${caseIndex}`); + } } } From 9cb43e77e6e947f0d8ea951ddd19d6aa80b43fdc Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 9 Jan 2018 14:50:08 -0800 Subject: [PATCH 031/103] #385 
Auto-Indentation doesn't work after comment --- src/client/extension.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/extension.ts b/src/client/extension.ts index 4dfb091988c4..710141b55f96 100644 --- a/src/client/extension.ts +++ b/src/client/extension.ts @@ -121,7 +121,7 @@ export async function activate(context: vscode.ExtensionContext) { vscode.languages.setLanguageConfiguration(PYTHON.language!, { onEnterRules: [ { - beforeText: /^\s*(?:def|class|for|if|elif|else|while|try|with|finally|except|async).*?:\s*$/, + beforeText: /^\s*(?:def|class|for|if|elif|else|while|try|with|finally|except|async).*$/, action: { indentAction: vscode.IndentAction.Indent } }, { From 5a9c3fd6a56dc94d47bb83328b7bb439ae7b9096 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 9 Jan 2018 15:41:46 -0800 Subject: [PATCH 032/103] #141 Auto indentation broken when return keyword involved --- src/client/extension.ts | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/client/extension.ts b/src/client/extension.ts index 710141b55f96..b84ef1cbca0e 100644 --- a/src/client/extension.ts +++ b/src/client/extension.ts @@ -121,16 +121,17 @@ export async function activate(context: vscode.ExtensionContext) { vscode.languages.setLanguageConfiguration(PYTHON.language!, { onEnterRules: [ { - beforeText: /^\s*(?:def|class|for|if|elif|else|while|try|with|finally|except|async).*$/, + beforeText: /^\s*(?:def|class|for|if|elif|else|while|try|with|finally|except|async)\b.*/, action: { indentAction: vscode.IndentAction.Indent } }, { - beforeText: /^ *#.*$/, + beforeText: /^\s*#.*/, afterText: /.+$/, action: { indentAction: vscode.IndentAction.None, appendText: '# ' } }, { - beforeText: /^\s+(continue|break|return)\b.*$/, + beforeText: /^\s+(continue|break|return)\b.*/, + afterText: /\s+$/, action: { indentAction: vscode.IndentAction.Outdent } } ] From 9800c4a7f5b3b169273450634d01c89784c7ba01 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 9 Jan 2018 15:43:33 -0800 Subject: [PATCH 033/103] Undo changes --- src/test/index.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/test/index.ts b/src/test/index.ts index 6f3209100654..234d1046c161 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -18,8 +18,7 @@ const options: MochaSetupOptions & { retries: number } = { ui: 'tdd', useColors: true, timeout: 25000, - retries: 3, - grep: 'Signatures' + retries: 3 }; testRunner.configure(options, { coverageConfig: '../coverconfig.json' }); module.exports = testRunner; From 30519c7d283d0ad00614324c2f28e8efd968c921 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 5 Feb 2018 11:58:17 -0800 Subject: [PATCH 034/103] #627 Docstrings for builtin methods are not parsed correctly --- src/client/providers/itemInfoSource.ts | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/src/client/providers/itemInfoSource.ts b/src/client/providers/itemInfoSource.ts index 3cb471959bac..427f36506902 100644 --- a/src/client/providers/itemInfoSource.ts +++ b/src/client/providers/itemInfoSource.ts @@ -118,7 +118,8 @@ export class ItemInfoSource { const descriptionWithHighlightedCode = this.highlightCode(lines.join(EOL)); const tooltip = new vscode.MarkdownString(['```python', signature, '```', descriptionWithHighlightedCode].join(EOL)); - infos.push(new LanguageItemInfo(tooltip, dnd[0], new vscode.MarkdownString(dnd[1]))); + const documentation = this.escapeMarkdown(dnd[1]); + infos.push(new LanguageItemInfo(tooltip, dnd[0], new 
vscode.MarkdownString(documentation))); const key = signature + lines.join(''); // Sometimes we have duplicate documentation, one with a period at the end. @@ -137,7 +138,8 @@ export class ItemInfoSource { const lines = item.description.split(EOL); const dd = this.getDetailAndDescription(item, lines); - infos.push(new LanguageItemInfo(tooltip, dd[0], new vscode.MarkdownString(dd[1]))); + const documentation = this.escapeMarkdown(dd[1]); + infos.push(new LanguageItemInfo(tooltip, dd[0], new vscode.MarkdownString(documentation))); const key = signature + lines.join(''); // Sometimes we have duplicate documentation, one with a period at the end. @@ -226,4 +228,22 @@ export class ItemInfoSource { docstring = docstring.replace(/\r?\n[\+=]+\r?\n/g, s => s.replace(/\+/g, '|').replace(/=/g, '-')); return docstring.trim(); } + + private escapeMarkdown(text: string): string { + return text + .replace(/\\/g, '\\\\') + .replace(/\*/g, '\\*') + .replace(/\_/g, '\\_') + .replace(/\{/g, '\\{') + .replace(/\}/g, '\\}') + .replace(/\[/g, '\\[') + .replace(/\]/g, '\\]') + .replace(/\(/g, '\\(') + .replace(/\)/g, '\\)') + .replace(/\#/g, '\\#') + .replace(/\+/g, '\\+') + .replace(/\-/g, '\\-') + .replace(/\./g, '\\.') + .replace(/\!/g, '\\!'); + } } From 96511cb1ba99e7a73161103d277138c45d9ed1cd Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 5 Feb 2018 15:49:30 -0800 Subject: [PATCH 035/103] reStructuredText converter --- .../common/markdown/restTextConverter.ts | 108 +++++++++++++++ src/client/providers/itemInfoSource.ts | 131 +++++------------- 2 files changed, 142 insertions(+), 97 deletions(-) create mode 100644 src/client/common/markdown/restTextConverter.ts diff --git a/src/client/common/markdown/restTextConverter.ts b/src/client/common/markdown/restTextConverter.ts new file mode 100644 index 000000000000..fe4a71384aa1 --- /dev/null +++ b/src/client/common/markdown/restTextConverter.ts @@ -0,0 +1,108 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +import { EOL } from 'os'; +import { isWhiteSpace } from '../../language/characters'; + +export class RestTextConverter { + // tslint:disable-next-line:cyclomatic-complexity + public toMarkdown(docstring: string): string { + // This method uses several regexs to 'translate' reStructruredText + // (Python doc syntax) to Markdown syntax. + + // Determine if this is actually a reStructruredText + if (docstring.indexOf('::') < 0 && docstring.indexOf('..')) { + // If documentation contains markdown symbols such as ** (power of) in code, escape them. 
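+            // For example, 'x**2 + y**2' would otherwise render '2 + y' in bold.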
+ return this.escapeMarkdown(docstring); + } + + const md: string[] = []; + let inCodeBlock = false; + + const lines = docstring.split(/\r?\n/); + for (let i = 0; i < lines.length; i += 1) { + let line = lines[i]; + + if (inCodeBlock) { + if (line.length > 0 && !isWhiteSpace(line.charCodeAt(0))) { + md.push('```'); + inCodeBlock = false; + } + } + + if (line.startsWith('```')) { + md.push(line); + inCodeBlock = true; + continue; + } + + if (i < lines.length - 1 && (lines[i + 1].startsWith('==='))) { + // Section title -> heading level 3 + md.push(`### ${line}`); + i += 1; + continue; + } + + if (i < lines.length - 1 && (lines[i + 1].startsWith('---'))) { + md.push(`#### ${line}`); + i += 1; + continue; + } + + if (line.startsWith('..') && line.indexOf('::') >= 0) { + continue; + } + if (line.indexOf('generated/') >= 0) { + continue; + } + if (line.startsWith('===') || line.startsWith('---')) { + continue; + } + + if (line.endsWith('::')) { + // Literal blocks: begin with `::` + if (line.length > 2) { + md.push(line.substring(0, line.length - 1)); + } + md.push('```'); + inCodeBlock = true; + continue; + } + + line = line.replace(/``/g, '`'); + if (line.length > 0 && isWhiteSpace(line.charCodeAt(0))) { + line = ` ${line} `; // Keep hard line breaks for the indented content + } + + if (md.length > 0 && (md[md.length - 1].length === 0 || md[md.length - 1] === '```') && line.length === 0) { + continue; // Avoid consequent empty lines + } + + md.push(line); + } + + if (inCodeBlock) { + md.push('```'); + } + return md.join(EOL).trim(); + } + + public escapeMarkdown(text: string): string { + // Not complete escape list so it does not interfere + // with subsequent code highlighting (see above). + return text + .replace(/\\/g, '\\\\') + .replace(/\*/g, '\\*') + .replace(/\_/g, '\\_') + .replace(/\{/g, '\\{') + .replace(/\}/g, '\\}') + .replace(/\[/g, '\\[') + .replace(/\]/g, '\\]') + .replace(/\(/g, '\\(') + .replace(/\)/g, '\\)') + .replace(/\#/g, '\\#') + .replace(/\+/g, '\\+') + .replace(/\-/g, '\\-') + .replace(/\!/g, '\\!'); + } +} diff --git a/src/client/providers/itemInfoSource.ts b/src/client/providers/itemInfoSource.ts index 427f36506902..f6d14f190cfd 100644 --- a/src/client/providers/itemInfoSource.ts +++ b/src/client/providers/itemInfoSource.ts @@ -4,6 +4,7 @@ import { EOL } from 'os'; import * as vscode from 'vscode'; +import { RestTextConverter } from '../common/markdown/restTextConverter'; import { JediFactory } from '../languageServices/jediProxyFactory'; import * as proxy from './jediProxy'; import { IHoverItem } from './jediProxy'; @@ -16,6 +17,7 @@ export class LanguageItemInfo { } export class ItemInfoSource { + private textConverter = new RestTextConverter(); constructor(private jediFactory: JediFactory) { } public async getItemInfoFromText(documentUri: vscode.Uri, fileName: string, range: vscode.Range, sourceText: string, token: vscode.CancellationToken) @@ -84,22 +86,8 @@ export class ItemInfoSource { const capturedInfo: string[] = []; data.items.forEach(item => { - let { signature } = item; - switch (item.kind) { - case vscode.SymbolKind.Constructor: - case vscode.SymbolKind.Function: - case vscode.SymbolKind.Method: { - signature = `def ${signature}`; - break; - } - case vscode.SymbolKind.Class: { - signature = `class ${signature}`; - break; - } - default: { - signature = typeof item.text === 'string' && item.text.length > 0 ? 
item.text : currentWord; - } - } + const signature = this.getSignature(item, currentWord); + let tooltip = new vscode.MarkdownString(); if (item.docstring) { let lines = item.docstring.split(/\r?\n/); const dnd = this.getDetailAndDescription(item, lines); @@ -116,9 +104,12 @@ export class ItemInfoSource { lines.shift(); } - const descriptionWithHighlightedCode = this.highlightCode(lines.join(EOL)); - const tooltip = new vscode.MarkdownString(['```python', signature, '```', descriptionWithHighlightedCode].join(EOL)); - const documentation = this.escapeMarkdown(dnd[1]); + // Tooltip is only used in hover + tooltip = tooltip.appendMarkdown(['```python', signature, '```', EOL].join(EOL)); + const description = this.textConverter.toMarkdown(lines.join(EOL)); + tooltip = tooltip.appendMarkdown(description); + + const documentation = this.textConverter.toMarkdown(dnd[1]); // Used only in completion list infos.push(new LanguageItemInfo(tooltip, dnd[0], new vscode.MarkdownString(documentation))); const key = signature + lines.join(''); @@ -132,13 +123,13 @@ export class ItemInfoSource { } if (item.description) { - const descriptionWithHighlightedCode = this.highlightCode(item.description); - // tslint:disable-next-line:prefer-template - const tooltip = new vscode.MarkdownString('```python' + `${EOL}${signature}${EOL}` + '```' + `${EOL}${descriptionWithHighlightedCode}`); + tooltip.appendMarkdown(['```python', signature, '```', EOL].join(EOL)); + const description = this.textConverter.toMarkdown(item.description); + tooltip.appendMarkdown(description); const lines = item.description.split(EOL); const dd = this.getDetailAndDescription(item, lines); - const documentation = this.escapeMarkdown(dd[1]); + const documentation = this.textConverter.escapeMarkdown(dd[1]); infos.push(new LanguageItemInfo(tooltip, dd[0], new vscode.MarkdownString(documentation))); const key = signature + lines.join(''); @@ -159,7 +150,7 @@ export class ItemInfoSource { let detail: string; let description: string; - if (item.signature && item.signature.length > 0) { + if (item.signature && item.signature.length > 0 && lines.length > 0 && lines[0].indexOf(item.signature) >= 0) { detail = lines.length > 0 ? lines[0] : ''; description = lines.filter((line, index) => index > 0).join(EOL).trim(); } else { @@ -169,81 +160,27 @@ export class ItemInfoSource { return [detail, description]; } - private highlightCode(docstring: string): string { - /********** - * - * Magic. Do not touch. [What is the best comment in source code](https://stackoverflow.com/a/185106) - * - * This method uses several regexs to 'translate' reStructruedText syntax (Python doc syntax) to Markdown syntax. - * - * Let's just keep it unchanged unless a better solution becomes possible. - * - **********/ - // Add 2 line break before and after docstring (used to match a blank line) - docstring = EOL + EOL + docstring.trim() + EOL + EOL; - // Section title -> heading level 2 - docstring = docstring.replace(/(.+\r?\n)[-=]+\r?\n/g, `## $1${EOL}`); - // Directives: '.. directive::' -> '**directive**' - docstring = docstring.replace(/\.\. 
(.*)::/g, '**$1**'); - // Pattern of 'var : description' - const paramLinePattern = '[\\*\\w_]+ ?:[^:\r\n]+'; - // Add new line after and before param line - docstring = docstring.replace(new RegExp(`(${EOL + paramLinePattern})`, 'g'), `$1${EOL}`); - docstring = docstring.replace(new RegExp(`(${EOL + paramLinePattern + EOL})`, 'g'), `${EOL}$1`); - // 'var : description' -> '`var` description' - docstring = docstring.replace(/\r?\n([\*\w]+) ?: ?([^:\r\n]+\r?\n)/g, `${EOL}\`$1\` $2`); - // Doctest blocks: begin with `>>>` and end with blank line - // tslint:disable-next-line:prefer-template - docstring = docstring.replace(/(>>>[\w\W]+?\r?\n)\r?\n/g, `${'```python' + EOL}$1${'```' + EOL + EOL}`); - // Literal blocks: begin with `::` (literal blocks are indented or quoted; for simplicity, we end literal blocks with blank line) - // tslint:disable-next-line:prefer-template - docstring = docstring.replace(/(\r?\n[^\.]*)::\r?\n\r?\n([\w\W]+?\r?\n)\r?\n/g, `$1${EOL + '```' + EOL}$2${'```' + EOL + EOL}`); - // Remove indentation in Field lists and Literal blocks - let inCodeBlock = false; - let codeIndentation = 0; - const lines = docstring.split(/\r?\n/); - for (let i = 0; i < lines.length; i += 1) { - const line = lines[i]; - if (line.startsWith('```')) { - inCodeBlock = !inCodeBlock; - if (inCodeBlock) { - const match = lines[i + 1].match(/^ */); - codeIndentation = match && match.length > 0 ? match[0].length : 0; - } - continue; + private getSignature(item: proxy.IHoverItem, currentWord: string): string { + let { signature } = item; + switch (item.kind) { + case vscode.SymbolKind.Constructor: + case vscode.SymbolKind.Function: + case vscode.SymbolKind.Method: { + signature = `def ${signature}`; + break; } - if (!inCodeBlock) { - lines[i] = line.replace(/^ {4,8}/, ''); - // Field lists: ':field:' -> '**field**' - lines[i] = lines[i].replace(/:(.+?):/g, '**$1** '); - } else { - if (codeIndentation !== 0) { - lines[i] = line.substring(codeIndentation); - } + case vscode.SymbolKind.Class: { + signature = `class ${signature}`; + break; + } + case vscode.SymbolKind.Module: { + signature = `module ${signature}`; + break; + } + default: { + signature = typeof item.text === 'string' && item.text.length > 0 ? 
item.text : currentWord; } } - docstring = lines.join(EOL); - // Grid Tables - docstring = docstring.replace(/\r?\n[\+-]+\r?\n/g, EOL); - docstring = docstring.replace(/\r?\n[\+=]+\r?\n/g, s => s.replace(/\+/g, '|').replace(/=/g, '-')); - return docstring.trim(); - } - - private escapeMarkdown(text: string): string { - return text - .replace(/\\/g, '\\\\') - .replace(/\*/g, '\\*') - .replace(/\_/g, '\\_') - .replace(/\{/g, '\\{') - .replace(/\}/g, '\\}') - .replace(/\[/g, '\\[') - .replace(/\]/g, '\\]') - .replace(/\(/g, '\\(') - .replace(/\)/g, '\\)') - .replace(/\#/g, '\\#') - .replace(/\+/g, '\\+') - .replace(/\-/g, '\\-') - .replace(/\./g, '\\.') - .replace(/\!/g, '\\!'); + return signature; } } From c8670b9083413530afb32ff6e229af88b122f73b Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 5 Feb 2018 15:57:24 -0800 Subject: [PATCH 036/103] Fix: period is not an operator --- src/client/language/tokenizer.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index b84a11b62fd1..0a2160fc15c5 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -300,7 +300,7 @@ export class Tokenizer implements ITokenizer { break; default: - break; + return false; } this.tokens.push(new Token(TokenType.Operator, this.cs.position, length)); this.cs.advance(length); From 97f232f1eb433f78d9036951531b590f7b59304c Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 5 Feb 2018 16:14:07 -0800 Subject: [PATCH 037/103] Minor fixes --- .../common/markdown/restTextConverter.ts | 28 ++++++++++++------- src/client/providers/itemInfoSource.ts | 12 ++++++-- 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/src/client/common/markdown/restTextConverter.ts b/src/client/common/markdown/restTextConverter.ts index fe4a71384aa1..eccd60e60edc 100644 --- a/src/client/common/markdown/restTextConverter.ts +++ b/src/client/common/markdown/restTextConverter.ts @@ -8,7 +8,9 @@ export class RestTextConverter { // tslint:disable-next-line:cyclomatic-complexity public toMarkdown(docstring: string): string { // This method uses several regexs to 'translate' reStructruredText - // (Python doc syntax) to Markdown syntax. + // https://en.wikipedia.org/wiki/ReStructuredText + // (Python doc syntax) to Markdown syntax. It only translates + // as much as needed to display tooltips in intellisense. // Determine if this is actually a reStructruredText if (docstring.indexOf('::') < 0 && docstring.indexOf('..')) { @@ -24,6 +26,9 @@ export class RestTextConverter { let line = lines[i]; if (inCodeBlock) { + // Pseudo-code block terminates by a line without leading + // whitespace. Pseudo-code blocks are used to preserve + // pre-formatted text. if (line.length > 0 && !isWhiteSpace(line.charCodeAt(0))) { md.push('```'); inCodeBlock = false; @@ -32,35 +37,37 @@ export class RestTextConverter { if (line.startsWith('```')) { md.push(line); - inCodeBlock = true; + inCodeBlock = !inCodeBlock; continue; } if (i < lines.length - 1 && (lines[i + 1].startsWith('==='))) { // Section title -> heading level 3 md.push(`### ${line}`); - i += 1; + i += 1; // Eat line with === continue; } if (i < lines.length - 1 && (lines[i + 1].startsWith('---'))) { + // Subsection title -> heading level 4 md.push(`#### ${line}`); - i += 1; + i += 1; // Eat line with --- continue; } if (line.startsWith('..') && line.indexOf('::') >= 0) { - continue; + continue; // Ignore assorted tags likes .. 
seealso:: } if (line.indexOf('generated/') >= 0) { - continue; + continue; // ignore generated content } if (line.startsWith('===') || line.startsWith('---')) { continue; } if (line.endsWith('::')) { - // Literal blocks: begin with `::` + // Literal blocks begin with `::`. Such as sequence like + // '... as shown below::' that is followed by a preformatted text. if (line.length > 2) { md.push(line.substring(0, line.length - 1)); } @@ -69,13 +76,14 @@ export class RestTextConverter { continue; } - line = line.replace(/``/g, '`'); + line = line.replace(/``/g, '`'); // Convert double backticks to single if (line.length > 0 && isWhiteSpace(line.charCodeAt(0))) { - line = ` ${line} `; // Keep hard line breaks for the indented content + // Keep hard line breaks for the pre-indented content + line = ` ${line} `; } if (md.length > 0 && (md[md.length - 1].length === 0 || md[md.length - 1] === '```') && line.length === 0) { - continue; // Avoid consequent empty lines + continue; // Avoid more than one empty line in a row } md.push(line); diff --git a/src/client/providers/itemInfoSource.ts b/src/client/providers/itemInfoSource.ts index f6d14f190cfd..db147dbfd431 100644 --- a/src/client/providers/itemInfoSource.ts +++ b/src/client/providers/itemInfoSource.ts @@ -105,7 +105,9 @@ export class ItemInfoSource { } // Tooltip is only used in hover - tooltip = tooltip.appendMarkdown(['```python', signature, '```', EOL].join(EOL)); + if (signature.length > 0) { + tooltip = tooltip.appendMarkdown(['```python', signature, '```', EOL].join(EOL)); + } const description = this.textConverter.toMarkdown(lines.join(EOL)); tooltip = tooltip.appendMarkdown(description); @@ -123,7 +125,9 @@ export class ItemInfoSource { } if (item.description) { - tooltip.appendMarkdown(['```python', signature, '```', EOL].join(EOL)); + if (signature.length > 0) { + tooltip.appendMarkdown(['```python', signature, '```', EOL].join(EOL)); + } const description = this.textConverter.toMarkdown(item.description); tooltip.appendMarkdown(description); @@ -174,7 +178,9 @@ export class ItemInfoSource { break; } case vscode.SymbolKind.Module: { - signature = `module ${signature}`; + if (signature.length > 0) { + signature = `module ${signature}`; + } break; } default: { From 768bffe5b56b8a24ac8c07c52b8eba819af4a61f Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Feb 2018 12:24:22 -0800 Subject: [PATCH 038/103] Restructure --- .../common/markdown/restTextConverter.ts | 218 ++++++++++++------ 1 file changed, 151 insertions(+), 67 deletions(-) diff --git a/src/client/common/markdown/restTextConverter.ts b/src/client/common/markdown/restTextConverter.ts index eccd60e60edc..bbb11347775a 100644 --- a/src/client/common/markdown/restTextConverter.ts +++ b/src/client/common/markdown/restTextConverter.ts @@ -5,112 +5,196 @@ import { EOL } from 'os'; import { isWhiteSpace } from '../../language/characters'; export class RestTextConverter { + private inPreBlock = false; + private inCodeBlock = false; + private md: string[] = []; + // tslint:disable-next-line:cyclomatic-complexity public toMarkdown(docstring: string): string { - // This method uses several regexs to 'translate' reStructruredText - // https://en.wikipedia.org/wiki/ReStructuredText - // (Python doc syntax) to Markdown syntax. It only translates - // as much as needed to display tooltips in intellisense. + // Translates reStructruredText (Python doc syntax) to markdown. + // It only translates as much as needed to display tooltips + // and documentation in the completion list. 
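+        // For example, a section title underlined with === becomes a '###' heading,
+        // and a line ending in '::' opens a preformatted block.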
+ // See https://en.wikipedia.org/wiki/ReStructuredText - // Determine if this is actually a reStructruredText + // Determine if this is actually a reStructruredText. if (docstring.indexOf('::') < 0 && docstring.indexOf('..')) { // If documentation contains markdown symbols such as ** (power of) in code, escape them. return this.escapeMarkdown(docstring); } + const result = this.transformLines(docstring); + + this.inPreBlock = this.inPreBlock = false; + this.md = []; - const md: string[] = []; - let inCodeBlock = false; + return result; + } + public escapeMarkdown(text: string): string { + // Not complete escape list so it does not interfere + // with subsequent code highlighting (see above). + return text + .replace(/\\/g, '\\\\') + .replace(/\*/g, '\\*') + .replace(/\_/g, '\\_') + .replace(/\{/g, '\\{') + .replace(/\}/g, '\\}') + .replace(/\[/g, '\\[') + .replace(/\]/g, '\\]') + .replace(/\(/g, '\\(') + .replace(/\)/g, '\\)') + .replace(/\#/g, '\\#') + .replace(/\+/g, '\\+') + .replace(/\-/g, '\\-') + .replace(/\!/g, '\\!'); + } + + private transformLines(docstring: string): string { const lines = docstring.split(/\r?\n/); for (let i = 0; i < lines.length; i += 1) { let line = lines[i]; - if (inCodeBlock) { - // Pseudo-code block terminates by a line without leading - // whitespace. Pseudo-code blocks are used to preserve - // pre-formatted text. - if (line.length > 0 && !isWhiteSpace(line.charCodeAt(0))) { - md.push('```'); - inCodeBlock = false; - } - } - - if (line.startsWith('```')) { - md.push(line); - inCodeBlock = !inCodeBlock; + if (this.handleCodeBlock(line)) { continue; } - if (i < lines.length - 1 && (lines[i + 1].startsWith('==='))) { - // Section title -> heading level 3 - md.push(`### ${line}`); - i += 1; // Eat line with === - continue; + if (this.inPreBlock) { + // Preformatted block terminates by a line without leading + // whitespace or any special line like ..ABC::. + if (line.length > 0 && !isWhiteSpace(line.charCodeAt(0))) { + this.endPreformattedBlock(); + } } - if (i < lines.length - 1 && (lines[i + 1].startsWith('---'))) { - // Subsection title -> heading level 4 - md.push(`#### ${line}`); - i += 1; // Eat line with --- + if (this.handleSectionHeader(lines, i)) { + i += 1; // Eat line with === or --- continue; } - if (line.startsWith('..') && line.indexOf('::') >= 0) { - continue; // Ignore assorted tags likes .. seealso:: - } if (line.indexOf('generated/') >= 0) { - continue; // ignore generated content + continue; // ignore generated content. } if (line.startsWith('===') || line.startsWith('---')) { - continue; + continue; // Eat standalone === or --- lines. } - if (line.endsWith('::')) { - // Literal blocks begin with `::`. Such as sequence like - // '... as shown below::' that is followed by a preformatted text. - if (line.length > 2) { - md.push(line.substring(0, line.length - 1)); - } - md.push('```'); - inCodeBlock = true; + if (this.handleDoubleColon(line)) { + continue; + } + if (line.startsWith('..') && line.indexOf('::') > 0) { + // Ignore lines likes .. sectionauthor:: John Doe. continue; } - line = line.replace(/``/g, '`'); // Convert double backticks to single + line = this.convertEmphasis(line); + line = line.replace(/``/g, '`'); // Convert double backticks to single. + if (line.length > 0 && isWhiteSpace(line.charCodeAt(0))) { - // Keep hard line breaks for the pre-indented content + // Keep hard line breaks for the pre-indented content. 
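+                // (two trailing spaces are markdown's hard line break marker)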
line = ` ${line} `; } - if (md.length > 0 && (md[md.length - 1].length === 0 || md[md.length - 1] === '```') && line.length === 0) { - continue; // Avoid more than one empty line in a row + const prevLine = this.md.length > 0 ? this.md[this.md.length - 1] : undefined; + if (line.length === 0 && prevLine && (prevLine.length === 0 || prevLine.startsWith('```'))) { + continue; // Avoid more than one empty line in a row. } - md.push(line); + this.md.push(line); } - if (inCodeBlock) { - md.push('```'); + this.tryEndCodePreBlocks(); + return this.md.join(EOL).trim(); + } + + private handleCodeBlock(line: string): boolean { + if (!line.startsWith('```')) { + return false; + } + if (this.inCodeBlock) { + this.endCodeBlock(); + } else { + this.startCodeBlock(); } - return md.join(EOL).trim(); + return true; } - public escapeMarkdown(text: string): string { - // Not complete escape list so it does not interfere - // with subsequent code highlighting (see above). - return text - .replace(/\\/g, '\\\\') - .replace(/\*/g, '\\*') - .replace(/\_/g, '\\_') - .replace(/\{/g, '\\{') - .replace(/\}/g, '\\}') - .replace(/\[/g, '\\[') - .replace(/\]/g, '\\]') - .replace(/\(/g, '\\(') - .replace(/\)/g, '\\)') - .replace(/\#/g, '\\#') - .replace(/\+/g, '\\+') - .replace(/\-/g, '\\-') - .replace(/\!/g, '\\!'); + private handleSectionHeader(lines: string[], i: number): boolean { + const line = lines[i]; + if (i < lines.length - 1 && (lines[i + 1].startsWith('==='))) { + // Section title -> heading level 3. + this.md.push(`### ${this.convertEmphasis(line)}`); + return true; + } + if (i < lines.length - 1 && (lines[i + 1].startsWith('---'))) { + // Subsection title -> heading level 4. + this.md.push(`#### ${this.convertEmphasis(line)}`); + return true; + } + return false; + } + + private handleDoubleColon(line: string): boolean { + if (!line.endsWith('::')) { + return false; + } + // Literal blocks begin with `::`. Such as sequence like + // '... as shown below::' that is followed by a preformatted text. + if (line.length > 2 && !line.startsWith('..')) { + // Ignore lines likes .. autosummary:: John Doe. + // Trim trailing : so :: turns into :. + this.md.push(line.substring(0, line.length - 1)); + } + + this.startPreformattedBlock(); + return true; + } + + private tryEndCodePreBlocks(): void { + if (this.inCodeBlock) { + this.endCodeBlock(); + } + if (this.inPreBlock) { + this.endPreformattedBlock(); + } + } + + private startPreformattedBlock(): void { + // Remove previous empty line so we avoid double empties. + this.tryRemovePrecedingEmptyLine(); + // Lie about the language since we don't want preformatted text + // to be colorized as Python. HTML is more 'appropriate' as it does + // not colorize -- or + or keywords like 'from'. + this.md.push('```html'); + this.inPreBlock = true; + } + + private endPreformattedBlock(): void { + if (this.inPreBlock) { + this.md.push('```'); + this.inPreBlock = false; + } + } + + private startCodeBlock(): void { + // Remove previous empty line so we avoid double empties. + this.tryRemovePrecedingEmptyLine(); + this.md.push('```python'); + this.inCodeBlock = true; + } + + private endCodeBlock(): void { + if (this.inCodeBlock) { + this.md.push('```'); + this.inCodeBlock = false; + } + } + + private tryRemovePrecedingEmptyLine(): void { + if (this.md.length > 0 && this.md[this.md.length - 1].length === 0) { + this.md.pop(); + } + } + + private convertEmphasis(line: string): string { + return line.replace(/\:([\w\W]+)\:/g, '**$1**'); // Convert :word: to **word**. 
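+        // Note: [\w\W]+ is greedy, so several :refs: on one line collapse into a single bold span.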
} } From 825f16b64d01b9342d7109da627968ccd034ca91 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Feb 2018 13:03:04 -0800 Subject: [PATCH 039/103] Tests --- .../common/markdown/restTextConverter.ts | 6 ++ src/test/markdown/restTextConverter.test.ts | 40 +++++++++++ .../markdown/scipy.spatial.distance.md | 58 +++++++++++++++ .../markdown/scipy.spatial.distance.pydoc | 71 +++++++++++++++++++ 4 files changed, 175 insertions(+) create mode 100644 src/test/markdown/restTextConverter.test.ts create mode 100644 src/test/pythonFiles/markdown/scipy.spatial.distance.md create mode 100644 src/test/pythonFiles/markdown/scipy.spatial.distance.pydoc diff --git a/src/client/common/markdown/restTextConverter.ts b/src/client/common/markdown/restTextConverter.ts index bbb11347775a..485801a35f49 100644 --- a/src/client/common/markdown/restTextConverter.ts +++ b/src/client/common/markdown/restTextConverter.ts @@ -48,11 +48,17 @@ export class RestTextConverter { .replace(/\!/g, '\\!'); } + // tslint:disable-next-line:cyclomatic-complexity private transformLines(docstring: string): string { const lines = docstring.split(/\r?\n/); for (let i = 0; i < lines.length; i += 1) { let line = lines[i]; + // Avoid leading empty lines + if (this.md.length === 0 && line.length === 0) { + continue; + } + if (this.handleCodeBlock(line)) { continue; } diff --git a/src/test/markdown/restTextConverter.test.ts b/src/test/markdown/restTextConverter.test.ts new file mode 100644 index 000000000000..52cc7ea7c497 --- /dev/null +++ b/src/test/markdown/restTextConverter.test.ts @@ -0,0 +1,40 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +import { expect } from 'chai'; +import * as fs from 'fs-extra'; +import * as path from 'path'; +import { RestTextConverter } from '../../client/common/markdown/restTextConverter'; + +const srcPythoFilesPath = path.join(__dirname, '..', '..', '..', 'src', 'test', 'pythonFiles', 'markdown'); + +function compareFiles(expectedContent: string, actualContent: string) { + const expectedLines = expectedContent.split(/\r?\n/); + const actualLines = actualContent.split(/\r?\n/); + + for (let i = 0; i < Math.min(expectedLines.length, actualLines.length); i += 1) { + const e = expectedLines[i]; + const a = actualLines[i]; + expect(a, `Difference at line ${i}`).to.be.equal(e); + } + + expect(actualLines.length, + expectedLines.length > actualLines.length + ? 
'Actual contains more lines than expected' + : 'Expected contains more lines than the actual' + ).to.be.equal(expectedLines.length); +} + +async function testConversion(fileName: string): Promise { + const cvt = new RestTextConverter(); + const file = path.join(srcPythoFilesPath, fileName); + const source = await fs.readFile(`${file}.pydoc`, 'utf8'); + const actual = cvt.toMarkdown(source); + const expected = await fs.readFile(`${file}.md`, 'utf8'); + compareFiles(expected, actual); +} + +// tslint:disable-next-line:max-func-body-length +suite('Hover - RestTextConverter', () => { + test('scipy.spatial.distance', async () => await testConversion('scipy.spatial.distance')); +}); diff --git a/src/test/pythonFiles/markdown/scipy.spatial.distance.md b/src/test/pythonFiles/markdown/scipy.spatial.distance.md new file mode 100644 index 000000000000..125b19f6cdeb --- /dev/null +++ b/src/test/pythonFiles/markdown/scipy.spatial.distance.md @@ -0,0 +1,58 @@ +### Distance computations (**mod**`scipy.spatial.distance`) + + +#### Function Reference + +Distance matrix computation from a collection of raw observation vectors +stored in a rectangular array. +```html + pdist -- pairwise distances between observation vectors. + cdist -- distances between two collections of observation vectors + squareform -- convert distance matrix to a condensed one and vice versa + directed_hausdorff -- directed Hausdorff distance between arrays + +``` +Predicates for checking the validity of distance matrices, both +condensed and redundant. Also contained in this module are functions +for computing the number of observations in a distance matrix. +```html + is_valid_dm -- checks for a valid distance matrix + is_valid_y -- checks for a valid condensed distance matrix + num_obs_dm -- # of observations in a distance matrix + num_obs_y -- # of observations in a condensed distance matrix + +``` +Distance functions between two numeric vectors `u` and `v`. Computing +distances over a large collection of vectors is inefficient for these +functions. Use `pdist` for this purpose. +```html + braycurtis -- the Bray-Curtis distance. + canberra -- the Canberra distance. + chebyshev -- the Chebyshev distance. + cityblock -- the Manhattan distance. + correlation -- the Correlation distance. + cosine -- the Cosine distance. + euclidean -- the Euclidean distance. + mahalanobis -- the Mahalanobis distance. + minkowski -- the Minkowski distance. + seuclidean -- the normalized Euclidean distance. + sqeuclidean -- the squared Euclidean distance. + wminkowski -- (deprecated) alias of `minkowski`. + +``` +Distance functions between two boolean vectors (representing sets) `u` and +`v`. As in the case of numerical vectors, `pdist` is more efficient for +computing the distances between all pairs. +```html + dice -- the Dice dissimilarity. + hamming -- the Hamming distance. + jaccard -- the Jaccard distance. + kulsinski -- the Kulsinski distance. + rogerstanimoto -- the Rogers-Tanimoto dissimilarity. + russellrao -- the Russell-Rao dissimilarity. + sokalmichener -- the Sokal-Michener dissimilarity. + sokalsneath -- the Sokal-Sneath dissimilarity. + yule -- the Yule dissimilarity. + +``` +**func**`hamming` also operates over discrete numerical vectors. 
\ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.spatial.distance.pydoc b/src/test/pythonFiles/markdown/scipy.spatial.distance.pydoc new file mode 100644 index 000000000000..cfc9b7008b99 --- /dev/null +++ b/src/test/pythonFiles/markdown/scipy.spatial.distance.pydoc @@ -0,0 +1,71 @@ + +===================================================== +Distance computations (:mod:`scipy.spatial.distance`) +===================================================== + +.. sectionauthor:: Damian Eads + +Function Reference +------------------ + +Distance matrix computation from a collection of raw observation vectors +stored in a rectangular array. + +.. autosummary:: + :toctree: generated/ + + pdist -- pairwise distances between observation vectors. + cdist -- distances between two collections of observation vectors + squareform -- convert distance matrix to a condensed one and vice versa + directed_hausdorff -- directed Hausdorff distance between arrays + +Predicates for checking the validity of distance matrices, both +condensed and redundant. Also contained in this module are functions +for computing the number of observations in a distance matrix. + +.. autosummary:: + :toctree: generated/ + + is_valid_dm -- checks for a valid distance matrix + is_valid_y -- checks for a valid condensed distance matrix + num_obs_dm -- # of observations in a distance matrix + num_obs_y -- # of observations in a condensed distance matrix + +Distance functions between two numeric vectors ``u`` and ``v``. Computing +distances over a large collection of vectors is inefficient for these +functions. Use ``pdist`` for this purpose. + +.. autosummary:: + :toctree: generated/ + + braycurtis -- the Bray-Curtis distance. + canberra -- the Canberra distance. + chebyshev -- the Chebyshev distance. + cityblock -- the Manhattan distance. + correlation -- the Correlation distance. + cosine -- the Cosine distance. + euclidean -- the Euclidean distance. + mahalanobis -- the Mahalanobis distance. + minkowski -- the Minkowski distance. + seuclidean -- the normalized Euclidean distance. + sqeuclidean -- the squared Euclidean distance. + wminkowski -- (deprecated) alias of `minkowski`. + +Distance functions between two boolean vectors (representing sets) ``u`` and +``v``. As in the case of numerical vectors, ``pdist`` is more efficient for +computing the distances between all pairs. + +.. autosummary:: + :toctree: generated/ + + dice -- the Dice dissimilarity. + hamming -- the Hamming distance. + jaccard -- the Jaccard distance. + kulsinski -- the Kulsinski distance. + rogerstanimoto -- the Rogers-Tanimoto dissimilarity. + russellrao -- the Russell-Rao dissimilarity. + sokalmichener -- the Sokal-Michener dissimilarity. + sokalsneath -- the Sokal-Sneath dissimilarity. + yule -- the Yule dissimilarity. + +:func:`hamming` also operates over discrete numerical vectors. 
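
The .pydoc/.md pair above is what testConversion() consumes: the .pydoc fixture is run through the converter and compared line by line against the .md baseline. A minimal sketch of how such a baseline could be regenerated with the same converter; regenBaseline.ts is a hypothetical helper for illustration only, not part of this patch series:

```typescript
// regenBaseline.ts -- hypothetical helper, not part of the patch series.
// Runs RestTextConverter over a .pydoc fixture and writes the .md baseline
// that restTextConverter.test.ts compares against.
import * as fs from 'fs-extra';
import * as path from 'path';
import { RestTextConverter } from '../../client/common/markdown/restTextConverter';

async function regenBaseline(fixtureDir: string, name: string): Promise<void> {
    const file = path.join(fixtureDir, name);
    const source = await fs.readFile(`${file}.pydoc`, 'utf8');
    // Same call the test makes; review the output before committing it.
    const md = new RestTextConverter().toMarkdown(source);
    await fs.writeFile(`${file}.md`, md);
}

regenBaseline(path.join(__dirname, 'pythonFiles', 'markdown'), 'scipy.spatial.distance')
    .catch(err => console.error(err));
```

Reviewing the regenerated output before committing keeps accidental converter regressions out of the baselines.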
From eb36eefde01044e3c6cb88f66ceb935edd244d09 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Feb 2018 13:11:21 -0800 Subject: [PATCH 040/103] Tests --- src/test/markdown/restTextConverter.test.ts | 2 + src/test/pythonFiles/markdown/scipy.md | 48 +++++++++++ src/test/pythonFiles/markdown/scipy.pydoc | 53 ++++++++++++ .../pythonFiles/markdown/scipy.spatial.md | 70 +++++++++++++++ .../pythonFiles/markdown/scipy.spatial.pydoc | 86 +++++++++++++++++++ 5 files changed, 259 insertions(+) create mode 100644 src/test/pythonFiles/markdown/scipy.md create mode 100644 src/test/pythonFiles/markdown/scipy.pydoc create mode 100644 src/test/pythonFiles/markdown/scipy.spatial.md create mode 100644 src/test/pythonFiles/markdown/scipy.spatial.pydoc diff --git a/src/test/markdown/restTextConverter.test.ts b/src/test/markdown/restTextConverter.test.ts index 52cc7ea7c497..ac649b912081 100644 --- a/src/test/markdown/restTextConverter.test.ts +++ b/src/test/markdown/restTextConverter.test.ts @@ -36,5 +36,7 @@ async function testConversion(fileName: string): Promise { // tslint:disable-next-line:max-func-body-length suite('Hover - RestTextConverter', () => { + test('scipy', async () => await testConversion('scipy')); + test('scipy.spatial', async () => await testConversion('scipy.spatial')); test('scipy.spatial.distance', async () => await testConversion('scipy.spatial.distance')); }); diff --git a/src/test/pythonFiles/markdown/scipy.md b/src/test/pythonFiles/markdown/scipy.md new file mode 100644 index 000000000000..23721797aae3 --- /dev/null +++ b/src/test/pythonFiles/markdown/scipy.md @@ -0,0 +1,48 @@ +### SciPy: A scientific computing package for Python + +Documentation is available in the docstrings and +online at https://docs.scipy.org. + +#### Contents +SciPy imports all the functions from the NumPy namespace, and in +addition provides: + +#### Subpackages +Using any of these subpackages requires an explicit import. For example, +`import scipy.cluster`. +```html + cluster --- Vector Quantization / Kmeans + fftpack --- Discrete Fourier Transform algorithms + integrate --- Integration routines + interpolate --- Interpolation Tools + io --- Data input and output + linalg --- Linear algebra routines + linalg.blas --- Wrappers to BLAS library + linalg.lapack --- Wrappers to LAPACK library + misc --- Various utilities that don't have + another home. 
+ ndimage --- n-dimensional image package + odr --- Orthogonal Distance Regression + optimize --- Optimization Tools + signal --- Signal Processing Tools + sparse --- Sparse Matrices + sparse.linalg --- Sparse Linear Algebra + sparse.linalg.dsolve --- Linear Solvers + sparse.linalg.dsolve.umfpack --- **Interface to the UMFPACK library** + Conjugate Gradient Method (LOBPCG) + sparse.linalg.eigen --- Sparse Eigenvalue Solvers + sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned + Conjugate Gradient Method (LOBPCG) + spatial --- Spatial data structures and algorithms + special --- Special functions + stats --- Statistical Functions + +``` +#### Utility tools +```html + test --- Run scipy unittests + show_config --- Show scipy build configuration + show_numpy_config --- Show numpy build configuration + __version__ --- Scipy version string + __numpy_version__ --- Numpy version string +``` \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.pydoc b/src/test/pythonFiles/markdown/scipy.pydoc new file mode 100644 index 000000000000..293445fbea5b --- /dev/null +++ b/src/test/pythonFiles/markdown/scipy.pydoc @@ -0,0 +1,53 @@ +SciPy: A scientific computing package for Python +================================================ + +Documentation is available in the docstrings and +online at https://docs.scipy.org. + +Contents +-------- +SciPy imports all the functions from the NumPy namespace, and in +addition provides: + +Subpackages +----------- +Using any of these subpackages requires an explicit import. For example, +``import scipy.cluster``. + +:: + + cluster --- Vector Quantization / Kmeans + fftpack --- Discrete Fourier Transform algorithms + integrate --- Integration routines + interpolate --- Interpolation Tools + io --- Data input and output + linalg --- Linear algebra routines + linalg.blas --- Wrappers to BLAS library + linalg.lapack --- Wrappers to LAPACK library + misc --- Various utilities that don't have + another home. + ndimage --- n-dimensional image package + odr --- Orthogonal Distance Regression + optimize --- Optimization Tools + signal --- Signal Processing Tools + sparse --- Sparse Matrices + sparse.linalg --- Sparse Linear Algebra + sparse.linalg.dsolve --- Linear Solvers + sparse.linalg.dsolve.umfpack --- :Interface to the UMFPACK library: + Conjugate Gradient Method (LOBPCG) + sparse.linalg.eigen --- Sparse Eigenvalue Solvers + sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned + Conjugate Gradient Method (LOBPCG) + spatial --- Spatial data structures and algorithms + special --- Special functions + stats --- Statistical Functions + +Utility tools +------------- +:: + + test --- Run scipy unittests + show_config --- Show scipy build configuration + show_numpy_config --- Show numpy build configuration + __version__ --- Scipy version string + __numpy_version__ --- Numpy version string \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.spatial.md b/src/test/pythonFiles/markdown/scipy.spatial.md new file mode 100644 index 000000000000..8b6c7d7d5c51 --- /dev/null +++ b/src/test/pythonFiles/markdown/scipy.spatial.md @@ -0,0 +1,70 @@ +### Spatial algorithms and data structures (**mod**`scipy.spatial`) + + +### Nearest-neighbor Queries +```html + KDTree -- class for efficient nearest-neighbor queries + cKDTree -- class for efficient nearest-neighbor queries (faster impl.) 
+ distance -- module containing many different distance measures + Rectangle + +``` +### Delaunay Triangulation, Convex Hulls and Voronoi Diagrams +```html + Delaunay -- compute Delaunay triangulation of input points + ConvexHull -- compute a convex hull for input points + Voronoi -- compute a Voronoi diagram hull from input points + SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere + HalfspaceIntersection -- compute the intersection points of input halfspaces + +``` +### Plotting Helpers +```html + delaunay_plot_2d -- plot 2-D triangulation + convex_hull_plot_2d -- plot 2-D convex hull + voronoi_plot_2d -- plot 2-D voronoi diagram + +``` +### Simplex representation +The simplices (triangles, tetrahedra, ...) appearing in the Delaunay +tesselation (N-dim simplices), convex hull facets, and Voronoi ridges +(N-1 dim simplices) are represented in the following scheme: +```html + tess = Delaunay(points) + hull = ConvexHull(points) + voro = Voronoi(points) + + # coordinates of the j-th vertex of the i-th simplex + tess.points[tess.simplices[i, j], :] # tesselation element + hull.points[hull.simplices[i, j], :] # convex hull facet + voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells + +``` +For Delaunay triangulations and convex hulls, the neighborhood +structure of the simplices satisfies the condition: + + `tess.neighbors[i,j]` is the neighboring simplex of the i-th + simplex, opposite to the j-vertex. It is -1 in case of no + neighbor. + +Convex hull facets also define a hyperplane equation: +```html + (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0 + +``` +Similar hyperplane equations for the Delaunay triangulation correspond +to the convex hull facets on the corresponding N+1 dimensional +paraboloid. + +The Delaunay triangulation objects offer a method for locating the +simplex containing a given point, and barycentric coordinate +computations. + +#### Functions +```html + tsearch + distance_matrix + minkowski_distance + minkowski_distance_p + procrustes +``` \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.spatial.pydoc b/src/test/pythonFiles/markdown/scipy.spatial.pydoc new file mode 100644 index 000000000000..1613b94384b7 --- /dev/null +++ b/src/test/pythonFiles/markdown/scipy.spatial.pydoc @@ -0,0 +1,86 @@ +============================================================= +Spatial algorithms and data structures (:mod:`scipy.spatial`) +============================================================= + +.. currentmodule:: scipy.spatial + +Nearest-neighbor Queries +======================== +.. autosummary:: + :toctree: generated/ + + KDTree -- class for efficient nearest-neighbor queries + cKDTree -- class for efficient nearest-neighbor queries (faster impl.) + distance -- module containing many different distance measures + Rectangle + +Delaunay Triangulation, Convex Hulls and Voronoi Diagrams +========================================================= + +.. autosummary:: + :toctree: generated/ + + Delaunay -- compute Delaunay triangulation of input points + ConvexHull -- compute a convex hull for input points + Voronoi -- compute a Voronoi diagram hull from input points + SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere + HalfspaceIntersection -- compute the intersection points of input halfspaces + +Plotting Helpers +================ + +.. 
autosummary:: + :toctree: generated/ + + delaunay_plot_2d -- plot 2-D triangulation + convex_hull_plot_2d -- plot 2-D convex hull + voronoi_plot_2d -- plot 2-D voronoi diagram + +.. seealso:: :ref:`Tutorial ` + + +Simplex representation +====================== +The simplices (triangles, tetrahedra, ...) appearing in the Delaunay +tesselation (N-dim simplices), convex hull facets, and Voronoi ridges +(N-1 dim simplices) are represented in the following scheme:: + + tess = Delaunay(points) + hull = ConvexHull(points) + voro = Voronoi(points) + + # coordinates of the j-th vertex of the i-th simplex + tess.points[tess.simplices[i, j], :] # tesselation element + hull.points[hull.simplices[i, j], :] # convex hull facet + voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells + +For Delaunay triangulations and convex hulls, the neighborhood +structure of the simplices satisfies the condition: + + ``tess.neighbors[i,j]`` is the neighboring simplex of the i-th + simplex, opposite to the j-vertex. It is -1 in case of no + neighbor. + +Convex hull facets also define a hyperplane equation:: + + (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0 + +Similar hyperplane equations for the Delaunay triangulation correspond +to the convex hull facets on the corresponding N+1 dimensional +paraboloid. + +The Delaunay triangulation objects offer a method for locating the +simplex containing a given point, and barycentric coordinate +computations. + +Functions +--------- + +.. autosummary:: + :toctree: generated/ + + tsearch + distance_matrix + minkowski_distance + minkowski_distance_p + procrustes \ No newline at end of file From bab423966465f280325d64684cb7277964ab716c Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Feb 2018 14:36:38 -0800 Subject: [PATCH 041/103] Code heuristics --- .../common/markdown/restTextConverter.ts | 42 +++++++++++++------ src/client/providers/itemInfoSource.ts | 3 +- src/test/markdown/restTextConverter.test.ts | 1 + src/test/pythonFiles/markdown/anydbm.md | 36 ++++++++++++++++ src/test/pythonFiles/markdown/anydbm.pydoc | 33 +++++++++++++++ 5 files changed, 102 insertions(+), 13 deletions(-) create mode 100644 src/test/pythonFiles/markdown/anydbm.md create mode 100644 src/test/pythonFiles/markdown/anydbm.pydoc diff --git a/src/client/common/markdown/restTextConverter.ts b/src/client/common/markdown/restTextConverter.ts index 485801a35f49..104230f273a1 100644 --- a/src/client/common/markdown/restTextConverter.ts +++ b/src/client/common/markdown/restTextConverter.ts @@ -10,19 +10,17 @@ export class RestTextConverter { private md: string[] = []; // tslint:disable-next-line:cyclomatic-complexity - public toMarkdown(docstring: string): string { + public toMarkdown(docstring: string, force?: boolean): string { // Translates reStructruredText (Python doc syntax) to markdown. // It only translates as much as needed to display tooltips // and documentation in the completion list. // See https://en.wikipedia.org/wiki/ReStructuredText - // Determine if this is actually a reStructruredText. - if (docstring.indexOf('::') < 0 && docstring.indexOf('..')) { - // If documentation contains markdown symbols such as ** (power of) in code, escape them. 
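+        // 'force' lets callers (e.g. hover tooltips with no signature) convert
+        // even when the heuristics below find no reST markup.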
+ if (!force && !this.shouldConvert(docstring)) { return this.escapeMarkdown(docstring); } - const result = this.transformLines(docstring); + const result = this.transformLines(docstring); this.inPreBlock = this.inPreBlock = false; this.md = []; @@ -33,7 +31,7 @@ export class RestTextConverter { // Not complete escape list so it does not interfere // with subsequent code highlighting (see above). return text - .replace(/\\/g, '\\\\') + .replace(/\#/g, '\\#') .replace(/\*/g, '\\*') .replace(/\_/g, '\\_') .replace(/\{/g, '\\{') @@ -42,10 +40,19 @@ export class RestTextConverter { .replace(/\]/g, '\\]') .replace(/\(/g, '\\(') .replace(/\)/g, '\\)') - .replace(/\#/g, '\\#') .replace(/\+/g, '\\+') - .replace(/\-/g, '\\-') - .replace(/\!/g, '\\!'); + .replace(/\-/g, '\\+'); + } + + private shouldConvert(docstring: string): boolean { + // heuristics + if (docstring.indexOf('::') >= 0 || docstring.indexOf('..') >= 0) { + return true; + } + if (docstring.indexOf('===') >= 0 || docstring.indexOf('---') >= 0) { + return true; + } + return false; } // tslint:disable-next-line:cyclomatic-complexity @@ -59,6 +66,13 @@ export class RestTextConverter { continue; } + if (!this.inPreBlock) { + // Anything indented is considered to be preformatted. + if (line.length > 0 && isWhiteSpace(line.charCodeAt(0))) { + this.startPreformattedBlock(line); + } + } + if (this.handleCodeBlock(line)) { continue; } @@ -150,7 +164,7 @@ export class RestTextConverter { this.md.push(line.substring(0, line.length - 1)); } - this.startPreformattedBlock(); + this.startPreformattedBlock(line); return true; } @@ -163,13 +177,17 @@ export class RestTextConverter { } } - private startPreformattedBlock(): void { + private startPreformattedBlock(line: string): void { // Remove previous empty line so we avoid double empties. this.tryRemovePrecedingEmptyLine(); // Lie about the language since we don't want preformatted text // to be colorized as Python. HTML is more 'appropriate' as it does // not colorize -- or + or keywords like 'from'. 
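+        // If the block contains '# ' it most likely holds commented Python
+        // samples, so it is tagged as python below and keeps syntax colors.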
- this.md.push('```html'); + if (line.indexOf('# ') >= 0) { + this.md.push('```python'); + } else { + this.md.push('```html'); + } this.inPreBlock = true; } diff --git a/src/client/providers/itemInfoSource.ts b/src/client/providers/itemInfoSource.ts index db147dbfd431..fc02d8097464 100644 --- a/src/client/providers/itemInfoSource.ts +++ b/src/client/providers/itemInfoSource.ts @@ -108,7 +108,8 @@ export class ItemInfoSource { if (signature.length > 0) { tooltip = tooltip.appendMarkdown(['```python', signature, '```', EOL].join(EOL)); } - const description = this.textConverter.toMarkdown(lines.join(EOL)); + + const description = this.textConverter.toMarkdown(lines.join(EOL), signature.length === 0); tooltip = tooltip.appendMarkdown(description); const documentation = this.textConverter.toMarkdown(dnd[1]); // Used only in completion list diff --git a/src/test/markdown/restTextConverter.test.ts b/src/test/markdown/restTextConverter.test.ts index ac649b912081..e40898e9dd6a 100644 --- a/src/test/markdown/restTextConverter.test.ts +++ b/src/test/markdown/restTextConverter.test.ts @@ -39,4 +39,5 @@ suite('Hover - RestTextConverter', () => { test('scipy', async () => await testConversion('scipy')); test('scipy.spatial', async () => await testConversion('scipy.spatial')); test('scipy.spatial.distance', async () => await testConversion('scipy.spatial.distance')); + test('anydbm', async () => await testConversion('anydbm')); }); diff --git a/src/test/pythonFiles/markdown/anydbm.md b/src/test/pythonFiles/markdown/anydbm.md new file mode 100644 index 000000000000..a86897871374 --- /dev/null +++ b/src/test/pythonFiles/markdown/anydbm.md @@ -0,0 +1,36 @@ +Generic interface to all dbm clones. + +Instead of +```html + import dbm + d = dbm.open(file, 'w', 0666) + +``` +use +```html + import anydbm + d = anydbm.open(file, 'w') + +``` +The returned object is a dbhash, gdbm, dbm or dumbdbm object, +dependent on the type of database being opened (determined by whichdb +module) in the case of an existing dbm. If the dbm does not exist and +the create or new flag ('c' or 'n') was specified, the dbm type will +be determined by the availability of the modules (tested in the above +order). + +It has the following interface (key and data are strings): +```python + d[key] = data # store data at key (may override data at + # existing key) + data = d[key] # retrieve data at key (raise KeyError if no + # such key) + del d[key] # delete data stored at key (raises KeyError + # if no such key) + flag = key in d # true if the key exists + list = d.keys() # return a list of all existing keys (slow!) + +``` +Future versions may change the order in which implementations are +tested for existence, and add interfaces to other dbm-like +implementations. \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/anydbm.pydoc b/src/test/pythonFiles/markdown/anydbm.pydoc new file mode 100644 index 000000000000..2d46b5881789 --- /dev/null +++ b/src/test/pythonFiles/markdown/anydbm.pydoc @@ -0,0 +1,33 @@ +Generic interface to all dbm clones. + +Instead of + + import dbm + d = dbm.open(file, 'w', 0666) + +use + + import anydbm + d = anydbm.open(file, 'w') + +The returned object is a dbhash, gdbm, dbm or dumbdbm object, +dependent on the type of database being opened (determined by whichdb +module) in the case of an existing dbm. If the dbm does not exist and +the create or new flag ('c' or 'n') was specified, the dbm type will +be determined by the availability of the modules (tested in the above +order). 
+ +It has the following interface (key and data are strings): + + d[key] = data # store data at key (may override data at + # existing key) + data = d[key] # retrieve data at key (raise KeyError if no + # such key) + del d[key] # delete data stored at key (raises KeyError + # if no such key) + flag = key in d # true if the key exists + list = d.keys() # return a list of all existing keys (slow!) + +Future versions may change the order in which implementations are +tested for existence, and add interfaces to other dbm-like +implementations. \ No newline at end of file From 2a3020157a271ef7de71663d62260e4f898a06bf Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Feb 2018 15:05:03 -0800 Subject: [PATCH 042/103] Baselines --- src/test/language/tokenizer.test.ts | 7 +++++++ src/test/markdown/restTextConverter.test.ts | 2 +- src/test/pythonFiles/markdown/scipy.spatial.md | 3 ++- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts index 727ce969dd09..86deb9282249 100644 --- a/src/test/language/tokenizer.test.ts +++ b/src/test/language/tokenizer.test.ts @@ -76,4 +76,11 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(i).type, TokenType.Comment); } }); + test('Unknown token', async () => { + const t = new Tokenizer(); + const tokens = t.tokenize('.'); + assert.equal(tokens.count, 1); + + assert.equal(tokens.getItemAt(0).type, TokenType.Unknown); + }); }); diff --git a/src/test/markdown/restTextConverter.test.ts b/src/test/markdown/restTextConverter.test.ts index e40898e9dd6a..7b2f9a97cdc8 100644 --- a/src/test/markdown/restTextConverter.test.ts +++ b/src/test/markdown/restTextConverter.test.ts @@ -29,7 +29,7 @@ async function testConversion(fileName: string): Promise { const cvt = new RestTextConverter(); const file = path.join(srcPythoFilesPath, fileName); const source = await fs.readFile(`${file}.pydoc`, 'utf8'); - const actual = cvt.toMarkdown(source); + const actual = cvt.toMarkdown(source, true); const expected = await fs.readFile(`${file}.md`, 'utf8'); compareFiles(expected, actual); } diff --git a/src/test/pythonFiles/markdown/scipy.spatial.md b/src/test/pythonFiles/markdown/scipy.spatial.md index 8b6c7d7d5c51..3584e78f1bbc 100644 --- a/src/test/pythonFiles/markdown/scipy.spatial.md +++ b/src/test/pythonFiles/markdown/scipy.spatial.md @@ -42,11 +42,12 @@ tesselation (N-dim simplices), convex hull facets, and Voronoi ridges ``` For Delaunay triangulations and convex hulls, the neighborhood structure of the simplices satisfies the condition: - +```html `tess.neighbors[i,j]` is the neighboring simplex of the i-th simplex, opposite to the j-vertex. It is -1 in case of no neighbor. 
+```
 Convex hull facets also define a hyperplane equation:
 ```html
 (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0

From e430ef820ea5253447a06937f0f35f2b06d85c9e Mon Sep 17 00:00:00 2001
From: MikhailArkhipov
Date: Tue, 6 Feb 2018 15:42:16 -0800
Subject: [PATCH 043/103] HTML handling

---
 .../common/markdown/restTextConverter.ts | 30 +++-
 src/test/markdown/restTextConverter.test.ts | 1 +
 src/test/pythonFiles/markdown/aifc.md | 144 ++++++++++++++++++
 src/test/pythonFiles/markdown/aifc.pydoc | 134 ++++++++++++++++
 4 files changed, 304 insertions(+), 5 deletions(-)
 create mode 100644 src/test/pythonFiles/markdown/aifc.md
 create mode 100644 src/test/pythonFiles/markdown/aifc.pydoc

diff --git a/src/client/common/markdown/restTextConverter.ts b/src/client/common/markdown/restTextConverter.ts
index 104230f273a1..8e2bce47a411 100644
--- a/src/client/common/markdown/restTextConverter.ts
+++ b/src/client/common/markdown/restTextConverter.ts
@@ -2,6 +2,8 @@
 // Licensed under the MIT License.

 import { EOL } from 'os';
+// tslint:disable-next-line:import-name
+import Char from 'typescript-char';
 import { isWhiteSpace } from '../../language/characters';

 export class RestTextConverter {
@@ -45,11 +47,19 @@ export class RestTextConverter {
 }

 private shouldConvert(docstring: string): boolean {
- // heuristics
- if (docstring.indexOf('::') >= 0 || docstring.indexOf('..') >= 0) {
- return true;
+ // Heuristics that determine if string should be converted
+ // to markdown or just escaped.
+
+ // :: at the end of a string
+ const doubleColon = docstring.indexOf('::');
+ if (doubleColon >= 0 && doubleColon < docstring.length - 2) {
+ const ch = docstring.charCodeAt(doubleColon + 2);
+ if (ch === Char.LineFeed || ch === Char.CarriageReturn) {
+ return true;
+ }
 }
- if (docstring.indexOf('===') >= 0 || docstring.indexOf('---') >= 0) {
+ // Section headers or lists
+ if (docstring.indexOf('===') >= 0 || docstring.indexOf('---') >= 0 || docstring.indexOf('.. ') >= 0) {
 return true;
 }
 return false;
@@ -118,13 +128,23 @@ export class RestTextConverter {
 continue; // Avoid more than one empty line in a row.
 }

- this.md.push(line);
+ this.addLine(line);
 }

 this.tryEndCodePreBlocks();
 return this.md.join(EOL).trim();
 }

+ private addLine(line: string): void {
+ // Since we use HTML blocks as preformatted text
+ // make sure we drop angle brackets since otherwise
+ // they will render as tags and attributes
+ if (this.inPreBlock) {
+ line = line.replace(/</g, '').replace(/>/g, '');
+ }
+ this.md.push(line);
+ }
+
 private handleCodeBlock(line: string): boolean {
 if (!line.startsWith('```')) {
 return false;
diff --git a/src/test/markdown/restTextConverter.test.ts b/src/test/markdown/restTextConverter.test.ts
index 7b2f9a97cdc8..b3284f4727e7 100644
--- a/src/test/markdown/restTextConverter.test.ts
+++ b/src/test/markdown/restTextConverter.test.ts
@@ -40,4 +40,5 @@ suite('Hover - RestTextConverter', () => {
 test('scipy.spatial', async () => await testConversion('scipy.spatial'));
 test('scipy.spatial.distance', async () => await testConversion('scipy.spatial.distance'));
 test('anydbm', async () => await testConversion('anydbm'));
+ test('aifc', async () => await testConversion('aifc'));
 });
diff --git a/src/test/pythonFiles/markdown/aifc.md b/src/test/pythonFiles/markdown/aifc.md
new file mode 100644
index 000000000000..a2f120c6a3b2
--- /dev/null
+++ b/src/test/pythonFiles/markdown/aifc.md
@@ -0,0 +1,144 @@
+Stuff to parse AIFF-C and AIFF files.
+ +Unless explicitly stated otherwise, the description below is true +both for AIFF-C files and AIFF files. + +An AIFF-C file has the following structure. +```html + +-----------------+ + | FORM | + +-----------------+ + | size | + +----+------------+ + | | AIFC | + | +------------+ + | | chunks | + | | . | + | | . | + | | . | + +----+------------+ + +``` +An AIFF file has the string "AIFF" instead of "AIFC". + +A chunk consists of an identifier (4 bytes) followed by a size (4 bytes, +big endian order), followed by the data. The size field does not include +the size of the 8 byte header. + +The following chunk types are recognized. +```html + FVER + version number of AIFF-C defining document (AIFF-C only). + MARK + # of markers (2 bytes) + list of markers: + marker ID (2 bytes, must be 0) + position (4 bytes) + marker name ("pstring") + COMM + # of channels (2 bytes) + # of sound frames (4 bytes) + size of the samples (2 bytes) + sampling frequency (10 bytes, IEEE 80-bit extended + floating point) + in AIFF-C files only: + compression type (4 bytes) + human-readable version of compression type ("pstring") + SSND + offset (4 bytes, not used by this program) + blocksize (4 bytes, not used by this program) + sound data + +``` +A pstring consists of 1 byte length, a string of characters, and 0 or 1 +byte pad to make the total length even. + +Usage. + +Reading AIFF files: +```html + f = aifc.open(file, 'r') +``` +where file is either the name of a file or an open file pointer. +The open file pointer must have methods read(), seek(), and close(). +In some types of audio files, if the setpos() method is not used, +the seek() method is not necessary. + +This returns an instance of a class with the following public methods: +```html + getnchannels() -- returns number of audio channels (1 for + mono, 2 for stereo) + getsampwidth() -- returns sample width in bytes + getframerate() -- returns sampling frequency + getnframes() -- returns number of audio frames + getcomptype() -- returns compression type ('NONE' for AIFF files) + getcompname() -- returns human-readable version of + compression type ('not compressed' for AIFF files) + getparams() -- returns a tuple consisting of all of the + above in the above order + getmarkers() -- get the list of marks in the audio file or None + if there are no marks + getmark(id) -- get mark with the specified id (raises an error + if the mark does not exist) + readframes(n) -- returns at most n frames of audio + rewind() -- rewind to the beginning of the audio stream + setpos(pos) -- seek to the specified position + tell() -- return the current position + close() -- close the instance (make it unusable) +``` +The position returned by tell(), the position given to setpos() and +the position of marks are all compatible and have nothing to do with +the actual position in the file. +The close() method is called automatically when the class instance +is destroyed. + +Writing AIFF files: +```html + f = aifc.open(file, 'w') +``` +where file is either the name of a file or an open file pointer. +The open file pointer must have methods write(), tell(), seek(), and +close(). 
+
+This returns an instance of a class with the following public methods:
+```html
+ aiff() -- create an AIFF file (AIFF-C default)
+ aifc() -- create an AIFF-C file
+ setnchannels(n) -- set the number of channels
+ setsampwidth(n) -- set the sample width
+ setframerate(n) -- set the frame rate
+ setnframes(n) -- set the number of frames
+ setcomptype(type, name)
+ -- set the compression type and the
+ human-readable compression type
+ setparams(tuple)
+ -- set all parameters at once
+ setmark(id, pos, name)
+ -- add specified mark to the list of marks
+ tell() -- return current position in output file (useful
+ in combination with setmark())
+ writeframesraw(data)
+ -- write audio frames without pathing up the
+ file header
+ writeframes(data)
+ -- write audio frames and patch up the file header
+ close() -- patch up the file header and close the
+ output file
+```
+You should set the parameters before the first writeframesraw or
+writeframes. The total number of frames does not need to be set,
+but when it is set to the correct value, the header does not have to
+be patched up.
+It is best to first set all parameters, perhaps possibly the
+compression type, and then write audio frames using writeframesraw.
+When all frames have been written, either call writeframes('') or
+close() to patch up the sizes in the header.
+Marks can be added anytime. If there are any marks, you must call
+close() after all frames have been written.
+The close() method is called automatically when the class instance
+is destroyed.
+
+When a file is opened with the extension '.aiff', an AIFF file is
+written, otherwise an AIFF-C file is written. This default can be
+changed by calling aiff() or aifc() before the first writeframes or
+writeframesraw.
\ No newline at end of file
diff --git a/src/test/pythonFiles/markdown/aifc.pydoc b/src/test/pythonFiles/markdown/aifc.pydoc
new file mode 100644
index 000000000000..a4cc346d5531
--- /dev/null
+++ b/src/test/pythonFiles/markdown/aifc.pydoc
@@ -0,0 +1,134 @@
+Stuff to parse AIFF-C and AIFF files.
+
+Unless explicitly stated otherwise, the description below is true
+both for AIFF-C files and AIFF files.
+
+An AIFF-C file has the following structure.
+
+ +-----------------+
+ | FORM |
+ +-----------------+
+ | <size> |
+ +----+------------+
+ | | AIFC |
+ | +------------+
+ | | <chunks> |
+ | | . |
+ | | . |
+ | | . |
+ +----+------------+
+
+An AIFF file has the string "AIFF" instead of "AIFC".
+
+A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
+big endian order), followed by the data. The size field does not include
+the size of the 8 byte header.
+
+The following chunk types are recognized.
+
+ FVER
+ <version number of AIFF-C defining document> (AIFF-C only).
+ MARK
+ <# of markers> (2 bytes)
+ list of markers:
+ <marker ID> (2 bytes, must be > 0)
+ <position> (4 bytes)
+ <marker name> ("pstring")
+ COMM
+ <# of channels> (2 bytes)
+ <# of sound frames> (4 bytes)
+ <size of the samples> (2 bytes)
+ <sampling frequency> (10 bytes, IEEE 80-bit extended
+ floating point)
+ in AIFF-C files only:
+ <compression type> (4 bytes)
+ <human-readable version of compression type> ("pstring")
+ SSND
+ <offset> (4 bytes, not used by this program)
+ <blocksize> (4 bytes, not used by this program)
+ <sound data>
+
+A pstring consists of 1 byte length, a string of characters, and 0 or 1
+byte pad to make the total length even.
+
+Usage.
+
+Reading AIFF files:
+ f = aifc.open(file, 'r')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods read(), seek(), and close().
+In some types of audio files, if the setpos() method is not used,
+the seek() method is not necessary.
+ +This returns an instance of a class with the following public methods: + getnchannels() -- returns number of audio channels (1 for + mono, 2 for stereo) + getsampwidth() -- returns sample width in bytes + getframerate() -- returns sampling frequency + getnframes() -- returns number of audio frames + getcomptype() -- returns compression type ('NONE' for AIFF files) + getcompname() -- returns human-readable version of + compression type ('not compressed' for AIFF files) + getparams() -- returns a tuple consisting of all of the + above in the above order + getmarkers() -- get the list of marks in the audio file or None + if there are no marks + getmark(id) -- get mark with the specified id (raises an error + if the mark does not exist) + readframes(n) -- returns at most n frames of audio + rewind() -- rewind to the beginning of the audio stream + setpos(pos) -- seek to the specified position + tell() -- return the current position + close() -- close the instance (make it unusable) +The position returned by tell(), the position given to setpos() and +the position of marks are all compatible and have nothing to do with +the actual position in the file. +The close() method is called automatically when the class instance +is destroyed. + +Writing AIFF files: + f = aifc.open(file, 'w') +where file is either the name of a file or an open file pointer. +The open file pointer must have methods write(), tell(), seek(), and +close(). + +This returns an instance of a class with the following public methods: + aiff() -- create an AIFF file (AIFF-C default) + aifc() -- create an AIFF-C file + setnchannels(n) -- set the number of channels + setsampwidth(n) -- set the sample width + setframerate(n) -- set the frame rate + setnframes(n) -- set the number of frames + setcomptype(type, name) + -- set the compression type and the + human-readable compression type + setparams(tuple) + -- set all parameters at once + setmark(id, pos, name) + -- add specified mark to the list of marks + tell() -- return current position in output file (useful + in combination with setmark()) + writeframesraw(data) + -- write audio frames without pathing up the + file header + writeframes(data) + -- write audio frames and patch up the file header + close() -- patch up the file header and close the + output file +You should set the parameters before the first writeframesraw or +writeframes. The total number of frames does not need to be set, +but when it is set to the correct value, the header does not have to +be patched up. +It is best to first set all parameters, perhaps possibly the +compression type, and then write audio frames using writeframesraw. +When all frames have been written, either call writeframes('') or +close() to patch up the sizes in the header. +Marks can be added anytime. If there are any marks, you must call +close() after all frames have been written. +The close() method is called automatically when the class instance +is destroyed. + +When a file is opened with the extension '.aiff', an AIFF file is +written, otherwise an AIFF-C file is written. This default can be +changed by calling aiff() or aifc() before the first writeframes or +writeframesraw. 
\ No newline at end of file From 1afa841a7ba08c1041442b26494d0f7b523946e0 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Feb 2018 16:03:22 -0800 Subject: [PATCH 044/103] Lists --- .../common/markdown/restTextConverter.ts | 46 ++++++++++++++++--- 1 file changed, 39 insertions(+), 7 deletions(-) diff --git a/src/client/common/markdown/restTextConverter.ts b/src/client/common/markdown/restTextConverter.ts index 8e2bce47a411..447acef789ff 100644 --- a/src/client/common/markdown/restTextConverter.ts +++ b/src/client/common/markdown/restTextConverter.ts @@ -4,7 +4,7 @@ import { EOL } from 'os'; // tslint:disable-next-line:import-name import Char from 'typescript-char'; -import { isWhiteSpace } from '../../language/characters'; +import { isDecimal, isWhiteSpace } from '../../language/characters'; export class RestTextConverter { private inPreBlock = false; @@ -76,12 +76,7 @@ export class RestTextConverter { continue; } - if (!this.inPreBlock) { - // Anything indented is considered to be preformatted. - if (line.length > 0 && isWhiteSpace(line.charCodeAt(0))) { - this.startPreformattedBlock(line); - } - } + this.checkPreContent(lines, i); if (this.handleCodeBlock(line)) { continue; @@ -145,6 +140,43 @@ export class RestTextConverter { this.md.push(line); } + private checkPreContent(lines: string[], i: number): void { + if (this.inPreBlock) { + return; + } + // Indented is considered to be preformatted except + // when previous line is indented or begins list item. + const line = lines[i]; + if (line.length === 0 || !isWhiteSpace(line.charCodeAt(0))) { + return; + } + + let prevLine = i > 0 ? lines[i - 1] : undefined; + if (!prevLine) { + return; + } + if (prevLine.length === 0) { + this.startPreformattedBlock(line); + return; + } + if (isWhiteSpace(prevLine.charCodeAt(0))) { + return; + } + + prevLine = prevLine.trim(); + if (prevLine.length === 0) { + this.startPreformattedBlock(line); + return; + } + + const ch = prevLine.charCodeAt(0); + if (ch === Char.Asterisk || ch === Char.Hyphen || isDecimal(ch)) { + return; + } + + this.startPreformattedBlock(line); + } + private handleCodeBlock(line: string): boolean { if (!line.startsWith('```')) { return false; From 6bffb0797bf7f20197253ce08671d1ef3ad11393 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 7 Feb 2018 13:19:52 -0800 Subject: [PATCH 045/103] State machine --- .../common/markdown/restTextConverter.ts | 236 +++++++++--------- src/test/markdown/restTextConverter.test.ts | 3 +- src/test/pythonFiles/markdown/aifc.md | 154 ++++++------ src/test/pythonFiles/markdown/anydbm.md | 29 +-- src/test/pythonFiles/markdown/astroid.md | 24 ++ src/test/pythonFiles/markdown/astroid.pydoc | 23 ++ src/test/pythonFiles/markdown/scipy.md | 61 +++-- .../markdown/scipy.spatial.distance.md | 62 +++-- .../pythonFiles/markdown/scipy.spatial.md | 64 +++-- 9 files changed, 346 insertions(+), 310 deletions(-) create mode 100644 src/test/pythonFiles/markdown/astroid.md create mode 100644 src/test/pythonFiles/markdown/astroid.pydoc diff --git a/src/client/common/markdown/restTextConverter.ts b/src/client/common/markdown/restTextConverter.ts index 447acef789ff..da00da8e2fe3 100644 --- a/src/client/common/markdown/restTextConverter.ts +++ b/src/client/common/markdown/restTextConverter.ts @@ -6,9 +6,14 @@ import { EOL } from 'os'; import Char from 'typescript-char'; import { isDecimal, isWhiteSpace } from '../../language/characters'; +enum State { + Default, + Preformatted, + Code +} + export class RestTextConverter { - private inPreBlock = false; - 
private inCodeBlock = false; + private state: State = State.Default; private md: string[] = []; // tslint:disable-next-line:cyclomatic-complexity @@ -23,7 +28,7 @@ export class RestTextConverter { } const result = this.transformLines(docstring); - this.inPreBlock = this.inPreBlock = false; + this.state = State.Default; this.md = []; return result; @@ -65,128 +70,132 @@ export class RestTextConverter { return false; } - // tslint:disable-next-line:cyclomatic-complexity private transformLines(docstring: string): string { const lines = docstring.split(/\r?\n/); for (let i = 0; i < lines.length; i += 1) { - let line = lines[i]; - + const line = lines[i]; // Avoid leading empty lines if (this.md.length === 0 && line.length === 0) { continue; } - this.checkPreContent(lines, i); - - if (this.handleCodeBlock(line)) { - continue; + switch (this.state) { + case State.Default: + i += this.inDefaultState(lines, i); + break; + case State.Preformatted: + i += this.inPreformattedState(lines, i); + break; + case State.Code: + this.inCodeState(line); + break; + default: + break; } + } - if (this.inPreBlock) { - // Preformatted block terminates by a line without leading - // whitespace or any special line like ..ABC::. - if (line.length > 0 && !isWhiteSpace(line.charCodeAt(0))) { - this.endPreformattedBlock(); - } - } + this.endCodeBlock(); + this.endPreformattedBlock(); - if (this.handleSectionHeader(lines, i)) { - i += 1; // Eat line with === or --- - continue; - } + return this.md.join(EOL).trim(); + } - if (line.indexOf('generated/') >= 0) { - continue; // ignore generated content. - } - if (line.startsWith('===') || line.startsWith('---')) { - continue; // Eat standalone === or --- lines. - } + private inDefaultState(lines: string[], i: number): number { + let line = lines[i]; + if (line.startsWith('```')) { + this.startCodeBlock(); + return 0; + } - if (this.handleDoubleColon(line)) { - continue; - } - if (line.startsWith('..') && line.indexOf('::') > 0) { - // Ignore lines likes .. sectionauthor:: John Doe. - continue; - } + if (line.startsWith('===') || line.startsWith('---')) { + return 0; // Eat standalone === or --- lines. + } + if (this.handleDoubleColon(line)) { + return 0; + } + if (this.isIgnorable(line)) { + return 0; + } - line = this.convertEmphasis(line); - line = line.replace(/``/g, '`'); // Convert double backticks to single. + if (this.handleSectionHeader(lines, i)) { + return 1; // Eat line with === or --- + } - if (line.length > 0 && isWhiteSpace(line.charCodeAt(0))) { - // Keep hard line breaks for the pre-indented content. - line = ` ${line} `; - } + const result = this.checkPreContent(lines, i); + if (this.state !== State.Default) { + return result; // Handle line in the new state + } - const prevLine = this.md.length > 0 ? this.md[this.md.length - 1] : undefined; - if (line.length === 0 && prevLine && (prevLine.length === 0 || prevLine.startsWith('```'))) { - continue; // Avoid more than one empty line in a row. - } + line = this.convertEmphasis(line); + line = line.replace(/``/g, '`'); // Convert double backticks to single. + this.md.push(line); + + return 0; + } - this.addLine(line); + private inPreformattedState(lines: string[], i: number): number { + let line = lines[i]; + if (this.isIgnorable(line)) { + return 0; + } + // Preformatted block terminates by a line without leading whitespace. 
+ if (line.length > 0 && !isWhiteSpace(line.charCodeAt(0)) && !this.isListItem(line)) {
+ this.endPreformattedBlock();
+ return -1;
 }

- this.tryEndCodePreBlocks();
- return this.md.join(EOL).trim();
- }
+ const prevLine = this.md.length > 0 ? this.md[this.md.length - 1] : undefined;
+ if (line.length === 0 && prevLine && (prevLine.length === 0 || prevLine.startsWith('```'))) {
+ return 0; // Avoid more than one empty line in a row.
+ }

- private addLine(line: string): void {
 // Since we use HTML blocks as preformatted text
 // make sure we drop angle brackets since otherwise
 // they will render as tags and attributes
- if (this.inPreBlock) {
- line = line.replace(/</g, '').replace(/>/g, '');
- }
- this.md.push(line);
+ line = line.replace(/</g, ' ').replace(/>/g, ' ');
+ line = line.replace(/``/g, '`'); // Convert double backticks to single.
+ // Keep hard line breaks for the preformatted content
+ this.md.push(`${line} `);
+ return 0;
 }

+ private inCodeState(line: string): void {
+ const prevLine = this.md.length > 0 ? this.md[this.md.length - 1] : undefined;
+ if (line.length === 0 && prevLine && (prevLine.length === 0 || prevLine.startsWith('```'))) {
+ return; // Avoid more than one empty line in a row.
 }

- private checkPreContent(lines: string[], i: number): void {
- if (this.inPreBlock) {
- return;
+ if (line.startsWith('```')) {
+ this.endCodeBlock();
+ } else {
+ this.md.push(line);
 }
- // Indented is considered to be preformatted except
- // when previous line is indented or begins list item.
- const line = lines[i];
- if (line.length === 0 || !isWhiteSpace(line.charCodeAt(0))) {
- return;
+ }

- let prevLine = i > 0 ? lines[i - 1] : undefined;
- if (!prevLine) {
- return;
- }
- if (prevLine.length === 0) {
- this.startPreformattedBlock(line);
- return;
- }
- if (isWhiteSpace(prevLine.charCodeAt(0))) {
- return;
+ private isIgnorable(line: string): boolean {
+ if (line.indexOf('generated/') >= 0) {
+ return true; // Drop generated content.
 }
-
- prevLine = prevLine.trim();
- if (prevLine.length === 0) {
- this.startPreformattedBlock(line);
- return;
+ const trimmed = line.trim();
+ if (trimmed.startsWith('..') && trimmed.indexOf('::') > 0) {
+ // Ignore lines likes .. sectionauthor:: John Doe.
+ return true;
 }
-
- const ch = prevLine.charCodeAt(0);
- if (ch === Char.Asterisk || ch === Char.Hyphen || isDecimal(ch)) {
- return;
- }
-
- this.startPreformattedBlock(line);
+ return false;
 }

- private handleCodeBlock(line: string): boolean {
- if (!line.startsWith('```')) {
- return false;
+ private checkPreContent(lines: string[], i: number): number {
+ const line = lines[i];
+ if (i === 0 || line.trim().length === 0) {
+ return 0;
 }
- if (this.inCodeBlock) {
- this.endCodeBlock();
- } else {
- this.startCodeBlock();
+
+ if (!isWhiteSpace(line.charCodeAt(0)) && !this.isListItem(line)) {
+ return 0; // regular line, nothing to do here.
 }
- return true;
+ // Indented content is considered to be preformatted.
+ this.startPreformattedBlock();
+ return -1;
 }

 private handleSectionHeader(lines: string[], i: number): boolean {
@@ -216,56 +225,45 @@ export class RestTextConverter {
 this.md.push(line.substring(0, line.length - 1));
 }

- this.startPreformattedBlock(line);
+ this.startPreformattedBlock();
 return true;
 }

- private tryEndCodePreBlocks(): void {
- if (this.inCodeBlock) {
- this.endCodeBlock();
- }
- if (this.inPreBlock) {
- this.endPreformattedBlock();
- }
- }
-
- private startPreformattedBlock(line: string): void {
+ private startPreformattedBlock(): void {
 // Remove previous empty line so we avoid double empties.
- this.tryRemovePrecedingEmptyLine(); + this.tryRemovePrecedingEmptyLines(); // Lie about the language since we don't want preformatted text // to be colorized as Python. HTML is more 'appropriate' as it does // not colorize -- or + or keywords like 'from'. - if (line.indexOf('# ') >= 0) { - this.md.push('```python'); - } else { - this.md.push('```html'); - } - this.inPreBlock = true; + this.md.push('```html'); + this.state = State.Preformatted; } private endPreformattedBlock(): void { - if (this.inPreBlock) { + if (this.state === State.Preformatted) { + this.tryRemovePrecedingEmptyLines(); this.md.push('```'); - this.inPreBlock = false; + this.state = State.Default; } } private startCodeBlock(): void { // Remove previous empty line so we avoid double empties. - this.tryRemovePrecedingEmptyLine(); + this.tryRemovePrecedingEmptyLines(); this.md.push('```python'); - this.inCodeBlock = true; + this.state = State.Code; } private endCodeBlock(): void { - if (this.inCodeBlock) { + if (this.state === State.Code) { + this.tryRemovePrecedingEmptyLines(); this.md.push('```'); - this.inCodeBlock = false; + this.state = State.Default; } } - private tryRemovePrecedingEmptyLine(): void { - if (this.md.length > 0 && this.md[this.md.length - 1].length === 0) { + private tryRemovePrecedingEmptyLines(): void { + while (this.md.length > 0 && this.md[this.md.length - 1].trim().length === 0) { this.md.pop(); } } @@ -273,4 +271,10 @@ export class RestTextConverter { private convertEmphasis(line: string): string { return line.replace(/\:([\w\W]+)\:/g, '**$1**'); // Convert :word: to **word**. } + + private isListItem(line: string): boolean { + const trimmed = line.trim(); + const ch = trimmed.length > 0 ? trimmed.charCodeAt(0) : 0; + return ch === Char.Asterisk || ch === Char.Hyphen || isDecimal(ch); + } } diff --git a/src/test/markdown/restTextConverter.test.ts b/src/test/markdown/restTextConverter.test.ts index b3284f4727e7..81b1ba5bbf12 100644 --- a/src/test/markdown/restTextConverter.test.ts +++ b/src/test/markdown/restTextConverter.test.ts @@ -15,7 +15,7 @@ function compareFiles(expectedContent: string, actualContent: string) { for (let i = 0; i < Math.min(expectedLines.length, actualLines.length); i += 1) { const e = expectedLines[i]; const a = actualLines[i]; - expect(a, `Difference at line ${i}`).to.be.equal(e); + expect(e, `Difference at line ${i}`).to.be.equal(a); } expect(actualLines.length, @@ -41,4 +41,5 @@ suite('Hover - RestTextConverter', () => { test('scipy.spatial.distance', async () => await testConversion('scipy.spatial.distance')); test('anydbm', async () => await testConversion('anydbm')); test('aifc', async () => await testConversion('aifc')); + test('astroid', async () => await testConversion('astroid')); }); diff --git a/src/test/pythonFiles/markdown/aifc.md b/src/test/pythonFiles/markdown/aifc.md index a2f120c6a3b2..fff22dece1e5 100644 --- a/src/test/pythonFiles/markdown/aifc.md +++ b/src/test/pythonFiles/markdown/aifc.md @@ -5,19 +5,18 @@ both for AIFF-C files and AIFF files. An AIFF-C file has the following structure. ```html - +-----------------+ - | FORM | - +-----------------+ - | size | - +----+------------+ - | | AIFC | - | +------------+ - | | chunks | - | | . | - | | . | - | | . | - +----+------------+ - + +-----------------+ + | FORM | + +-----------------+ + | size | + +----+------------+ + | | AIFC | + | +------------+ + | | chunks | + | | . | + | | . | + | | . | + +----+------------+ ``` An AIFF file has the string "AIFF" instead of "AIFC". 
@@ -27,28 +26,27 @@ the size of the 8 byte header. The following chunk types are recognized. ```html - FVER - version number of AIFF-C defining document (AIFF-C only). - MARK - # of markers (2 bytes) - list of markers: - marker ID (2 bytes, must be 0) - position (4 bytes) - marker name ("pstring") - COMM - # of channels (2 bytes) - # of sound frames (4 bytes) - size of the samples (2 bytes) - sampling frequency (10 bytes, IEEE 80-bit extended - floating point) - in AIFF-C files only: - compression type (4 bytes) - human-readable version of compression type ("pstring") - SSND - offset (4 bytes, not used by this program) - blocksize (4 bytes, not used by this program) - sound data - + FVER + version number of AIFF-C defining document (AIFF-C only). + MARK + # of markers (2 bytes) + list of markers: + marker ID (2 bytes, must be 0) + position (4 bytes) + marker name ("pstring") + COMM + # of channels (2 bytes) + # of sound frames (4 bytes) + size of the samples (2 bytes) + sampling frequency (10 bytes, IEEE 80-bit extended + floating point) + in AIFF-C files only: + compression type (4 bytes) + human-readable version of compression type ("pstring") + SSND + offset (4 bytes, not used by this program) + blocksize (4 bytes, not used by this program) + sound data ``` A pstring consists of 1 byte length, a string of characters, and 0 or 1 byte pad to make the total length even. @@ -57,7 +55,7 @@ Usage. Reading AIFF files: ```html - f = aifc.open(file, 'r') + f = aifc.open(file, 'r') ``` where file is either the name of a file or an open file pointer. The open file pointer must have methods read(), seek(), and close(). @@ -66,25 +64,25 @@ the seek() method is not necessary. This returns an instance of a class with the following public methods: ```html - getnchannels() -- returns number of audio channels (1 for - mono, 2 for stereo) - getsampwidth() -- returns sample width in bytes - getframerate() -- returns sampling frequency - getnframes() -- returns number of audio frames - getcomptype() -- returns compression type ('NONE' for AIFF files) - getcompname() -- returns human-readable version of - compression type ('not compressed' for AIFF files) - getparams() -- returns a tuple consisting of all of the - above in the above order - getmarkers() -- get the list of marks in the audio file or None - if there are no marks - getmark(id) -- get mark with the specified id (raises an error - if the mark does not exist) - readframes(n) -- returns at most n frames of audio - rewind() -- rewind to the beginning of the audio stream - setpos(pos) -- seek to the specified position - tell() -- return the current position - close() -- close the instance (make it unusable) + getnchannels() -- returns number of audio channels (1 for + mono, 2 for stereo) + getsampwidth() -- returns sample width in bytes + getframerate() -- returns sampling frequency + getnframes() -- returns number of audio frames + getcomptype() -- returns compression type ('NONE' for AIFF files) + getcompname() -- returns human-readable version of + compression type ('not compressed' for AIFF files) + getparams() -- returns a tuple consisting of all of the + above in the above order + getmarkers() -- get the list of marks in the audio file or None + if there are no marks + getmark(id) -- get mark with the specified id (raises an error + if the mark does not exist) + readframes(n) -- returns at most n frames of audio + rewind() -- rewind to the beginning of the audio stream + setpos(pos) -- seek to the specified position + tell() -- return the 
current position + close() -- close the instance (make it unusable) ``` The position returned by tell(), the position given to setpos() and the position of marks are all compatible and have nothing to do with @@ -94,7 +92,7 @@ is destroyed. Writing AIFF files: ```html - f = aifc.open(file, 'w') + f = aifc.open(file, 'w') ``` where file is either the name of a file or an open file pointer. The open file pointer must have methods write(), tell(), seek(), and @@ -102,28 +100,28 @@ close(). This returns an instance of a class with the following public methods: ```html - aiff() -- create an AIFF file (AIFF-C default) - aifc() -- create an AIFF-C file - setnchannels(n) -- set the number of channels - setsampwidth(n) -- set the sample width - setframerate(n) -- set the frame rate - setnframes(n) -- set the number of frames - setcomptype(type, name) - -- set the compression type and the - human-readable compression type - setparams(tuple) - -- set all parameters at once - setmark(id, pos, name) - -- add specified mark to the list of marks - tell() -- return current position in output file (useful - in combination with setmark()) - writeframesraw(data) - -- write audio frames without pathing up the - file header - writeframes(data) - -- write audio frames and patch up the file header - close() -- patch up the file header and close the - output file + aiff() -- create an AIFF file (AIFF-C default) + aifc() -- create an AIFF-C file + setnchannels(n) -- set the number of channels + setsampwidth(n) -- set the sample width + setframerate(n) -- set the frame rate + setnframes(n) -- set the number of frames + setcomptype(type, name) + -- set the compression type and the + human-readable compression type + setparams(tuple) + -- set all parameters at once + setmark(id, pos, name) + -- add specified mark to the list of marks + tell() -- return current position in output file (useful + in combination with setmark()) + writeframesraw(data) + -- write audio frames without pathing up the + file header + writeframes(data) + -- write audio frames and patch up the file header + close() -- patch up the file header and close the + output file ``` You should set the parameters before the first writeframesraw or writeframes. The total number of frames does not need to be set, diff --git a/src/test/pythonFiles/markdown/anydbm.md b/src/test/pythonFiles/markdown/anydbm.md index a86897871374..e5914dcbadde 100644 --- a/src/test/pythonFiles/markdown/anydbm.md +++ b/src/test/pythonFiles/markdown/anydbm.md @@ -2,15 +2,13 @@ Generic interface to all dbm clones. Instead of ```html - import dbm - d = dbm.open(file, 'w', 0666) - + import dbm + d = dbm.open(file, 'w', 0666) ``` use ```html - import anydbm - d = anydbm.open(file, 'w') - + import anydbm + d = anydbm.open(file, 'w') ``` The returned object is a dbhash, gdbm, dbm or dumbdbm object, dependent on the type of database being opened (determined by whichdb @@ -20,16 +18,15 @@ be determined by the availability of the modules (tested in the above order). It has the following interface (key and data are strings): -```python - d[key] = data # store data at key (may override data at - # existing key) - data = d[key] # retrieve data at key (raise KeyError if no - # such key) - del d[key] # delete data stored at key (raises KeyError - # if no such key) - flag = key in d # true if the key exists - list = d.keys() # return a list of all existing keys (slow!) 
- +```html + d[key] = data # store data at key (may override data at + # existing key) + data = d[key] # retrieve data at key (raise KeyError if no + # such key) + del d[key] # delete data stored at key (raises KeyError + # if no such key) + flag = key in d # true if the key exists + list = d.keys() # return a list of all existing keys (slow!) ``` Future versions may change the order in which implementations are tested for existence, and add interfaces to other dbm-like diff --git a/src/test/pythonFiles/markdown/astroid.md b/src/test/pythonFiles/markdown/astroid.md new file mode 100644 index 000000000000..d3c1bda813ee --- /dev/null +++ b/src/test/pythonFiles/markdown/astroid.md @@ -0,0 +1,24 @@ +Python Abstract Syntax Tree New Generation + +The aim of this module is to provide a common base representation of +python source code for projects such as pychecker, pyreverse, +pylint... Well, actually the development of this library is essentially +governed by pylint's needs. + +It extends class defined in the python's _ast module with some +additional methods and attributes. Instance attributes are added by a +builder object, which can either generate extended ast (let's call +them astroid ;) by visiting an existent ast tree or by inspecting living +object. Methods are added by monkey patching ast classes. + +Main modules are: +```html +* nodes and scoped_nodes for more information about methods and + attributes added to different node classes + +* the manager contains a high level object to get astroid trees from + source files and living objects. It maintains a cache of previously + constructed tree for quick access + +* builder contains the class responsible to build astroid trees +``` \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/astroid.pydoc b/src/test/pythonFiles/markdown/astroid.pydoc new file mode 100644 index 000000000000..84d58487ead5 --- /dev/null +++ b/src/test/pythonFiles/markdown/astroid.pydoc @@ -0,0 +1,23 @@ +Python Abstract Syntax Tree New Generation + +The aim of this module is to provide a common base representation of +python source code for projects such as pychecker, pyreverse, +pylint... Well, actually the development of this library is essentially +governed by pylint's needs. + +It extends class defined in the python's _ast module with some +additional methods and attributes. Instance attributes are added by a +builder object, which can either generate extended ast (let's call +them astroid ;) by visiting an existent ast tree or by inspecting living +object. Methods are added by monkey patching ast classes. + +Main modules are: + +* nodes and scoped_nodes for more information about methods and + attributes added to different node classes + +* the manager contains a high level object to get astroid trees from + source files and living objects. It maintains a cache of previously + constructed tree for quick access + +* builder contains the class responsible to build astroid trees \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.md b/src/test/pythonFiles/markdown/scipy.md index 23721797aae3..d28c1e290abe 100644 --- a/src/test/pythonFiles/markdown/scipy.md +++ b/src/test/pythonFiles/markdown/scipy.md @@ -11,38 +11,37 @@ addition provides: Using any of these subpackages requires an explicit import. For example, `import scipy.cluster`. 
```html - cluster --- Vector Quantization / Kmeans - fftpack --- Discrete Fourier Transform algorithms - integrate --- Integration routines - interpolate --- Interpolation Tools - io --- Data input and output - linalg --- Linear algebra routines - linalg.blas --- Wrappers to BLAS library - linalg.lapack --- Wrappers to LAPACK library - misc --- Various utilities that don't have - another home. - ndimage --- n-dimensional image package - odr --- Orthogonal Distance Regression - optimize --- Optimization Tools - signal --- Signal Processing Tools - sparse --- Sparse Matrices - sparse.linalg --- Sparse Linear Algebra - sparse.linalg.dsolve --- Linear Solvers - sparse.linalg.dsolve.umfpack --- **Interface to the UMFPACK library** - Conjugate Gradient Method (LOBPCG) - sparse.linalg.eigen --- Sparse Eigenvalue Solvers - sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned - Conjugate Gradient Method (LOBPCG) - spatial --- Spatial data structures and algorithms - special --- Special functions - stats --- Statistical Functions - + cluster --- Vector Quantization / Kmeans + fftpack --- Discrete Fourier Transform algorithms + integrate --- Integration routines + interpolate --- Interpolation Tools + io --- Data input and output + linalg --- Linear algebra routines + linalg.blas --- Wrappers to BLAS library + linalg.lapack --- Wrappers to LAPACK library + misc --- Various utilities that don't have + another home. + ndimage --- n-dimensional image package + odr --- Orthogonal Distance Regression + optimize --- Optimization Tools + signal --- Signal Processing Tools + sparse --- Sparse Matrices + sparse.linalg --- Sparse Linear Algebra + sparse.linalg.dsolve --- Linear Solvers + sparse.linalg.dsolve.umfpack --- :Interface to the UMFPACK library: + Conjugate Gradient Method (LOBPCG) + sparse.linalg.eigen --- Sparse Eigenvalue Solvers + sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned + Conjugate Gradient Method (LOBPCG) + spatial --- Spatial data structures and algorithms + special --- Special functions + stats --- Statistical Functions ``` #### Utility tools ```html - test --- Run scipy unittests - show_config --- Show scipy build configuration - show_numpy_config --- Show numpy build configuration - __version__ --- Scipy version string - __numpy_version__ --- Numpy version string + test --- Run scipy unittests + show_config --- Show scipy build configuration + show_numpy_config --- Show numpy build configuration + __version__ --- Scipy version string + __numpy_version__ --- Numpy version string ``` \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.spatial.distance.md b/src/test/pythonFiles/markdown/scipy.spatial.distance.md index 125b19f6cdeb..8e9dd996931d 100644 --- a/src/test/pythonFiles/markdown/scipy.spatial.distance.md +++ b/src/test/pythonFiles/markdown/scipy.spatial.distance.md @@ -6,53 +6,49 @@ Distance matrix computation from a collection of raw observation vectors stored in a rectangular array. ```html - pdist -- pairwise distances between observation vectors. - cdist -- distances between two collections of observation vectors - squareform -- convert distance matrix to a condensed one and vice versa - directed_hausdorff -- directed Hausdorff distance between arrays - + pdist -- pairwise distances between observation vectors. 
+ cdist -- distances between two collections of observation vectors + squareform -- convert distance matrix to a condensed one and vice versa + directed_hausdorff -- directed Hausdorff distance between arrays ``` Predicates for checking the validity of distance matrices, both condensed and redundant. Also contained in this module are functions for computing the number of observations in a distance matrix. ```html - is_valid_dm -- checks for a valid distance matrix - is_valid_y -- checks for a valid condensed distance matrix - num_obs_dm -- # of observations in a distance matrix - num_obs_y -- # of observations in a condensed distance matrix - + is_valid_dm -- checks for a valid distance matrix + is_valid_y -- checks for a valid condensed distance matrix + num_obs_dm -- # of observations in a distance matrix + num_obs_y -- # of observations in a condensed distance matrix ``` Distance functions between two numeric vectors `u` and `v`. Computing distances over a large collection of vectors is inefficient for these functions. Use `pdist` for this purpose. ```html - braycurtis -- the Bray-Curtis distance. - canberra -- the Canberra distance. - chebyshev -- the Chebyshev distance. - cityblock -- the Manhattan distance. - correlation -- the Correlation distance. - cosine -- the Cosine distance. - euclidean -- the Euclidean distance. - mahalanobis -- the Mahalanobis distance. - minkowski -- the Minkowski distance. - seuclidean -- the normalized Euclidean distance. - sqeuclidean -- the squared Euclidean distance. - wminkowski -- (deprecated) alias of `minkowski`. - + braycurtis -- the Bray-Curtis distance. + canberra -- the Canberra distance. + chebyshev -- the Chebyshev distance. + cityblock -- the Manhattan distance. + correlation -- the Correlation distance. + cosine -- the Cosine distance. + euclidean -- the Euclidean distance. + mahalanobis -- the Mahalanobis distance. + minkowski -- the Minkowski distance. + seuclidean -- the normalized Euclidean distance. + sqeuclidean -- the squared Euclidean distance. + wminkowski -- (deprecated) alias of `minkowski`. ``` Distance functions between two boolean vectors (representing sets) `u` and `v`. As in the case of numerical vectors, `pdist` is more efficient for computing the distances between all pairs. ```html - dice -- the Dice dissimilarity. - hamming -- the Hamming distance. - jaccard -- the Jaccard distance. - kulsinski -- the Kulsinski distance. - rogerstanimoto -- the Rogers-Tanimoto dissimilarity. - russellrao -- the Russell-Rao dissimilarity. - sokalmichener -- the Sokal-Michener dissimilarity. - sokalsneath -- the Sokal-Sneath dissimilarity. - yule -- the Yule dissimilarity. - + dice -- the Dice dissimilarity. + hamming -- the Hamming distance. + jaccard -- the Jaccard distance. + kulsinski -- the Kulsinski distance. + rogerstanimoto -- the Rogers-Tanimoto dissimilarity. + russellrao -- the Russell-Rao dissimilarity. + sokalmichener -- the Sokal-Michener dissimilarity. + sokalsneath -- the Sokal-Sneath dissimilarity. + yule -- the Yule dissimilarity. ``` **func**`hamming` also operates over discrete numerical vectors. 
\ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.spatial.md b/src/test/pythonFiles/markdown/scipy.spatial.md index 3584e78f1bbc..ba9a8a615843 100644 --- a/src/test/pythonFiles/markdown/scipy.spatial.md +++ b/src/test/pythonFiles/markdown/scipy.spatial.md @@ -3,55 +3,49 @@ ### Nearest-neighbor Queries ```html - KDTree -- class for efficient nearest-neighbor queries - cKDTree -- class for efficient nearest-neighbor queries (faster impl.) - distance -- module containing many different distance measures - Rectangle - + KDTree -- class for efficient nearest-neighbor queries + cKDTree -- class for efficient nearest-neighbor queries (faster impl.) + distance -- module containing many different distance measures + Rectangle ``` ### Delaunay Triangulation, Convex Hulls and Voronoi Diagrams ```html - Delaunay -- compute Delaunay triangulation of input points - ConvexHull -- compute a convex hull for input points - Voronoi -- compute a Voronoi diagram hull from input points - SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere - HalfspaceIntersection -- compute the intersection points of input halfspaces - + Delaunay -- compute Delaunay triangulation of input points + ConvexHull -- compute a convex hull for input points + Voronoi -- compute a Voronoi diagram hull from input points + SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere + HalfspaceIntersection -- compute the intersection points of input halfspaces ``` ### Plotting Helpers ```html - delaunay_plot_2d -- plot 2-D triangulation - convex_hull_plot_2d -- plot 2-D convex hull - voronoi_plot_2d -- plot 2-D voronoi diagram - + delaunay_plot_2d -- plot 2-D triangulation + convex_hull_plot_2d -- plot 2-D convex hull + voronoi_plot_2d -- plot 2-D voronoi diagram ``` ### Simplex representation The simplices (triangles, tetrahedra, ...) appearing in the Delaunay tesselation (N-dim simplices), convex hull facets, and Voronoi ridges (N-1 dim simplices) are represented in the following scheme: ```html - tess = Delaunay(points) - hull = ConvexHull(points) - voro = Voronoi(points) - - # coordinates of the j-th vertex of the i-th simplex - tess.points[tess.simplices[i, j], :] # tesselation element - hull.points[hull.simplices[i, j], :] # convex hull facet - voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells - + tess = Delaunay(points) + hull = ConvexHull(points) + voro = Voronoi(points) + + # coordinates of the j-th vertex of the i-th simplex + tess.points[tess.simplices[i, j], :] # tesselation element + hull.points[hull.simplices[i, j], :] # convex hull facet + voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells ``` For Delaunay triangulations and convex hulls, the neighborhood structure of the simplices satisfies the condition: ```html - `tess.neighbors[i,j]` is the neighboring simplex of the i-th - simplex, opposite to the j-vertex. It is -1 in case of no - neighbor. - + `tess.neighbors[i,j]` is the neighboring simplex of the i-th + simplex, opposite to the j-vertex. It is -1 in case of no + neighbor. ``` Convex hull facets also define a hyperplane equation: ```html - (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0 - + (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0 ``` Similar hyperplane equations for the Delaunay triangulation correspond to the convex hull facets on the corresponding N+1 dimensional @@ -63,9 +57,9 @@ computations. 

 #### Functions
 ```html
- tsearch
- distance_matrix
- minkowski_distance
- minkowski_distance_p
- procrustes
+ tsearch
+ distance_matrix
+ minkowski_distance
+ minkowski_distance_p
+ procrustes
 ```
\ No newline at end of file

From e436fde65a1d818338826edf7e4e0ec97686455d Mon Sep 17 00:00:00 2001
From: MikhailArkhipov
Date: Wed, 7 Feb 2018 13:42:45 -0800
Subject: [PATCH 046/103] Baselines

---
 .../common/markdown/restTextConverter.ts | 50 ++++---------------
 src/client/providers/itemInfoSource.ts | 2 +-
 src/test/markdown/restTextConverter.test.ts | 2 +-
 src/test/pythonFiles/markdown/astroid.md | 2 +-
 .../markdown/scipy.spatial.distance.md | 4 +-
 .../pythonFiles/markdown/scipy.spatial.md | 2 +-
 6 files changed, 16 insertions(+), 46 deletions(-)

diff --git a/src/client/common/markdown/restTextConverter.ts b/src/client/common/markdown/restTextConverter.ts
index da00da8e2fe3..e606fd46bfbc 100644
--- a/src/client/common/markdown/restTextConverter.ts
+++ b/src/client/common/markdown/restTextConverter.ts
@@ -17,16 +17,12 @@ export class RestTextConverter {
 private md: string[] = [];

 // tslint:disable-next-line:cyclomatic-complexity
- public toMarkdown(docstring: string, force?: boolean): string {
+ public toMarkdown(docstring: string): string {
 // Translates reStructruredText (Python doc syntax) to markdown.
 // It only translates as much as needed to display tooltips
 // and documentation in the completion list.
 // See https://en.wikipedia.org/wiki/ReStructuredText

- if (!force && !this.shouldConvert(docstring)) {
- return this.escapeMarkdown(docstring);
- }
-
 const result = this.transformLines(docstring);
 this.state = State.Default;
 this.md = [];
@@ -40,34 +36,7 @@ export class RestTextConverter {
 return text
 .replace(/\#/g, '\\#')
 .replace(/\*/g, '\\*')
- .replace(/\_/g, '\\_')
- .replace(/\{/g, '\\{')
- .replace(/\}/g, '\\}')
- .replace(/\[/g, '\\[')
- .replace(/\]/g, '\\]')
- .replace(/\(/g, '\\(')
- .replace(/\)/g, '\\)')
- .replace(/\+/g, '\\+')
- .replace(/\-/g, '\\-');
- }
-
- private shouldConvert(docstring: string): boolean {
- // Heuristics that determine if string should be converted
- // to markdown or just escaped.
-
- // :: at the end of a string
- const doubleColon = docstring.indexOf('::');
- if (doubleColon >= 0 && doubleColon < docstring.length - 2) {
- const ch = docstring.charCodeAt(doubleColon + 2);
- if (ch === Char.LineFeed || ch === Char.CarriageReturn) {
- return true;
- }
- }
- // Section headers or lists
- if (docstring.indexOf('===') >= 0 || docstring.indexOf('---') >= 0 || docstring.indexOf('.. ') >= 0) {
- return true;
- }
- return false;
+ .replace(/\_/g, '\\_');
 }
@@ -126,12 +95,13 @@ export class RestTextConverter {
 return result; // Handle line in the new state
 }

- line = this.convertEmphasis(line);
+ line = this.cleanup(line);
 line = line.replace(/``/g, '`'); // Convert double backticks to single.
+ line = this.escapeMarkdown(line);
 this.md.push(line);

 return 0;
 }
@@ -202,12 +172,12 @@ export class RestTextConverter {
 const line = lines[i];
 if (i < lines.length - 1 && (lines[i + 1].startsWith('==='))) {
 // Section title -> heading level 3.
- this.md.push(`### ${this.convertEmphasis(line)}`);
+ this.md.push(`### ${this.cleanup(line)}`);
 return true;
 }
 if (i < lines.length - 1 && (lines[i + 1].startsWith('---'))) {
 // Subsection title -> heading level 4.
- this.md.push(`#### ${this.convertEmphasis(line)}`); + this.md.push(`#### ${this.cleanup(line)}`); return true; } return false; @@ -268,13 +238,13 @@ export class RestTextConverter { } } - private convertEmphasis(line: string): string { - return line.replace(/\:([\w\W]+)\:/g, '**$1**'); // Convert :word: to **word**. - } - private isListItem(line: string): boolean { const trimmed = line.trim(); const ch = trimmed.length > 0 ? trimmed.charCodeAt(0) : 0; return ch === Char.Asterisk || ch === Char.Hyphen || isDecimal(ch); } + + private cleanup(line: string): string { + return line.replace(/:mod:/g, 'module:'); + } } diff --git a/src/client/providers/itemInfoSource.ts b/src/client/providers/itemInfoSource.ts index fc02d8097464..4159e9396f82 100644 --- a/src/client/providers/itemInfoSource.ts +++ b/src/client/providers/itemInfoSource.ts @@ -109,7 +109,7 @@ export class ItemInfoSource { tooltip = tooltip.appendMarkdown(['```python', signature, '```', EOL].join(EOL)); } - const description = this.textConverter.toMarkdown(lines.join(EOL), signature.length === 0); + const description = this.textConverter.toMarkdown(lines.join(EOL)); tooltip = tooltip.appendMarkdown(description); const documentation = this.textConverter.toMarkdown(dnd[1]); // Used only in completion list diff --git a/src/test/markdown/restTextConverter.test.ts b/src/test/markdown/restTextConverter.test.ts index 81b1ba5bbf12..9b43d4d57657 100644 --- a/src/test/markdown/restTextConverter.test.ts +++ b/src/test/markdown/restTextConverter.test.ts @@ -29,7 +29,7 @@ async function testConversion(fileName: string): Promise { const cvt = new RestTextConverter(); const file = path.join(srcPythoFilesPath, fileName); const source = await fs.readFile(`${file}.pydoc`, 'utf8'); - const actual = cvt.toMarkdown(source, true); + const actual = cvt.toMarkdown(source); const expected = await fs.readFile(`${file}.md`, 'utf8'); compareFiles(expected, actual); } diff --git a/src/test/pythonFiles/markdown/astroid.md b/src/test/pythonFiles/markdown/astroid.md index d3c1bda813ee..b5ece21c1faf 100644 --- a/src/test/pythonFiles/markdown/astroid.md +++ b/src/test/pythonFiles/markdown/astroid.md @@ -5,7 +5,7 @@ python source code for projects such as pychecker, pyreverse, pylint... Well, actually the development of this library is essentially governed by pylint's needs. -It extends class defined in the python's _ast module with some +It extends class defined in the python's \_ast module with some additional methods and attributes. Instance attributes are added by a builder object, which can either generate extended ast (let's call them astroid ;) by visiting an existent ast tree or by inspecting living diff --git a/src/test/pythonFiles/markdown/scipy.spatial.distance.md b/src/test/pythonFiles/markdown/scipy.spatial.distance.md index 8e9dd996931d..276acddef787 100644 --- a/src/test/pythonFiles/markdown/scipy.spatial.distance.md +++ b/src/test/pythonFiles/markdown/scipy.spatial.distance.md @@ -1,4 +1,4 @@ -### Distance computations (**mod**`scipy.spatial.distance`) +### Distance computations (module:`scipy.spatial.distance`) #### Function Reference @@ -51,4 +51,4 @@ computing the distances between all pairs. sokalsneath -- the Sokal-Sneath dissimilarity. yule -- the Yule dissimilarity. ``` -**func**`hamming` also operates over discrete numerical vectors. \ No newline at end of file +:func:`hamming` also operates over discrete numerical vectors. 
\ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.spatial.md index ba9a8a615843..2d5e891db625 100644 --- a/src/test/pythonFiles/markdown/scipy.spatial.md +++ b/src/test/pythonFiles/markdown/scipy.spatial.md @@ -1,4 +1,4 @@ -### Spatial algorithms and data structures (**mod**`scipy.spatial`) +### Spatial algorithms and data structures (module:`scipy.spatial`) ### Nearest-neighbor Queries From e52bcffbe29775cb7d95f65a75f97fda30a0f62e Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 7 Feb 2018 13:49:50 -0800 Subject: [PATCH 047/103] Squash --- CONTRIBUTING.md | 56 ++-- src/client/common/extensions.ts | 18 +- .../common/markdown/restTextConverter.ts | 252 ++++++++------- src/client/common/terminal/helper.ts | 2 +- src/client/providers/itemInfoSource.ts | 2 +- .../codeExecution/codeExecutionManager.ts | 5 +- .../codeExecution/djangoShellCodeExecution.ts | 2 +- .../codeExecution/terminalCodeExecution.ts | 4 +- src/client/unittests/main.ts | 2 +- src/test/common/extensions.test.ts | 42 +++ .../common/terminals/activation.bash.test.ts | 6 +- .../activation.commandPrompt.test.ts | 288 +++++++++--------- src/test/common/terminals/helper.test.ts | 4 +- .../interpreters/condaEnvFileService.test.ts | 11 +- src/test/markdown/restTextConverter.test.ts | 6 +- src/test/pythonFiles/markdown/aifc.md | 142 +++++++++ src/test/pythonFiles/markdown/aifc.pydoc | 134 ++++++++ src/test/pythonFiles/markdown/anydbm.md | 29 +- src/test/pythonFiles/markdown/astroid.md | 24 ++ src/test/pythonFiles/markdown/astroid.pydoc | 23 ++ src/test/pythonFiles/markdown/scipy.md | 61 ++-- .../markdown/scipy.spatial.distance.md | 66 ++-- .../pythonFiles/markdown/scipy.spatial.md | 66 ++-- .../extension.refactor.extract.var.test.ts | 4 +- .../djangoShellCodeExect.test.ts | 2 +- .../codeExecution/terminalCodeExec.test.ts | 5 +- src/test/unittests/debugger.test.ts | 29 +- .../unittests/stoppingDiscoverAndTest.test.ts | 21 +- 28 files changed, 863 insertions(+), 443 deletions(-) create mode 100644 src/test/common/extensions.test.ts create mode 100644 src/test/pythonFiles/markdown/aifc.md create mode 100644 src/test/pythonFiles/markdown/aifc.pydoc create mode 100644 src/test/pythonFiles/markdown/astroid.md create mode 100644 src/test/pythonFiles/markdown/astroid.pydoc diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 04abf6a0df58..2d5b4456385a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -93,7 +93,7 @@ for a release are tracked in a [milestone](https://github.com/Microsoft/vscode-python/milestones) (which is actively updated as plans change). -The overall schedule for a release is to code freeze for on the last +The overall schedule for a release is to feature freeze on the last Monday of the month to coincide with Visual Studio Code's code freeze. We then aim to release later that week so the latest version of the extension is already live by the time Visual Studio Code launches @@ -104,16 +104,10 @@ between scheduled releases as necessary. All development is actively done in the `master` branch of the repository. It is what allows us to have an [insiders build](#insiders-build) which is expected to be stable at -all times. We do keep the previous release as a branch in case the +all times. We do keep the most recent release as a branch in case the need for a bugfix release arises.
But once a new release is made we -convert the older branch into a tag and delete the branch as -Visual Studio Code's automatic updating makes keeping old versions -around unnecessary. - -Since we try to spend about 25% of our development time fixing bugs -and removing technical debt, the week of a release is mostly spent -focusing on that topic. That way we don't ignore the health of the -code base by accidentally focusing on new features exclusively. +delete the older release branch (all releases are appropriately +tagged, so no history is lost). ### Issue triaging @@ -142,21 +136,17 @@ lexicographically sort from earliest stage to latest stage). The suffix term for each label then specifies what is currently blocking the issue from being closed. -* `1-` - * [`decision`](https://github.com/Microsoft/vscode-python/labels/awaiting%201-decision): - The issue is a feature enhancement request and a decision has not - been made as to whether we would accept a pull request - implementing the enhancement - * [`more info`](https://github.com/Microsoft/vscode-python/labels/awaiting%201-more%20info): - We need more information from the OP (original poster) - * [`verification`](https://github.com/Microsoft/vscode-python/labels/awaiting%201-verification): - We need to verify that the issue can be replicated +* [`1-decision`](https://github.com/Microsoft/vscode-python/labels/awaiting%201-decision): +The issue is a feature enhancement request and a decision has not +been made as to whether we would accept a pull request +implementing the enhancement +* [`1-more info`](https://github.com/Microsoft/vscode-python/labels/awaiting%201-more%20info): +We need more information from the OP (original poster) +* [`1-verification`](https://github.com/Microsoft/vscode-python/labels/awaiting%201-verification): + We need to verify that the issue is reproducible * [`2-PR`](https://github.com/Microsoft/vscode-python/labels/awaiting%202-PR): The issue is valid and is now awaiting a fix to be created and merged into the `master` branch -* [`4-validation`](https://github.com/Microsoft/vscode-python/labels/awaiting%204-validation): - A pull request has been merged and resolution of the issue should be - independently validated #### Closed issues @@ -168,22 +158,21 @@ it should have an appropriate `closed-` label. 1. Check that there is an issue corresponding to what the pull request is attempting to address * If an issue exists, make sure it has reached the stage of - being labeled `awaiting 2-PR` + `awaiting 2-PR` * If no issue exists, open one and wait for it to reach the `awaiting 2-PR` stage before submitting the pull request -1. Open the pull request, mentioning the appropriate issue(s) in the +1. Create the pull request, mentioning the appropriate issue(s) in the pull request message body * The pull request is expected to have appropriate unit tests * The pull request must pass its CI run before merging will be considered - * Code coverage is expected to not worsen + * Code coverage is expected to (at minimum) not worsen 1. Make sure all status checks are green (e.g. CLA check, CI, etc.) 1. Address any review comments 1. [Maintainers only] Merge the pull request 1. [Maintainers only] Update affected issues to be: 1. Closed (with an appropriate `closed-` label) - 1. The stage is set to `awaiting 4-validation` 1. The issue(s) are attached to the current milestone 1. Register OSS usage 1.
Email CELA about any 3rd-party usage changes @@ -194,11 +183,12 @@ Starting in 2018, the extension switched to [calendar versioning](http://calver.org/) since the extension auto-updates and thus there is no need to track its version number for backwards-compatibility. As such, the major version -is the current year, the minor version is the current month, and -the micro version is how many releases there have been that month in -the year (starting at 0). For example, the first release in July 2018 -would be `2018.7.0`, the second release that month would be -`2018.7.1`, etc. +is the current year, the minor version is the month when feature +freeze was reached, and the micro version is how many releases there +have been since that feature freeze (starting at 0). For example, +the release made when we reach feature freeze in July 2018 +would be `2018.7.0`, and if a second release was necessary to fix a +critical bug it would be `2018.7.1`. ## Insiders Build @@ -214,7 +204,7 @@ file, please follow the instructions on [this page](https://code.visualstudio.com/docs/editor/extension-gallery#_install-from-a-vsix) to install the extension. -The insiders build of the extension ...: +The insiders build of the extension: * Will be replaced with new releases published onto the [VS Code Marketplace](https://marketplace.visualstudio.com/VSCode). diff --git a/src/client/common/extensions.ts b/src/client/common/extensions.ts index 629b809e8d06..71858e6e01da 100644 --- a/src/client/common/extensions.ts +++ b/src/client/common/extensions.ts @@ -20,6 +20,11 @@ interface String { * E.g. if an argument contains a space, then it will be enclosed within double quotes. */ toCommandArgument(): string; + /** + * Appropriately formats a file path so it can be used as an argument for a command in a shell. + * E.g. if an argument contains a space, then it will be enclosed within double quotes. + */ + fileToCommandArgument(): string; } /** @@ -47,5 +52,16 @@ String.prototype.toCommandArgument = function (this: string): string { if (!this) { return this; } - return (this.indexOf(' ') > 0 && !this.startsWith('"') && !this.endsWith('"')) ? `"${this}"` : this.toString(); + return (this.indexOf(' ') >= 0 && !this.startsWith('"') && !this.endsWith('"')) ? `"${this}"` : this.toString(); }; + +/** + * Appropriately formats a file path so it can be used as an argument for a command in a shell. + * E.g. if an argument contains a space, then it will be enclosed within double quotes. + */ +String.prototype.fileToCommandArgument = function (this: string): string { + if (!this) { + return this; + } + return this.toCommandArgument().replace(/\\/g, '/'); +};
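[Editor's note: the following is an illustrative sketch, not part of the patch. It shows the intended behavior of the two string extensions above, mirroring the cases exercised by the new src/test/common/extensions.test.ts later in this patch; the relative import path is assumed.]

```typescript
// Loading the extensions module patches String.prototype (path assumed).
import '../src/client/common/extensions';

// toCommandArgument(): quote only when the argument contains a space.
'one.two.three'.toCommandArgument();   // -> 'one.two.three'
'one two three'.toCommandArgument();   // -> '"one two three"'

// fileToCommandArgument(): same quoting, plus backslashes become forward slashes.
'c:\\conda path\\python.exe'.fileToCommandArgument(); // -> '"c:/conda path/python.exe"'
```

Note that `indexOf(' ') > 0` became `indexOf(' ') >= 0` above, so an argument with a leading space is now quoted as well.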
diff --git a/src/client/common/markdown/restTextConverter.ts index 104230f273a1..e606fd46bfbc 100644 --- a/src/client/common/markdown/restTextConverter.ts +++ b/src/client/common/markdown/restTextConverter.ts @@ -2,26 +2,29 @@ // Licensed under the MIT License. import { EOL } from 'os'; -import { isWhiteSpace } from '../../language/characters'; +// tslint:disable-next-line:import-name +import Char from 'typescript-char'; +import { isDecimal, isWhiteSpace } from '../../language/characters'; + +enum State { + Default, + Preformatted, + Code +} export class RestTextConverter { - private inPreBlock = false; - private inCodeBlock = false; + private state: State = State.Default; private md: string[] = []; // tslint:disable-next-line:cyclomatic-complexity - public toMarkdown(docstring: string, force?: boolean): string { + public toMarkdown(docstring: string): string { // Translates reStructuredText (Python doc syntax) to markdown. // It only translates as much as needed to display tooltips // and documentation in the completion list. // See https://en.wikipedia.org/wiki/ReStructuredText - if (!force && !this.shouldConvert(docstring)) { - return this.escapeMarkdown(docstring); - } - const result = this.transformLines(docstring); - this.inPreBlock = this.inPreBlock = false; + this.state = State.Default; this.md = []; return result; @@ -33,120 +36,148 @@ return text .replace(/\#/g, '\\#') .replace(/\*/g, '\\*') - .replace(/\_/g, '\\_') - .replace(/\{/g, '\\{') - .replace(/\}/g, '\\}') - .replace(/\[/g, '\\[') - .replace(/\]/g, '\\]') - .replace(/\(/g, '\\(') - .replace(/\)/g, '\\)') - .replace(/\+/g, '\\+') - .replace(/\-/g, '\\+'); - } - - private shouldConvert(docstring: string): boolean { - // heuristics - if (docstring.indexOf('::') >= 0 || docstring.indexOf('..') >= 0) { - return true; - } - if (docstring.indexOf('===') >= 0 || docstring.indexOf('---') >= 0) { - return true; - } - return false; + .replace(/\_/g, '\\_'); } - // tslint:disable-next-line:cyclomatic-complexity private transformLines(docstring: string): string { const lines = docstring.split(/\r?\n/); for (let i = 0; i < lines.length; i += 1) { - let line = lines[i]; - + const line = lines[i]; // Avoid leading empty lines if (this.md.length === 0 && line.length === 0) { continue; } - if (!this.inPreBlock) { - // Anything indented is considered to be preformatted. - if (line.length > 0 && isWhiteSpace(line.charCodeAt(0))) { - this.startPreformattedBlock(line); - } + switch (this.state) { + case State.Default: + i += this.inDefaultState(lines, i); + break; + case State.Preformatted: + i += this.inPreformattedState(lines, i); + break; + case State.Code: + this.inCodeState(line); + break; + default: + break; } + } - if (this.handleCodeBlock(line)) { - continue; - } + this.endCodeBlock(); + this.endPreformattedBlock(); - if (this.inPreBlock) { - // Preformatted block terminates by a line without leading - // whitespace or any special line like ..ABC::. - if (line.length > 0 && !isWhiteSpace(line.charCodeAt(0))) { - this.endPreformattedBlock(); - } - } + return this.md.join(EOL).trim(); + } - if (this.handleSectionHeader(lines, i)) { - i += 1; // Eat line with === or --- - continue; - } + private inDefaultState(lines: string[], i: number): number { + let line = lines[i]; + if (line.startsWith('```')) { + this.startCodeBlock(); + return 0; + } - if (line.indexOf('generated/') >= 0) { - continue; // ignore generated content. - } - if (line.startsWith('===') || line.startsWith('---')) { - continue; // Eat standalone === or --- lines. + if (line.startsWith('===') || line.startsWith('---')) { + return 0; // Eat standalone === or --- lines.
+ } + if (this.handleDoubleColon(line)) { + return 0; + } + if (this.isIgnorable(line)) { + return 0; + } - if (this.handleDoubleColon(line)) { - continue; - } - if (line.startsWith('..') && line.indexOf('::') > 0) { - // Ignore lines likes .. sectionauthor:: John Doe. - continue; - } + if (this.handleSectionHeader(lines, i)) { + return 1; // Eat line with === or --- } - line = this.convertEmphasis(line); - line = line.replace(/``/g, '`'); // Convert double backticks to single. + const result = this.checkPreContent(lines, i); + if (this.state !== State.Default) { + return result; // Handle line in the new state + } - if (line.length > 0 && isWhiteSpace(line.charCodeAt(0))) { - // Keep hard line breaks for the pre-indented content. - line = ` ${line} `; - } + line = this.cleanup(line); + line = line.replace(/``/g, '`'); // Convert double backticks to single. + line = this.escapeMarkdown(line); + this.md.push(line); - const prevLine = this.md.length > 0 ? this.md[this.md.length - 1] : undefined; - if (line.length === 0 && prevLine && (prevLine.length === 0 || prevLine.startsWith('```'))) { - continue; // Avoid more than one empty line in a row. - } + return 0; + } - this.md.push(line); + private inPreformattedState(lines: string[], i: number): number { + let line = lines[i]; + if (this.isIgnorable(line)) { + return 0; + } + // Preformatted block terminates by a line without leading whitespace. + if (line.length > 0 && !isWhiteSpace(line.charCodeAt(0)) && !this.isListItem(line)) { + this.endPreformattedBlock(); + return -1; } - this.tryEndCodePreBlocks(); - return this.md.join(EOL).trim(); + const prevLine = this.md.length > 0 ? this.md[this.md.length - 1] : undefined; + if (line.length === 0 && prevLine && (prevLine.length === 0 || prevLine.startsWith('```'))) { + return 0; // Avoid more than one empty line in a row. + } + + // Since we use HTML blocks as preformatted text + // make sure we drop angle brackets since otherwise + // they will render as tags and attributes + line = line.replace(/</g, ' ').replace(/>/g, ' '); + line = line.replace(/``/g, '`'); // Convert double backticks to single. + // Keep hard line breaks for the preformatted content + this.md.push(`${line} `); + return 0; } - private handleCodeBlock(line: string): boolean { - if (!line.startsWith('```')) { - return false; + private inCodeState(line: string): void { + const prevLine = this.md.length > 0 ? this.md[this.md.length - 1] : undefined; + if (line.length === 0 && prevLine && (prevLine.length === 0 || prevLine.startsWith('```'))) { + return; // Avoid more than one empty line in a row. } - if (this.inCodeBlock) { + + if (line.startsWith('```')) { this.endCodeBlock(); } else { - this.startCodeBlock(); + this.md.push(line); } - return true; + } + + private isIgnorable(line: string): boolean { + if (line.indexOf('generated/') >= 0) { + return true; // Drop generated content. + } + const trimmed = line.trim(); + if (trimmed.startsWith('..') && trimmed.indexOf('::') > 0) { + // Ignore lines like .. sectionauthor:: John Doe. + return true; + } + return false; + } + + private checkPreContent(lines: string[], i: number): number { + const line = lines[i]; + if (i === 0 || line.trim().length === 0) { + return 0; + } + + if (!isWhiteSpace(line.charCodeAt(0)) && !this.isListItem(line)) { + return 0; // regular line, nothing to do here. + } + // Indented content is considered to be preformatted.
+ this.startPreformattedBlock(); + return -1; } private handleSectionHeader(lines: string[], i: number): boolean { const line = lines[i]; if (i < lines.length - 1 && (lines[i + 1].startsWith('==='))) { // Section title -> heading level 3. - this.md.push(`### ${this.convertEmphasis(line)}`); + this.md.push(`### ${this.cleanup(line)}`); return true; } if (i < lines.length - 1 && (lines[i + 1].startsWith('---'))) { // Subsection title -> heading level 4. - this.md.push(`#### ${this.convertEmphasis(line)}`); + this.md.push(`#### ${this.cleanup(line)}`); return true; } return false; @@ -164,61 +195,56 @@ export class RestTextConverter { this.md.push(line.substring(0, line.length - 1)); } - this.startPreformattedBlock(line); + this.startPreformattedBlock(); return true; } - private tryEndCodePreBlocks(): void { - if (this.inCodeBlock) { - this.endCodeBlock(); - } - if (this.inPreBlock) { - this.endPreformattedBlock(); - } - } - - private startPreformattedBlock(line: string): void { + private startPreformattedBlock(): void { // Remove previous empty line so we avoid double empties. - this.tryRemovePrecedingEmptyLine(); + this.tryRemovePrecedingEmptyLines(); // Lie about the language since we don't want preformatted text // to be colorized as Python. HTML is more 'appropriate' as it does // not colorize -- or + or keywords like 'from'. - if (line.indexOf('# ') >= 0) { - this.md.push('```python'); - } else { - this.md.push('```html'); - } - this.inPreBlock = true; + this.md.push('```html'); + this.state = State.Preformatted; } private endPreformattedBlock(): void { - if (this.inPreBlock) { + if (this.state === State.Preformatted) { + this.tryRemovePrecedingEmptyLines(); this.md.push('```'); - this.inPreBlock = false; + this.state = State.Default; } } private startCodeBlock(): void { // Remove previous empty line so we avoid double empties. - this.tryRemovePrecedingEmptyLine(); + this.tryRemovePrecedingEmptyLines(); this.md.push('```python'); - this.inCodeBlock = true; + this.state = State.Code; } private endCodeBlock(): void { - if (this.inCodeBlock) { + if (this.state === State.Code) { + this.tryRemovePrecedingEmptyLines(); this.md.push('```'); - this.inCodeBlock = false; + this.state = State.Default; } } - private tryRemovePrecedingEmptyLine(): void { - if (this.md.length > 0 && this.md[this.md.length - 1].length === 0) { + private tryRemovePrecedingEmptyLines(): void { + while (this.md.length > 0 && this.md[this.md.length - 1].trim().length === 0) { this.md.pop(); } } - private convertEmphasis(line: string): string { - return line.replace(/\:([\w\W]+)\:/g, '**$1**'); // Convert :word: to **word**. + private isListItem(line: string): boolean { + const trimmed = line.trim(); + const ch = trimmed.length > 0 ? trimmed.charCodeAt(0) : 0; + return ch === Char.Asterisk || ch === Char.Hyphen || isDecimal(ch); + } + + private cleanup(line: string): string { + return line.replace(/:mod:/g, 'module:'); } } diff --git a/src/client/common/terminal/helper.ts b/src/client/common/terminal/helper.ts index 07c679b86f4f..bc2f9c78a237 100644 --- a/src/client/common/terminal/helper.ts +++ b/src/client/common/terminal/helper.ts @@ -68,7 +68,7 @@ export class TerminalHelper implements ITerminalHelper { public buildCommandForTerminal(terminalShellType: TerminalShellType, command: string, args: string[]) { const isPowershell = terminalShellType === TerminalShellType.powershell || terminalShellType === TerminalShellType.powershellCore; const commandPrefix = isPowershell ? 
'& ' : ''; - return `${commandPrefix}${command.toCommandArgument()} ${args.join(' ')}`.trim(); + return `${commandPrefix}${command.fileToCommandArgument()} ${args.join(' ')}`.trim(); } public async getEnvironmentActivationCommands(terminalShellType: TerminalShellType, resource?: Uri): Promise { const settings = this.serviceContainer.get(IConfigurationService).getSettings(resource); diff --git a/src/client/providers/itemInfoSource.ts b/src/client/providers/itemInfoSource.ts index fc02d8097464..4159e9396f82 100644 --- a/src/client/providers/itemInfoSource.ts +++ b/src/client/providers/itemInfoSource.ts @@ -109,7 +109,7 @@ export class ItemInfoSource { tooltip = tooltip.appendMarkdown(['```python', signature, '```', EOL].join(EOL)); } - const description = this.textConverter.toMarkdown(lines.join(EOL), signature.length === 0); + const description = this.textConverter.toMarkdown(lines.join(EOL)); tooltip = tooltip.appendMarkdown(description); const documentation = this.textConverter.toMarkdown(dnd[1]); // Used only in completion list diff --git a/src/client/terminals/codeExecution/codeExecutionManager.ts b/src/client/terminals/codeExecution/codeExecutionManager.ts index fd967c56318f..5a6159a50aae 100644 --- a/src/client/terminals/codeExecution/codeExecutionManager.ts +++ b/src/client/terminals/codeExecution/codeExecutionManager.ts @@ -15,7 +15,7 @@ import { ICodeExecutionHelper, ICodeExecutionManager, ICodeExecutionService } fr @injectable() export class CodeExecutionManager implements ICodeExecutionManager { - constructor( @inject(ICommandManager) private commandManager: ICommandManager, + constructor(@inject(ICommandManager) private commandManager: ICommandManager, @inject(IDocumentManager) private documentManager: IDocumentManager, @inject(IDisposableRegistry) private disposableRegistry: Disposable[], @inject(IServiceContainer) private serviceContainer: IServiceContainer) { @@ -28,8 +28,9 @@ export class CodeExecutionManager implements ICodeExecutionManager { this.disposableRegistry.push(this.commandManager.registerCommand(Commands.Exec_Selection_In_Django_Shell, this.executeSelectionInDjangoShell.bind(this))); } @captureTelemetry(EXECUTION_CODE, { scope: 'file' }, false) - private async executeFileInterTerminal(file: Uri) { + private async executeFileInterTerminal(file?: Uri) { const codeExecutionHelper = this.serviceContainer.get(ICodeExecutionHelper); + file = file instanceof Uri ? file : undefined; const fileToExecute = file ? file : await codeExecutionHelper.getFileToExecute(); if (!fileToExecute) { return; diff --git a/src/client/terminals/codeExecution/djangoShellCodeExecution.ts b/src/client/terminals/codeExecution/djangoShellCodeExecution.ts index d188a2091547..5fbe2ef2d19f 100644 --- a/src/client/terminals/codeExecution/djangoShellCodeExecution.ts +++ b/src/client/terminals/codeExecution/djangoShellCodeExecution.ts @@ -40,7 +40,7 @@ export class DjangoShellCodeExecutionProvider extends TerminalCodeExecutionProvi const workspaceRoot = workspaceUri ? workspaceUri.uri.fsPath : defaultWorkspace; const managePyPath = workspaceRoot.length === 0 ? 
'manage.py' : path.join(workspaceRoot, 'manage.py'); - args.push(managePyPath.toCommandArgument()); + args.push(managePyPath.fileToCommandArgument()); args.push('shell'); return { command, args }; } diff --git a/src/client/terminals/codeExecution/terminalCodeExecution.ts b/src/client/terminals/codeExecution/terminalCodeExecution.ts index a3aaedcc2584..4ed1d7da479c 100644 --- a/src/client/terminals/codeExecution/terminalCodeExecution.ts +++ b/src/client/terminals/codeExecution/terminalCodeExecution.ts @@ -34,7 +34,7 @@ export class TerminalCodeExecutionProvider implements ICodeExecutionService { const command = this.platformService.isWindows ? pythonSettings.pythonPath.replace(/\\/g, '/') : pythonSettings.pythonPath; const launchArgs = pythonSettings.terminal.launchArgs; - await this.getTerminalService(file).sendCommand(command, launchArgs.concat(file.fsPath.toCommandArgument())); + await this.getTerminalService(file).sendCommand(command, launchArgs.concat(file.fsPath.fileToCommandArgument())); } public async execute(code: string, resource?: Uri): Promise { @@ -47,7 +47,7 @@ export class TerminalCodeExecutionProvider implements ICodeExecutionService { } public async initializeRepl(resource?: Uri) { if (this.replActive && await this.replActive!) { - this._terminalService!.show(); + await this._terminalService!.show(); return; } this.replActive = new Promise(async resolve => { diff --git a/src/client/unittests/main.ts b/src/client/unittests/main.ts index c95777f78bcc..2e386edea0b0 100644 --- a/src/client/unittests/main.ts +++ b/src/client/unittests/main.ts @@ -158,7 +158,7 @@ async function selectAndRunTestMethod(cmdSource: CommandSource, resource: Uri, d return; } // tslint:disable-next-line:prefer-type-cast - await runTestsImpl(cmdSource, testManager.workspaceFolder, { testFunction: [selectedTestFn.testFunction] } as TestsToRun, debug); + await runTestsImpl(cmdSource, testManager.workspaceFolder, { testFunction: [selectedTestFn.testFunction] } as TestsToRun, false, debug); } async function selectAndRunTestFile(cmdSource: CommandSource) { const testManager = await getTestManager(true); diff --git a/src/test/common/extensions.test.ts b/src/test/common/extensions.test.ts new file mode 100644 index 000000000000..5724f3291274 --- /dev/null +++ b/src/test/common/extensions.test.ts @@ -0,0 +1,42 @@ +import { expect } from 'chai'; +import '../../client/common/extensions'; + +// Defines a Mocha test suite to group tests of similar kind together +suite('String Extensions', () => { + test('Should return empty string for empty arg', () => { + const argTotest = ''; + expect(argTotest.toCommandArgument()).to.be.equal(''); + }); + test('Should quote an empty space', () => { + const argTotest = ' '; + expect(argTotest.toCommandArgument()).to.be.equal('" "'); + }); + test('Should not quote command arguments without spaces', () => { + const argTotest = 'one.two.three'; + expect(argTotest.toCommandArgument()).to.be.equal(argTotest); + }); + test('Should quote command arguments with spaces', () => { + const argTotest = 'one two three'; + expect(argTotest.toCommandArgument()).to.be.equal(`"${argTotest}"`); + }); + test('Should return empty string for empty path', () => { + const fileToTest = ''; + expect(fileToTest.fileToCommandArgument()).to.be.equal(''); + }); + test('Should not quote file argument without spaces', () => { + const fileToTest = 'users/test/one'; + expect(fileToTest.fileToCommandArgument()).to.be.equal(fileToTest); + }); + test('Should quote file argument with spaces', () => { + const 
fileToTest = 'one two three'; + expect(fileToTest.fileToCommandArgument()).to.be.equal(`"${fileToTest}"`); + }); + test('Should replace all back slashes with forward slashes (irrespective of OS)', () => { + const fileToTest = 'c:\\users\\user\\conda\\scripts\\python.exe'; + expect(fileToTest.fileToCommandArgument()).to.be.equal(fileToTest.replace(/\\/g, '/')); + }); + test('Should replace all back slashes with forward slashes (irrespective of OS) and quoted when file has spaces', () => { + const fileToTest = 'c:\\users\\user namne\\conda path\\scripts\\python.exe'; + expect(fileToTest.fileToCommandArgument()).to.be.equal(`"${fileToTest.replace(/\\/g, '/')}"`); + }); +}); diff --git a/src/test/common/terminals/activation.bash.test.ts b/src/test/common/terminals/activation.bash.test.ts index ee7d2829ea46..c321528140ea 100644 --- a/src/test/common/terminals/activation.bash.test.ts +++ b/src/test/common/terminals/activation.bash.test.ts @@ -5,6 +5,7 @@ import { expect } from 'chai'; import * as path from 'path'; import * as TypeMoq from 'typemoq'; import { EnumEx } from '../../../client/common/enumUtils'; +import '../../../client/common/extensions'; import { IFileSystem } from '../../../client/common/platform/types'; import { Bash } from '../../../client/common/terminal/environmentActivationProviders/bash'; import { TerminalShellType } from '../../../client/common/terminal/types'; @@ -13,7 +14,7 @@ import { IServiceContainer } from '../../../client/ioc/types'; // tslint:disable-next-line:max-func-body-length suite('Terminal Environment Activation (bash)', () => { - ['usr/bin/python', 'usr/bin/env with spaces/env more/python'].forEach(pythonPath => { + ['usr/bin/python', 'usr/bin/env with spaces/env more/python', 'c:\\users\\windows paths\\conda\\python.exe'].forEach(pythonPath => { const hasSpaces = pythonPath.indexOf(' ') > 0; const suiteTitle = hasSpaces ? 'and there are spaces in the script file (pythonpath),' : 'and there are no spaces in the script file (pythonpath),'; suite(suiteTitle, () => { @@ -83,8 +84,7 @@ suite('Terminal Environment Activation (bash)', () => { // Ensure the path is quoted if it contains any spaces. // Ensure it contains the name of the environment as an argument to the script file. - const quotedScriptFile = pathToScriptFile.indexOf(' ') > 0 ? `"${pathToScriptFile}"` : pathToScriptFile; - expect(command).to.be.deep.equal([`source ${quotedScriptFile}`.trim()], 'Invalid command'); + expect(command).to.be.deep.equal([`source ${pathToScriptFile.fileToCommandArgument()}`.trim()], 'Invalid command'); } else { expect(command).to.be.equal(undefined, 'Command should be undefined'); } diff --git a/src/test/common/terminals/activation.commandPrompt.test.ts b/src/test/common/terminals/activation.commandPrompt.test.ts index 47b610ea0997..f6c7509ce00d 100644 --- a/src/test/common/terminals/activation.commandPrompt.test.ts +++ b/src/test/common/terminals/activation.commandPrompt.test.ts @@ -15,20 +15,64 @@ import { IConfigurationService, IPythonSettings } from '../../../client/common/t import { IServiceContainer } from '../../../client/ioc/types'; suite('Terminal Environment Activation (cmd/powershell)', () => { - ['c:/programfiles/python/python', 'c:/program files/python/python'].forEach(pythonPath => { - const hasSpaces = pythonPath.indexOf(' ') > 0; - const resource = Uri.file('a'); - - const suiteTitle = hasSpaces ? 
'and there are spaces in the script file (pythonpath),' : 'and there are no spaces in the script file (pythonpath),'; - suite(suiteTitle, () => { - ['activate', 'activate.sh', 'activate.csh', 'activate.fish', 'activate.bat', 'activate.ps1'].forEach(scriptFileName => { - suite(`and script file is ${scriptFileName}`, () => { + ['c:/programfiles/python/python', 'c:/program files/python/python', + 'c:\\users\\windows paths\\conda\\python.exe'].forEach(pythonPath => { + const hasSpaces = pythonPath.indexOf(' ') > 0; + const resource = Uri.file('a'); + + const suiteTitle = hasSpaces ? 'and there are spaces in the script file (pythonpath),' : 'and there are no spaces in the script file (pythonpath),'; + suite(suiteTitle, () => { + ['activate', 'activate.sh', 'activate.csh', 'activate.fish', 'activate.bat', 'activate.ps1'].forEach(scriptFileName => { + suite(`and script file is ${scriptFileName}`, () => { + let serviceContainer: TypeMoq.IMock; + let fileSystem: TypeMoq.IMock; + setup(() => { + serviceContainer = TypeMoq.Mock.ofType(); + fileSystem = TypeMoq.Mock.ofType(); + serviceContainer.setup(c => c.get(IFileSystem)).returns(() => fileSystem.object); + + const configService = TypeMoq.Mock.ofType(); + serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IConfigurationService))).returns(() => configService.object); + const settings = TypeMoq.Mock.ofType(); + settings.setup(s => s.pythonPath).returns(() => pythonPath); + configService.setup(c => c.getSettings(TypeMoq.It.isAny())).returns(() => settings.object); + }); + + EnumEx.getNamesAndValues(TerminalShellType).forEach(shellType => { + const isScriptFileSupported = ['activate.bat', 'activate.ps1'].indexOf(scriptFileName) >= 0; + const titleTitle = isScriptFileSupported ? `Ensure terminal type is supported (Shell: ${shellType.name})` : + `Ensure terminal type is not supported (Shell: ${shellType.name})`; + + test(titleTitle, async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); + + const supported = bash.isShellSupported(shellType.value); + switch (shellType.value) { + case TerminalShellType.commandPrompt: + case TerminalShellType.powershellCore: + case TerminalShellType.powershell: { + expect(supported).to.be.equal(true, `${shellType.name} shell not supported (it should be)`); + break; + } + default: { + expect(supported).to.be.equal(false, `${shellType.name} incorrectly supported (should not be)`); + } + } + }); + }); + }); + }); + + suite('and script file is activate.bat', () => { let serviceContainer: TypeMoq.IMock; let fileSystem: TypeMoq.IMock; + let platform: TypeMoq.IMock; setup(() => { serviceContainer = TypeMoq.Mock.ofType(); fileSystem = TypeMoq.Mock.ofType(); + platform = TypeMoq.Mock.ofType(); serviceContainer.setup(c => c.get(IFileSystem)).returns(() => fileSystem.object); + serviceContainer.setup(c => c.get(IPlatformService)).returns(() => platform.object); const configService = TypeMoq.Mock.ofType(); serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IConfigurationService))).returns(() => configService.object); @@ -37,173 +81,125 @@ suite('Terminal Environment Activation (cmd/powershell)', () => { configService.setup(c => c.getSettings(TypeMoq.It.isAny())).returns(() => settings.object); }); - EnumEx.getNamesAndValues(TerminalShellType).forEach(shellType => { - const isScriptFileSupported = ['activate.bat', 'activate.ps1'].indexOf(scriptFileName) >= 0; - const titleTitle = isScriptFileSupported ? 
`Ensure terminal type is supported (Shell: ${shellType.name})` : - `Ensure terminal type is not supported (Shell: ${shellType.name})`; - - test(titleTitle, async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); - - const supported = bash.isShellSupported(shellType.value); - switch (shellType.value) { - case TerminalShellType.commandPrompt: - case TerminalShellType.powershellCore: - case TerminalShellType.powershell: { - expect(supported).to.be.equal(true, `${shellType.name} shell not supported (it should be)`); - break; - } - default: { - expect(supported).to.be.equal(false, `${shellType.name} incorrectly supported (should not be)`); - } - } - }); - }); - }); - }); - - suite('and script file is activate.bat', () => { - let serviceContainer: TypeMoq.IMock; - let fileSystem: TypeMoq.IMock; - let platform: TypeMoq.IMock; - setup(() => { - serviceContainer = TypeMoq.Mock.ofType(); - fileSystem = TypeMoq.Mock.ofType(); - platform = TypeMoq.Mock.ofType(); - serviceContainer.setup(c => c.get(IFileSystem)).returns(() => fileSystem.object); - serviceContainer.setup(c => c.get(IPlatformService)).returns(() => platform.object); - - const configService = TypeMoq.Mock.ofType(); - serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IConfigurationService))).returns(() => configService.object); - const settings = TypeMoq.Mock.ofType(); - settings.setup(s => s.pythonPath).returns(() => pythonPath); - configService.setup(c => c.getSettings(TypeMoq.It.isAny())).returns(() => settings.object); - }); + test('Ensure batch files are supported by command prompt', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - test('Ensure batch files are supported by command prompt', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const commands = await bash.getActivationCommands(resource, TerminalShellType.commandPrompt); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const commands = await bash.getActivationCommands(resource, TerminalShellType.commandPrompt); + // Ensure the script file is of the following form: + // source "" + // Ensure the path is quoted if it contains any spaces. + // Ensure it contains the name of the environment as an argument to the script file. - // Ensure the script file is of the following form: - // source "" - // Ensure the path is quoted if it contains any spaces. - // Ensure it contains the name of the environment as an argument to the script file. - - const quotedScriptFile = pathToScriptFile.indexOf(' ') > 0 ? 
`"${pathToScriptFile}"` : pathToScriptFile; - expect(commands).to.be.deep.equal([`${quotedScriptFile}`.trim()], 'Invalid command'); - }); + expect(commands).to.be.deep.equal([pathToScriptFile.fileToCommandArgument()], 'Invalid command'); + }); - test('Ensure batch files are supported by powershell (on windows)', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + test('Ensure batch files are supported by powershell (on windows)', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - platform.setup(p => p.isWindows).returns(() => true); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const command = await bash.getActivationCommands(resource, TerminalShellType.powershell); + platform.setup(p => p.isWindows).returns(() => true); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const command = await bash.getActivationCommands(resource, TerminalShellType.powershell); - // Executing batch files from powershell requires going back to cmd, then into powershell + // Executing batch files from powershell requires going back to cmd, then into powershell - const quotedScriptFile = pathToScriptFile.indexOf(' ') > 0 ? `"${pathToScriptFile}"` : pathToScriptFile; - const activationCommand = `${quotedScriptFile}`.trim(); - const commands = [`& cmd /k "${activationCommand} & powershell"`]; - expect(command).to.be.deep.equal(commands, 'Invalid command'); - }); + const activationCommand = pathToScriptFile.fileToCommandArgument(); + const commands = [`& cmd /k "${activationCommand} & powershell"`]; + expect(command).to.be.deep.equal(commands, 'Invalid command'); + }); - test('Ensure batch files are supported by powershell core (on windows)', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + test('Ensure batch files are supported by powershell core (on windows)', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - platform.setup(p => p.isWindows).returns(() => true); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); + platform.setup(p => p.isWindows).returns(() => true); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); - // Executing batch files from powershell requires going back to cmd, then into powershell + // Executing batch files from powershell requires going back to cmd, then into powershell - const quotedScriptFile = pathToScriptFile.indexOf(' ') > 0 ? 
`"${pathToScriptFile}"` : pathToScriptFile; - const activationCommand = `${quotedScriptFile}`.trim(); - const commands = [`& cmd /k "${activationCommand} & pwsh"`]; - expect(command).to.be.deep.equal(commands, 'Invalid command'); - }); + const activationCommand = pathToScriptFile.fileToCommandArgument(); + const commands = [`& cmd /k "${activationCommand} & pwsh"`]; + expect(command).to.be.deep.equal(commands, 'Invalid command'); + }); - test('Ensure batch files are not supported by powershell (on non-windows)', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + test('Ensure batch files are not supported by powershell (on non-windows)', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - platform.setup(p => p.isWindows).returns(() => false); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const command = await bash.getActivationCommands(resource, TerminalShellType.powershell); + platform.setup(p => p.isWindows).returns(() => false); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const command = await bash.getActivationCommands(resource, TerminalShellType.powershell); - expect(command).to.be.equal(undefined, 'Invalid command'); - }); + expect(command).to.be.equal(undefined, 'Invalid command'); + }); - test('Ensure batch files are not supported by powershell core (on non-windows)', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + test('Ensure batch files are not supported by powershell core (on non-windows)', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - platform.setup(p => p.isWindows).returns(() => false); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); + platform.setup(p => p.isWindows).returns(() => false); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); - expect(command).to.be.equal(undefined, 'Invalid command'); + expect(command).to.be.equal(undefined, 'Invalid command'); + }); }); - }); - suite('and script file is activate.ps1', () => { - let serviceContainer: TypeMoq.IMock; - let fileSystem: TypeMoq.IMock; - let platform: TypeMoq.IMock; - setup(() => { - serviceContainer = TypeMoq.Mock.ofType(); - fileSystem = TypeMoq.Mock.ofType(); - platform = TypeMoq.Mock.ofType(); - serviceContainer.setup(c => c.get(IFileSystem)).returns(() => fileSystem.object); - serviceContainer.setup(c => c.get(IPlatformService)).returns(() => platform.object); - - const configService = TypeMoq.Mock.ofType(); - serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IConfigurationService))).returns(() => configService.object); - const settings = TypeMoq.Mock.ofType(); - settings.setup(s => s.pythonPath).returns(() => pythonPath); - configService.setup(c => 
c.getSettings(TypeMoq.It.isAny())).returns(() => settings.object); - }); + suite('and script file is activate.ps1', () => { + let serviceContainer: TypeMoq.IMock; + let fileSystem: TypeMoq.IMock; + let platform: TypeMoq.IMock; + setup(() => { + serviceContainer = TypeMoq.Mock.ofType(); + fileSystem = TypeMoq.Mock.ofType(); + platform = TypeMoq.Mock.ofType(); + serviceContainer.setup(c => c.get(IFileSystem)).returns(() => fileSystem.object); + serviceContainer.setup(c => c.get(IPlatformService)).returns(() => platform.object); + + const configService = TypeMoq.Mock.ofType(); + serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IConfigurationService))).returns(() => configService.object); + const settings = TypeMoq.Mock.ofType(); + settings.setup(s => s.pythonPath).returns(() => pythonPath); + configService.setup(c => c.getSettings(TypeMoq.It.isAny())).returns(() => settings.object); + }); - test('Ensure powershell files are not supported by command prompt', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + test('Ensure powershell files are not supported by command prompt', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - platform.setup(p => p.isWindows).returns(() => true); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const command = await bash.getActivationCommands(resource, TerminalShellType.commandPrompt); + platform.setup(p => p.isWindows).returns(() => true); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const command = await bash.getActivationCommands(resource, TerminalShellType.commandPrompt); - expect(command).to.be.deep.equal([], 'Invalid command (running powershell files are not supported on command prompt)'); - }); + expect(command).to.be.deep.equal([], 'Invalid command (running powershell files are not supported on command prompt)'); + }); - test('Ensure powershell files are supported by powershell', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + test('Ensure powershell files are supported by powershell', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - platform.setup(p => p.isWindows).returns(() => true); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const command = await bash.getActivationCommands(resource, TerminalShellType.powershell); + platform.setup(p => p.isWindows).returns(() => true); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const command = await bash.getActivationCommands(resource, TerminalShellType.powershell); - const quotedScriptFile = pathToScriptFile.indexOf(' ') > 0 ? 
`"${pathToScriptFile}"` : pathToScriptFile; - expect(command).to.be.deep.equal([`& ${quotedScriptFile}`.trim()], 'Invalid command'); - }); + expect(command).to.be.deep.equal([`& ${pathToScriptFile.fileToCommandArgument()}`.trim()], 'Invalid command'); + }); - test('Ensure powershell files are supported by powershell core', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + test('Ensure powershell files are supported by powershell core', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - platform.setup(p => p.isWindows).returns(() => true); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); + platform.setup(p => p.isWindows).returns(() => true); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); - const quotedScriptFile = pathToScriptFile.indexOf(' ') > 0 ? `"${pathToScriptFile}"` : pathToScriptFile; - expect(command).to.be.deep.equal([`& ${quotedScriptFile}`.trim()], 'Invalid command'); + expect(command).to.be.deep.equal([`& ${pathToScriptFile.fileToCommandArgument()}`.trim()], 'Invalid command'); + }); }); }); }); - }); }); diff --git a/src/test/common/terminals/helper.test.ts b/src/test/common/terminals/helper.test.ts index dea22cd4eef5..e692f480a6ca 100644 --- a/src/test/common/terminals/helper.test.ts +++ b/src/test/common/terminals/helper.test.ts @@ -102,7 +102,7 @@ suite('Terminal Service helpers', () => { const command = 'c:\\python 3.7.exe'; const args = ['1', '2']; const commandPrefix = (item.value === TerminalShellType.powershell || item.value === TerminalShellType.powershellCore) ? '& ' : ''; - const expectedTerminalCommand = `${commandPrefix}"${command}" 1 2`; + const expectedTerminalCommand = `${commandPrefix}${command.fileToCommandArgument()} 1 2`; const terminalCommand = helper.buildCommandForTerminal(item.value, command, args); expect(terminalCommand).to.equal(expectedTerminalCommand, `Incorrect command for Shell ${item.name}`); @@ -126,7 +126,7 @@ suite('Terminal Service helpers', () => { const command = 'c:\\python 3.7.exe'; const args = []; const commandPrefix = (item.value === TerminalShellType.powershell || item.value === TerminalShellType.powershellCore) ? 
'& ' : ''; - const expectedTerminalCommand = `${commandPrefix}"${command}"`; + const expectedTerminalCommand = `${commandPrefix}${command.fileToCommandArgument()}`; const terminalCommand = helper.buildCommandForTerminal(item.value, command, args); expect(terminalCommand).to.equal(expectedTerminalCommand, `Incorrect command for Shell ${item.name}`); diff --git a/src/test/interpreters/condaEnvFileService.test.ts b/src/test/interpreters/condaEnvFileService.test.ts index 85844e19c022..2207be82732a 100644 --- a/src/test/interpreters/condaEnvFileService.test.ts +++ b/src/test/interpreters/condaEnvFileService.test.ts @@ -104,15 +104,20 @@ suite('Interpreters from Conda Environments Text File', () => { const interpreterPaths = [ path.join(environmentsPath, 'conda', 'envs', 'numpy') ]; + const pythonPath = path.join(interpreterPaths[0], 'pythonPath'); condaService.setup(c => c.condaEnvironmentsFile).returns(() => environmentsFilePath); + condaService.setup(c => c.getInterpreterPath(TypeMoq.It.isAny())).returns(() => pythonPath); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(environmentsFilePath))).returns(() => Promise.resolve(true)); fileSystem.setup(fs => fs.readFile(TypeMoq.It.isValue(environmentsFilePath))).returns(() => Promise.resolve(interpreterPaths.join(EOL))); - AnacondaCompanyNames.forEach(async companyDisplayName => { + for (const companyName of AnacondaCompanyNames) { + const versionWithCompanyName = `Mock Version :: ${companyName}`; + interpreterVersion.setup(c => c.getVersion(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => Promise.resolve(versionWithCompanyName)); const interpreters = await condaFileProvider.getInterpreters(); assert.equal(interpreters.length, 1, 'Incorrect number of entries'); - assert.equal(interpreters[0].displayName, `${AnacondaDisplayName} Mock Version (numpy)`, 'Incorrect display name'); - }); + assert.equal(interpreters[0].displayName, `${AnacondaDisplayName} Mock Version`, 'Incorrect display name'); + } }); }); diff --git a/src/test/markdown/restTextConverter.test.ts b/src/test/markdown/restTextConverter.test.ts index 7b2f9a97cdc8..9b43d4d57657 100644 --- a/src/test/markdown/restTextConverter.test.ts +++ b/src/test/markdown/restTextConverter.test.ts @@ -15,7 +15,7 @@ function compareFiles(expectedContent: string, actualContent: string) { for (let i = 0; i < Math.min(expectedLines.length, actualLines.length); i += 1) { const e = expectedLines[i]; const a = actualLines[i]; - expect(a, `Difference at line ${i}`).to.be.equal(e); + expect(e, `Difference at line ${i}`).to.be.equal(a); } expect(actualLines.length, @@ -29,7 +29,7 @@ async function testConversion(fileName: string): Promise { const cvt = new RestTextConverter(); const file = path.join(srcPythoFilesPath, fileName); const source = await fs.readFile(`${file}.pydoc`, 'utf8'); - const actual = cvt.toMarkdown(source, true); + const actual = cvt.toMarkdown(source); const expected = await fs.readFile(`${file}.md`, 'utf8'); compareFiles(expected, actual); } @@ -40,4 +40,6 @@ suite('Hover - RestTextConverter', () => { test('scipy.spatial', async () => await testConversion('scipy.spatial')); test('scipy.spatial.distance', async () => await testConversion('scipy.spatial.distance')); test('anydbm', async () => await testConversion('anydbm')); + test('aifc', async () => await testConversion('aifc')); + test('astroid', async () => await testConversion('astroid')); }); diff --git 
a/src/test/pythonFiles/markdown/aifc.md b/src/test/pythonFiles/markdown/aifc.md new file mode 100644 index 000000000000..fff22dece1e5 --- /dev/null +++ b/src/test/pythonFiles/markdown/aifc.md @@ -0,0 +1,142 @@ +Stuff to parse AIFF-C and AIFF files. + +Unless explicitly stated otherwise, the description below is true +both for AIFF-C files and AIFF files. + +An AIFF-C file has the following structure. +```html + +-----------------+ + | FORM | + +-----------------+ + | size | + +----+------------+ + | | AIFC | + | +------------+ + | | chunks | + | | . | + | | . | + | | . | + +----+------------+ +``` +An AIFF file has the string "AIFF" instead of "AIFC". + +A chunk consists of an identifier (4 bytes) followed by a size (4 bytes, +big endian order), followed by the data. The size field does not include +the size of the 8 byte header. + +The following chunk types are recognized. +```html + FVER + version number of AIFF-C defining document (AIFF-C only). + MARK + # of markers (2 bytes) + list of markers: + marker ID (2 bytes, must be 0) + position (4 bytes) + marker name ("pstring") + COMM + # of channels (2 bytes) + # of sound frames (4 bytes) + size of the samples (2 bytes) + sampling frequency (10 bytes, IEEE 80-bit extended + floating point) + in AIFF-C files only: + compression type (4 bytes) + human-readable version of compression type ("pstring") + SSND + offset (4 bytes, not used by this program) + blocksize (4 bytes, not used by this program) + sound data +``` +A pstring consists of 1 byte length, a string of characters, and 0 or 1 +byte pad to make the total length even. + +Usage. + +Reading AIFF files: +```html + f = aifc.open(file, 'r') +``` +where file is either the name of a file or an open file pointer. +The open file pointer must have methods read(), seek(), and close(). +In some types of audio files, if the setpos() method is not used, +the seek() method is not necessary. + +This returns an instance of a class with the following public methods: +```html + getnchannels() -- returns number of audio channels (1 for + mono, 2 for stereo) + getsampwidth() -- returns sample width in bytes + getframerate() -- returns sampling frequency + getnframes() -- returns number of audio frames + getcomptype() -- returns compression type ('NONE' for AIFF files) + getcompname() -- returns human-readable version of + compression type ('not compressed' for AIFF files) + getparams() -- returns a tuple consisting of all of the + above in the above order + getmarkers() -- get the list of marks in the audio file or None + if there are no marks + getmark(id) -- get mark with the specified id (raises an error + if the mark does not exist) + readframes(n) -- returns at most n frames of audio + rewind() -- rewind to the beginning of the audio stream + setpos(pos) -- seek to the specified position + tell() -- return the current position + close() -- close the instance (make it unusable) +``` +The position returned by tell(), the position given to setpos() and +the position of marks are all compatible and have nothing to do with +the actual position in the file. +The close() method is called automatically when the class instance +is destroyed. + +Writing AIFF files: +```html + f = aifc.open(file, 'w') +``` +where file is either the name of a file or an open file pointer. +The open file pointer must have methods write(), tell(), seek(), and +close(). 
+ +This returns an instance of a class with the following public methods: +```html + aiff() -- create an AIFF file (AIFF-C default) + aifc() -- create an AIFF-C file + setnchannels(n) -- set the number of channels + setsampwidth(n) -- set the sample width + setframerate(n) -- set the frame rate + setnframes(n) -- set the number of frames + setcomptype(type, name) + -- set the compression type and the + human-readable compression type + setparams(tuple) + -- set all parameters at once + setmark(id, pos, name) + -- add specified mark to the list of marks + tell() -- return current position in output file (useful + in combination with setmark()) + writeframesraw(data) + -- write audio frames without pathing up the + file header + writeframes(data) + -- write audio frames and patch up the file header + close() -- patch up the file header and close the + output file +``` +You should set the parameters before the first writeframesraw or +writeframes. The total number of frames does not need to be set, +but when it is set to the correct value, the header does not have to +be patched up. +It is best to first set all parameters, perhaps possibly the +compression type, and then write audio frames using writeframesraw. +When all frames have been written, either call writeframes('') or +close() to patch up the sizes in the header. +Marks can be added anytime. If there are any marks, you must call +close() after all frames have been written. +The close() method is called automatically when the class instance +is destroyed. + +When a file is opened with the extension '.aiff', an AIFF file is +written, otherwise an AIFF-C file is written. This default can be +changed by calling aiff() or aifc() before the first writeframes or +writeframesraw. \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/aifc.pydoc new file mode 100644 index 000000000000..a4cc346d5531 --- /dev/null +++ b/src/test/pythonFiles/markdown/aifc.pydoc @@ -0,0 +1,134 @@ +Stuff to parse AIFF-C and AIFF files. + +Unless explicitly stated otherwise, the description below is true +both for AIFF-C files and AIFF files. + +An AIFF-C file has the following structure. + + +-----------------+ + | FORM | + +-----------------+ + | <size> | + +----+------------+ + | | AIFC | + | +------------+ + | | <chunks> | + | | . | + | | . | + | | . | + +----+------------+ + +An AIFF file has the string "AIFF" instead of "AIFC". + +A chunk consists of an identifier (4 bytes) followed by a size (4 bytes, +big endian order), followed by the data. The size field does not include +the size of the 8 byte header. + +The following chunk types are recognized. + + FVER + <version number of AIFF-C defining document> (AIFF-C only). + MARK + <# of markers> (2 bytes) + list of markers: + <marker ID> (2 bytes, must be > 0) + <position> (4 bytes) + <marker name> ("pstring") + COMM + <# of channels> (2 bytes) + <# of sound frames> (4 bytes) + <size of the samples> (2 bytes) + <sampling frequency> (10 bytes, IEEE 80-bit extended + floating point) + in AIFF-C files only: + <compression type> (4 bytes) + <human-readable version of compression type> ("pstring") + SSND + <offset> (4 bytes, not used by this program) + <blocksize> (4 bytes, not used by this program) + <sound data> + +A pstring consists of 1 byte length, a string of characters, and 0 or 1 +byte pad to make the total length even. + +Usage. + +Reading AIFF files: + f = aifc.open(file, 'r') +where file is either the name of a file or an open file pointer. +The open file pointer must have methods read(), seek(), and close(). +In some types of audio files, if the setpos() method is not used, +the seek() method is not necessary.
+ +This returns an instance of a class with the following public methods: + getnchannels() -- returns number of audio channels (1 for + mono, 2 for stereo) + getsampwidth() -- returns sample width in bytes + getframerate() -- returns sampling frequency + getnframes() -- returns number of audio frames + getcomptype() -- returns compression type ('NONE' for AIFF files) + getcompname() -- returns human-readable version of + compression type ('not compressed' for AIFF files) + getparams() -- returns a tuple consisting of all of the + above in the above order + getmarkers() -- get the list of marks in the audio file or None + if there are no marks + getmark(id) -- get mark with the specified id (raises an error + if the mark does not exist) + readframes(n) -- returns at most n frames of audio + rewind() -- rewind to the beginning of the audio stream + setpos(pos) -- seek to the specified position + tell() -- return the current position + close() -- close the instance (make it unusable) +The position returned by tell(), the position given to setpos() and +the position of marks are all compatible and have nothing to do with +the actual position in the file. +The close() method is called automatically when the class instance +is destroyed. + +Writing AIFF files: + f = aifc.open(file, 'w') +where file is either the name of a file or an open file pointer. +The open file pointer must have methods write(), tell(), seek(), and +close(). + +This returns an instance of a class with the following public methods: + aiff() -- create an AIFF file (AIFF-C default) + aifc() -- create an AIFF-C file + setnchannels(n) -- set the number of channels + setsampwidth(n) -- set the sample width + setframerate(n) -- set the frame rate + setnframes(n) -- set the number of frames + setcomptype(type, name) + -- set the compression type and the + human-readable compression type + setparams(tuple) + -- set all parameters at once + setmark(id, pos, name) + -- add specified mark to the list of marks + tell() -- return current position in output file (useful + in combination with setmark()) + writeframesraw(data) + -- write audio frames without pathing up the + file header + writeframes(data) + -- write audio frames and patch up the file header + close() -- patch up the file header and close the + output file +You should set the parameters before the first writeframesraw or +writeframes. The total number of frames does not need to be set, +but when it is set to the correct value, the header does not have to +be patched up. +It is best to first set all parameters, perhaps possibly the +compression type, and then write audio frames using writeframesraw. +When all frames have been written, either call writeframes('') or +close() to patch up the sizes in the header. +Marks can be added anytime. If there are any marks, you must call +close() after all frames have been written. +The close() method is called automatically when the class instance +is destroyed. + +When a file is opened with the extension '.aiff', an AIFF file is +written, otherwise an AIFF-C file is written. This default can be +changed by calling aiff() or aifc() before the first writeframes or +writeframesraw. \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/anydbm.md b/src/test/pythonFiles/markdown/anydbm.md index a86897871374..e5914dcbadde 100644 --- a/src/test/pythonFiles/markdown/anydbm.md +++ b/src/test/pythonFiles/markdown/anydbm.md @@ -2,15 +2,13 @@ Generic interface to all dbm clones. 
Instead of ```html - import dbm - d = dbm.open(file, 'w', 0666) - + import dbm + d = dbm.open(file, 'w', 0666) ``` use ```html - import anydbm - d = anydbm.open(file, 'w') - + import anydbm + d = anydbm.open(file, 'w') ``` The returned object is a dbhash, gdbm, dbm or dumbdbm object, dependent on the type of database being opened (determined by whichdb @@ -20,16 +18,15 @@ be determined by the availability of the modules (tested in the above order). It has the following interface (key and data are strings): -```python - d[key] = data # store data at key (may override data at - # existing key) - data = d[key] # retrieve data at key (raise KeyError if no - # such key) - del d[key] # delete data stored at key (raises KeyError - # if no such key) - flag = key in d # true if the key exists - list = d.keys() # return a list of all existing keys (slow!) - +```html + d[key] = data # store data at key (may override data at + # existing key) + data = d[key] # retrieve data at key (raise KeyError if no + # such key) + del d[key] # delete data stored at key (raises KeyError + # if no such key) + flag = key in d # true if the key exists + list = d.keys() # return a list of all existing keys (slow!) ``` Future versions may change the order in which implementations are tested for existence, and add interfaces to other dbm-like diff --git a/src/test/pythonFiles/markdown/astroid.md b/src/test/pythonFiles/markdown/astroid.md new file mode 100644 index 000000000000..b5ece21c1faf --- /dev/null +++ b/src/test/pythonFiles/markdown/astroid.md @@ -0,0 +1,24 @@ +Python Abstract Syntax Tree New Generation + +The aim of this module is to provide a common base representation of +python source code for projects such as pychecker, pyreverse, +pylint... Well, actually the development of this library is essentially +governed by pylint's needs. + +It extends class defined in the python's \_ast module with some +additional methods and attributes. Instance attributes are added by a +builder object, which can either generate extended ast (let's call +them astroid ;) by visiting an existent ast tree or by inspecting living +object. Methods are added by monkey patching ast classes. + +Main modules are: +```html +* nodes and scoped_nodes for more information about methods and + attributes added to different node classes + +* the manager contains a high level object to get astroid trees from + source files and living objects. It maintains a cache of previously + constructed tree for quick access + +* builder contains the class responsible to build astroid trees +``` \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/astroid.pydoc b/src/test/pythonFiles/markdown/astroid.pydoc new file mode 100644 index 000000000000..84d58487ead5 --- /dev/null +++ b/src/test/pythonFiles/markdown/astroid.pydoc @@ -0,0 +1,23 @@ +Python Abstract Syntax Tree New Generation + +The aim of this module is to provide a common base representation of +python source code for projects such as pychecker, pyreverse, +pylint... Well, actually the development of this library is essentially +governed by pylint's needs. + +It extends class defined in the python's _ast module with some +additional methods and attributes. Instance attributes are added by a +builder object, which can either generate extended ast (let's call +them astroid ;) by visiting an existent ast tree or by inspecting living +object. Methods are added by monkey patching ast classes. 
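The monkey patching that the astroid description mentions (attaching methods to classes that already exist) has a direct parallel in this very patch series: elsewhere in this commit, `String.prototype` gains a `fileToCommandArgument()` helper. A minimal TypeScript sketch of the pattern follows, with an invented `surroundedBy` helper used purely for illustration:
```typescript
// Monkey patching in TypeScript: declare the new member on the existing
// interface, then attach the implementation to the prototype.
interface String {
    // Hypothetical helper, for illustration only.
    surroundedBy(prefix: string, suffix: string): string;
}

String.prototype.surroundedBy = function (this: string, prefix: string, suffix: string): string {
    return `${prefix}${this}${suffix}`;
};

// Every string instance now carries the method:
// 'manage.py'.surroundedBy('"', '"') === '"manage.py"'
```
The trade-off is the same in both languages: the technique is convenient, but it mutates a shared global type, which is presumably why this patch gathers such additions in a single `src/client/common/extensions.ts` file.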
+ +Main modules are: + +* nodes and scoped_nodes for more information about methods and + attributes added to different node classes + +* the manager contains a high level object to get astroid trees from + source files and living objects. It maintains a cache of previously + constructed tree for quick access + +* builder contains the class responsible to build astroid trees \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.md b/src/test/pythonFiles/markdown/scipy.md index 23721797aae3..d28c1e290abe 100644 --- a/src/test/pythonFiles/markdown/scipy.md +++ b/src/test/pythonFiles/markdown/scipy.md @@ -11,38 +11,37 @@ addition provides: Using any of these subpackages requires an explicit import. For example, `import scipy.cluster`. ```html - cluster --- Vector Quantization / Kmeans - fftpack --- Discrete Fourier Transform algorithms - integrate --- Integration routines - interpolate --- Interpolation Tools - io --- Data input and output - linalg --- Linear algebra routines - linalg.blas --- Wrappers to BLAS library - linalg.lapack --- Wrappers to LAPACK library - misc --- Various utilities that don't have - another home. - ndimage --- n-dimensional image package - odr --- Orthogonal Distance Regression - optimize --- Optimization Tools - signal --- Signal Processing Tools - sparse --- Sparse Matrices - sparse.linalg --- Sparse Linear Algebra - sparse.linalg.dsolve --- Linear Solvers - sparse.linalg.dsolve.umfpack --- **Interface to the UMFPACK library** - Conjugate Gradient Method (LOBPCG) - sparse.linalg.eigen --- Sparse Eigenvalue Solvers - sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned - Conjugate Gradient Method (LOBPCG) - spatial --- Spatial data structures and algorithms - special --- Special functions - stats --- Statistical Functions - + cluster --- Vector Quantization / Kmeans + fftpack --- Discrete Fourier Transform algorithms + integrate --- Integration routines + interpolate --- Interpolation Tools + io --- Data input and output + linalg --- Linear algebra routines + linalg.blas --- Wrappers to BLAS library + linalg.lapack --- Wrappers to LAPACK library + misc --- Various utilities that don't have + another home. 
+ ndimage --- n-dimensional image package + odr --- Orthogonal Distance Regression + optimize --- Optimization Tools + signal --- Signal Processing Tools + sparse --- Sparse Matrices + sparse.linalg --- Sparse Linear Algebra + sparse.linalg.dsolve --- Linear Solvers + sparse.linalg.dsolve.umfpack --- :Interface to the UMFPACK library: + Conjugate Gradient Method (LOBPCG) + sparse.linalg.eigen --- Sparse Eigenvalue Solvers + sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned + Conjugate Gradient Method (LOBPCG) + spatial --- Spatial data structures and algorithms + special --- Special functions + stats --- Statistical Functions ``` #### Utility tools ```html - test --- Run scipy unittests - show_config --- Show scipy build configuration - show_numpy_config --- Show numpy build configuration - __version__ --- Scipy version string - __numpy_version__ --- Numpy version string + test --- Run scipy unittests + show_config --- Show scipy build configuration + show_numpy_config --- Show numpy build configuration + __version__ --- Scipy version string + __numpy_version__ --- Numpy version string ``` \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.spatial.distance.md b/src/test/pythonFiles/markdown/scipy.spatial.distance.md index 125b19f6cdeb..276acddef787 100644 --- a/src/test/pythonFiles/markdown/scipy.spatial.distance.md +++ b/src/test/pythonFiles/markdown/scipy.spatial.distance.md @@ -1,4 +1,4 @@ -### Distance computations (**mod**`scipy.spatial.distance`) +### Distance computations (module:`scipy.spatial.distance`) #### Function Reference @@ -6,53 +6,49 @@ Distance matrix computation from a collection of raw observation vectors stored in a rectangular array. ```html - pdist -- pairwise distances between observation vectors. - cdist -- distances between two collections of observation vectors - squareform -- convert distance matrix to a condensed one and vice versa - directed_hausdorff -- directed Hausdorff distance between arrays - + pdist -- pairwise distances between observation vectors. + cdist -- distances between two collections of observation vectors + squareform -- convert distance matrix to a condensed one and vice versa + directed_hausdorff -- directed Hausdorff distance between arrays ``` Predicates for checking the validity of distance matrices, both condensed and redundant. Also contained in this module are functions for computing the number of observations in a distance matrix. ```html - is_valid_dm -- checks for a valid distance matrix - is_valid_y -- checks for a valid condensed distance matrix - num_obs_dm -- # of observations in a distance matrix - num_obs_y -- # of observations in a condensed distance matrix - + is_valid_dm -- checks for a valid distance matrix + is_valid_y -- checks for a valid condensed distance matrix + num_obs_dm -- # of observations in a distance matrix + num_obs_y -- # of observations in a condensed distance matrix ``` Distance functions between two numeric vectors `u` and `v`. Computing distances over a large collection of vectors is inefficient for these functions. Use `pdist` for this purpose. ```html - braycurtis -- the Bray-Curtis distance. - canberra -- the Canberra distance. - chebyshev -- the Chebyshev distance. - cityblock -- the Manhattan distance. - correlation -- the Correlation distance. - cosine -- the Cosine distance. - euclidean -- the Euclidean distance. - mahalanobis -- the Mahalanobis distance. - minkowski -- the Minkowski distance. - seuclidean -- the normalized Euclidean distance. 
- sqeuclidean -- the squared Euclidean distance. - wminkowski -- (deprecated) alias of `minkowski`. - + braycurtis -- the Bray-Curtis distance. + canberra -- the Canberra distance. + chebyshev -- the Chebyshev distance. + cityblock -- the Manhattan distance. + correlation -- the Correlation distance. + cosine -- the Cosine distance. + euclidean -- the Euclidean distance. + mahalanobis -- the Mahalanobis distance. + minkowski -- the Minkowski distance. + seuclidean -- the normalized Euclidean distance. + sqeuclidean -- the squared Euclidean distance. + wminkowski -- (deprecated) alias of `minkowski`. ``` Distance functions between two boolean vectors (representing sets) `u` and `v`. As in the case of numerical vectors, `pdist` is more efficient for computing the distances between all pairs. ```html - dice -- the Dice dissimilarity. - hamming -- the Hamming distance. - jaccard -- the Jaccard distance. - kulsinski -- the Kulsinski distance. - rogerstanimoto -- the Rogers-Tanimoto dissimilarity. - russellrao -- the Russell-Rao dissimilarity. - sokalmichener -- the Sokal-Michener dissimilarity. - sokalsneath -- the Sokal-Sneath dissimilarity. - yule -- the Yule dissimilarity. - + dice -- the Dice dissimilarity. + hamming -- the Hamming distance. + jaccard -- the Jaccard distance. + kulsinski -- the Kulsinski distance. + rogerstanimoto -- the Rogers-Tanimoto dissimilarity. + russellrao -- the Russell-Rao dissimilarity. + sokalmichener -- the Sokal-Michener dissimilarity. + sokalsneath -- the Sokal-Sneath dissimilarity. + yule -- the Yule dissimilarity. ``` -**func**`hamming` also operates over discrete numerical vectors. \ No newline at end of file +:func:`hamming` also operates over discrete numerical vectors. \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.spatial.md b/src/test/pythonFiles/markdown/scipy.spatial.md index 3584e78f1bbc..2d5e891db625 100644 --- a/src/test/pythonFiles/markdown/scipy.spatial.md +++ b/src/test/pythonFiles/markdown/scipy.spatial.md @@ -1,57 +1,51 @@ -### Spatial algorithms and data structures (**mod**`scipy.spatial`) +### Spatial algorithms and data structures (module:`scipy.spatial`) ### Nearest-neighbor Queries ```html - KDTree -- class for efficient nearest-neighbor queries - cKDTree -- class for efficient nearest-neighbor queries (faster impl.) - distance -- module containing many different distance measures - Rectangle - + KDTree -- class for efficient nearest-neighbor queries + cKDTree -- class for efficient nearest-neighbor queries (faster impl.) 
+ distance -- module containing many different distance measures + Rectangle ``` ### Delaunay Triangulation, Convex Hulls and Voronoi Diagrams ```html - Delaunay -- compute Delaunay triangulation of input points - ConvexHull -- compute a convex hull for input points - Voronoi -- compute a Voronoi diagram hull from input points - SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere - HalfspaceIntersection -- compute the intersection points of input halfspaces - + Delaunay -- compute Delaunay triangulation of input points + ConvexHull -- compute a convex hull for input points + Voronoi -- compute a Voronoi diagram hull from input points + SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere + HalfspaceIntersection -- compute the intersection points of input halfspaces ``` ### Plotting Helpers ```html - delaunay_plot_2d -- plot 2-D triangulation - convex_hull_plot_2d -- plot 2-D convex hull - voronoi_plot_2d -- plot 2-D voronoi diagram - + delaunay_plot_2d -- plot 2-D triangulation + convex_hull_plot_2d -- plot 2-D convex hull + voronoi_plot_2d -- plot 2-D voronoi diagram ``` ### Simplex representation The simplices (triangles, tetrahedra, ...) appearing in the Delaunay tesselation (N-dim simplices), convex hull facets, and Voronoi ridges (N-1 dim simplices) are represented in the following scheme: ```html - tess = Delaunay(points) - hull = ConvexHull(points) - voro = Voronoi(points) - - # coordinates of the j-th vertex of the i-th simplex - tess.points[tess.simplices[i, j], :] # tesselation element - hull.points[hull.simplices[i, j], :] # convex hull facet - voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells - + tess = Delaunay(points) + hull = ConvexHull(points) + voro = Voronoi(points) + + # coordinates of the j-th vertex of the i-th simplex + tess.points[tess.simplices[i, j], :] # tesselation element + hull.points[hull.simplices[i, j], :] # convex hull facet + voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells ``` For Delaunay triangulations and convex hulls, the neighborhood structure of the simplices satisfies the condition: ```html - `tess.neighbors[i,j]` is the neighboring simplex of the i-th - simplex, opposite to the j-vertex. It is -1 in case of no - neighbor. - + `tess.neighbors[i,j]` is the neighboring simplex of the i-th + simplex, opposite to the j-vertex. It is -1 in case of no + neighbor. ``` Convex hull facets also define a hyperplane equation: ```html - (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0 - + (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0 ``` Similar hyperplane equations for the Delaunay triangulation correspond to the convex hull facets on the corresponding N+1 dimensional @@ -63,9 +57,9 @@ computations. 
#### Functions ```html - tsearch - distance_matrix - minkowski_distance - minkowski_distance_p - procrustes + tsearch + distance_matrix + minkowski_distance + minkowski_distance_p + procrustes ``` \ No newline at end of file diff --git a/src/test/refactor/extension.refactor.extract.var.test.ts b/src/test/refactor/extension.refactor.extract.var.test.ts index 5ce6cc3f743c..d12283a74198 100644 --- a/src/test/refactor/extension.refactor.extract.var.test.ts +++ b/src/test/refactor/extension.refactor.extract.var.test.ts @@ -101,13 +101,13 @@ suite('Variable Extraction', () => { test('Extract Variable', async () => { const startPos = new vscode.Position(234, 29); const endPos = new vscode.Position(234, 38); - testingVariableExtraction(false, startPos, endPos); + await testingVariableExtraction(false, startPos, endPos); }); test('Extract Variable fails if whole string not selected', async () => { const startPos = new vscode.Position(234, 20); const endPos = new vscode.Position(234, 38); - testingVariableExtraction(true, startPos, endPos); + await testingVariableExtraction(true, startPos, endPos); }); function testingVariableExtractionEndToEnd(shouldError: boolean, startPos: Position, endPos: Position) { diff --git a/src/test/terminals/codeExecution/djangoShellCodeExect.test.ts b/src/test/terminals/codeExecution/djangoShellCodeExect.test.ts index 4778ffa55512..7b1dc4a55742 100644 --- a/src/test/terminals/codeExecution/djangoShellCodeExect.test.ts +++ b/src/test/terminals/codeExecution/djangoShellCodeExect.test.ts @@ -150,7 +150,7 @@ suite('Terminal - Django Shell Code Execution', () => { const workspaceFolder: WorkspaceFolder = { index: 0, name: 'blah', uri: workspaceUri }; workspace.setup(w => w.getWorkspaceFolder(TypeMoq.It.isAny())).returns(() => undefined); workspace.setup(w => w.workspaceFolders).returns(() => [workspaceFolder]); - const expectedTerminalArgs = terminalArgs.concat(path.join(workspaceUri.fsPath, 'manage.py'), 'shell'); + const expectedTerminalArgs = terminalArgs.concat(path.join(workspaceUri.fsPath, 'manage.py').fileToCommandArgument(), 'shell'); testReplCommandArguments(true, pythonPath, pythonPath, terminalArgs, expectedTerminalArgs, Uri.file('x')); }); diff --git a/src/test/terminals/codeExecution/terminalCodeExec.test.ts b/src/test/terminals/codeExecution/terminalCodeExec.test.ts index 90c92560ee20..0a907c1ce548 100644 --- a/src/test/terminals/codeExecution/terminalCodeExec.test.ts +++ b/src/test/terminals/codeExecution/terminalCodeExec.test.ts @@ -154,7 +154,8 @@ suite('Terminal Code Execution', () => { terminalSettings.setup(t => t.launchArgs).returns(() => []); await executor.executeFile(file); - terminalService.verify(async t => await t.sendText(TypeMoq.It.isValue(`cd "${path.dirname(file.fsPath)}"`)), TypeMoq.Times.once()); + const dir = `"${path.dirname(file.fsPath)}"`.fileToCommandArgument(); + terminalService.verify(async t => await t.sendText(TypeMoq.It.isValue(`cd ${dir}`)), TypeMoq.Times.once()); } test('Ensure we set current directory (and quote it when containing spaces) before executing file (non windows)', async () => { @@ -213,7 +214,7 @@ suite('Terminal Code Execution', () => { await executor.executeFile(file); const expectedPythonPath = isWindows ? pythonPath.replace(/\\/g, '/') : pythonPath; - const expectedArgs = terminalArgs.concat(file.fsPath.indexOf(' ') > 0 ? 
`"${file.fsPath}"` : file.fsPath); + const expectedArgs = terminalArgs.concat(file.fsPath.fileToCommandArgument()); terminalService.verify(async t => await t.sendCommand(TypeMoq.It.isValue(expectedPythonPath), TypeMoq.It.isValue(expectedArgs)), TypeMoq.Times.once()); } diff --git a/src/test/unittests/debugger.test.ts b/src/test/unittests/debugger.test.ts index 2daba9a848e1..c526961527a9 100644 --- a/src/test/unittests/debugger.test.ts +++ b/src/test/unittests/debugger.test.ts @@ -71,15 +71,30 @@ suite('Unit Tests - debugging', () => { assert.equal(tests.testFunctions.length, 2, 'Incorrect number of test functions'); assert.equal(tests.testSuites.length, 2, 'Incorrect number of test suites'); + const deferred = createDeferred(); const testFunction = [tests.testFunctions[0].testFunction]; - testManager.runTest(CommandSource.commandPalette, { testFunction }, false, true); - const launched = await mockDebugLauncher.launched; - assert.isTrue(launched, 'Debugger not launched'); + const runningPromise = testManager.runTest(CommandSource.commandPalette, { testFunction }, false, true); + + // This promise should never resolve nor reject. + runningPromise + .then(() => deferred.reject('Debugger stopped when it shouldn\'t have')) + .catch(error => deferred.reject(error)); + + mockDebugLauncher.launched + .then((launched) => { + if (launched) { + deferred.resolve(''); + } else { + deferred.reject('Debugger not launched'); + } + }) .catch(error => deferred.reject(error)); + + await deferred.promise; } test('Debugger should start (unittest)', async () => { await updateSetting('unitTest.unittestArgs', ['-s=./tests', '-p=test_*.py'], rootWorkspaceUri, configTarget); - await testStartingDebugger('unittest'); + await testStartingDebugger('unittest'); }); test('Debugger should start (pytest)', async () => { @@ -105,9 +120,10 @@ suite('Unit Tests - debugging', () => { const launched = await mockDebugLauncher.launched; assert.isTrue(launched, 'Debugger not launched'); - testManager.discoverTests(CommandSource.commandPalette, true, true, true); - + const discoveryPromise = testManager.discoverTests(CommandSource.commandPalette, true, true, true); await expect(runningPromise).to.be.rejectedWith(CANCELLATION_REASON, 'Incorrect reason for ending the debugger'); + ioc.dispose(); // will cancel test discovery + await expect(discoveryPromise).to.be.rejectedWith(CANCELLATION_REASON, 'Incorrect reason for ending the debugger'); } test('Debugger should stop when user invokes a test discovery (unittest)', async () => { @@ -151,6 +167,7 @@ suite('Unit Tests - debugging', () => { runningPromise .then(() => 'Debugger stopped when it shouldn\'t have') .catch(() => 'Debugger crashed when it shouldn\'t have') + // tslint:disable-next-line: no-floating-promises .then(error => { deferred.reject(error); }); diff --git a/src/test/unittests/stoppingDiscoverAndTest.test.ts b/src/test/unittests/stoppingDiscoverAndTest.test.ts index 3386ee2b6955..3b3558f12bd2 100644 --- a/src/test/unittests/stoppingDiscoverAndTest.test.ts +++ b/src/test/unittests/stoppingDiscoverAndTest.test.ts @@ -5,6 +5,7 @@ import { expect, use } from 'chai'; import * as chaiAsPromised from 'chai-as-promised'; import * as path from 'path'; import { Uri } from 'vscode'; +import {createDeferred} from '../../client/common/helpers'; import { Product } from '../../client/common/types'; import { CANCELLATION_REASON, CommandSource, UNITTEST_PROVIDER } from '../../client/unittests/common/constants'; import { ITestDiscoveryService } from 
'../../client/unittests/common/types'; @@ -60,9 +61,23 @@ suite('Unit Tests Stopping Discovery and Runner', () => { const discoveryPromise = mockTestManager.discoverTests(CommandSource.auto); mockTestManager.discoveryDeferred.resolve(EmptyTests); - mockTestManager.runTest(CommandSource.ui); + const runningPromise = mockTestManager.runTest(CommandSource.ui); + const deferred = createDeferred(); - await expect(discoveryPromise).to.eventually.equal(EmptyTests); + // This promise should never resolve nor reject. + runningPromise + .then(() => Promise.reject('Debugger stopped when it shouldn\'t have')) + .catch(error => deferred.reject(error)); + + discoveryPromise.then(result => { + if (result === EmptyTests) { + deferred.resolve(''); + } else { + deferred.reject('tests not empty'); + } + }).catch(error => deferred.reject(error)); + + await deferred.promise; }); test('Discovering tests should stop running tests', async () => { @@ -75,7 +90,7 @@ suite('Unit Tests Stopping Discovery and Runner', () => { await new Promise(resolve => setTimeout(resolve, 1000)); // User manually discovering tests will kill the existing test runner. - mockTestManager.discoverTests(CommandSource.ui, true, false, true); + await mockTestManager.discoverTests(CommandSource.ui, true, false, true); await expect(runPromise).to.eventually.be.rejectedWith(CANCELLATION_REASON); }); }); From 3a0cfb1dca529aee3d3e3f474d00f10c74c5f592 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 7 Feb 2018 13:51:15 -0800 Subject: [PATCH 048/103] no message --- CONTRIBUTING.md | 56 ++-- src/client/common/extensions.ts | 18 +- .../common/markdown/restTextConverter.ts | 230 ++++++++------ src/client/common/terminal/helper.ts | 2 +- src/client/providers/itemInfoSource.ts | 1 + .../codeExecution/codeExecutionManager.ts | 5 +- .../codeExecution/djangoShellCodeExecution.ts | 2 +- .../codeExecution/terminalCodeExecution.ts | 4 +- src/client/unittests/main.ts | 2 +- src/test/common/extensions.test.ts | 42 +++ .../common/terminals/activation.bash.test.ts | 6 +- .../activation.commandPrompt.test.ts | 288 +++++++++--------- src/test/common/terminals/helper.test.ts | 4 +- .../interpreters/condaEnvFileService.test.ts | 11 +- src/test/language/tokenizer.test.ts | 7 + src/test/markdown/restTextConverter.test.ts | 7 +- src/test/pythonFiles/markdown/aifc.md | 142 +++++++++ src/test/pythonFiles/markdown/aifc.pydoc | 134 ++++++++ src/test/pythonFiles/markdown/anydbm.md | 33 ++ src/test/pythonFiles/markdown/anydbm.pydoc | 33 ++ src/test/pythonFiles/markdown/astroid.md | 24 ++ src/test/pythonFiles/markdown/astroid.pydoc | 23 ++ src/test/pythonFiles/markdown/scipy.md | 47 +++ src/test/pythonFiles/markdown/scipy.pydoc | 53 ++++ .../markdown/scipy.spatial.distance.md | 66 ++-- .../pythonFiles/markdown/scipy.spatial.md | 65 ++++ .../pythonFiles/markdown/scipy.spatial.pydoc | 86 ++++++ .../extension.refactor.extract.var.test.ts | 4 +- .../djangoShellCodeExect.test.ts | 2 +- .../codeExecution/terminalCodeExec.test.ts | 5 +- src/test/unittests/debugger.test.ts | 29 +- .../unittests/stoppingDiscoverAndTest.test.ts | 21 +- 32 files changed, 1114 insertions(+), 338 deletions(-) create mode 100644 src/test/common/extensions.test.ts create mode 100644 src/test/pythonFiles/markdown/aifc.md create mode 100644 src/test/pythonFiles/markdown/aifc.pydoc create mode 100644 src/test/pythonFiles/markdown/anydbm.md create mode 100644 src/test/pythonFiles/markdown/anydbm.pydoc create mode 100644 src/test/pythonFiles/markdown/astroid.md create mode 100644 
src/test/pythonFiles/markdown/astroid.pydoc
 create mode 100644 src/test/pythonFiles/markdown/scipy.md
 create mode 100644 src/test/pythonFiles/markdown/scipy.pydoc
 create mode 100644 src/test/pythonFiles/markdown/scipy.spatial.md
 create mode 100644 src/test/pythonFiles/markdown/scipy.spatial.pydoc
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 04abf6a0df58..2d5b4456385a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -93,7 +93,7 @@ for a release are tracked in a
[milestone](https://github.com/Microsoft/vscode-python/milestones)
(which is actively updated as plans change).

-The overall schedule for a release is to code freeze for on the last
+The overall schedule for a release is to feature freeze on the last
Monday of the month to coincide with Visual Studio Code's code freeze.
We then aim to release later that week so the latest version of the
extension is already live by the time Visual Studio Code launches
@@ -104,16 +104,10 @@ between scheduled releases as necessary.
All development is actively done in the `master` branch of the
repository. It is what allows us to have an
[insiders build](#insiders-build) which is expected to be stable at
-all times. We do keep the previous release as a branch in case the
+all times. We do keep the most recent release as a branch in case the
need for a bugfix release arises. But once a new release is made we
-convert the older branch into a tag and delete the branch as
-Visual Studio Code's automatic updating makes keeping old versions
-around unnecessary.
-
-Since we try to spend about 25% of our development time fixing bugs
-and removing technical debt, the week of a release is mostly spent
-focusing on that topic. That way we don't ignore the health of the
-code base by accidentally focusing on new features exclusively.
+delete the older release branch (all releases are appropriately
+tagged, so no history is lost).

### Issue triaging

@@ -142,21 +136,17 @@ lexicographically sort from earliest stage to latest stage).
The suffix term for each label then specifies what is currently
blocking the issue from being closed.
-* `1-` - * [`decision`](https://github.com/Microsoft/vscode-python/labels/awaiting%201-decision): - The issue is a feature enhancement request and a decision has not - been made as to whether we would accept a pull request - implementing the enhancement - * [`more info`](https://github.com/Microsoft/vscode-python/labels/awaiting%201-more%20info): - We need more information from the OP (original poster) - * [`verification`](https://github.com/Microsoft/vscode-python/labels/awaiting%201-verification): - We need to verify that the issue can be replicated +* [`1-decision`](https://github.com/Microsoft/vscode-python/labels/awaiting%201-decision): +The issue is a feature enhancement request and a decision has not +been made as to whether we would accept a pull request +implementing the enhancement +* [`1-more info`](https://github.com/Microsoft/vscode-python/labels/awaiting%201-more%20info): +We need more information from the OP (original poster) +* [`1-verification`](https://github.com/Microsoft/vscode-python/labels/awaiting%201-verification): + We need to verify that the issue is reproducible * [`2-PR`](https://github.com/Microsoft/vscode-python/labels/awaiting%202-PR): The issue is valid and is now awaiting a fix to be created and merged into the `master` branch -* [`4-validation`](https://github.com/Microsoft/vscode-python/labels/awaiting%204-validation): - A pull request has been merged and resolution of the issue should be - independently validated #### Closed issues @@ -168,22 +158,21 @@ it should have an appropriate `closed-` label. 1. Check that there is an issue corresponding to what the pull request is attempting to address - * If an issue exists, make sure it has reached the stage of being - labeled `awaiting 2-PR` + * If an issue exists, make sure it has reached the stage of + `awaiting 2-PR` * If no issue exists, open one and wait for it to reach the `awaiting 2-PR` stage before submitting the pull request -1. Open the pull request, mentioning the appropriate issue(s) in the +1. Create the pull request, mentioning the appropriate issue(s) in the pull request message body * The pull request is expected to have appropriate unit tests * The pull request must pass its CI run before merging will be considered - * Code coverage is expected to not worsen + * Code coverage is expected to (at minimum) not worsen 1. Make sure all status checks are green (e.g. CLA check, CI, etc.) 1. Address any review comments 1. [Maintainers only] Merge the pull request 1. [Maintainers only] Update affected issues to be: 1. Closed (with an appropriate `closed-` label) - 1. The stage is set to `awaiting 4-validation` 1. The issue(s) are attached to the current milestone 1. Register OSS usage 1. Email CELA about any 3rd-party usage changes @@ -194,11 +183,12 @@ Starting in 2018, the extension switched to [calendar versioning](http://calver.org/) since the extension auto-updates and thus there is no need to track its version number for backwards-compatibility. As such, the major version -is the current year, the minor version is the current month, and -the micro version is how many releases there have been that month in -the year (starting at 0). For example, the first release in July 2018 -would be `2018.7.0`, the second release that month would be -`2018.7.1`, etc. +is the current year, the minor version is the month when feature +freeze was reached, and the micro version is how many releases there +have been since that feature freeze (starting at 0). 
For example +the release made when we reach feature freeze in July 2018 +would be `2018.7.0`, and if a second release was necessary to fix a +critical bug it would be `2018.7.1`. ## Insiders Build @@ -214,7 +204,7 @@ file, please follow the instructions on [this page](https://code.visualstudio.com/docs/editor/extension-gallery#_install-from-a-vsix) to install the extension. -The insiders build of the extension ...: +The insiders build of the extension: * Will be replaced with new releases published onto the [VS Code Marketplace](https://marketplace.visualstudio.com/VSCode). diff --git a/src/client/common/extensions.ts b/src/client/common/extensions.ts index 629b809e8d06..71858e6e01da 100644 --- a/src/client/common/extensions.ts +++ b/src/client/common/extensions.ts @@ -20,6 +20,11 @@ interface String { * E.g. if an argument contains a space, then it will be enclosed within double quotes. */ toCommandArgument(): string; + /** + * Appropriately formats a a file path so it can be used as an argument for a command in a shell. + * E.g. if an argument contains a space, then it will be enclosed within double quotes. + */ + fileToCommandArgument(): string; } /** @@ -47,5 +52,16 @@ String.prototype.toCommandArgument = function (this: string): string { if (!this) { return this; } - return (this.indexOf(' ') > 0 && !this.startsWith('"') && !this.endsWith('"')) ? `"${this}"` : this.toString(); + return (this.indexOf(' ') >= 0 && !this.startsWith('"') && !this.endsWith('"')) ? `"${this}"` : this.toString(); +}; + +/** + * Appropriately formats a a file path so it can be used as an argument for a command in a shell. + * E.g. if an argument contains a space, then it will be enclosed within double quotes. + */ +String.prototype.fileToCommandArgument = function (this: string): string { + if (!this) { + return this; + } + return this.toCommandArgument().replace(/\\/g, '/'); }; diff --git a/src/client/common/markdown/restTextConverter.ts b/src/client/common/markdown/restTextConverter.ts index 485801a35f49..e606fd46bfbc 100644 --- a/src/client/common/markdown/restTextConverter.ts +++ b/src/client/common/markdown/restTextConverter.ts @@ -2,11 +2,18 @@ // Licensed under the MIT License. import { EOL } from 'os'; -import { isWhiteSpace } from '../../language/characters'; +// tslint:disable-next-line:import-name +import Char from 'typescript-char'; +import { isDecimal, isWhiteSpace } from '../../language/characters'; + +enum State { + Default, + Preformatted, + Code +} export class RestTextConverter { - private inPreBlock = false; - private inCodeBlock = false; + private state: State = State.Default; private md: string[] = []; // tslint:disable-next-line:cyclomatic-complexity @@ -16,14 +23,8 @@ export class RestTextConverter { // and documentation in the completion list. // See https://en.wikipedia.org/wiki/ReStructuredText - // Determine if this is actually a reStructruredText. - if (docstring.indexOf('::') < 0 && docstring.indexOf('..')) { - // If documentation contains markdown symbols such as ** (power of) in code, escape them. - return this.escapeMarkdown(docstring); - } const result = this.transformLines(docstring); - - this.inPreBlock = this.inPreBlock = false; + this.state = State.Default; this.md = []; return result; @@ -33,106 +34,150 @@ export class RestTextConverter { // Not complete escape list so it does not interfere // with subsequent code highlighting (see above). 
return text - .replace(/\\/g, '\\\\') - .replace(/\*/g, '\\*') - .replace(/\_/g, '\\_') - .replace(/\{/g, '\\{') - .replace(/\}/g, '\\}') - .replace(/\[/g, '\\[') - .replace(/\]/g, '\\]') - .replace(/\(/g, '\\(') - .replace(/\)/g, '\\)') .replace(/\#/g, '\\#') - .replace(/\+/g, '\\+') - .replace(/\-/g, '\\-') - .replace(/\!/g, '\\!'); + .replace(/\*/g, '\\*') + .replace(/\_/g, '\\_'); } - // tslint:disable-next-line:cyclomatic-complexity private transformLines(docstring: string): string { const lines = docstring.split(/\r?\n/); for (let i = 0; i < lines.length; i += 1) { - let line = lines[i]; - + const line = lines[i]; // Avoid leading empty lines if (this.md.length === 0 && line.length === 0) { continue; } - if (this.handleCodeBlock(line)) { - continue; + switch (this.state) { + case State.Default: + i += this.inDefaultState(lines, i); + break; + case State.Preformatted: + i += this.inPreformattedState(lines, i); + break; + case State.Code: + this.inCodeState(line); + break; + default: + break; } + } - if (this.inPreBlock) { - // Preformatted block terminates by a line without leading - // whitespace or any special line like ..ABC::. - if (line.length > 0 && !isWhiteSpace(line.charCodeAt(0))) { - this.endPreformattedBlock(); - } - } + this.endCodeBlock(); + this.endPreformattedBlock(); - if (this.handleSectionHeader(lines, i)) { - i += 1; // Eat line with === or --- - continue; - } + return this.md.join(EOL).trim(); + } - if (line.indexOf('generated/') >= 0) { - continue; // ignore generated content. - } - if (line.startsWith('===') || line.startsWith('---')) { - continue; // Eat standalone === or --- lines. - } + private inDefaultState(lines: string[], i: number): number { + let line = lines[i]; + if (line.startsWith('```')) { + this.startCodeBlock(); + return 0; + } - if (this.handleDoubleColon(line)) { - continue; - } - if (line.startsWith('..') && line.indexOf('::') > 0) { - // Ignore lines likes .. sectionauthor:: John Doe. - continue; - } + if (line.startsWith('===') || line.startsWith('---')) { + return 0; // Eat standalone === or --- lines. + } + if (this.handleDoubleColon(line)) { + return 0; + } + if (this.isIgnorable(line)) { + return 0; + } - line = this.convertEmphasis(line); - line = line.replace(/``/g, '`'); // Convert double backticks to single. + if (this.handleSectionHeader(lines, i)) { + return 1; // Eat line with === or --- + } - if (line.length > 0 && isWhiteSpace(line.charCodeAt(0))) { - // Keep hard line breaks for the pre-indented content. - line = ` ${line} `; - } + const result = this.checkPreContent(lines, i); + if (this.state !== State.Default) { + return result; // Handle line in the new state + } - const prevLine = this.md.length > 0 ? this.md[this.md.length - 1] : undefined; - if (line.length === 0 && prevLine && (prevLine.length === 0 || prevLine.startsWith('```'))) { - continue; // Avoid more than one empty line in a row. - } + line = this.cleanup(line); + line = line.replace(/``/g, '`'); // Convert double backticks to single. + line = this.escapeMarkdown(line); + this.md.push(line); - this.md.push(line); + return 0; + } + + private inPreformattedState(lines: string[], i: number): number { + let line = lines[i]; + if (this.isIgnorable(line)) { + return 0; + } + // Preformatted block terminates by a line without leading whitespace. 
+ if (line.length > 0 && !isWhiteSpace(line.charCodeAt(0)) && !this.isListItem(line)) { + this.endPreformattedBlock(); + return -1; } - this.tryEndCodePreBlocks(); - return this.md.join(EOL).trim(); + const prevLine = this.md.length > 0 ? this.md[this.md.length - 1] : undefined; + if (line.length === 0 && prevLine && (prevLine.length === 0 || prevLine.startsWith('```'))) { + return 0; // Avoid more than one empty line in a row. + } + + // Since we use HTML blocks as preformatted text + // make sure we drop angle brackets since otherwise + // they will render as tags and attributes + line = line.replace(//g, ' '); + line = line.replace(/``/g, '`'); // Convert double backticks to single. + // Keep hard line breaks for the preformatted content + this.md.push(`${line} `); + return 0; } - private handleCodeBlock(line: string): boolean { - if (!line.startsWith('```')) { - return false; + private inCodeState(line: string): void { + const prevLine = this.md.length > 0 ? this.md[this.md.length - 1] : undefined; + if (line.length === 0 && prevLine && (prevLine.length === 0 || prevLine.startsWith('```'))) { + return; // Avoid more than one empty line in a row. } - if (this.inCodeBlock) { + + if (line.startsWith('```')) { this.endCodeBlock(); } else { - this.startCodeBlock(); + this.md.push(line); } - return true; + } + + private isIgnorable(line: string): boolean { + if (line.indexOf('generated/') >= 0) { + return true; // Drop generated content. + } + const trimmed = line.trim(); + if (trimmed.startsWith('..') && trimmed.indexOf('::') > 0) { + // Ignore lines likes .. sectionauthor:: John Doe. + return true; + } + return false; + } + + private checkPreContent(lines: string[], i: number): number { + const line = lines[i]; + if (i === 0 || line.trim().length === 0) { + return 0; + } + + if (!isWhiteSpace(line.charCodeAt(0)) && !this.isListItem(line)) { + return 0; // regular line, nothing to do here. + } + // Indented content is considered to be preformatted. + this.startPreformattedBlock(); + return -1; } private handleSectionHeader(lines: string[], i: number): boolean { const line = lines[i]; if (i < lines.length - 1 && (lines[i + 1].startsWith('==='))) { // Section title -> heading level 3. - this.md.push(`### ${this.convertEmphasis(line)}`); + this.md.push(`### ${this.cleanup(line)}`); return true; } if (i < lines.length - 1 && (lines[i + 1].startsWith('---'))) { // Subsection title -> heading level 4. - this.md.push(`#### ${this.convertEmphasis(line)}`); + this.md.push(`#### ${this.cleanup(line)}`); return true; } return false; @@ -154,53 +199,52 @@ export class RestTextConverter { return true; } - private tryEndCodePreBlocks(): void { - if (this.inCodeBlock) { - this.endCodeBlock(); - } - if (this.inPreBlock) { - this.endPreformattedBlock(); - } - } - private startPreformattedBlock(): void { // Remove previous empty line so we avoid double empties. - this.tryRemovePrecedingEmptyLine(); + this.tryRemovePrecedingEmptyLines(); // Lie about the language since we don't want preformatted text // to be colorized as Python. HTML is more 'appropriate' as it does // not colorize -- or + or keywords like 'from'. 
this.md.push('```html'); - this.inPreBlock = true; + this.state = State.Preformatted; } private endPreformattedBlock(): void { - if (this.inPreBlock) { + if (this.state === State.Preformatted) { + this.tryRemovePrecedingEmptyLines(); this.md.push('```'); - this.inPreBlock = false; + this.state = State.Default; } } private startCodeBlock(): void { // Remove previous empty line so we avoid double empties. - this.tryRemovePrecedingEmptyLine(); + this.tryRemovePrecedingEmptyLines(); this.md.push('```python'); - this.inCodeBlock = true; + this.state = State.Code; } private endCodeBlock(): void { - if (this.inCodeBlock) { + if (this.state === State.Code) { + this.tryRemovePrecedingEmptyLines(); this.md.push('```'); - this.inCodeBlock = false; + this.state = State.Default; } } - private tryRemovePrecedingEmptyLine(): void { - if (this.md.length > 0 && this.md[this.md.length - 1].length === 0) { + private tryRemovePrecedingEmptyLines(): void { + while (this.md.length > 0 && this.md[this.md.length - 1].trim().length === 0) { this.md.pop(); } } - private convertEmphasis(line: string): string { - return line.replace(/\:([\w\W]+)\:/g, '**$1**'); // Convert :word: to **word**. + private isListItem(line: string): boolean { + const trimmed = line.trim(); + const ch = trimmed.length > 0 ? trimmed.charCodeAt(0) : 0; + return ch === Char.Asterisk || ch === Char.Hyphen || isDecimal(ch); + } + + private cleanup(line: string): string { + return line.replace(/:mod:/g, 'module:'); } } diff --git a/src/client/common/terminal/helper.ts b/src/client/common/terminal/helper.ts index 07c679b86f4f..bc2f9c78a237 100644 --- a/src/client/common/terminal/helper.ts +++ b/src/client/common/terminal/helper.ts @@ -68,7 +68,7 @@ export class TerminalHelper implements ITerminalHelper { public buildCommandForTerminal(terminalShellType: TerminalShellType, command: string, args: string[]) { const isPowershell = terminalShellType === TerminalShellType.powershell || terminalShellType === TerminalShellType.powershellCore; const commandPrefix = isPowershell ? 
'& ' : ''; - return `${commandPrefix}${command.toCommandArgument()} ${args.join(' ')}`.trim(); + return `${commandPrefix}${command.fileToCommandArgument()} ${args.join(' ')}`.trim(); } public async getEnvironmentActivationCommands(terminalShellType: TerminalShellType, resource?: Uri): Promise { const settings = this.serviceContainer.get(IConfigurationService).getSettings(resource); diff --git a/src/client/providers/itemInfoSource.ts b/src/client/providers/itemInfoSource.ts index db147dbfd431..4159e9396f82 100644 --- a/src/client/providers/itemInfoSource.ts +++ b/src/client/providers/itemInfoSource.ts @@ -108,6 +108,7 @@ export class ItemInfoSource { if (signature.length > 0) { tooltip = tooltip.appendMarkdown(['```python', signature, '```', EOL].join(EOL)); } + const description = this.textConverter.toMarkdown(lines.join(EOL)); tooltip = tooltip.appendMarkdown(description); diff --git a/src/client/terminals/codeExecution/codeExecutionManager.ts b/src/client/terminals/codeExecution/codeExecutionManager.ts index fd967c56318f..5a6159a50aae 100644 --- a/src/client/terminals/codeExecution/codeExecutionManager.ts +++ b/src/client/terminals/codeExecution/codeExecutionManager.ts @@ -15,7 +15,7 @@ import { ICodeExecutionHelper, ICodeExecutionManager, ICodeExecutionService } fr @injectable() export class CodeExecutionManager implements ICodeExecutionManager { - constructor( @inject(ICommandManager) private commandManager: ICommandManager, + constructor(@inject(ICommandManager) private commandManager: ICommandManager, @inject(IDocumentManager) private documentManager: IDocumentManager, @inject(IDisposableRegistry) private disposableRegistry: Disposable[], @inject(IServiceContainer) private serviceContainer: IServiceContainer) { @@ -28,8 +28,9 @@ export class CodeExecutionManager implements ICodeExecutionManager { this.disposableRegistry.push(this.commandManager.registerCommand(Commands.Exec_Selection_In_Django_Shell, this.executeSelectionInDjangoShell.bind(this))); } @captureTelemetry(EXECUTION_CODE, { scope: 'file' }, false) - private async executeFileInterTerminal(file: Uri) { + private async executeFileInterTerminal(file?: Uri) { const codeExecutionHelper = this.serviceContainer.get(ICodeExecutionHelper); + file = file instanceof Uri ? file : undefined; const fileToExecute = file ? file : await codeExecutionHelper.getFileToExecute(); if (!fileToExecute) { return; diff --git a/src/client/terminals/codeExecution/djangoShellCodeExecution.ts b/src/client/terminals/codeExecution/djangoShellCodeExecution.ts index d188a2091547..5fbe2ef2d19f 100644 --- a/src/client/terminals/codeExecution/djangoShellCodeExecution.ts +++ b/src/client/terminals/codeExecution/djangoShellCodeExecution.ts @@ -40,7 +40,7 @@ export class DjangoShellCodeExecutionProvider extends TerminalCodeExecutionProvi const workspaceRoot = workspaceUri ? workspaceUri.uri.fsPath : defaultWorkspace; const managePyPath = workspaceRoot.length === 0 ? 
'manage.py' : path.join(workspaceRoot, 'manage.py'); - args.push(managePyPath.toCommandArgument()); + args.push(managePyPath.fileToCommandArgument()); args.push('shell'); return { command, args }; } diff --git a/src/client/terminals/codeExecution/terminalCodeExecution.ts b/src/client/terminals/codeExecution/terminalCodeExecution.ts index a3aaedcc2584..4ed1d7da479c 100644 --- a/src/client/terminals/codeExecution/terminalCodeExecution.ts +++ b/src/client/terminals/codeExecution/terminalCodeExecution.ts @@ -34,7 +34,7 @@ export class TerminalCodeExecutionProvider implements ICodeExecutionService { const command = this.platformService.isWindows ? pythonSettings.pythonPath.replace(/\\/g, '/') : pythonSettings.pythonPath; const launchArgs = pythonSettings.terminal.launchArgs; - await this.getTerminalService(file).sendCommand(command, launchArgs.concat(file.fsPath.toCommandArgument())); + await this.getTerminalService(file).sendCommand(command, launchArgs.concat(file.fsPath.fileToCommandArgument())); } public async execute(code: string, resource?: Uri): Promise { @@ -47,7 +47,7 @@ export class TerminalCodeExecutionProvider implements ICodeExecutionService { } public async initializeRepl(resource?: Uri) { if (this.replActive && await this.replActive!) { - this._terminalService!.show(); + await this._terminalService!.show(); return; } this.replActive = new Promise(async resolve => { diff --git a/src/client/unittests/main.ts b/src/client/unittests/main.ts index c95777f78bcc..2e386edea0b0 100644 --- a/src/client/unittests/main.ts +++ b/src/client/unittests/main.ts @@ -158,7 +158,7 @@ async function selectAndRunTestMethod(cmdSource: CommandSource, resource: Uri, d return; } // tslint:disable-next-line:prefer-type-cast - await runTestsImpl(cmdSource, testManager.workspaceFolder, { testFunction: [selectedTestFn.testFunction] } as TestsToRun, debug); + await runTestsImpl(cmdSource, testManager.workspaceFolder, { testFunction: [selectedTestFn.testFunction] } as TestsToRun, false, debug); } async function selectAndRunTestFile(cmdSource: CommandSource) { const testManager = await getTestManager(true); diff --git a/src/test/common/extensions.test.ts b/src/test/common/extensions.test.ts new file mode 100644 index 000000000000..5724f3291274 --- /dev/null +++ b/src/test/common/extensions.test.ts @@ -0,0 +1,42 @@ +import { expect } from 'chai'; +import '../../client/common/extensions'; + +// Defines a Mocha test suite to group tests of similar kind together +suite('String Extensions', () => { + test('Should return empty string for empty arg', () => { + const argTotest = ''; + expect(argTotest.toCommandArgument()).to.be.equal(''); + }); + test('Should quote an empty space', () => { + const argTotest = ' '; + expect(argTotest.toCommandArgument()).to.be.equal('" "'); + }); + test('Should not quote command arguments without spaces', () => { + const argTotest = 'one.two.three'; + expect(argTotest.toCommandArgument()).to.be.equal(argTotest); + }); + test('Should quote command arguments with spaces', () => { + const argTotest = 'one two three'; + expect(argTotest.toCommandArgument()).to.be.equal(`"${argTotest}"`); + }); + test('Should return empty string for empty path', () => { + const fileToTest = ''; + expect(fileToTest.fileToCommandArgument()).to.be.equal(''); + }); + test('Should not quote file argument without spaces', () => { + const fileToTest = 'users/test/one'; + expect(fileToTest.fileToCommandArgument()).to.be.equal(fileToTest); + }); + test('Should quote file argument with spaces', () => { + const 
fileToTest = 'one two three'; + expect(fileToTest.fileToCommandArgument()).to.be.equal(`"${fileToTest}"`); + }); + test('Should replace all back slashes with forward slashes (irrespective of OS)', () => { + const fileToTest = 'c:\\users\\user\\conda\\scripts\\python.exe'; + expect(fileToTest.fileToCommandArgument()).to.be.equal(fileToTest.replace(/\\/g, '/')); + }); + test('Should replace all back slashes with forward slashes (irrespective of OS) and quoted when file has spaces', () => { + const fileToTest = 'c:\\users\\user namne\\conda path\\scripts\\python.exe'; + expect(fileToTest.fileToCommandArgument()).to.be.equal(`"${fileToTest.replace(/\\/g, '/')}"`); + }); +}); diff --git a/src/test/common/terminals/activation.bash.test.ts b/src/test/common/terminals/activation.bash.test.ts index ee7d2829ea46..c321528140ea 100644 --- a/src/test/common/terminals/activation.bash.test.ts +++ b/src/test/common/terminals/activation.bash.test.ts @@ -5,6 +5,7 @@ import { expect } from 'chai'; import * as path from 'path'; import * as TypeMoq from 'typemoq'; import { EnumEx } from '../../../client/common/enumUtils'; +import '../../../client/common/extensions'; import { IFileSystem } from '../../../client/common/platform/types'; import { Bash } from '../../../client/common/terminal/environmentActivationProviders/bash'; import { TerminalShellType } from '../../../client/common/terminal/types'; @@ -13,7 +14,7 @@ import { IServiceContainer } from '../../../client/ioc/types'; // tslint:disable-next-line:max-func-body-length suite('Terminal Environment Activation (bash)', () => { - ['usr/bin/python', 'usr/bin/env with spaces/env more/python'].forEach(pythonPath => { + ['usr/bin/python', 'usr/bin/env with spaces/env more/python', 'c:\\users\\windows paths\\conda\\python.exe'].forEach(pythonPath => { const hasSpaces = pythonPath.indexOf(' ') > 0; const suiteTitle = hasSpaces ? 'and there are spaces in the script file (pythonpath),' : 'and there are no spaces in the script file (pythonpath),'; suite(suiteTitle, () => { @@ -83,8 +84,7 @@ suite('Terminal Environment Activation (bash)', () => { // Ensure the path is quoted if it contains any spaces. // Ensure it contains the name of the environment as an argument to the script file. - const quotedScriptFile = pathToScriptFile.indexOf(' ') > 0 ? `"${pathToScriptFile}"` : pathToScriptFile; - expect(command).to.be.deep.equal([`source ${quotedScriptFile}`.trim()], 'Invalid command'); + expect(command).to.be.deep.equal([`source ${pathToScriptFile.fileToCommandArgument()}`.trim()], 'Invalid command'); } else { expect(command).to.be.equal(undefined, 'Command should be undefined'); } diff --git a/src/test/common/terminals/activation.commandPrompt.test.ts b/src/test/common/terminals/activation.commandPrompt.test.ts index 47b610ea0997..f6c7509ce00d 100644 --- a/src/test/common/terminals/activation.commandPrompt.test.ts +++ b/src/test/common/terminals/activation.commandPrompt.test.ts @@ -15,20 +15,64 @@ import { IConfigurationService, IPythonSettings } from '../../../client/common/t import { IServiceContainer } from '../../../client/ioc/types'; suite('Terminal Environment Activation (cmd/powershell)', () => { - ['c:/programfiles/python/python', 'c:/program files/python/python'].forEach(pythonPath => { - const hasSpaces = pythonPath.indexOf(' ') > 0; - const resource = Uri.file('a'); - - const suiteTitle = hasSpaces ? 
'and there are spaces in the script file (pythonpath),' : 'and there are no spaces in the script file (pythonpath),'; - suite(suiteTitle, () => { - ['activate', 'activate.sh', 'activate.csh', 'activate.fish', 'activate.bat', 'activate.ps1'].forEach(scriptFileName => { - suite(`and script file is ${scriptFileName}`, () => { + ['c:/programfiles/python/python', 'c:/program files/python/python', + 'c:\\users\\windows paths\\conda\\python.exe'].forEach(pythonPath => { + const hasSpaces = pythonPath.indexOf(' ') > 0; + const resource = Uri.file('a'); + + const suiteTitle = hasSpaces ? 'and there are spaces in the script file (pythonpath),' : 'and there are no spaces in the script file (pythonpath),'; + suite(suiteTitle, () => { + ['activate', 'activate.sh', 'activate.csh', 'activate.fish', 'activate.bat', 'activate.ps1'].forEach(scriptFileName => { + suite(`and script file is ${scriptFileName}`, () => { + let serviceContainer: TypeMoq.IMock; + let fileSystem: TypeMoq.IMock; + setup(() => { + serviceContainer = TypeMoq.Mock.ofType(); + fileSystem = TypeMoq.Mock.ofType(); + serviceContainer.setup(c => c.get(IFileSystem)).returns(() => fileSystem.object); + + const configService = TypeMoq.Mock.ofType(); + serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IConfigurationService))).returns(() => configService.object); + const settings = TypeMoq.Mock.ofType(); + settings.setup(s => s.pythonPath).returns(() => pythonPath); + configService.setup(c => c.getSettings(TypeMoq.It.isAny())).returns(() => settings.object); + }); + + EnumEx.getNamesAndValues(TerminalShellType).forEach(shellType => { + const isScriptFileSupported = ['activate.bat', 'activate.ps1'].indexOf(scriptFileName) >= 0; + const titleTitle = isScriptFileSupported ? `Ensure terminal type is supported (Shell: ${shellType.name})` : + `Ensure terminal type is not supported (Shell: ${shellType.name})`; + + test(titleTitle, async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); + + const supported = bash.isShellSupported(shellType.value); + switch (shellType.value) { + case TerminalShellType.commandPrompt: + case TerminalShellType.powershellCore: + case TerminalShellType.powershell: { + expect(supported).to.be.equal(true, `${shellType.name} shell not supported (it should be)`); + break; + } + default: { + expect(supported).to.be.equal(false, `${shellType.name} incorrectly supported (should not be)`); + } + } + }); + }); + }); + }); + + suite('and script file is activate.bat', () => { let serviceContainer: TypeMoq.IMock; let fileSystem: TypeMoq.IMock; + let platform: TypeMoq.IMock; setup(() => { serviceContainer = TypeMoq.Mock.ofType(); fileSystem = TypeMoq.Mock.ofType(); + platform = TypeMoq.Mock.ofType(); serviceContainer.setup(c => c.get(IFileSystem)).returns(() => fileSystem.object); + serviceContainer.setup(c => c.get(IPlatformService)).returns(() => platform.object); const configService = TypeMoq.Mock.ofType(); serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IConfigurationService))).returns(() => configService.object); @@ -37,173 +81,125 @@ suite('Terminal Environment Activation (cmd/powershell)', () => { configService.setup(c => c.getSettings(TypeMoq.It.isAny())).returns(() => settings.object); }); - EnumEx.getNamesAndValues(TerminalShellType).forEach(shellType => { - const isScriptFileSupported = ['activate.bat', 'activate.ps1'].indexOf(scriptFileName) >= 0; - const titleTitle = isScriptFileSupported ? 
`Ensure terminal type is supported (Shell: ${shellType.name})` : - `Ensure terminal type is not supported (Shell: ${shellType.name})`; - - test(titleTitle, async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); - - const supported = bash.isShellSupported(shellType.value); - switch (shellType.value) { - case TerminalShellType.commandPrompt: - case TerminalShellType.powershellCore: - case TerminalShellType.powershell: { - expect(supported).to.be.equal(true, `${shellType.name} shell not supported (it should be)`); - break; - } - default: { - expect(supported).to.be.equal(false, `${shellType.name} incorrectly supported (should not be)`); - } - } - }); - }); - }); - }); - - suite('and script file is activate.bat', () => { - let serviceContainer: TypeMoq.IMock; - let fileSystem: TypeMoq.IMock; - let platform: TypeMoq.IMock; - setup(() => { - serviceContainer = TypeMoq.Mock.ofType(); - fileSystem = TypeMoq.Mock.ofType(); - platform = TypeMoq.Mock.ofType(); - serviceContainer.setup(c => c.get(IFileSystem)).returns(() => fileSystem.object); - serviceContainer.setup(c => c.get(IPlatformService)).returns(() => platform.object); - - const configService = TypeMoq.Mock.ofType(); - serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IConfigurationService))).returns(() => configService.object); - const settings = TypeMoq.Mock.ofType(); - settings.setup(s => s.pythonPath).returns(() => pythonPath); - configService.setup(c => c.getSettings(TypeMoq.It.isAny())).returns(() => settings.object); - }); + test('Ensure batch files are supported by command prompt', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - test('Ensure batch files are supported by command prompt', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const commands = await bash.getActivationCommands(resource, TerminalShellType.commandPrompt); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const commands = await bash.getActivationCommands(resource, TerminalShellType.commandPrompt); + // Ensure the script file is of the following form: + // source "" + // Ensure the path is quoted if it contains any spaces. + // Ensure it contains the name of the environment as an argument to the script file. - // Ensure the script file is of the following form: - // source "" - // Ensure the path is quoted if it contains any spaces. - // Ensure it contains the name of the environment as an argument to the script file. - - const quotedScriptFile = pathToScriptFile.indexOf(' ') > 0 ? 
`"${pathToScriptFile}"` : pathToScriptFile; - expect(commands).to.be.deep.equal([`${quotedScriptFile}`.trim()], 'Invalid command'); - }); + expect(commands).to.be.deep.equal([pathToScriptFile.fileToCommandArgument()], 'Invalid command'); + }); - test('Ensure batch files are supported by powershell (on windows)', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + test('Ensure batch files are supported by powershell (on windows)', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - platform.setup(p => p.isWindows).returns(() => true); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const command = await bash.getActivationCommands(resource, TerminalShellType.powershell); + platform.setup(p => p.isWindows).returns(() => true); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const command = await bash.getActivationCommands(resource, TerminalShellType.powershell); - // Executing batch files from powershell requires going back to cmd, then into powershell + // Executing batch files from powershell requires going back to cmd, then into powershell - const quotedScriptFile = pathToScriptFile.indexOf(' ') > 0 ? `"${pathToScriptFile}"` : pathToScriptFile; - const activationCommand = `${quotedScriptFile}`.trim(); - const commands = [`& cmd /k "${activationCommand} & powershell"`]; - expect(command).to.be.deep.equal(commands, 'Invalid command'); - }); + const activationCommand = pathToScriptFile.fileToCommandArgument(); + const commands = [`& cmd /k "${activationCommand} & powershell"`]; + expect(command).to.be.deep.equal(commands, 'Invalid command'); + }); - test('Ensure batch files are supported by powershell core (on windows)', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + test('Ensure batch files are supported by powershell core (on windows)', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - platform.setup(p => p.isWindows).returns(() => true); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); + platform.setup(p => p.isWindows).returns(() => true); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); - // Executing batch files from powershell requires going back to cmd, then into powershell + // Executing batch files from powershell requires going back to cmd, then into powershell - const quotedScriptFile = pathToScriptFile.indexOf(' ') > 0 ? 
`"${pathToScriptFile}"` : pathToScriptFile; - const activationCommand = `${quotedScriptFile}`.trim(); - const commands = [`& cmd /k "${activationCommand} & pwsh"`]; - expect(command).to.be.deep.equal(commands, 'Invalid command'); - }); + const activationCommand = pathToScriptFile.fileToCommandArgument(); + const commands = [`& cmd /k "${activationCommand} & pwsh"`]; + expect(command).to.be.deep.equal(commands, 'Invalid command'); + }); - test('Ensure batch files are not supported by powershell (on non-windows)', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + test('Ensure batch files are not supported by powershell (on non-windows)', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - platform.setup(p => p.isWindows).returns(() => false); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const command = await bash.getActivationCommands(resource, TerminalShellType.powershell); + platform.setup(p => p.isWindows).returns(() => false); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const command = await bash.getActivationCommands(resource, TerminalShellType.powershell); - expect(command).to.be.equal(undefined, 'Invalid command'); - }); + expect(command).to.be.equal(undefined, 'Invalid command'); + }); - test('Ensure batch files are not supported by powershell core (on non-windows)', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + test('Ensure batch files are not supported by powershell core (on non-windows)', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - platform.setup(p => p.isWindows).returns(() => false); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); + platform.setup(p => p.isWindows).returns(() => false); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); - expect(command).to.be.equal(undefined, 'Invalid command'); + expect(command).to.be.equal(undefined, 'Invalid command'); + }); }); - }); - suite('and script file is activate.ps1', () => { - let serviceContainer: TypeMoq.IMock; - let fileSystem: TypeMoq.IMock; - let platform: TypeMoq.IMock; - setup(() => { - serviceContainer = TypeMoq.Mock.ofType(); - fileSystem = TypeMoq.Mock.ofType(); - platform = TypeMoq.Mock.ofType(); - serviceContainer.setup(c => c.get(IFileSystem)).returns(() => fileSystem.object); - serviceContainer.setup(c => c.get(IPlatformService)).returns(() => platform.object); - - const configService = TypeMoq.Mock.ofType(); - serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IConfigurationService))).returns(() => configService.object); - const settings = TypeMoq.Mock.ofType(); - settings.setup(s => s.pythonPath).returns(() => pythonPath); - configService.setup(c => 
c.getSettings(TypeMoq.It.isAny())).returns(() => settings.object); - }); + suite('and script file is activate.ps1', () => { + let serviceContainer: TypeMoq.IMock; + let fileSystem: TypeMoq.IMock; + let platform: TypeMoq.IMock; + setup(() => { + serviceContainer = TypeMoq.Mock.ofType(); + fileSystem = TypeMoq.Mock.ofType(); + platform = TypeMoq.Mock.ofType(); + serviceContainer.setup(c => c.get(IFileSystem)).returns(() => fileSystem.object); + serviceContainer.setup(c => c.get(IPlatformService)).returns(() => platform.object); + + const configService = TypeMoq.Mock.ofType(); + serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IConfigurationService))).returns(() => configService.object); + const settings = TypeMoq.Mock.ofType(); + settings.setup(s => s.pythonPath).returns(() => pythonPath); + configService.setup(c => c.getSettings(TypeMoq.It.isAny())).returns(() => settings.object); + }); - test('Ensure powershell files are not supported by command prompt', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + test('Ensure powershell files are not supported by command prompt', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - platform.setup(p => p.isWindows).returns(() => true); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const command = await bash.getActivationCommands(resource, TerminalShellType.commandPrompt); + platform.setup(p => p.isWindows).returns(() => true); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const command = await bash.getActivationCommands(resource, TerminalShellType.commandPrompt); - expect(command).to.be.deep.equal([], 'Invalid command (running powershell files are not supported on command prompt)'); - }); + expect(command).to.be.deep.equal([], 'Invalid command (running powershell files are not supported on command prompt)'); + }); - test('Ensure powershell files are supported by powershell', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + test('Ensure powershell files are supported by powershell', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - platform.setup(p => p.isWindows).returns(() => true); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const command = await bash.getActivationCommands(resource, TerminalShellType.powershell); + platform.setup(p => p.isWindows).returns(() => true); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const command = await bash.getActivationCommands(resource, TerminalShellType.powershell); - const quotedScriptFile = pathToScriptFile.indexOf(' ') > 0 ? 
`"${pathToScriptFile}"` : pathToScriptFile; - expect(command).to.be.deep.equal([`& ${quotedScriptFile}`.trim()], 'Invalid command'); - }); + expect(command).to.be.deep.equal([`& ${pathToScriptFile.fileToCommandArgument()}`.trim()], 'Invalid command'); + }); - test('Ensure powershell files are supported by powershell core', async () => { - const bash = new CommandPromptAndPowerShell(serviceContainer.object); + test('Ensure powershell files are supported by powershell core', async () => { + const bash = new CommandPromptAndPowerShell(serviceContainer.object); - platform.setup(p => p.isWindows).returns(() => true); - const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); - const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); + platform.setup(p => p.isWindows).returns(() => true); + const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); - const quotedScriptFile = pathToScriptFile.indexOf(' ') > 0 ? `"${pathToScriptFile}"` : pathToScriptFile; - expect(command).to.be.deep.equal([`& ${quotedScriptFile}`.trim()], 'Invalid command'); + expect(command).to.be.deep.equal([`& ${pathToScriptFile.fileToCommandArgument()}`.trim()], 'Invalid command'); + }); }); }); }); - }); }); diff --git a/src/test/common/terminals/helper.test.ts b/src/test/common/terminals/helper.test.ts index dea22cd4eef5..e692f480a6ca 100644 --- a/src/test/common/terminals/helper.test.ts +++ b/src/test/common/terminals/helper.test.ts @@ -102,7 +102,7 @@ suite('Terminal Service helpers', () => { const command = 'c:\\python 3.7.exe'; const args = ['1', '2']; const commandPrefix = (item.value === TerminalShellType.powershell || item.value === TerminalShellType.powershellCore) ? '& ' : ''; - const expectedTerminalCommand = `${commandPrefix}"${command}" 1 2`; + const expectedTerminalCommand = `${commandPrefix}${command.fileToCommandArgument()} 1 2`; const terminalCommand = helper.buildCommandForTerminal(item.value, command, args); expect(terminalCommand).to.equal(expectedTerminalCommand, `Incorrect command for Shell ${item.name}`); @@ -126,7 +126,7 @@ suite('Terminal Service helpers', () => { const command = 'c:\\python 3.7.exe'; const args = []; const commandPrefix = (item.value === TerminalShellType.powershell || item.value === TerminalShellType.powershellCore) ? 
'& ' : ''; - const expectedTerminalCommand = `${commandPrefix}"${command}"`; + const expectedTerminalCommand = `${commandPrefix}${command.fileToCommandArgument()}`; const terminalCommand = helper.buildCommandForTerminal(item.value, command, args); expect(terminalCommand).to.equal(expectedTerminalCommand, `Incorrect command for Shell ${item.name}`); diff --git a/src/test/interpreters/condaEnvFileService.test.ts b/src/test/interpreters/condaEnvFileService.test.ts index 85844e19c022..2207be82732a 100644 --- a/src/test/interpreters/condaEnvFileService.test.ts +++ b/src/test/interpreters/condaEnvFileService.test.ts @@ -104,15 +104,20 @@ suite('Interpreters from Conda Environments Text File', () => { const interpreterPaths = [ path.join(environmentsPath, 'conda', 'envs', 'numpy') ]; + const pythonPath = path.join(interpreterPaths[0], 'pythonPath'); condaService.setup(c => c.condaEnvironmentsFile).returns(() => environmentsFilePath); + condaService.setup(c => c.getInterpreterPath(TypeMoq.It.isAny())).returns(() => pythonPath); + fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(environmentsFilePath))).returns(() => Promise.resolve(true)); fileSystem.setup(fs => fs.readFile(TypeMoq.It.isValue(environmentsFilePath))).returns(() => Promise.resolve(interpreterPaths.join(EOL))); - AnacondaCompanyNames.forEach(async companyDisplayName => { + for (const companyName of AnacondaCompanyNames) { + const versionWithCompanyName = `Mock Version :: ${companyName}`; + interpreterVersion.setup(c => c.getVersion(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => Promise.resolve(versionWithCompanyName)); const interpreters = await condaFileProvider.getInterpreters(); assert.equal(interpreters.length, 1, 'Incorrect number of entries'); - assert.equal(interpreters[0].displayName, `${AnacondaDisplayName} Mock Version (numpy)`, 'Incorrect display name'); - }); + assert.equal(interpreters[0].displayName, `${AnacondaDisplayName} Mock Version`, 'Incorrect display name'); + } }); }); diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts index 727ce969dd09..86deb9282249 100644 --- a/src/test/language/tokenizer.test.ts +++ b/src/test/language/tokenizer.test.ts @@ -76,4 +76,11 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(i).type, TokenType.Comment); } }); + test('Unknown token', async () => { + const t = new Tokenizer(); + const tokens = t.tokenize('.'); + assert.equal(tokens.count, 1); + + assert.equal(tokens.getItemAt(0).type, TokenType.Unknown); + }); }); diff --git a/src/test/markdown/restTextConverter.test.ts b/src/test/markdown/restTextConverter.test.ts index 52cc7ea7c497..9b43d4d57657 100644 --- a/src/test/markdown/restTextConverter.test.ts +++ b/src/test/markdown/restTextConverter.test.ts @@ -15,7 +15,7 @@ function compareFiles(expectedContent: string, actualContent: string) { for (let i = 0; i < Math.min(expectedLines.length, actualLines.length); i += 1) { const e = expectedLines[i]; const a = actualLines[i]; - expect(a, `Difference at line ${i}`).to.be.equal(e); + expect(e, `Difference at line ${i}`).to.be.equal(a); } expect(actualLines.length, @@ -36,5 +36,10 @@ async function testConversion(fileName: string): Promise { // tslint:disable-next-line:max-func-body-length suite('Hover - RestTextConverter', () => { + test('scipy', async () => await testConversion('scipy')); + test('scipy.spatial', async () => await 
testConversion('scipy.spatial')); test('scipy.spatial.distance', async () => await testConversion('scipy.spatial.distance')); + test('anydbm', async () => await testConversion('anydbm')); + test('aifc', async () => await testConversion('aifc')); + test('astroid', async () => await testConversion('astroid')); }); diff --git a/src/test/pythonFiles/markdown/aifc.md b/src/test/pythonFiles/markdown/aifc.md new file mode 100644 index 000000000000..fff22dece1e5 --- /dev/null +++ b/src/test/pythonFiles/markdown/aifc.md @@ -0,0 +1,142 @@ +Stuff to parse AIFF-C and AIFF files. + +Unless explicitly stated otherwise, the description below is true +both for AIFF-C files and AIFF files. + +An AIFF-C file has the following structure. +```html + +-----------------+ + | FORM | + +-----------------+ + | size | + +----+------------+ + | | AIFC | + | +------------+ + | | chunks | + | | . | + | | . | + | | . | + +----+------------+ +``` +An AIFF file has the string "AIFF" instead of "AIFC". + +A chunk consists of an identifier (4 bytes) followed by a size (4 bytes, +big endian order), followed by the data. The size field does not include +the size of the 8 byte header. + +The following chunk types are recognized. +```html + FVER + version number of AIFF-C defining document (AIFF-C only). + MARK + # of markers (2 bytes) + list of markers: + marker ID (2 bytes, must be 0) + position (4 bytes) + marker name ("pstring") + COMM + # of channels (2 bytes) + # of sound frames (4 bytes) + size of the samples (2 bytes) + sampling frequency (10 bytes, IEEE 80-bit extended + floating point) + in AIFF-C files only: + compression type (4 bytes) + human-readable version of compression type ("pstring") + SSND + offset (4 bytes, not used by this program) + blocksize (4 bytes, not used by this program) + sound data +``` +A pstring consists of 1 byte length, a string of characters, and 0 or 1 +byte pad to make the total length even. + +Usage. + +Reading AIFF files: +```html + f = aifc.open(file, 'r') +``` +where file is either the name of a file or an open file pointer. +The open file pointer must have methods read(), seek(), and close(). +In some types of audio files, if the setpos() method is not used, +the seek() method is not necessary. + +This returns an instance of a class with the following public methods: +```html + getnchannels() -- returns number of audio channels (1 for + mono, 2 for stereo) + getsampwidth() -- returns sample width in bytes + getframerate() -- returns sampling frequency + getnframes() -- returns number of audio frames + getcomptype() -- returns compression type ('NONE' for AIFF files) + getcompname() -- returns human-readable version of + compression type ('not compressed' for AIFF files) + getparams() -- returns a tuple consisting of all of the + above in the above order + getmarkers() -- get the list of marks in the audio file or None + if there are no marks + getmark(id) -- get mark with the specified id (raises an error + if the mark does not exist) + readframes(n) -- returns at most n frames of audio + rewind() -- rewind to the beginning of the audio stream + setpos(pos) -- seek to the specified position + tell() -- return the current position + close() -- close the instance (make it unusable) +``` +The position returned by tell(), the position given to setpos() and +the position of marks are all compatible and have nothing to do with +the actual position in the file. +The close() method is called automatically when the class instance +is destroyed. 
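
The reading interface described above is easiest to see in use. A minimal sketch against the stdlib `aifc` module (present through Python 3.12); the input file name `sample.aiff` is a placeholder:

```python
import aifc

f = aifc.open('sample.aiff', 'r')   # accepts a file name or an open file object
try:
    print(f.getnchannels())         # 1 for mono, 2 for stereo
    print(f.getsampwidth())         # sample width in bytes
    print(f.getframerate())         # sampling frequency
    print(f.getcomptype())          # 'NONE' (bytes in Python 3) for plain AIFF
    frames = f.readframes(1024)     # at most 1024 frames of raw audio data
finally:
    f.close()                       # also called automatically on destruction
```
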
+ +Writing AIFF files: +```html + f = aifc.open(file, 'w') +``` +where file is either the name of a file or an open file pointer. +The open file pointer must have methods write(), tell(), seek(), and +close(). + +This returns an instance of a class with the following public methods: +```html + aiff() -- create an AIFF file (AIFF-C default) + aifc() -- create an AIFF-C file + setnchannels(n) -- set the number of channels + setsampwidth(n) -- set the sample width + setframerate(n) -- set the frame rate + setnframes(n) -- set the number of frames + setcomptype(type, name) + -- set the compression type and the + human-readable compression type + setparams(tuple) + -- set all parameters at once + setmark(id, pos, name) + -- add specified mark to the list of marks + tell() -- return current position in output file (useful + in combination with setmark()) + writeframesraw(data) + -- write audio frames without pathing up the + file header + writeframes(data) + -- write audio frames and patch up the file header + close() -- patch up the file header and close the + output file +``` +You should set the parameters before the first writeframesraw or +writeframes. The total number of frames does not need to be set, +but when it is set to the correct value, the header does not have to +be patched up. +It is best to first set all parameters, perhaps possibly the +compression type, and then write audio frames using writeframesraw. +When all frames have been written, either call writeframes('') or +close() to patch up the sizes in the header. +Marks can be added anytime. If there are any marks, you must call +close() after all frames have been written. +The close() method is called automatically when the class instance +is destroyed. + +When a file is opened with the extension '.aiff', an AIFF file is +written, otherwise an AIFF-C file is written. This default can be +changed by calling aiff() or aifc() before the first writeframes or +writeframesraw. \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/aifc.pydoc b/src/test/pythonFiles/markdown/aifc.pydoc new file mode 100644 index 000000000000..a4cc346d5531 --- /dev/null +++ b/src/test/pythonFiles/markdown/aifc.pydoc @@ -0,0 +1,134 @@ +Stuff to parse AIFF-C and AIFF files. + +Unless explicitly stated otherwise, the description below is true +both for AIFF-C files and AIFF files. + +An AIFF-C file has the following structure. + + +-----------------+ + | FORM | + +-----------------+ + | | + +----+------------+ + | | AIFC | + | +------------+ + | | | + | | . | + | | . | + | | . | + +----+------------+ + +An AIFF file has the string "AIFF" instead of "AIFC". + +A chunk consists of an identifier (4 bytes) followed by a size (4 bytes, +big endian order), followed by the data. The size field does not include +the size of the 8 byte header. + +The following chunk types are recognized. + + FVER + (AIFF-C only). + MARK + <# of markers> (2 bytes) + list of markers: + (2 bytes, must be > 0) + (4 bytes) + ("pstring") + COMM + <# of channels> (2 bytes) + <# of sound frames> (4 bytes) + (2 bytes) + (10 bytes, IEEE 80-bit extended + floating point) + in AIFF-C files only: + (4 bytes) + ("pstring") + SSND + (4 bytes, not used by this program) + (4 bytes, not used by this program) + + +A pstring consists of 1 byte length, a string of characters, and 0 or 1 +byte pad to make the total length even. + +Usage. + +Reading AIFF files: + f = aifc.open(file, 'r') +where file is either the name of a file or an open file pointer. 
+The open file pointer must have methods read(), seek(), and close(). +In some types of audio files, if the setpos() method is not used, +the seek() method is not necessary. + +This returns an instance of a class with the following public methods: + getnchannels() -- returns number of audio channels (1 for + mono, 2 for stereo) + getsampwidth() -- returns sample width in bytes + getframerate() -- returns sampling frequency + getnframes() -- returns number of audio frames + getcomptype() -- returns compression type ('NONE' for AIFF files) + getcompname() -- returns human-readable version of + compression type ('not compressed' for AIFF files) + getparams() -- returns a tuple consisting of all of the + above in the above order + getmarkers() -- get the list of marks in the audio file or None + if there are no marks + getmark(id) -- get mark with the specified id (raises an error + if the mark does not exist) + readframes(n) -- returns at most n frames of audio + rewind() -- rewind to the beginning of the audio stream + setpos(pos) -- seek to the specified position + tell() -- return the current position + close() -- close the instance (make it unusable) +The position returned by tell(), the position given to setpos() and +the position of marks are all compatible and have nothing to do with +the actual position in the file. +The close() method is called automatically when the class instance +is destroyed. + +Writing AIFF files: + f = aifc.open(file, 'w') +where file is either the name of a file or an open file pointer. +The open file pointer must have methods write(), tell(), seek(), and +close(). + +This returns an instance of a class with the following public methods: + aiff() -- create an AIFF file (AIFF-C default) + aifc() -- create an AIFF-C file + setnchannels(n) -- set the number of channels + setsampwidth(n) -- set the sample width + setframerate(n) -- set the frame rate + setnframes(n) -- set the number of frames + setcomptype(type, name) + -- set the compression type and the + human-readable compression type + setparams(tuple) + -- set all parameters at once + setmark(id, pos, name) + -- add specified mark to the list of marks + tell() -- return current position in output file (useful + in combination with setmark()) + writeframesraw(data) + -- write audio frames without pathing up the + file header + writeframes(data) + -- write audio frames and patch up the file header + close() -- patch up the file header and close the + output file +You should set the parameters before the first writeframesraw or +writeframes. The total number of frames does not need to be set, +but when it is set to the correct value, the header does not have to +be patched up. +It is best to first set all parameters, perhaps possibly the +compression type, and then write audio frames using writeframesraw. +When all frames have been written, either call writeframes('') or +close() to patch up the sizes in the header. +Marks can be added anytime. If there are any marks, you must call +close() after all frames have been written. +The close() method is called automatically when the class instance +is destroyed. + +When a file is opened with the extension '.aiff', an AIFF file is +written, otherwise an AIFF-C file is written. This default can be +changed by calling aiff() or aifc() before the first writeframes or +writeframesraw. 
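
The writing half of the same interface amounts to setting all parameters before the first write, as the docstring above insists. A short sketch; the output name and the one-second silence payload are illustrative:

```python
import aifc

f = aifc.open('out.aiff', 'w')       # '.aiff' extension selects plain AIFF output
f.setnchannels(1)                    # mono
f.setsampwidth(2)                    # 16-bit samples
f.setframerate(8000)                 # 8 kHz
f.writeframes(b'\x00\x00' * 8000)    # one second of silence
f.close()                            # patches up the sizes in the header
```
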
\ No newline at end of file diff --git a/src/test/pythonFiles/markdown/anydbm.md b/src/test/pythonFiles/markdown/anydbm.md new file mode 100644 index 000000000000..e5914dcbadde --- /dev/null +++ b/src/test/pythonFiles/markdown/anydbm.md @@ -0,0 +1,33 @@ +Generic interface to all dbm clones. + +Instead of +```html + import dbm + d = dbm.open(file, 'w', 0666) +``` +use +```html + import anydbm + d = anydbm.open(file, 'w') +``` +The returned object is a dbhash, gdbm, dbm or dumbdbm object, +dependent on the type of database being opened (determined by whichdb +module) in the case of an existing dbm. If the dbm does not exist and +the create or new flag ('c' or 'n') was specified, the dbm type will +be determined by the availability of the modules (tested in the above +order). + +It has the following interface (key and data are strings): +```html + d[key] = data # store data at key (may override data at + # existing key) + data = d[key] # retrieve data at key (raise KeyError if no + # such key) + del d[key] # delete data stored at key (raises KeyError + # if no such key) + flag = key in d # true if the key exists + list = d.keys() # return a list of all existing keys (slow!) +``` +Future versions may change the order in which implementations are +tested for existence, and add interfaces to other dbm-like +implementations. \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/anydbm.pydoc b/src/test/pythonFiles/markdown/anydbm.pydoc new file mode 100644 index 000000000000..2d46b5881789 --- /dev/null +++ b/src/test/pythonFiles/markdown/anydbm.pydoc @@ -0,0 +1,33 @@ +Generic interface to all dbm clones. + +Instead of + + import dbm + d = dbm.open(file, 'w', 0666) + +use + + import anydbm + d = anydbm.open(file, 'w') + +The returned object is a dbhash, gdbm, dbm or dumbdbm object, +dependent on the type of database being opened (determined by whichdb +module) in the case of an existing dbm. If the dbm does not exist and +the create or new flag ('c' or 'n') was specified, the dbm type will +be determined by the availability of the modules (tested in the above +order). + +It has the following interface (key and data are strings): + + d[key] = data # store data at key (may override data at + # existing key) + data = d[key] # retrieve data at key (raise KeyError if no + # such key) + del d[key] # delete data stored at key (raises KeyError + # if no such key) + flag = key in d # true if the key exists + list = d.keys() # return a list of all existing keys (slow!) + +Future versions may change the order in which implementations are +tested for existence, and add interfaces to other dbm-like +implementations. \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/astroid.md b/src/test/pythonFiles/markdown/astroid.md new file mode 100644 index 000000000000..b5ece21c1faf --- /dev/null +++ b/src/test/pythonFiles/markdown/astroid.md @@ -0,0 +1,24 @@ +Python Abstract Syntax Tree New Generation + +The aim of this module is to provide a common base representation of +python source code for projects such as pychecker, pyreverse, +pylint... Well, actually the development of this library is essentially +governed by pylint's needs. + +It extends class defined in the python's \_ast module with some +additional methods and attributes. Instance attributes are added by a +builder object, which can either generate extended ast (let's call +them astroid ;) by visiting an existent ast tree or by inspecting living +object. Methods are added by monkey patching ast classes. 
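
A tiny sketch of what those builder-produced trees look like in practice, assuming astroid's top-level `parse` helper:

```python
import astroid

# Build an extended AST ("astroid" tree) from source text.
module = astroid.parse('def add(a, b):\n    return a + b\n')

func = module.body[0]    # a FunctionDef node, shaped like the stdlib ast one
print(func.name)         # -> add
print(func.root())       # walks back up to the enclosing Module node
```
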
+ +Main modules are: +```html +* nodes and scoped_nodes for more information about methods and + attributes added to different node classes + +* the manager contains a high level object to get astroid trees from + source files and living objects. It maintains a cache of previously + constructed tree for quick access + +* builder contains the class responsible to build astroid trees +``` \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/astroid.pydoc b/src/test/pythonFiles/markdown/astroid.pydoc new file mode 100644 index 000000000000..84d58487ead5 --- /dev/null +++ b/src/test/pythonFiles/markdown/astroid.pydoc @@ -0,0 +1,23 @@ +Python Abstract Syntax Tree New Generation + +The aim of this module is to provide a common base representation of +python source code for projects such as pychecker, pyreverse, +pylint... Well, actually the development of this library is essentially +governed by pylint's needs. + +It extends class defined in the python's _ast module with some +additional methods and attributes. Instance attributes are added by a +builder object, which can either generate extended ast (let's call +them astroid ;) by visiting an existent ast tree or by inspecting living +object. Methods are added by monkey patching ast classes. + +Main modules are: + +* nodes and scoped_nodes for more information about methods and + attributes added to different node classes + +* the manager contains a high level object to get astroid trees from + source files and living objects. It maintains a cache of previously + constructed tree for quick access + +* builder contains the class responsible to build astroid trees \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.md b/src/test/pythonFiles/markdown/scipy.md new file mode 100644 index 000000000000..d28c1e290abe --- /dev/null +++ b/src/test/pythonFiles/markdown/scipy.md @@ -0,0 +1,47 @@ +### SciPy: A scientific computing package for Python + +Documentation is available in the docstrings and +online at https://docs.scipy.org. + +#### Contents +SciPy imports all the functions from the NumPy namespace, and in +addition provides: + +#### Subpackages +Using any of these subpackages requires an explicit import. For example, +`import scipy.cluster`. +```html + cluster --- Vector Quantization / Kmeans + fftpack --- Discrete Fourier Transform algorithms + integrate --- Integration routines + interpolate --- Interpolation Tools + io --- Data input and output + linalg --- Linear algebra routines + linalg.blas --- Wrappers to BLAS library + linalg.lapack --- Wrappers to LAPACK library + misc --- Various utilities that don't have + another home. 
+ ndimage --- n-dimensional image package + odr --- Orthogonal Distance Regression + optimize --- Optimization Tools + signal --- Signal Processing Tools + sparse --- Sparse Matrices + sparse.linalg --- Sparse Linear Algebra + sparse.linalg.dsolve --- Linear Solvers + sparse.linalg.dsolve.umfpack --- :Interface to the UMFPACK library: + Conjugate Gradient Method (LOBPCG) + sparse.linalg.eigen --- Sparse Eigenvalue Solvers + sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned + Conjugate Gradient Method (LOBPCG) + spatial --- Spatial data structures and algorithms + special --- Special functions + stats --- Statistical Functions +``` +#### Utility tools +```html + test --- Run scipy unittests + show_config --- Show scipy build configuration + show_numpy_config --- Show numpy build configuration + __version__ --- Scipy version string + __numpy_version__ --- Numpy version string +``` \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.pydoc b/src/test/pythonFiles/markdown/scipy.pydoc new file mode 100644 index 000000000000..293445fbea5b --- /dev/null +++ b/src/test/pythonFiles/markdown/scipy.pydoc @@ -0,0 +1,53 @@ +SciPy: A scientific computing package for Python +================================================ + +Documentation is available in the docstrings and +online at https://docs.scipy.org. + +Contents +-------- +SciPy imports all the functions from the NumPy namespace, and in +addition provides: + +Subpackages +----------- +Using any of these subpackages requires an explicit import. For example, +``import scipy.cluster``. + +:: + + cluster --- Vector Quantization / Kmeans + fftpack --- Discrete Fourier Transform algorithms + integrate --- Integration routines + interpolate --- Interpolation Tools + io --- Data input and output + linalg --- Linear algebra routines + linalg.blas --- Wrappers to BLAS library + linalg.lapack --- Wrappers to LAPACK library + misc --- Various utilities that don't have + another home. + ndimage --- n-dimensional image package + odr --- Orthogonal Distance Regression + optimize --- Optimization Tools + signal --- Signal Processing Tools + sparse --- Sparse Matrices + sparse.linalg --- Sparse Linear Algebra + sparse.linalg.dsolve --- Linear Solvers + sparse.linalg.dsolve.umfpack --- :Interface to the UMFPACK library: + Conjugate Gradient Method (LOBPCG) + sparse.linalg.eigen --- Sparse Eigenvalue Solvers + sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned + Conjugate Gradient Method (LOBPCG) + spatial --- Spatial data structures and algorithms + special --- Special functions + stats --- Statistical Functions + +Utility tools +------------- +:: + + test --- Run scipy unittests + show_config --- Show scipy build configuration + show_numpy_config --- Show numpy build configuration + __version__ --- Scipy version string + __numpy_version__ --- Numpy version string \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.spatial.distance.md b/src/test/pythonFiles/markdown/scipy.spatial.distance.md index 125b19f6cdeb..276acddef787 100644 --- a/src/test/pythonFiles/markdown/scipy.spatial.distance.md +++ b/src/test/pythonFiles/markdown/scipy.spatial.distance.md @@ -1,4 +1,4 @@ -### Distance computations (**mod**`scipy.spatial.distance`) +### Distance computations (module:`scipy.spatial.distance`) #### Function Reference @@ -6,53 +6,49 @@ Distance matrix computation from a collection of raw observation vectors stored in a rectangular array. 
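
For instance, `pdist` and `squareform` (both listed just below) round-trip between the condensed and redundant forms; the sample points here are arbitrary:

```python
import numpy as np
from scipy.spatial.distance import pdist, squareform

pts = np.array([[0.0, 0.0], [3.0, 4.0], [0.0, 1.0]])  # three 2-D observations

d = pdist(pts)     # condensed form: one distance per pair (0,1), (0,2), (1,2)
m = squareform(d)  # redundant 3x3 matrix with zeros on the diagonal

print(d)           # distances 5.0, 1.0, ~4.243
print(m.shape)     # (3, 3)
```
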
```html - pdist -- pairwise distances between observation vectors. - cdist -- distances between two collections of observation vectors - squareform -- convert distance matrix to a condensed one and vice versa - directed_hausdorff -- directed Hausdorff distance between arrays - + pdist -- pairwise distances between observation vectors. + cdist -- distances between two collections of observation vectors + squareform -- convert distance matrix to a condensed one and vice versa + directed_hausdorff -- directed Hausdorff distance between arrays ``` Predicates for checking the validity of distance matrices, both condensed and redundant. Also contained in this module are functions for computing the number of observations in a distance matrix. ```html - is_valid_dm -- checks for a valid distance matrix - is_valid_y -- checks for a valid condensed distance matrix - num_obs_dm -- # of observations in a distance matrix - num_obs_y -- # of observations in a condensed distance matrix - + is_valid_dm -- checks for a valid distance matrix + is_valid_y -- checks for a valid condensed distance matrix + num_obs_dm -- # of observations in a distance matrix + num_obs_y -- # of observations in a condensed distance matrix ``` Distance functions between two numeric vectors `u` and `v`. Computing distances over a large collection of vectors is inefficient for these functions. Use `pdist` for this purpose. ```html - braycurtis -- the Bray-Curtis distance. - canberra -- the Canberra distance. - chebyshev -- the Chebyshev distance. - cityblock -- the Manhattan distance. - correlation -- the Correlation distance. - cosine -- the Cosine distance. - euclidean -- the Euclidean distance. - mahalanobis -- the Mahalanobis distance. - minkowski -- the Minkowski distance. - seuclidean -- the normalized Euclidean distance. - sqeuclidean -- the squared Euclidean distance. - wminkowski -- (deprecated) alias of `minkowski`. - + braycurtis -- the Bray-Curtis distance. + canberra -- the Canberra distance. + chebyshev -- the Chebyshev distance. + cityblock -- the Manhattan distance. + correlation -- the Correlation distance. + cosine -- the Cosine distance. + euclidean -- the Euclidean distance. + mahalanobis -- the Mahalanobis distance. + minkowski -- the Minkowski distance. + seuclidean -- the normalized Euclidean distance. + sqeuclidean -- the squared Euclidean distance. + wminkowski -- (deprecated) alias of `minkowski`. ``` Distance functions between two boolean vectors (representing sets) `u` and `v`. As in the case of numerical vectors, `pdist` is more efficient for computing the distances between all pairs. ```html - dice -- the Dice dissimilarity. - hamming -- the Hamming distance. - jaccard -- the Jaccard distance. - kulsinski -- the Kulsinski distance. - rogerstanimoto -- the Rogers-Tanimoto dissimilarity. - russellrao -- the Russell-Rao dissimilarity. - sokalmichener -- the Sokal-Michener dissimilarity. - sokalsneath -- the Sokal-Sneath dissimilarity. - yule -- the Yule dissimilarity. - + dice -- the Dice dissimilarity. + hamming -- the Hamming distance. + jaccard -- the Jaccard distance. + kulsinski -- the Kulsinski distance. + rogerstanimoto -- the Rogers-Tanimoto dissimilarity. + russellrao -- the Russell-Rao dissimilarity. + sokalmichener -- the Sokal-Michener dissimilarity. + sokalsneath -- the Sokal-Sneath dissimilarity. + yule -- the Yule dissimilarity. ``` -**func**`hamming` also operates over discrete numerical vectors. \ No newline at end of file +:func:`hamming` also operates over discrete numerical vectors. 
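
Backing up that closing remark, `hamming` does accept discrete numerical vectors and returns the fraction of disagreeing positions:

```python
from scipy.spatial.distance import hamming

# One of the three positions differs, so the distance is 1/3.
print(hamming([1, 0, 1], [1, 1, 1]))  # 0.3333...
```
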
\ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.spatial.md b/src/test/pythonFiles/markdown/scipy.spatial.md new file mode 100644 index 000000000000..2d5e891db625 --- /dev/null +++ b/src/test/pythonFiles/markdown/scipy.spatial.md @@ -0,0 +1,65 @@ +### Spatial algorithms and data structures (module:`scipy.spatial`) + + +### Nearest-neighbor Queries +```html + KDTree -- class for efficient nearest-neighbor queries + cKDTree -- class for efficient nearest-neighbor queries (faster impl.) + distance -- module containing many different distance measures + Rectangle +``` +### Delaunay Triangulation, Convex Hulls and Voronoi Diagrams +```html + Delaunay -- compute Delaunay triangulation of input points + ConvexHull -- compute a convex hull for input points + Voronoi -- compute a Voronoi diagram hull from input points + SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere + HalfspaceIntersection -- compute the intersection points of input halfspaces +``` +### Plotting Helpers +```html + delaunay_plot_2d -- plot 2-D triangulation + convex_hull_plot_2d -- plot 2-D convex hull + voronoi_plot_2d -- plot 2-D voronoi diagram +``` +### Simplex representation +The simplices (triangles, tetrahedra, ...) appearing in the Delaunay +tesselation (N-dim simplices), convex hull facets, and Voronoi ridges +(N-1 dim simplices) are represented in the following scheme: +```html + tess = Delaunay(points) + hull = ConvexHull(points) + voro = Voronoi(points) + + # coordinates of the j-th vertex of the i-th simplex + tess.points[tess.simplices[i, j], :] # tesselation element + hull.points[hull.simplices[i, j], :] # convex hull facet + voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells +``` +For Delaunay triangulations and convex hulls, the neighborhood +structure of the simplices satisfies the condition: +```html + `tess.neighbors[i,j]` is the neighboring simplex of the i-th + simplex, opposite to the j-vertex. It is -1 in case of no + neighbor. +``` +Convex hull facets also define a hyperplane equation: +```html + (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0 +``` +Similar hyperplane equations for the Delaunay triangulation correspond +to the convex hull facets on the corresponding N+1 dimensional +paraboloid. + +The Delaunay triangulation objects offer a method for locating the +simplex containing a given point, and barycentric coordinate +computations. + +#### Functions +```html + tsearch + distance_matrix + minkowski_distance + minkowski_distance_p + procrustes +``` \ No newline at end of file diff --git a/src/test/pythonFiles/markdown/scipy.spatial.pydoc b/src/test/pythonFiles/markdown/scipy.spatial.pydoc new file mode 100644 index 000000000000..1613b94384b7 --- /dev/null +++ b/src/test/pythonFiles/markdown/scipy.spatial.pydoc @@ -0,0 +1,86 @@ +============================================================= +Spatial algorithms and data structures (:mod:`scipy.spatial`) +============================================================= + +.. currentmodule:: scipy.spatial + +Nearest-neighbor Queries +======================== +.. autosummary:: + :toctree: generated/ + + KDTree -- class for efficient nearest-neighbor queries + cKDTree -- class for efficient nearest-neighbor queries (faster impl.) + distance -- module containing many different distance measures + Rectangle + +Delaunay Triangulation, Convex Hulls and Voronoi Diagrams +========================================================= + +.. 
autosummary:: + :toctree: generated/ + + Delaunay -- compute Delaunay triangulation of input points + ConvexHull -- compute a convex hull for input points + Voronoi -- compute a Voronoi diagram hull from input points + SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere + HalfspaceIntersection -- compute the intersection points of input halfspaces + +Plotting Helpers +================ + +.. autosummary:: + :toctree: generated/ + + delaunay_plot_2d -- plot 2-D triangulation + convex_hull_plot_2d -- plot 2-D convex hull + voronoi_plot_2d -- plot 2-D voronoi diagram + +.. seealso:: :ref:`Tutorial ` + + +Simplex representation +====================== +The simplices (triangles, tetrahedra, ...) appearing in the Delaunay +tesselation (N-dim simplices), convex hull facets, and Voronoi ridges +(N-1 dim simplices) are represented in the following scheme:: + + tess = Delaunay(points) + hull = ConvexHull(points) + voro = Voronoi(points) + + # coordinates of the j-th vertex of the i-th simplex + tess.points[tess.simplices[i, j], :] # tesselation element + hull.points[hull.simplices[i, j], :] # convex hull facet + voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells + +For Delaunay triangulations and convex hulls, the neighborhood +structure of the simplices satisfies the condition: + + ``tess.neighbors[i,j]`` is the neighboring simplex of the i-th + simplex, opposite to the j-vertex. It is -1 in case of no + neighbor. + +Convex hull facets also define a hyperplane equation:: + + (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0 + +Similar hyperplane equations for the Delaunay triangulation correspond +to the convex hull facets on the corresponding N+1 dimensional +paraboloid. + +The Delaunay triangulation objects offer a method for locating the +simplex containing a given point, and barycentric coordinate +computations. + +Functions +--------- + +.. 
autosummary:: + :toctree: generated/ + + tsearch + distance_matrix + minkowski_distance + minkowski_distance_p + procrustes \ No newline at end of file diff --git a/src/test/refactor/extension.refactor.extract.var.test.ts b/src/test/refactor/extension.refactor.extract.var.test.ts index 5ce6cc3f743c..d12283a74198 100644 --- a/src/test/refactor/extension.refactor.extract.var.test.ts +++ b/src/test/refactor/extension.refactor.extract.var.test.ts @@ -101,13 +101,13 @@ suite('Variable Extraction', () => { test('Extract Variable', async () => { const startPos = new vscode.Position(234, 29); const endPos = new vscode.Position(234, 38); - testingVariableExtraction(false, startPos, endPos); + await testingVariableExtraction(false, startPos, endPos); }); test('Extract Variable fails if whole string not selected', async () => { const startPos = new vscode.Position(234, 20); const endPos = new vscode.Position(234, 38); - testingVariableExtraction(true, startPos, endPos); + await testingVariableExtraction(true, startPos, endPos); }); function testingVariableExtractionEndToEnd(shouldError: boolean, startPos: Position, endPos: Position) { diff --git a/src/test/terminals/codeExecution/djangoShellCodeExect.test.ts b/src/test/terminals/codeExecution/djangoShellCodeExect.test.ts index 4778ffa55512..7b1dc4a55742 100644 --- a/src/test/terminals/codeExecution/djangoShellCodeExect.test.ts +++ b/src/test/terminals/codeExecution/djangoShellCodeExect.test.ts @@ -150,7 +150,7 @@ suite('Terminal - Django Shell Code Execution', () => { const workspaceFolder: WorkspaceFolder = { index: 0, name: 'blah', uri: workspaceUri }; workspace.setup(w => w.getWorkspaceFolder(TypeMoq.It.isAny())).returns(() => undefined); workspace.setup(w => w.workspaceFolders).returns(() => [workspaceFolder]); - const expectedTerminalArgs = terminalArgs.concat(path.join(workspaceUri.fsPath, 'manage.py'), 'shell'); + const expectedTerminalArgs = terminalArgs.concat(path.join(workspaceUri.fsPath, 'manage.py').fileToCommandArgument(), 'shell'); testReplCommandArguments(true, pythonPath, pythonPath, terminalArgs, expectedTerminalArgs, Uri.file('x')); }); diff --git a/src/test/terminals/codeExecution/terminalCodeExec.test.ts b/src/test/terminals/codeExecution/terminalCodeExec.test.ts index 90c92560ee20..0a907c1ce548 100644 --- a/src/test/terminals/codeExecution/terminalCodeExec.test.ts +++ b/src/test/terminals/codeExecution/terminalCodeExec.test.ts @@ -154,7 +154,8 @@ suite('Terminal Code Execution', () => { terminalSettings.setup(t => t.launchArgs).returns(() => []); await executor.executeFile(file); - terminalService.verify(async t => await t.sendText(TypeMoq.It.isValue(`cd "${path.dirname(file.fsPath)}"`)), TypeMoq.Times.once()); + const dir = `"${path.dirname(file.fsPath)}"`.fileToCommandArgument(); + terminalService.verify(async t => await t.sendText(TypeMoq.It.isValue(`cd ${dir}`)), TypeMoq.Times.once()); } test('Ensure we set current directory (and quote it when containing spaces) before executing file (non windows)', async () => { @@ -213,7 +214,7 @@ suite('Terminal Code Execution', () => { await executor.executeFile(file); const expectedPythonPath = isWindows ? pythonPath.replace(/\\/g, '/') : pythonPath; - const expectedArgs = terminalArgs.concat(file.fsPath.indexOf(' ') > 0 ? 
`"${file.fsPath}"` : file.fsPath); + const expectedArgs = terminalArgs.concat(file.fsPath.fileToCommandArgument()); terminalService.verify(async t => await t.sendCommand(TypeMoq.It.isValue(expectedPythonPath), TypeMoq.It.isValue(expectedArgs)), TypeMoq.Times.once()); } diff --git a/src/test/unittests/debugger.test.ts b/src/test/unittests/debugger.test.ts index 2daba9a848e1..c526961527a9 100644 --- a/src/test/unittests/debugger.test.ts +++ b/src/test/unittests/debugger.test.ts @@ -71,15 +71,30 @@ suite('Unit Tests - debugging', () => { assert.equal(tests.testFunctions.length, 2, 'Incorrect number of test functions'); assert.equal(tests.testSuites.length, 2, 'Incorrect number of test suites'); + const deferred = createDeferred(); const testFunction = [tests.testFunctions[0].testFunction]; - testManager.runTest(CommandSource.commandPalette, { testFunction }, false, true); - const launched = await mockDebugLauncher.launched; - assert.isTrue(launched, 'Debugger not launched'); + const runningPromise = testManager.runTest(CommandSource.commandPalette, { testFunction }, false, true); + + // This promise should never resolve nor reject. + runningPromise + .then(() => deferred.reject('Debugger stopped when it shouldn\'t have')) + .catch(error => deferred.reject(error)); + + mockDebugLauncher.launched + .then((launched) => { + if (launched) { + deferred.resolve(''); + } else { + deferred.reject('Debugger not launched'); + } + }) .catch(error => deferred.reject(error)); + + await deferred.promise; } test('Debugger should start (unittest)', async () => { await updateSetting('unitTest.unittestArgs', ['-s=./tests', '-p=test_*.py'], rootWorkspaceUri, configTarget); - await testStartingDebugger('unittest'); + await testStartingDebugger('unittest'); }); test('Debugger should start (pytest)', async () => { @@ -105,9 +120,10 @@ suite('Unit Tests - debugging', () => { const launched = await mockDebugLauncher.launched; assert.isTrue(launched, 'Debugger not launched'); - testManager.discoverTests(CommandSource.commandPalette, true, true, true); - + const discoveryPromise = testManager.discoverTests(CommandSource.commandPalette, true, true, true); await expect(runningPromise).to.be.rejectedWith(CANCELLATION_REASON, 'Incorrect reason for ending the debugger'); + ioc.dispose(); // will cancel test discovery + await expect(discoveryPromise).to.be.rejectedWith(CANCELLATION_REASON, 'Incorrect reason for ending the debugger'); } test('Debugger should stop when user invokes a test discovery (unittest)', async () => { @@ -151,6 +167,7 @@ suite('Unit Tests - debugging', () => { runningPromise .then(() => 'Debugger stopped when it shouldn\'t have') .catch(() => 'Debugger crashed when it shouldn\'t have') + // tslint:disable-next-line: no-floating-promises .then(error => { deferred.reject(error); }); diff --git a/src/test/unittests/stoppingDiscoverAndTest.test.ts b/src/test/unittests/stoppingDiscoverAndTest.test.ts index 3386ee2b6955..3b3558f12bd2 100644 --- a/src/test/unittests/stoppingDiscoverAndTest.test.ts +++ b/src/test/unittests/stoppingDiscoverAndTest.test.ts @@ -5,6 +5,7 @@ import { expect, use } from 'chai'; import * as chaiAsPromised from 'chai-as-promised'; import * as path from 'path'; import { Uri } from 'vscode'; +import {createDeferred} from '../../client/common/helpers'; import { Product } from '../../client/common/types'; import { CANCELLATION_REASON, CommandSource, UNITTEST_PROVIDER } from '../../client/unittests/common/constants'; import { ITestDiscoveryService } from 
'../../client/unittests/common/types'; @@ -60,9 +61,23 @@ suite('Unit Tests Stopping Discovery and Runner', () => { const discoveryPromise = mockTestManager.discoverTests(CommandSource.auto); mockTestManager.discoveryDeferred.resolve(EmptyTests); - mockTestManager.runTest(CommandSource.ui); + const runningPromise = mockTestManager.runTest(CommandSource.ui); + const deferred = createDeferred(); - await expect(discoveryPromise).to.eventually.equal(EmptyTests); + // This promise should never resolve nor reject. + runningPromise + .then(() => Promise.reject('Debugger stopped when it shouldn\'t have')) + .catch(error => deferred.reject(error)); + + discoveryPromise.then(result => { + if (result === EmptyTests) { + deferred.resolve(''); + } else { + deferred.reject('tests not empty'); + } + }).catch(error => deferred.reject(error)); + + await deferred.promise; }); test('Discovering tests should stop running tests', async () => { @@ -75,7 +90,7 @@ suite('Unit Tests Stopping Discovery and Runner', () => { await new Promise(resolve => setTimeout(resolve, 1000)); // User manually discovering tests will kill the existing test runner. - mockTestManager.discoverTests(CommandSource.ui, true, false, true); + await mockTestManager.discoverTests(CommandSource.ui, true, false, true); await expect(runPromise).to.eventually.be.rejectedWith(CANCELLATION_REASON); }); }); From 35838b9fb2a693b3f310eb37e32fa28c5f7a1c15 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 7 Feb 2018 14:51:52 -0800 Subject: [PATCH 049/103] Whitespace difference --- src/client/providers/itemInfoSource.ts | 4 ++-- src/test/definitions/hover.test.ts | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/client/providers/itemInfoSource.ts b/src/client/providers/itemInfoSource.ts index 4159e9396f82..b78515c1822f 100644 --- a/src/client/providers/itemInfoSource.ts +++ b/src/client/providers/itemInfoSource.ts @@ -106,7 +106,7 @@ export class ItemInfoSource { // Tooltip is only used in hover if (signature.length > 0) { - tooltip = tooltip.appendMarkdown(['```python', signature, '```', EOL].join(EOL)); + tooltip = tooltip.appendMarkdown(['```python', signature, '```', ''].join(EOL)); } const description = this.textConverter.toMarkdown(lines.join(EOL)); @@ -127,7 +127,7 @@ export class ItemInfoSource { if (item.description) { if (signature.length > 0) { - tooltip.appendMarkdown(['```python', signature, '```', EOL].join(EOL)); + tooltip.appendMarkdown(['```python', signature, '```', ''].join(EOL)); } const description = this.textConverter.toMarkdown(item.description); tooltip.appendMarkdown(description); diff --git a/src/test/definitions/hover.test.ts b/src/test/definitions/hover.test.ts index 7e64db251464..0abda7ea51c8 100644 --- a/src/test/definitions/hover.test.ts +++ b/src/test/definitions/hover.test.ts @@ -157,8 +157,8 @@ suite('Hover Definition', () => { 'share state.' + EOL + '' + EOL + 'Class Random can also be subclassed if you want to use a different basic' + EOL + - 'generator of your own devising: in that case, override the following' + EOL + EOL + - '`methods` random(), seed(), getstate(), and setstate().' + EOL + EOL + + 'generator of your own devising: in that case, override the following' + EOL + + 'methods: random(), seed(), getstate(), and setstate().' 
+ EOL + 'Optionally, implement a getrandbits() method so that randrange()' + EOL + 'can cover arbitrarily large ranges.'; From 6a10786015c455f44042313dd8ef31935fbc50f8 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 12 Feb 2018 13:31:50 -0800 Subject: [PATCH 050/103] Update Jedi to 0.11.1 --- pythonFiles/release/jedi/__init__.py | 6 +- pythonFiles/release/jedi/__main__.py | 19 +- pythonFiles/release/jedi/_compatibility.py | 146 ++- pythonFiles/release/jedi/api/__init__.py | 698 +++++--------- pythonFiles/release/jedi/api/classes.py | 591 ++++++------ pythonFiles/release/jedi/api/completion.py | 291 ++++++ pythonFiles/release/jedi/api/helpers.py | 360 ++++++-- pythonFiles/release/jedi/api/interpreter.py | 126 +-- pythonFiles/release/jedi/api/keywords.py | 110 ++- pythonFiles/release/jedi/api/usages.py | 49 - pythonFiles/release/jedi/cache.py | 283 +----- pythonFiles/release/jedi/common/__init__.py | 1 + pythonFiles/release/jedi/common/context.py | 67 ++ pythonFiles/release/jedi/debug.py | 89 +- pythonFiles/release/jedi/evaluate/__init__.py | 540 ++++++----- pythonFiles/release/jedi/evaluate/analysis.py | 236 ++--- .../release/jedi/evaluate/arguments.py | 245 +++++ .../release/jedi/evaluate/base_context.py | 260 ++++++ pythonFiles/release/jedi/evaluate/cache.py | 45 +- .../jedi/evaluate/compiled/__init__.py | 625 +++++++------ .../release/jedi/evaluate/compiled/fake.py | 182 +++- .../jedi/evaluate/compiled/fake/_weakref.pym | 3 +- .../jedi/evaluate/compiled/fake/builtins.pym | 32 +- .../jedi/evaluate/compiled/fake/io.pym | 6 + .../jedi/evaluate/compiled/fake/operator.pym | 33 + .../jedi/evaluate/compiled/getattr_static.py | 175 ++++ .../release/jedi/evaluate/compiled/mixed.py | 231 +++++ .../release/jedi/evaluate/context/__init__.py | 5 + .../release/jedi/evaluate/context/function.py | 226 +++++ .../release/jedi/evaluate/context/instance.py | 435 +++++++++ .../release/jedi/evaluate/context/iterable.py | 691 ++++++++++++++ .../release/jedi/evaluate/context/klass.py | 197 ++++ .../release/jedi/evaluate/context/module.py | 213 +++++ .../jedi/evaluate/context/namespace.py | 74 ++ .../release/jedi/evaluate/docstrings.py | 192 +++- pythonFiles/release/jedi/evaluate/dynamic.py | 249 +++-- pythonFiles/release/jedi/evaluate/filters.py | 434 +++++++++ pythonFiles/release/jedi/evaluate/finder.py | 641 ++++--------- .../release/jedi/evaluate/flow_analysis.py | 94 +- pythonFiles/release/jedi/evaluate/helpers.py | 300 +++--- pythonFiles/release/jedi/evaluate/imports.py | 485 +++++----- pythonFiles/release/jedi/evaluate/iterable.py | 631 ------------- .../release/jedi/evaluate/jedi_typing.py | 100 ++ .../release/jedi/evaluate/lazy_context.py | 61 ++ pythonFiles/release/jedi/evaluate/param.py | 480 +++------- .../release/jedi/evaluate/parser_cache.py | 6 + pythonFiles/release/jedi/evaluate/pep0484.py | 222 +++++ .../release/jedi/evaluate/precedence.py | 174 ---- pythonFiles/release/jedi/evaluate/project.py | 40 + .../release/jedi/evaluate/recursion.py | 232 +++-- .../release/jedi/evaluate/representation.py | 857 ------------------ pythonFiles/release/jedi/evaluate/site.py | 110 +++ pythonFiles/release/jedi/evaluate/stdlib.py | 243 +++-- .../release/jedi/evaluate/syntax_tree.py | 588 ++++++++++++ pythonFiles/release/jedi/evaluate/sys_path.py | 263 +++--- pythonFiles/release/jedi/evaluate/usages.py | 62 ++ .../jedi/{common.py => evaluate/utils.py} | 82 +- pythonFiles/release/jedi/parser_utils.py | 241 +++++ pythonFiles/release/jedi/refactoring.py | 76 +- pythonFiles/release/jedi/settings.py | 78 +- 
pythonFiles/release/jedi/utils.py | 34 +- src/test/.vscode/settings.json | 6 +- src/test/index.ts | 3 +- .../extension.refactor.extract.method.test.ts | 70 +- 64 files changed, 8672 insertions(+), 5672 deletions(-) create mode 100644 pythonFiles/release/jedi/api/completion.py delete mode 100755 pythonFiles/release/jedi/api/usages.py create mode 100644 pythonFiles/release/jedi/common/__init__.py create mode 100644 pythonFiles/release/jedi/common/context.py create mode 100644 pythonFiles/release/jedi/evaluate/arguments.py create mode 100644 pythonFiles/release/jedi/evaluate/base_context.py create mode 100644 pythonFiles/release/jedi/evaluate/compiled/fake/operator.pym create mode 100644 pythonFiles/release/jedi/evaluate/compiled/getattr_static.py create mode 100644 pythonFiles/release/jedi/evaluate/compiled/mixed.py create mode 100644 pythonFiles/release/jedi/evaluate/context/__init__.py create mode 100644 pythonFiles/release/jedi/evaluate/context/function.py create mode 100644 pythonFiles/release/jedi/evaluate/context/instance.py create mode 100644 pythonFiles/release/jedi/evaluate/context/iterable.py create mode 100644 pythonFiles/release/jedi/evaluate/context/klass.py create mode 100644 pythonFiles/release/jedi/evaluate/context/module.py create mode 100644 pythonFiles/release/jedi/evaluate/context/namespace.py create mode 100644 pythonFiles/release/jedi/evaluate/filters.py delete mode 100755 pythonFiles/release/jedi/evaluate/iterable.py create mode 100644 pythonFiles/release/jedi/evaluate/jedi_typing.py create mode 100644 pythonFiles/release/jedi/evaluate/lazy_context.py create mode 100644 pythonFiles/release/jedi/evaluate/parser_cache.py create mode 100644 pythonFiles/release/jedi/evaluate/pep0484.py delete mode 100755 pythonFiles/release/jedi/evaluate/precedence.py create mode 100644 pythonFiles/release/jedi/evaluate/project.py delete mode 100755 pythonFiles/release/jedi/evaluate/representation.py create mode 100644 pythonFiles/release/jedi/evaluate/site.py create mode 100644 pythonFiles/release/jedi/evaluate/syntax_tree.py create mode 100644 pythonFiles/release/jedi/evaluate/usages.py rename pythonFiles/release/jedi/{common.py => evaluate/utils.py} (62%) mode change 100755 => 100644 create mode 100644 pythonFiles/release/jedi/parser_utils.py diff --git a/pythonFiles/release/jedi/__init__.py b/pythonFiles/release/jedi/__init__.py index ca99329cda9c..1a1080ad2fd4 100755 --- a/pythonFiles/release/jedi/__init__.py +++ b/pythonFiles/release/jedi/__init__.py @@ -36,8 +36,8 @@ good text editor, while still having very good IDE features for Python. """ -__version__ = '0.9.0' +__version__ = '0.11.1' -from jedi.api import Script, Interpreter, NotFoundError, set_debug_function -from jedi.api import preload_module, defined_names, names +from jedi.api import Script, Interpreter, set_debug_function, \ + preload_module, names from jedi import settings diff --git a/pythonFiles/release/jedi/__main__.py b/pythonFiles/release/jedi/__main__.py index b26397138312..f2ee0477695b 100755 --- a/pythonFiles/release/jedi/__main__.py +++ b/pythonFiles/release/jedi/__main__.py @@ -1,18 +1,13 @@ -from sys import argv +import sys from os.path import join, dirname, abspath, isdir -if len(argv) == 2 and argv[1] == 'repl': - # don't want to use __main__ only for repl yet, maybe we want to use it for - # something else. So just use the keyword ``repl`` for now. - print(join(dirname(abspath(__file__)), 'api', 'replstartup.py')) -elif len(argv) > 1 and argv[1] == 'linter': +def _start_linter(): """ This is a pre-alpha API. 
You're not supposed to use it at all, except for testing. It will very likely change. """ import jedi - import sys if '--debug' in sys.argv: jedi.set_debug_function() @@ -37,7 +32,17 @@ print(error) except Exception: if '--pdb' in sys.argv: + import traceback + traceback.print_exc() import pdb pdb.post_mortem() else: raise + + +if len(sys.argv) == 2 and sys.argv[1] == 'repl': + # don't want to use __main__ only for repl yet, maybe we want to use it for + # something else. So just use the keyword ``repl`` for now. + print(join(dirname(abspath(__file__)), 'api', 'replstartup.py')) +elif len(sys.argv) > 1 and sys.argv[1] == 'linter': + _start_linter() diff --git a/pythonFiles/release/jedi/_compatibility.py b/pythonFiles/release/jedi/_compatibility.py index 1a1e943f43c7..52a20fe2c07c 100755 --- a/pythonFiles/release/jedi/_compatibility.py +++ b/pythonFiles/release/jedi/_compatibility.py @@ -6,26 +6,72 @@ import imp import os import re +import pkgutil +import warnings try: import importlib except ImportError: pass +# Cannot use sys.version.major and minor names, because in Python 2.6 it's not +# a namedtuple. is_py3 = sys.version_info[0] >= 3 -is_py33 = is_py3 and sys.version_info.minor >= 3 +is_py33 = is_py3 and sys.version_info[1] >= 3 +is_py34 = is_py3 and sys.version_info[1] >= 4 +is_py35 = is_py3 and sys.version_info[1] >= 5 is_py26 = not is_py3 and sys.version_info[1] < 7 +py_version = int(str(sys.version_info[0]) + str(sys.version_info[1])) -def find_module_py33(string, path=None): - loader = importlib.machinery.PathFinder.find_module(string, path) +class DummyFile(object): + def __init__(self, loader, string): + self.loader = loader + self.string = string + + def read(self): + return self.loader.get_source(self.string) + + def close(self): + del self.loader + + +def find_module_py34(string, path=None, fullname=None): + implicit_namespace_pkg = False + spec = None + loader = None + + spec = importlib.machinery.PathFinder.find_spec(string, path) + if hasattr(spec, 'origin'): + origin = spec.origin + implicit_namespace_pkg = origin == 'namespace' + + # We try to disambiguate implicit namespace pkgs with non implicit namespace pkgs + if implicit_namespace_pkg: + fullname = string if not path else fullname + implicit_ns_info = ImplicitNSInfo(fullname, spec.submodule_search_locations._path) + return None, implicit_ns_info, False + + # we have found the tail end of the dotted path + if hasattr(spec, 'loader'): + loader = spec.loader + return find_module_py33(string, path, loader) + +def find_module_py33(string, path=None, loader=None, fullname=None): + loader = loader or importlib.machinery.PathFinder.find_module(string, path) if loader is None and path is None: # Fallback to find builtins try: - loader = importlib.find_loader(string) + with warnings.catch_warnings(record=True): + # Mute "DeprecationWarning: Use importlib.util.find_spec() + # instead." While we should replace that in the future, it's + # probably good to wait until we deprecate Python 3.3, since + # it was added in Python 3.4 and find_loader hasn't been + # removed in 3.6. + loader = importlib.find_loader(string) except ValueError as e: # See #491. Importlib might raise a ValueError, to avoid this, we # just raise an ImportError to fix the issue. 
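# A minimal sketch of the spec-based lookup that find_module_py34 above relies
# on, using only the stdlib on Python 3.4+; 'some_pkg' is a placeholder name,
# not part of the patch. On 3.4-3.6 an implicit namespace package reports
# spec.origin == 'namespace'; later interpreters report None instead.
import importlib.machinery

spec = importlib.machinery.PathFinder.find_spec('some_pkg', None)
if spec is None:
    print('not found on sys.path')
elif spec.origin in ('namespace', None):
    # Implicit namespace package: no single file, only search locations.
    print(list(spec.submodule_search_locations or []))
else:
    print(spec.origin)  # file path of a regular module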
- raise ImportError("Originally ValueError: " + e.message) + raise ImportError("Originally " + repr(e)) if loader is None: raise ImportError("Couldn't find a loader for {0}".format(string)) @@ -33,33 +79,77 @@ def find_module_py33(string, path=None): try: is_package = loader.is_package(string) if is_package: - module_path = os.path.dirname(loader.path) - module_file = None + if hasattr(loader, 'path'): + module_path = os.path.dirname(loader.path) + else: + # At least zipimporter does not have path attribute + module_path = os.path.dirname(loader.get_filename(string)) + if hasattr(loader, 'archive'): + module_file = DummyFile(loader, string) + else: + module_file = None else: module_path = loader.get_filename(string) - module_file = open(module_path, 'rb') + module_file = DummyFile(loader, string) except AttributeError: # ExtensionLoader has not attribute get_filename, instead it has a # path attribute that we can use to retrieve the module path try: module_path = loader.path - module_file = open(loader.path, 'rb') + module_file = DummyFile(loader, string) except AttributeError: module_path = string module_file = None finally: is_package = False + if hasattr(loader, 'archive'): + module_path = loader.archive + return module_file, module_path, is_package -def find_module_pre_py33(string, path=None): - module_file, module_path, description = imp.find_module(string, path) - module_type = description[2] - return module_file, module_path, module_type is imp.PKG_DIRECTORY +def find_module_pre_py33(string, path=None, fullname=None): + try: + module_file, module_path, description = imp.find_module(string, path) + module_type = description[2] + return module_file, module_path, module_type is imp.PKG_DIRECTORY + except ImportError: + pass + + if path is None: + path = sys.path + for item in path: + loader = pkgutil.get_importer(item) + if loader: + try: + loader = loader.find_module(string) + if loader: + is_package = loader.is_package(string) + is_archive = hasattr(loader, 'archive') + try: + module_path = loader.get_filename(string) + except AttributeError: + # fallback for py26 + try: + module_path = loader._get_filename(string) + except AttributeError: + continue + if is_package: + module_path = os.path.dirname(module_path) + if is_archive: + module_path = loader.archive + file = None + if not is_package or is_archive: + file = DummyFile(loader, string) + return (file, module_path, is_package) + except ImportError: + pass + raise ImportError("No module named {0}".format(string)) find_module = find_module_py33 if is_py33 else find_module_pre_py33 +find_module = find_module_py34 if is_py34 else find_module find_module.__doc__ = """ Provides information about a module. @@ -71,28 +161,18 @@ def find_module_pre_py33(string, path=None): """ +class ImplicitNSInfo(object): + """Stores information returned from an implicit namespace spec""" + def __init__(self, name, paths): + self.name = name + self.paths = paths + # unicode function try: unicode = unicode except NameError: unicode = str -if is_py3: - u = lambda s: s -else: - u = lambda s: s.decode('utf-8') - -u.__doc__ = """ -Decode a raw string into unicode object. Do nothing in Python 3. 
-""" - -# exec function -if is_py3: - def exec_function(source, global_map): - exec(source, global_map) -else: - eval(compile("""def exec_function(source, global_map): - exec source in global_map """, 'blub', 'exec')) # re-raise function if is_py3: @@ -147,7 +227,8 @@ def u(string): """ if is_py3: return str(string) - elif not isinstance(string, unicode): + + if not isinstance(string, unicode): return unicode(str(string), 'UTF-8') return string @@ -174,6 +255,11 @@ def literal_eval(string): except ImportError: from itertools import izip_longest as zip_longest # Python 2 +try: + FileNotFoundError = FileNotFoundError +except NameError: + FileNotFoundError = IOError + def no_unicode_pprint(dct): """ diff --git a/pythonFiles/release/jedi/api/__init__.py b/pythonFiles/release/jedi/api/__init__.py index 5c710623f86d..871dc84b78e3 100755 --- a/pythonFiles/release/jedi/api/__init__.py +++ b/pythonFiles/release/jedi/api/__init__.py @@ -3,51 +3,43 @@ use its methods. Additionally you can add a debug function with :func:`set_debug_function`. +Alternatively, if you don't need a custom function and are happy with printing +debug messages to stdout, simply call :func:`set_debug_function` without +arguments. .. warning:: Please, note that Jedi is **not thread safe**. """ -import re import os -import warnings import sys -from itertools import chain -from jedi._compatibility import unicode, builtins -from jedi.parser import Parser, load_grammar -from jedi.parser.tokenize import source_tokens -from jedi.parser import tree -from jedi.parser.user_context import UserContext, UserContextParser +import parso +from parso.python import tree +from parso import python_bytes_to_unicode, split_lines + +from jedi.parser_utils import get_executable_nodes, get_statement_of_position from jedi import debug from jedi import settings -from jedi import common from jedi import cache -from jedi.api import keywords from jedi.api import classes from jedi.api import interpreter -from jedi.api import usages from jedi.api import helpers +from jedi.api.completion import Completion from jedi.evaluate import Evaluator -from jedi.evaluate import representation as er -from jedi.evaluate import compiled from jedi.evaluate import imports -from jedi.evaluate.cache import memoize_default -from jedi.evaluate.helpers import FakeName, get_module_names -from jedi.evaluate.finder import global_names_dict_generator, filter_definition_names -from jedi.evaluate import analysis +from jedi.evaluate import usages +from jedi.evaluate.project import Project +from jedi.evaluate.arguments import try_iter_content +from jedi.evaluate.helpers import get_module_names, evaluate_call_of_leaf +from jedi.evaluate.sys_path import dotted_path_in_sys_path +from jedi.evaluate.filters import TreeNameDefinition +from jedi.evaluate.syntax_tree import tree_name_to_contexts +from jedi.evaluate.context import ModuleContext +from jedi.evaluate.context.module import ModuleName +from jedi.evaluate.context.iterable import unpack_tuple_to_dict # Jedi uses lots and lots of recursion. By setting this a little bit higher, we # can remove some "maximum recursion depth" errors. -sys.setrecursionlimit(2000) - - -class NotFoundError(Exception): - """A custom error to avoid catching the wrong exceptions. - - .. deprecated:: 0.9.0 - Not in use anymore, Jedi just returns no goto result if you're not on a - valid name. - .. todo:: Remove! 
- """ +sys.setrecursionlimit(3000) class Script(object): @@ -58,12 +50,24 @@ class Script(object): You can either use the ``source`` parameter or ``path`` to read a file. Usually you're going to want to use both of them (in an editor). + The script might be analyzed in a different ``sys.path`` than |jedi|: + + - if `sys_path` parameter is not ``None``, it will be used as ``sys.path`` + for the script; + + - if `sys_path` parameter is ``None`` and ``VIRTUAL_ENV`` environment + variable is defined, ``sys.path`` for the specified environment will be + guessed (see :func:`jedi.evaluate.sys_path.get_venv_path`) and used for + the script; + + - otherwise ``sys.path`` will match that of |jedi|. + :param source: The source code of the current file, separated by newlines. :type source: str :param line: The line to perform actions on (starting with 1). :type line: int - :param col: The column of the cursor (starting with 0). - :type col: int + :param column: The column of the cursor (starting with 0). + :type column: int :param path: The path of the file in the file system, or ``''`` if it hasn't been saved yet. :type path: str or None @@ -73,62 +77,67 @@ class Script(object): :param source_encoding: The encoding of ``source``, if it is not a ``unicode`` object (default ``'utf-8'``). :type encoding: str + :param sys_path: ``sys.path`` to use during analysis of the script + :type sys_path: list + """ def __init__(self, source=None, line=None, column=None, path=None, - encoding='utf-8', source_path=None, source_encoding=None): - if source_path is not None: - warnings.warn("Use path instead of source_path.", DeprecationWarning) - path = source_path - if source_encoding is not None: - warnings.warn("Use encoding instead of source_encoding.", DeprecationWarning) - encoding = source_encoding - + encoding='utf-8', sys_path=None): self._orig_path = path - self.path = None if path is None else os.path.abspath(path) + # An empty path (also empty string) should always result in no path. + self.path = os.path.abspath(path) if path else None if source is None: - try: - with open(path) as f: - source = f.read() - except UnicodeDecodeError: - with open(path, encoding=encoding) as f: - source = f.read() - - self.source = common.source_to_unicode(source, encoding) - lines = common.splitlines(self.source) - line = max(len(lines), 1) if line is None else line - if not (0 < line <= len(lines)): + # TODO add a better warning than the traceback! + with open(path, 'rb') as f: + source = f.read() + + # TODO do we really want that? 
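# A rough equivalent of the decoding and line/column validation above, using
# parso's helpers directly (a sketch; assumes parso is installed):
from parso import python_bytes_to_unicode, split_lines

code = python_bytes_to_unicode(b'print("hi")\n', 'utf-8', errors='replace')
lines = split_lines(code)      # keeps the trailing empty line
line, column = len(lines), 0   # line numbers are 1-based, columns 0-based
assert 0 < line <= len(lines) and 0 <= column <= len(lines[line - 1])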
+ self._source = python_bytes_to_unicode(source, encoding, errors='replace') + self._code_lines = split_lines(self._source) + line = max(len(self._code_lines), 1) if line is None else line + if not (0 < line <= len(self._code_lines)): raise ValueError('`line` parameter is not in a valid range.') - line_len = len(lines[line - 1]) + line_len = len(self._code_lines[line - 1]) column = line_len if column is None else column if not (0 <= column <= line_len): raise ValueError('`column` parameter is not in a valid range.') self._pos = line, column + self._path = path cache.clear_time_caches() debug.reset_time() - self._grammar = load_grammar('grammar%s.%s' % sys.version_info[:2]) - self._user_context = UserContext(self.source, self._pos) - self._parser = UserContextParser(self._grammar, self.source, path, - self._pos, self._user_context, - self._parsed_callback) - self._evaluator = Evaluator(self._grammar) - debug.speed('init') - def _parsed_callback(self, parser): - module = self._evaluator.wrap(parser.module) - imports.add_module(self._evaluator, unicode(module.name), module) + # Load the Python grammar of the current interpreter. + self._grammar = parso.load_grammar() + project = Project(sys_path=sys_path) + self._evaluator = Evaluator(self._grammar, project) + project.add_script_path(self.path) + debug.speed('init') - @property - def source_path(self): - """ - .. deprecated:: 0.7.0 - Use :attr:`.path` instead. - .. todo:: Remove! - """ - warnings.warn("Use path instead of source_path.", DeprecationWarning) - return self.path + @cache.memoize_method + def _get_module_node(self): + return self._grammar.parse( + code=self._source, + path=self.path, + cache=False, # No disk cache, because the current script often changes. + diff_cache=True, + cache_path=settings.cache_directory + ) + + @cache.memoize_method + def _get_module(self): + module = ModuleContext( + self._evaluator, + self._get_module_node(), + self.path + ) + if self.path is not None: + name = dotted_path_in_sys_path(self._evaluator.project.sys_path, self.path) + if name is not None: + imports.add_module(self._evaluator, name, module) + return module def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, repr(self._orig_path)) @@ -141,187 +150,14 @@ def completions(self): :return: Completion objects, sorted by name and __ comes last. :rtype: list of :class:`classes.Completion` """ - def get_completions(user_stmt, bs): - # TODO this closure is ugly. it also doesn't work with - # simple_complete (used for Interpreter), somehow redo. - module = self._evaluator.wrap(self._parser.module()) - names, level, only_modules, unfinished_dotted = \ - helpers.check_error_statements(module, self._pos) - completion_names = [] - if names is not None: - imp_names = tuple(str(n) for n in names if n.end_pos < self._pos) - i = imports.Importer(self._evaluator, imp_names, module, level) - completion_names = i.completion_names(self._evaluator, only_modules) - - # TODO this paragraph is necessary, but not sure it works. - context = self._user_context.get_context() - if not next(context).startswith('.'): # skip the path - if next(context) == 'from': - # completion is just "import" if before stands from .. 
- if unfinished_dotted: - return completion_names - else: - return keywords.keyword_names('import') - - if isinstance(user_stmt, tree.Import): - module = self._parser.module() - completion_names += imports.completion_names(self._evaluator, - user_stmt, self._pos) - return completion_names - - if names is None and not isinstance(user_stmt, tree.Import): - if not path and not dot: - # add keywords - completion_names += keywords.keyword_names(all=True) - # TODO delete? We should search for valid parser - # transformations. - completion_names += self._simple_complete(path, dot, like) - return completion_names - debug.speed('completions start') - path = self._user_context.get_path_until_cursor() - # Dots following an int are not the start of a completion but a float - # literal. - if re.search(r'^\d\.$', path): - return [] - path, dot, like = helpers.completion_parts(path) - - user_stmt = self._parser.user_stmt_with_whitespace() - - b = compiled.builtin - completion_names = get_completions(user_stmt, b) - - if not dot: - # add named params - for call_sig in self.call_signatures(): - # Allow protected access, because it's a public API. - module = call_sig._name.get_parent_until() - # Compiled modules typically don't allow keyword arguments. - if not isinstance(module, compiled.CompiledObject): - for p in call_sig.params: - # Allow access on _definition here, because it's a - # public API and we don't want to make the internal - # Name object public. - if p._definition.stars == 0: # no *args/**kwargs - completion_names.append(p._name) - - needs_dot = not dot and path - - comps = [] - comp_dct = {} - for c in set(completion_names): - n = str(c) - if settings.case_insensitive_completion \ - and n.lower().startswith(like.lower()) \ - or n.startswith(like): - if isinstance(c.parent, (tree.Function, tree.Class)): - # TODO I think this is a hack. It should be an - # er.Function/er.Class before that. - c = self._evaluator.wrap(c.parent).name - new = classes.Completion(self._evaluator, c, needs_dot, len(like)) - k = (new.name, new.complete) # key - if k in comp_dct and settings.no_completion_duplicates: - comp_dct[k]._same_name_completions.append(new) - else: - comp_dct[k] = new - comps.append(new) - + completion = Completion( + self._evaluator, self._get_module(), self._code_lines, + self._pos, self.call_signatures + ) + completions = completion.completions() debug.speed('completions end') - - return sorted(comps, key=lambda x: (x.name.startswith('__'), - x.name.startswith('_'), - x.name.lower())) - - def _simple_complete(self, path, dot, like): - if not path and not dot: - scope = self._parser.user_scope() - if not scope.is_scope(): # Might be a flow (if/while/etc). 
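# The inline completion logic removed above is superseded by the new
# api/completion.py; the public API is unchanged. A small usage sketch
# against jedi 0.11:
import jedi

script = jedi.Script("import json\njson.lo", 2, 7)
print([c.name for c in script.completions()])  # should include 'load', 'loads'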
- scope = scope.get_parent_scope() - names_dicts = global_names_dict_generator( - self._evaluator, - self._evaluator.wrap(scope), - self._pos - ) - completion_names = [] - for names_dict, pos in names_dicts: - names = list(chain.from_iterable(names_dict.values())) - if not names: - continue - completion_names += filter_definition_names(names, self._parser.user_stmt(), pos) - elif self._get_under_cursor_stmt(path) is None: - return [] - else: - scopes = list(self._prepare_goto(path, True)) - completion_names = [] - debug.dbg('possible completion scopes: %s', scopes) - for s in scopes: - names = [] - for names_dict in s.names_dicts(search_global=False): - names += chain.from_iterable(names_dict.values()) - - completion_names += filter_definition_names(names, self._parser.user_stmt()) - return completion_names - - def _prepare_goto(self, goto_path, is_completion=False): - """ - Base for completions/goto. Basically it returns the resolved scopes - under cursor. - """ - debug.dbg('start: %s in %s', goto_path, self._parser.user_scope()) - - user_stmt = self._parser.user_stmt_with_whitespace() - if not user_stmt and len(goto_path.split('\n')) > 1: - # If the user_stmt is not defined and the goto_path is multi line, - # something's strange. Most probably the backwards tokenizer - # matched to much. - return [] - - if isinstance(user_stmt, tree.Import): - i, _ = helpers.get_on_import_stmt(self._evaluator, self._user_context, - user_stmt, is_completion) - if i is None: - return [] - scopes = [i] - else: - # just parse one statement, take it and evaluate it - eval_stmt = self._get_under_cursor_stmt(goto_path) - if eval_stmt is None: - return [] - - module = self._evaluator.wrap(self._parser.module()) - names, level, _, _ = helpers.check_error_statements(module, self._pos) - if names: - names = [str(n) for n in names] - i = imports.Importer(self._evaluator, names, module, level) - return i.follow() - - scopes = self._evaluator.eval_element(eval_stmt) - - return scopes - - @memoize_default() - def _get_under_cursor_stmt(self, cursor_txt, start_pos=None): - tokenizer = source_tokens(cursor_txt) - r = Parser(self._grammar, cursor_txt, tokenizer=tokenizer) - try: - # Take the last statement available that is not an endmarker. - # And because it's a simple_stmt, we need to get the first child. - stmt = r.module.children[-2].children[0] - except (AttributeError, IndexError): - return None - - user_stmt = self._parser.user_stmt() - if user_stmt is None: - # Set the start_pos to a pseudo position, that doesn't exist but - # works perfectly well (for both completions in docstrings and - # statements). - pos = start_pos or self._pos - else: - pos = user_stmt.start_pos - - stmt.move(pos[0] - 1, pos[1]) # Moving the offset. - stmt.parent = self._parser.user_scope() - return stmt + return completions def goto_definitions(self): """ @@ -335,120 +171,59 @@ def goto_definitions(self): :rtype: list of :class:`classes.Definition` """ - def resolve_import_paths(scopes): - for s in scopes.copy(): - if isinstance(s, imports.ImportWrapper): - scopes.remove(s) - scopes.update(resolve_import_paths(set(s.follow()))) - return scopes - - goto_path = self._user_context.get_path_under_cursor() - context = self._user_context.get_context() - definitions = set() - if next(context) in ('class', 'def'): - definitions = set([self._evaluator.wrap(self._parser.user_scope())]) - else: - # Fetch definition of callee, if there's no path otherwise. 
- if not goto_path: - definitions = set(signature._definition - for signature in self.call_signatures()) - - if re.match('\w[\w\d_]*$', goto_path) and not definitions: - user_stmt = self._parser.user_stmt() - if user_stmt is not None and user_stmt.type == 'expr_stmt': - for name in user_stmt.get_defined_names(): - if name.start_pos <= self._pos <= name.end_pos: - # TODO scaning for a name and then using it should be - # the default. - definitions = set(self._evaluator.goto_definition(name)) - - if not definitions and goto_path: - definitions = set(self._prepare_goto(goto_path)) - - definitions = resolve_import_paths(definitions) + module_node = self._get_module_node() + leaf = module_node.get_name_of_position(self._pos) + if leaf is None: + leaf = module_node.get_leaf_for_position(self._pos) + if leaf is None: + return [] + + context = self._evaluator.create_context(self._get_module(), leaf) + definitions = helpers.evaluate_goto_definition(self._evaluator, context, leaf) + names = [s.name for s in definitions] defs = [classes.Definition(self._evaluator, name) for name in names] + # The additional set here allows the definitions to become unique in an + # API sense. In the internals we want to separate more things than in + # the API. return helpers.sorted_definitions(set(defs)) - def goto_assignments(self): + def goto_assignments(self, follow_imports=False): """ - Return the first definition found. Imports and statements aren't - followed. Multiple objects may be returned, because Python itself is a + Return the first definition found, while optionally following imports. + Multiple objects may be returned, because Python itself is a dynamic language, which means depending on an option you can have two different versions of a function. :rtype: list of :class:`classes.Definition` """ - results = self._goto() - d = [classes.Definition(self._evaluator, d) for d in set(results)] - return helpers.sorted_definitions(d) - - def _goto(self, add_import_name=False): - """ - Used for goto_assignments and usages. + def filter_follow_imports(names, check): + for name in names: + if check(name): + for result in filter_follow_imports(name.goto(), check): + yield result + else: + yield name - :param add_import_name: Add the the name (if import) to the result. - """ - def follow_inexistent_imports(defs): - """ Imports can be generated, e.g. following - `multiprocessing.dummy` generates an import dummy in the - multiprocessing module. The Import doesn't exist -> follow. - """ - definitions = set(defs) - for d in defs: - if isinstance(d.parent, tree.Import) \ - and d.start_pos == (0, 0): - i = imports.ImportWrapper(self._evaluator, d.parent).follow(is_goto=True) - definitions.remove(d) - definitions |= follow_inexistent_imports(i) - return definitions - - goto_path = self._user_context.get_path_under_cursor() - context = self._user_context.get_context() - user_stmt = self._parser.user_stmt() - user_scope = self._parser.user_scope() - - stmt = self._get_under_cursor_stmt(goto_path) - if stmt is None: + tree_name = self._get_module_node().get_name_of_position(self._pos) + if tree_name is None: return [] - - if user_scope is None: - last_name = None - else: - # Try to use the parser if possible. - last_name = user_scope.name_for_position(self._pos) - - if last_name is None: - last_name = stmt - while not isinstance(last_name, tree.Name): - try: - last_name = last_name.children[-1] - except AttributeError: - # Doesn't have a name in it. 
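# goto is now name-based via the evaluator, and goto_assignments gains the
# follow_imports flag added in this hunk. A sketch:
import jedi

script = jedi.Script("import os\nos.path", 2, 7)
print(script.goto_assignments())                     # stops at the import
print(script.goto_assignments(follow_imports=True))  # follows into the module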
- return [] - - if next(context) in ('class', 'def'): - # The cursor is on a class/function name. - user_scope = self._parser.user_scope() - definitions = set([user_scope.name]) - elif isinstance(user_stmt, tree.Import): - s, name = helpers.get_on_import_stmt(self._evaluator, - self._user_context, user_stmt) - - definitions = self._evaluator.goto(name) + context = self._evaluator.create_context(self._get_module(), tree_name) + names = list(self._evaluator.goto(context, tree_name)) + + if follow_imports: + def check(name): + if isinstance(name, ModuleName): + return False + return name.api_type == 'module' else: - # The Evaluator.goto function checks for definitions, but since we - # use a reverse tokenizer, we have new name_part objects, so we - # have to check the user_stmt here for positions. - if isinstance(user_stmt, tree.ExprStmt) \ - and isinstance(last_name.parent, tree.ExprStmt): - for name in user_stmt.get_defined_names(): - if name.start_pos <= self._pos <= name.end_pos: - return [name] - - defs = self._evaluator.goto(last_name) - definitions = follow_inexistent_imports(defs) - return definitions + def check(name): + return isinstance(name, imports.SubModuleName) + + names = filter_follow_imports(names, check) + + defs = [classes.Definition(self._evaluator, d) for d in set(names)] + return helpers.sorted_definitions(defs) def usages(self, additional_module_paths=()): """ @@ -461,40 +236,15 @@ def usages(self, additional_module_paths=()): :rtype: list of :class:`classes.Definition` """ - temp, settings.dynamic_flow_information = \ - settings.dynamic_flow_information, False - try: - user_stmt = self._parser.user_stmt() - definitions = self._goto(add_import_name=True) - if not definitions and isinstance(user_stmt, tree.Import): - # For not defined imports (goto doesn't find something, we take - # the name as a definition. This is enough, because every name - # points to it. - name = user_stmt.name_for_position(self._pos) - if name is None: - # Must be syntax - return [] - definitions = [name] - - if not definitions: - # Without a definition for a name we cannot find references. - return [] - - if not isinstance(user_stmt, tree.Import): - # import case is looked at with add_import_name option - definitions = usages.usages_add_import_modules(self._evaluator, - definitions) - - module = set([d.get_parent_until() for d in definitions]) - module.add(self._parser.module()) - names = usages.usages(self._evaluator, definitions, module) + tree_name = self._get_module_node().get_name_of_position(self._pos) + if tree_name is None: + # Must be syntax + return [] - for d in set(definitions): - names.append(classes.Definition(self._evaluator, d)) - finally: - settings.dynamic_flow_information = temp + names = usages.usages(self._get_module(), tree_name) - return helpers.sorted_definitions(set(names)) + definitions = [classes.Definition(self._evaluator, n) for n in names] + return helpers.sorted_definitions(definitions) def call_signatures(self): """ @@ -508,50 +258,67 @@ def call_signatures(self): abs()# <-- cursor is here - This would return ``None``. + This would return an empty list.. 
:rtype: list of :class:`classes.CallSignature` """ - call_txt, call_index, key_name, start_pos = self._user_context.call_signature() - if call_txt is None: + call_signature_details = \ + helpers.get_call_signature_details(self._get_module_node(), self._pos) + if call_signature_details is None: return [] - stmt = self._get_under_cursor_stmt(call_txt, start_pos) - if stmt is None: - return [] - - with common.scale_speed_settings(settings.scale_call_signatures): - origins = cache.cache_call_signatures(self._evaluator, stmt, - self.source, self._pos) + context = self._evaluator.create_context( + self._get_module(), + call_signature_details.bracket_leaf + ) + definitions = helpers.cache_call_signatures( + self._evaluator, + context, + call_signature_details.bracket_leaf, + self._code_lines, + self._pos + ) debug.speed('func_call followed') - return [classes.CallSignature(self._evaluator, o.name, stmt, call_index, key_name) - for o in origins if hasattr(o, 'py__call__')] + return [classes.CallSignature(self._evaluator, d.name, + call_signature_details.bracket_leaf.start_pos, + call_signature_details.call_index, + call_signature_details.keyword_name_str) + for d in definitions if hasattr(d, 'py__call__')] def _analysis(self): - def check_types(types): - for typ in types: - try: - f = typ.iter_content - except AttributeError: - pass + self._evaluator.is_analysis = True + module_node = self._get_module_node() + self._evaluator.analysis_modules = [module_node] + try: + for node in get_executable_nodes(module_node): + context = self._get_module().create_context(node) + if node.type in ('funcdef', 'classdef'): + # Resolve the decorators. + tree_name_to_contexts(self._evaluator, context, node.children[1]) + elif isinstance(node, tree.Import): + import_names = set(node.get_defined_names()) + if node.is_nested(): + import_names |= set(path[-1] for path in node.get_paths()) + for n in import_names: + imports.infer_import(context, n) + elif node.type == 'expr_stmt': + types = context.eval_node(node) + for testlist in node.children[:-1:2]: + # Iterate tuples. + unpack_tuple_to_dict(context, types, testlist) else: - check_types(f()) - - #statements = set(chain(*self._parser.module().used_names.values())) - nodes, imp_names, decorated_funcs = \ - analysis.get_module_statements(self._parser.module()) - # Sort the statements so that the results are reproducible. 
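# call_signatures() now derives the bracket leaf and argument index from the
# parso tree instead of a reverse tokenizer; usage stays the same. A sketch:
import jedi

sigs = jedi.Script("abs(", 1, 4).call_signatures()
if sigs:
    print(sigs[0].name)  # 'abs'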
- for n in imp_names: - imports.ImportWrapper(self._evaluator, n).follow() - for node in sorted(nodes, key=lambda obj: obj.start_pos): - check_types(self._evaluator.eval_element(node)) - - for dec_func in decorated_funcs: - er.Function(self._evaluator, dec_func).get_decorated_func() + if node.type == 'name': + defs = self._evaluator.goto_definitions(context, node) + else: + defs = evaluate_call_of_leaf(context, node) + try_iter_content(defs) + self._evaluator.reset_recursion_limitations() - ana = [a for a in self._evaluator.analysis if self.path == a.path] - return sorted(set(ana), key=lambda x: x.line) + ana = [a for a in self._evaluator.analysis if self.path == a.path] + return sorted(set(ana), key=lambda x: x.line) + finally: + self._evaluator.is_analysis = False class Interpreter(Script): @@ -565,7 +332,7 @@ class Interpreter(Script): >>> from os.path import join >>> namespace = locals() - >>> script = Interpreter('join().up', [namespace]) + >>> script = Interpreter('join("").up', [namespace]) >>> print(script.completions()[0].name) upper """ @@ -584,81 +351,22 @@ def __init__(self, source, namespaces, **kwds): If `line` and `column` are None, they are assumed be at the end of `source`. """ - if type(namespaces) is not list or len(namespaces) == 0 or \ - any([type(x) is not dict for x in namespaces]): - raise TypeError("namespaces must be a non-empty list of dict") + try: + namespaces = [dict(n) for n in namespaces] + except Exception: + raise TypeError("namespaces must be a non-empty list of dicts.") super(Interpreter, self).__init__(source, **kwds) self.namespaces = namespaces - # Don't use the fast parser, because it does crazy stuff that we don't - # need in our very simple and small code here (that is always - # changing). - self._parser = UserContextParser(self._grammar, self.source, - self._orig_path, self._pos, - self._user_context, self._parsed_callback, - use_fast_parser=False) - interpreter.add_namespaces_to_parser(self._evaluator, namespaces, - self._parser.module()) - - def _simple_complete(self, path, dot, like): - user_stmt = self._parser.user_stmt_with_whitespace() - is_simple_path = not path or re.search('^[\w][\w\d.]*$', path) - if isinstance(user_stmt, tree.Import) or not is_simple_path: - return super(Interpreter, self)._simple_complete(path, dot, like) - else: - class NamespaceModule(object): - def __getattr__(_, name): - for n in self.namespaces: - try: - return n[name] - except KeyError: - pass - raise AttributeError() - - def __dir__(_): - gen = (n.keys() for n in self.namespaces) - return list(set(chain.from_iterable(gen))) - - paths = path.split('.') if path else [] - - namespaces = (NamespaceModule(), builtins) - for p in paths: - old, namespaces = namespaces, [] - for n in old: - try: - namespaces.append(getattr(n, p)) - except Exception: - pass - - completion_names = [] - for namespace in namespaces: - for name in dir(namespace): - if name.lower().startswith(like.lower()): - scope = self._parser.module() - n = FakeName(name, scope) - completion_names.append(n) - return completion_names - - -def defined_names(source, path=None, encoding='utf-8'): - """ - Get all definitions in `source` sorted by its position. - - This functions can be used for listing functions, classes and - data defined in a file. This can be useful if you want to list - them in "sidebar". Each element in the returned list also has - `defined_names` method which can be used to get sub-definitions - (e.g., methods in class). - - :rtype: list of classes.Definition - - .. 
deprecated:: 0.9.0 - Use :func:`names` instead. - .. todo:: Remove! - """ - warnings.warn("Use call_signatures instead.", DeprecationWarning) - return names(source, path, encoding) + def _get_module(self): + parser_module = super(Interpreter, self)._get_module_node() + return interpreter.MixedModuleContext( + self._evaluator, + parser_module, + self.namespaces, + path=self.path + ) def names(source=None, path=None, encoding='utf-8', all_scopes=False, @@ -678,13 +386,21 @@ def names(source=None, path=None, encoding='utf-8', all_scopes=False, ``definitions=True``. E.g. ``a = b`` returns ``b``. """ def def_ref_filter(_def): - is_def = _def.is_definition() + is_def = _def._name.tree_name.is_definition() return definitions and is_def or references and not is_def # Set line/column to a random position, because they don't matter. script = Script(source, line=1, column=0, path=path, encoding=encoding) - defs = [classes.Definition(script._evaluator, name_part) - for name_part in get_module_names(script._parser.module(), all_scopes)] + module_context = script._get_module() + defs = [ + classes.Definition( + script._evaluator, + TreeNameDefinition( + module_context.create_context(name if name.parent.type == 'file_input' else name.parent), + name + ) + ) for name in get_module_names(script._get_module_node(), all_scopes) + ] return sorted(filter(def_ref_filter, defs), key=lambda x: (x.line, x.column)) @@ -705,6 +421,8 @@ def set_debug_function(func_cb=debug.print_to_stdout, warnings=True, """ Define a callback debug function to get all the debug messages. + If you don't specify any arguments, debug messages will be printed to stdout. + :param func_cb: The callback function for debug messages, with n params. """ debug.debug_function = func_cb diff --git a/pythonFiles/release/jedi/api/classes.py b/pythonFiles/release/jedi/api/classes.py index a1d42bd0ffe2..1937cc6065a4 100755 --- a/pythonFiles/release/jedi/api/classes.py +++ b/pythonFiles/release/jedi/api/classes.py @@ -3,34 +3,38 @@ These classes are the much bigger part of the whole API, because they contain the interesting information about completion and goto operations. """ -import warnings -from itertools import chain import re -from jedi._compatibility import unicode, use_metaclass +from parso.cache import parser_cache +from parso.python.tree import search_ancestor + +from jedi._compatibility import u from jedi import settings -from jedi import common -from jedi.parser import tree -from jedi.evaluate.cache import memoize_default, CachedMetaClass -from jedi.evaluate import representation as er -from jedi.evaluate import iterable +from jedi.evaluate.utils import ignored, unite +from jedi.cache import memoize_method from jedi.evaluate import imports from jedi.evaluate import compiled -from jedi.api import keywords -from jedi.evaluate.finder import filter_definition_names +from jedi.evaluate.filters import ParamName +from jedi.evaluate.imports import ImportName +from jedi.evaluate.context import instance +from jedi.evaluate.context import ClassContext, FunctionContext, FunctionExecutionContext +from jedi.api.keywords import KeywordName + +def _sort_names_by_start_pos(names): + return sorted(names, key=lambda s: s.start_pos or (0, 0)) -def defined_names(evaluator, scope): + +def defined_names(evaluator, context): """ List sub-definitions (e.g., methods in class). 
:type scope: Scope :rtype: list of Definition """ - dct = scope.names_dict - names = list(chain.from_iterable(dct.values())) - names = filter_definition_names(names, scope) - return [Definition(evaluator, d) for d in sorted(names, key=lambda s: s.start_pos)] + filter = next(context.get_filters(search_global=True)) + names = [name for name in filter.values()] + return [Definition(evaluator, n) for n in _sort_names_by_start_pos(names)] class BaseDefinition(object): @@ -51,25 +55,22 @@ class BaseDefinition(object): _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in { 'argparse._ActionsContainer': 'argparse.ArgumentParser', - '_sre.SRE_Match': 're.MatchObject', - '_sre.SRE_Pattern': 're.RegexObject', }.items()) def __init__(self, evaluator, name): self._evaluator = evaluator self._name = name """ - An instance of :class:`jedi.parser.reprsentation.Name` subclass. + An instance of :class:`parso.reprsentation.Name` subclass. """ - self._definition = evaluator.wrap(self._name.get_definition()) - self.is_keyword = isinstance(self._definition, keywords.Keyword) + self.is_keyword = isinstance(self._name, KeywordName) # generate a path to the definition - self._module = name.get_parent_until() + self._module = name.get_root_context() if self.in_builtin_module(): self.module_path = None else: - self.module_path = self._module.path + self.module_path = self._module.py__file__() """Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``""" @property @@ -81,17 +82,7 @@ def name(self): :rtype: str or None """ - return unicode(self._name) - - @property - def start_pos(self): - """ - .. deprecated:: 0.7.0 - Use :attr:`.line` and :attr:`.column` instead. - .. todo:: Remove! - """ - warnings.warn("Use line/column instead.", DeprecationWarning) - return self._name.start_pos + return self._name.string_name @property def type(self): @@ -130,7 +121,7 @@ def type(self): >>> defs = sorted(defs, key=lambda d: d.line) >>> defs # doctest: +NORMALIZE_WHITESPACE [, , - , ] + , ] Finally, here is what you can get from :attr:`type`: @@ -144,45 +135,59 @@ def type(self): 'function' """ - stripped = self._definition - if isinstance(stripped, er.InstanceElement): - stripped = stripped.var + tree_name = self._name.tree_name + resolve = False + if tree_name is not None: + # TODO move this to their respective names. 
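# api_type is now provided by the Name abstraction itself; inference is only
# triggered for names defined on an import_from. A sketch:
import jedi

d, = jedi.Script("def f(): pass\nf", 2, 1).goto_definitions()
print(d.type)  # 'function'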
+ definition = tree_name.get_definition() + if definition is not None and definition.type == 'import_from' and \ + tree_name.is_definition(): + resolve = True - if isinstance(stripped, compiled.CompiledObject): - return stripped.api_type() - elif isinstance(stripped, iterable.Array): - return 'instance' - elif isinstance(stripped, tree.Import): - return 'import' - - string = type(stripped).__name__.lower().replace('wrapper', '') - if string == 'exprstmt': - return 'statement' - else: - return string + if isinstance(self._name, imports.SubModuleName) or resolve: + for context in self._name.infer(): + return context.api_type + return self._name.api_type def _path(self): """The path to a module/class/function definition.""" - path = [] - par = self._definition - while par is not None: - if isinstance(par, tree.Import): - path += imports.ImportWrapper(self._evaluator, self._name).import_path - break - try: - name = par.name - except AttributeError: - pass + def to_reverse(): + name = self._name + if name.api_type == 'module': + try: + name = list(name.infer())[0].name + except IndexError: + pass + + if name.api_type == 'module': + module_contexts = name.infer() + if module_contexts: + module_context, = module_contexts + for n in reversed(module_context.py__name__().split('.')): + yield n + else: + # We don't really know anything about the path here. This + # module is just an import that would lead in an + # ImportError. So simply return the name. + yield name.string_name + return else: - if isinstance(par, er.ModuleWrapper): - # TODO just make the path dotted from the beginning, we - # shouldn't really split here. - path[0:0] = par.py__name__().split('.') - break + yield name.string_name + + parent_context = name.parent_context + while parent_context is not None: + try: + method = parent_context.py__name__ + except AttributeError: + try: + yield parent_context.name.string_name + except AttributeError: + pass else: - path.insert(0, unicode(name)) - par = par.parent - return path + for name in reversed(method().split('.')): + yield name + parent_context = parent_context.parent_context + return reversed(list(to_reverse())) @property def module_name(self): @@ -196,7 +201,7 @@ def module_name(self): >>> print(d.module_name) # doctest: +ELLIPSIS json """ - return str(self._module.name) + return self._module.name.string_name def in_builtin_module(self): """Whether this is a builtin module.""" @@ -205,18 +210,20 @@ def in_builtin_module(self): @property def line(self): """The line where the definition occurs (starting with 1).""" - if self.in_builtin_module(): + start_pos = self._name.start_pos + if start_pos is None: return None - return self._name.start_pos[0] + return start_pos[0] @property def column(self): """The column where the definition occurs (starting with 0).""" - if self.in_builtin_module(): + start_pos = self._name.start_pos + if start_pos is None: return None - return self._name.start_pos[1] + return start_pos[1] - def docstring(self, raw=False): + def docstring(self, raw=False, fast=True): r""" Return a document string for this completion object. @@ -241,36 +248,18 @@ def docstring(self, raw=False): >>> print(script.goto_definitions()[0].docstring(raw=True)) Document for function f. + :param fast: Don't follow imports that are only one level deep like + ``import foo``, but follow ``from foo import bar``. This makes + sense for speed reasons. 
Completing `import a` is slow if you use + the ``foo.docstring(fast=False)`` on every object, because it + parses all libraries starting with ``a``. """ - if raw: - return _Help(self._definition).raw() - else: - return _Help(self._definition).full() - - @property - def doc(self): - """ - .. deprecated:: 0.8.0 - Use :meth:`.docstring` instead. - .. todo:: Remove! - """ - warnings.warn("Use docstring() instead.", DeprecationWarning) - return self.docstring() - - @property - def raw_doc(self): - """ - .. deprecated:: 0.8.0 - Use :meth:`.docstring` instead. - .. todo:: Remove! - """ - warnings.warn("Use docstring() instead.", DeprecationWarning) - return self.docstring(raw=True) + return _Help(self._name).docstring(fast=fast, raw=raw) @property def description(self): """A textual description of the object.""" - return unicode(self._name) + return u(self._name.string_name) @property def full_name(self): @@ -291,16 +280,17 @@ def full_name(self): >>> print(script.goto_definitions()[0].full_name) os.path.join - Notice that it correctly returns ``'os.path.join'`` instead of - (for example) ``'posixpath.join'``. - + Notice that it returns ``'os.path.join'`` instead of (for example) + ``'posixpath.join'``. This is not correct, since the modules name would + be `````. However most users find the latter + more practical. """ - path = [unicode(p) for p in self._path()] + path = list(self._path()) # TODO add further checks, the mapping should only occur on stdlib. if not path: return None # for keywords the path is empty - with common.ignored(KeyError): + with ignored(KeyError): path[0] = self._mapping[path[0]] for key, repl in self._tuple_mapping.items(): if tuple(path[:len(key)]) == key: @@ -309,89 +299,122 @@ def full_name(self): return '.'.join(path if path[0] else path[1:]) def goto_assignments(self): - defs = self._evaluator.goto(self._name) - return [Definition(self._evaluator, d) for d in defs] + if self._name.tree_name is None: + return self - @memoize_default() - def _follow_statements_imports(self): - """ - Follow both statements and imports, as far as possible. - """ - if self._definition.isinstance(tree.ExprStmt): - return self._evaluator.eval_statement(self._definition) - elif self._definition.isinstance(tree.Import): - return imports.ImportWrapper(self._evaluator, self._name).follow() - else: - return [self._definition] + names = self._evaluator.goto(self._name.parent_context, self._name.tree_name) + return [Definition(self._evaluator, n) for n in names] + + def _goto_definitions(self): + # TODO make this function public. + return [Definition(self._evaluator, d.name) for d in self._name.infer()] @property - @memoize_default() + @memoize_method def params(self): """ Raises an ``AttributeError``if the definition is not callable. Otherwise returns a list of `Definition` that represents the params. """ - followed = self._follow_statements_imports() + def get_param_names(context): + param_names = [] + if context.api_type == 'function': + param_names = list(context.get_param_names()) + if isinstance(context, instance.BoundMethod): + param_names = param_names[1:] + elif isinstance(context, (instance.AbstractInstanceContext, ClassContext)): + if isinstance(context, ClassContext): + search = '__init__' + else: + search = '__call__' + names = context.get_function_slot_names(search) + if not names: + return [] + + # Just take the first one here, not optimal, but currently + # there's no better solution. 
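# params walks callables (functions, bound methods, classes via __init__);
# for a plain function it simply lists the parameter names. A sketch:
import jedi

d, = jedi.Script("def f(a, b=1): pass\nf", 2, 1).goto_definitions()
print([p.name for p in d.params])  # should print ['a', 'b']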
+ inferred = names[0].infer() + param_names = get_param_names(next(iter(inferred))) + if isinstance(context, ClassContext): + param_names = param_names[1:] + return param_names + elif isinstance(context, compiled.CompiledObject): + return list(context.get_param_names()) + return param_names + + followed = list(self._name.infer()) if not followed or not hasattr(followed[0], 'py__call__'): raise AttributeError() - followed = followed[0] # only check the first one. + context = followed[0] # only check the first one. - if followed.type == 'funcdef': - if isinstance(followed, er.InstanceElement): - params = followed.params[1:] - else: - params = followed.params - elif followed.isinstance(er.compiled.CompiledObject): - params = followed.params - else: - try: - sub = followed.get_subscope_by_name('__init__') - params = sub.params[1:] # ignore self - except KeyError: - return [] - return [_Param(self._evaluator, p.name) for p in params] + return [Definition(self._evaluator, n) for n in get_param_names(context)] def parent(self): - scope = self._definition.get_parent_scope() - scope = self._evaluator.wrap(scope) - return Definition(self._evaluator, scope.name) + context = self._name.parent_context + if context is None: + return None + + if isinstance(context, FunctionExecutionContext): + # TODO the function context should be a part of the function + # execution context. + context = FunctionContext( + self._evaluator, context.parent_context, context.tree_node) + return Definition(self._evaluator, context.name) def __repr__(self): return "<%s %s>" % (type(self).__name__, self.description) + def get_line_code(self, before=0, after=0): + """ + Returns the line of code where this object was defined. + + :param before: Add n lines before the current line to the output. + :param after: Add n lines after the current line to the output. + + :return str: Returns the line(s) of code or an empty string if it's a + builtin. + """ + if self.in_builtin_module(): + return '' + + path = self._name.get_root_context().py__file__() + lines = parser_cache[self._evaluator.grammar._hashed][path].lines + + index = self._name.start_pos[0] - 1 + start_index = max(index - before, 0) + return ''.join(lines[start_index:index + after + 1]) + class Completion(BaseDefinition): """ `Completion` objects are returned from :meth:`api.Script.completions`. They provide additional information about a completion. """ - def __init__(self, evaluator, name, needs_dot, like_name_length): + def __init__(self, evaluator, name, stack, like_name_length): super(Completion, self).__init__(evaluator, name) - self._needs_dot = needs_dot self._like_name_length = like_name_length + self._stack = stack # Completion objects with the same Completion name (which means # duplicate items in the completion) self._same_name_completions = [] def _complete(self, like_name): - dot = '.' if self._needs_dot else '' append = '' if settings.add_bracket_after_function \ and self.type == 'Function': append = '(' - if settings.add_dot_after_module: - if isinstance(self._definition, tree.Module): - append += '.' 
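# Per the docstring above: .name is the full completion item, while .complete
# is only the part still missing at the cursor. A sketch:
import jedi

c = jedi.Script("import json\njson.loa", 2, 8).completions()[0]
print(c.name)      # e.g. 'load'
print(c.complete)  # e.g. 'd' (the part after the typed 'loa')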
- if isinstance(self._definition, tree.Param): - append += '=' + if isinstance(self._name, ParamName) and self._stack is not None: + node_names = list(self._stack.get_node_names(self._evaluator.grammar._pgen_grammar)) + if 'trailer' in node_names and 'argument' not in node_names: + append += '=' - name = str(self._name) + name = self._name.string_name if like_name: name = name[self._like_name_length:] - return dot + name + append + return name + append @property def complete(self): @@ -402,90 +425,51 @@ def complete(self): would return the string 'ce'. It also adds additional stuff, depending on your `settings.py`. + + Assuming the following function definition:: + + def foo(param=0): + pass + + completing ``foo(par`` would give a ``Completion`` which `complete` + would be `am=` + + """ return self._complete(True) @property def name_with_symbols(self): """ - Similar to :attr:`name`, but like :attr:`name` - returns also the symbols, for example:: + Similar to :attr:`name`, but like :attr:`name` returns also the + symbols, for example assuming the following function definition:: - list() + def foo(param=0): + pass + + completing ``foo(`` would give a ``Completion`` which + ``name_with_symbols`` would be "param=". - would return ``.append`` and others (which means it adds a dot). """ return self._complete(False) + def docstring(self, raw=False, fast=True): + if self._like_name_length >= 3: + # In this case we can just resolve the like name, because we + # wouldn't load like > 100 Python modules anymore. + fast = False + return super(Completion, self).docstring(raw=raw, fast=fast) + @property def description(self): """Provide a description of the completion object.""" - if self._definition is None: - return '' - t = self.type - if t == 'statement' or t == 'import': - desc = self._definition.get_code() - else: - desc = '.'.join(unicode(p) for p in self._path()) - - line = '' if self.in_builtin_module else '@%s' % self.line - return '%s: %s%s' % (t, desc, line) + # TODO improve the class structure. + return Definition.description.__get__(self) def __repr__(self): - return '<%s: %s>' % (type(self).__name__, self._name) + return '<%s: %s>' % (type(self).__name__, self._name.string_name) - def docstring(self, raw=False, fast=True): - """ - :param fast: Don't follow imports that are only one level deep like - ``import foo``, but follow ``from foo import bar``. This makes - sense for speed reasons. Completing `import a` is slow if you use - the ``foo.docstring(fast=False)`` on every object, because it - parses all libraries starting with ``a``. - """ - definition = self._definition - if isinstance(definition, tree.Import): - i = imports.ImportWrapper(self._evaluator, self._name) - if len(i.import_path) > 1 or not fast: - followed = self._follow_statements_imports() - if followed: - # TODO: Use all of the followed objects as input to Documentation. - definition = followed[0] - - if raw: - return _Help(definition).raw() - else: - return _Help(definition).full() - - @property - def type(self): - """ - The type of the completion objects. Follows imports. For a further - description, look at :attr:`jedi.api.classes.BaseDefinition.type`. - """ - if isinstance(self._definition, tree.Import): - i = imports.ImportWrapper(self._evaluator, self._name) - if len(i.import_path) <= 1: - return 'module' - - followed = self.follow_definition() - if followed: - # Caveat: Only follows the first one, ignore the other ones. - # This is ok, since people are almost never interested in - # variations. 
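# The behaviour documented above, made executable (this mirrors the
# docstrings' own ``foo(par`` example; output order is not guaranteed):
import jedi

src = 'def foo(param=0):\n    pass\nfoo(par'
comps = jedi.Script(src, 3, 7).completions()
print([c.name_with_symbols for c in comps])  # expected to include 'param='
print([c.complete for c in comps])           # expected to include 'am='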
- return followed[0].type - return super(Completion, self).type - - @memoize_default() - def _follow_statements_imports(self): - # imports completion is very complicated and needs to be treated - # separately in Completion. - definition = self._definition - if definition.isinstance(tree.Import): - i = imports.ImportWrapper(self._evaluator, self._name) - return i.follow() - return super(Completion, self)._follow_statements_imports() - - @memoize_default() + @memoize_method def follow_definition(self): """ Return the original definitions. I strongly recommend not using it for @@ -495,11 +479,11 @@ def follow_definition(self): follows all results. This means with 1000 completions (e.g. numpy), it's just PITA-slow. """ - defs = self._follow_statements_imports() + defs = self._name.infer() return [Definition(self._evaluator, d.name) for d in defs] -class Definition(use_metaclass(CachedMetaClass, BaseDefinition)): +class Definition(BaseDefinition): """ *Definition* objects are returned from :meth:`api.Script.goto_assignments` or :meth:`api.Script.goto_definitions`. @@ -535,45 +519,30 @@ def description(self): 'class C' """ - d = self._definition - if isinstance(d, er.InstanceElement): - d = d.var - - if isinstance(d, compiled.CompiledObject): - typ = d.api_type() - if typ == 'instance': - typ = 'class' # The description should be similar to Py objects. - d = typ + ' ' + d.name.get_code() - elif isinstance(d, iterable.Array): - d = 'class ' + d.type - elif isinstance(d, (tree.Class, er.Class, er.Instance)): - d = 'class ' + unicode(d.name) - elif isinstance(d, (er.Function, tree.Function)): - d = 'def ' + unicode(d.name) - elif isinstance(d, tree.Module): - # only show module name - d = 'module %s' % self.module_name - elif isinstance(d, tree.Param): - d = d.get_code().strip() - if d.endswith(','): - d = d[:-1] # Remove the comma. - else: # ExprStmt - try: - first_leaf = d.first_leaf() - except AttributeError: - # `d` is already a Leaf (Name). - first_leaf = d - # Remove the prefix, because that's not what we want for get_code - # here. - old, first_leaf.prefix = first_leaf.prefix, '' - try: - d = d.get_code() - finally: - first_leaf.prefix = old + typ = self.type + tree_name = self._name.tree_name + if typ in ('function', 'class', 'module', 'instance') or tree_name is None: + if typ == 'function': + # For the description we want a short and a pythonic way. + typ = 'def' + return typ + ' ' + u(self._name.string_name) + elif typ == 'param': + code = search_ancestor(tree_name, 'param').get_code( + include_prefix=False, + include_comma=False + ) + return typ + ' ' + code + + + definition = tree_name.get_definition() or tree_name + # Remove the prefix, because that's not what we want for get_code + # here. + txt = definition.get_code(include_prefix=False) # Delete comments: - d = re.sub('#[^\n]+\n', ' ', d) + txt = re.sub('#[^\n]+\n', ' ', txt) # Delete multi spaces/newlines - return re.sub('\s+', ' ', d).strip() + txt = re.sub('\s+', ' ', txt).strip() + return txt @property def desc_with_module(self): @@ -589,26 +558,28 @@ def desc_with_module(self): position = '' if self.in_builtin_module else '@%s' % (self.line) return "%s:%s%s" % (self.module_name, self.description, position) - @memoize_default() + @memoize_method def defined_names(self): """ List sub-definitions (e.g., methods in class). :rtype: list of Definition """ - defs = self._follow_statements_imports() - # For now we don't want base classes or evaluate decorators. 
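# A quick check of the description strings produced by the branches above;
# the source snippet is an illustrative assumption:
import jedi

src = 'def f(a, b=1):\n    pass\nclass C:\n    pass\n'
print(sorted(d.description for d in jedi.names(src)))  # ['class C', 'def f']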
-        defs = [d.base if isinstance(d, (er.Class, er.Function)) else d for d in defs]
-        iterable = (defined_names(self._evaluator, d) for d in defs)
-        iterable = list(iterable)
-        return list(chain.from_iterable(iterable))
+        defs = self._name.infer()
+        return sorted(
+            unite(defined_names(self._evaluator, d) for d in defs),
+            key=lambda s: s._name.start_pos or (0, 0)
+        )
 
     def is_definition(self):
         """
        Returns True, if defined as a name in a statement, function or class.
         Returns False, if it's a reference to such a definition.
         """
-        return self._name.is_definition()
+        if self._name.tree_name is None:
+            return True
+        else:
+            return self._name.tree_name.is_definition()
 
     def __eq__(self, other):
         return self._name.start_pos == other._name.start_pos \
@@ -629,11 +600,11 @@ class CallSignature(Definition):
     It knows what functions you are currently in. e.g. `isinstance(` would
     return the `isinstance` function; without the `(` it would return nothing.
     """
-    def __init__(self, evaluator, executable_name, call_stmt, index, key_name):
+    def __init__(self, evaluator, executable_name, bracket_start_pos, index, key_name_str):
         super(CallSignature, self).__init__(evaluator, executable_name)
         self._index = index
-        self._key_name = key_name
-        self._call_stmt = call_stmt
+        self._key_name_str = key_name_str
+        self._bracket_start_pos = bracket_start_pos
 
     @property
     def index(self):
         """
         The Param index of the current call.
         Returns None if the index cannot be found in the current call.
         """
-        if self._key_name is not None:
+        if self._key_name_str is not None:
             for i, param in enumerate(self.params):
-                if self._key_name == param.name:
+                if self._key_name_str == param.name:
                     return i
-            if self.params and self.params[-1]._name.get_definition().stars == 2:
-                return i
-            else:
-                return None
+            if self.params:
+                param_name = self.params[-1]._name
+                if param_name.tree_name is not None:
+                    if param_name.tree_name.get_definition().star_count == 2:
+                        return i
+            return None
 
         if self._index >= len(self.params):
             for i, param in enumerate(self.params):
-                # *args case
-                if param._name.get_definition().stars == 1:
-                    return i
+                tree_name = param._name.tree_name
+                if tree_name is not None:
+                    # *args case
+                    if tree_name.get_definition().star_count == 1:
+                        return i
             return None
         return self._index
 
     @property
@@ -665,48 +639,11 @@ def bracket_start(self):
         """
         The indent of the bracket that is responsible for the last function
         call.
         """
-        return self._call_stmt.end_pos
-
-    @property
-    def call_name(self):
-        """
-        .. deprecated:: 0.8.0
-           Use :attr:`.name` instead.
-        .. todo:: Remove!
-
-        The name (e.g. 'isinstance') as a string.
-        """
-        warnings.warn("Use name instead.", DeprecationWarning)
-        return unicode(self.name)
-
-    @property
-    def module(self):
-        """
-        .. deprecated:: 0.8.0
-           Use :attr:`.module_name` for the module name.
-        .. todo:: Remove!
-        """
-        return self._executable.get_parent_until()
+        return self._bracket_start_pos
 
     def __repr__(self):
-        return '<%s: %s index %s>' % (type(self).__name__, self._name,
-                                      self.index)
-
-
-class _Param(Definition):
-    """
-    Just here for backwards compatibility.
-    """
-    def get_code(self):
-        """
-        .. deprecated:: 0.8.0
-           Use :attr:`.description` and :attr:`.name` instead.
-        .. todo:: Remove!
-
-        A function to get the whole code of the param.
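# CallSignature in action through the public API; grounded in the class
# docstring's own `isinstance(` example:
import jedi

sig = jedi.Script('isinstance(', 1, 11).call_signatures()[0]
print(sig.name, sig.index, sig.bracket_start)  # isinstance 0 (1, 10)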
- """ - warnings.warn("Use description instead.", DeprecationWarning) - return self.description + return '<%s: %s index %s>' % \ + (type(self).__name__, self._name.string_name, self.index) class _Help(object): @@ -717,19 +654,25 @@ class _Help(object): def __init__(self, definition): self._name = definition - def full(self): - try: - return self._name.doc - except AttributeError: - return self.raw() + @memoize_method + def _get_contexts(self, fast): + if isinstance(self._name, ImportName) and fast: + return {} + + if self._name.api_type == 'statement': + return {} - def raw(self): + return self._name.infer() + + def docstring(self, fast=True, raw=True): """ - The raw docstring ``__doc__`` for any object. + The docstring ``__doc__`` for any object. See :attr:`doc` for example. """ - try: - return self._name.raw_doc - except AttributeError: - return '' + # TODO: Use all of the followed objects as output. Possibly divinding + # them by a few dashes. + for context in self._get_contexts(fast=fast): + return context.py__doc__(include_call_signature=not raw) + + return '' diff --git a/pythonFiles/release/jedi/api/completion.py b/pythonFiles/release/jedi/api/completion.py new file mode 100644 index 000000000000..559a4d3f8320 --- /dev/null +++ b/pythonFiles/release/jedi/api/completion.py @@ -0,0 +1,291 @@ +from parso.python import token +from parso.python import tree +from parso.tree import search_ancestor, Leaf + +from jedi import debug +from jedi import settings +from jedi.api import classes +from jedi.api import helpers +from jedi.evaluate import imports +from jedi.api import keywords +from jedi.evaluate.helpers import evaluate_call_of_leaf +from jedi.evaluate.filters import get_global_filters +from jedi.parser_utils import get_statement_of_position + + +def get_call_signature_param_names(call_signatures): + # add named params + for call_sig in call_signatures: + for p in call_sig.params: + # Allow protected access, because it's a public API. + tree_name = p._name.tree_name + # Compiled modules typically don't allow keyword arguments. + if tree_name is not None: + # Allow access on _definition here, because it's a + # public API and we don't want to make the internal + # Name object public. + tree_param = tree.search_ancestor(tree_name, 'param') + if tree_param.star_count == 0: # no *args/**kwargs + yield p._name + + +def filter_names(evaluator, completion_names, stack, like_name): + comp_dct = {} + for name in completion_names: + if settings.case_insensitive_completion \ + and name.string_name.lower().startswith(like_name.lower()) \ + or name.string_name.startswith(like_name): + + new = classes.Completion( + evaluator, + name, + stack, + len(like_name) + ) + k = (new.name, new.complete) # key + if k in comp_dct and settings.no_completion_duplicates: + comp_dct[k]._same_name_completions.append(new) + else: + comp_dct[k] = new + yield new + + +def get_user_scope(module_context, position): + """ + Returns the scope in which the user resides. This includes flows. 
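# A hedged sketch of _Help.docstring() as surfaced by the public API; with the
# default raw=False the call signature is prepended via py__doc__:
import jedi

d = jedi.Script('import json\njson.loads', 2, 10).goto_definitions()[0]
print(d.docstring().splitlines()[0])  # expected to start with 'loads('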
+ """ + user_stmt = get_statement_of_position(module_context.tree_node, position) + if user_stmt is None: + def scan(scope): + for s in scope.children: + if s.start_pos <= position <= s.end_pos: + if isinstance(s, (tree.Scope, tree.Flow)): + return scan(s) or s + elif s.type in ('suite', 'decorated'): + return scan(s) + return None + + scanned_node = scan(module_context.tree_node) + if scanned_node: + return module_context.create_context(scanned_node, node_is_context=True) + return module_context + else: + return module_context.create_context(user_stmt) + + +def get_flow_scope_node(module_node, position): + node = module_node.get_leaf_for_position(position, include_prefixes=True) + while not isinstance(node, (tree.Scope, tree.Flow)): + node = node.parent + + return node + + +class Completion: + def __init__(self, evaluator, module, code_lines, position, call_signatures_method): + self._evaluator = evaluator + self._module_context = module + self._module_node = module.tree_node + self._code_lines = code_lines + + # The first step of completions is to get the name + self._like_name = helpers.get_on_completion_name(self._module_node, code_lines, position) + # The actual cursor position is not what we need to calculate + # everything. We want the start of the name we're on. + self._position = position[0], position[1] - len(self._like_name) + self._call_signatures_method = call_signatures_method + + def completions(self): + completion_names = self._get_context_completions() + + completions = filter_names(self._evaluator, completion_names, + self.stack, self._like_name) + + return sorted(completions, key=lambda x: (x.name.startswith('__'), + x.name.startswith('_'), + x.name.lower())) + + def _get_context_completions(self): + """ + Analyzes the context that a completion is made in and decides what to + return. + + Technically this works by generating a parser stack and analysing the + current stack for possible grammar nodes. + + Possible enhancements: + - global/nonlocal search global + - yield from / raise from <- could be only exceptions/generators + - In args: */**: no completion + - In params (also lambda): no completion before = + """ + + grammar = self._evaluator.grammar + + try: + self.stack = helpers.get_stack_at_position( + grammar, self._code_lines, self._module_node, self._position + ) + except helpers.OnErrorLeaf as e: + self.stack = None + if e.error_leaf.value == '.': + # After ErrorLeaf's that are dots, we will not do any + # completions since this probably just confuses the user. + return [] + # If we don't have a context, just use global completion. 
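# The sort key used in completions() above, restated on plain strings to show
# that dunder and private names are pushed to the end:
names = ['__repr__', '_private', 'Zebra', 'apple']
print(sorted(names, key=lambda x: (x.startswith('__'), x.startswith('_'), x.lower())))
# ['apple', 'Zebra', '_private', '__repr__']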
+ + return self._global_completions() + + allowed_keywords, allowed_tokens = \ + helpers.get_possible_completion_types(grammar._pgen_grammar, self.stack) + + if 'if' in allowed_keywords: + leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True) + previous_leaf = leaf.get_previous_leaf() + + indent = self._position[1] + if not (leaf.start_pos <= self._position <= leaf.end_pos): + indent = leaf.start_pos[1] + + if previous_leaf is not None: + stmt = previous_leaf + while True: + stmt = search_ancestor( + stmt, 'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt', + 'error_node', + ) + if stmt is None: + break + + type_ = stmt.type + if type_ == 'error_node': + first = stmt.children[0] + if isinstance(first, Leaf): + type_ = first.value + '_stmt' + # Compare indents + if stmt.start_pos[1] == indent: + if type_ == 'if_stmt': + allowed_keywords += ['elif', 'else'] + elif type_ == 'try_stmt': + allowed_keywords += ['except', 'finally', 'else'] + elif type_ == 'for_stmt': + allowed_keywords.append('else') + + completion_names = list(self._get_keyword_completion_names(allowed_keywords)) + + if token.NAME in allowed_tokens or token.INDENT in allowed_tokens: + # This means that we actually have to do type inference. + + symbol_names = list(self.stack.get_node_names(grammar._pgen_grammar)) + + nodes = list(self.stack.get_nodes()) + + if nodes and nodes[-1] in ('as', 'def', 'class'): + # No completions for ``with x as foo`` and ``import x as foo``. + # Also true for defining names as a class or function. + return list(self._get_class_context_completions(is_function=True)) + elif "import_stmt" in symbol_names: + level, names = self._parse_dotted_names(nodes, "import_from" in symbol_names) + + only_modules = not ("import_from" in symbol_names and 'import' in nodes) + completion_names += self._get_importer_names( + names, + level, + only_modules=only_modules, + ) + elif symbol_names[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.': + dot = self._module_node.get_leaf_for_position(self._position) + completion_names += self._trailer_completions(dot.get_previous_leaf()) + else: + completion_names += self._global_completions() + completion_names += self._get_class_context_completions(is_function=False) + + if 'trailer' in symbol_names: + call_signatures = self._call_signatures_method() + completion_names += get_call_signature_param_names(call_signatures) + + return completion_names + + def _get_keyword_completion_names(self, keywords_): + for k in keywords_: + yield keywords.keyword(self._evaluator, k).name + + def _global_completions(self): + context = get_user_scope(self._module_context, self._position) + debug.dbg('global completion scope: %s', context) + flow_scope_node = get_flow_scope_node(self._module_node, self._position) + filters = get_global_filters( + self._evaluator, + context, + self._position, + origin_scope=flow_scope_node + ) + completion_names = [] + for filter in filters: + completion_names += filter.values() + return completion_names + + def _trailer_completions(self, previous_leaf): + user_context = get_user_scope(self._module_context, self._position) + evaluation_context = self._evaluator.create_context( + self._module_context, previous_leaf + ) + contexts = evaluate_call_of_leaf(evaluation_context, previous_leaf) + completion_names = [] + debug.dbg('trailer completion contexts: %s', contexts) + for context in contexts: + for filter in context.get_filters( + search_global=False, origin_scope=user_context.tree_node): + completion_names += filter.values() + 
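# The indent-matching keyword logic above should surface 'elif'/'else' after
# an 'if' block at the same indentation; a hedged, illustrative check:
import jedi

src = 'if x:\n    pass\nel'
print([c.name for c in jedi.Script(src, 3, 2).completions()])
# expected to include 'elif' and 'else'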
return completion_names + + def _parse_dotted_names(self, nodes, is_import_from): + level = 0 + names = [] + for node in nodes[1:]: + if node in ('.', '...'): + if not names: + level += len(node.value) + elif node.type == 'dotted_name': + names += node.children[::2] + elif node.type == 'name': + names.append(node) + elif node == ',': + if not is_import_from: + names = [] + else: + # Here if the keyword `import` comes along it stops checking + # for names. + break + return level, names + + def _get_importer_names(self, names, level=0, only_modules=True): + names = [n.value for n in names] + i = imports.Importer(self._evaluator, names, self._module_context, level) + return i.completion_names(self._evaluator, only_modules=only_modules) + + def _get_class_context_completions(self, is_function=True): + """ + Autocomplete inherited methods when overriding in child class. + """ + leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True) + cls = tree.search_ancestor(leaf, 'classdef') + if isinstance(cls, (tree.Class, tree.Function)): + # Complete the methods that are defined in the super classes. + random_context = self._module_context.create_context( + cls, + node_is_context=True + ) + else: + return + + if cls.start_pos[1] >= leaf.start_pos[1]: + return + + filters = random_context.get_filters(search_global=False, is_instance=True) + # The first dict is the dictionary of class itself. + next(filters) + for filter in filters: + for name in filter.values(): + if (name.api_type == 'function') == is_function: + yield name diff --git a/pythonFiles/release/jedi/api/helpers.py b/pythonFiles/release/jedi/api/helpers.py index b1b3f6e441c9..2c4d8e0d10fc 100755 --- a/pythonFiles/release/jedi/api/helpers.py +++ b/pythonFiles/release/jedi/api/helpers.py @@ -2,18 +2,20 @@ Helpers for the API """ import re +from collections import namedtuple +from textwrap import dedent -from jedi.parser import tree as pt -from jedi.evaluate import imports +from parso.python.parser import Parser +from parso.python import tree +from parso import split_lines +from jedi._compatibility import u +from jedi.evaluate.syntax_tree import eval_atom +from jedi.evaluate.helpers import evaluate_call_of_leaf +from jedi.cache import time_cache -def completion_parts(path_until_cursor): - """ - Returns the parts for the completion - :return: tuple - (path, dot, like) - """ - match = re.match(r'^(.*?)(\.|)(\w?[\w\d]*)$', path_until_cursor, flags=re.S) - return match.groups() + +CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name']) def sorted_definitions(defs): @@ -21,58 +23,294 @@ def sorted_definitions(defs): return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0)) -def get_on_import_stmt(evaluator, user_context, user_stmt, is_like_search=False): +def get_on_completion_name(module_node, lines, position): + leaf = module_node.get_leaf_for_position(position) + if leaf is None or leaf.type in ('string', 'error_leaf'): + # Completions inside strings are a bit special, we need to parse the + # string. The same is true for comments and error_leafs. + line = lines[position[0] - 1] + # The first step of completions is to get the name + return re.search(r'(?!\d)\w+$|$', line[:position[1]]).group(0) + elif leaf.type not in ('name', 'keyword'): + return '' + + return leaf.value[:position[1] - leaf.start_pos[1]] + + +def _get_code(code_lines, start_pos, end_pos): + # Get relevant lines. 
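# The fallback regex in get_on_completion_name above, applied to the text
# before an (assumed) cursor position inside a string literal:
import re

line_up_to_cursor = "open('/tmp/fi"
print(re.search(r'(?!\d)\w+$|$', line_up_to_cursor).group(0))  # 'fi'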
+ lines = code_lines[start_pos[0] - 1:end_pos[0]] + # Remove the parts at the end of the line. + lines[-1] = lines[-1][:end_pos[1]] + # Remove first line indentation. + lines[0] = lines[0][start_pos[1]:] + return '\n'.join(lines) + + +class OnErrorLeaf(Exception): + @property + def error_leaf(self): + return self.args[0] + + +def _is_on_comment(leaf, position): + comment_lines = split_lines(leaf.prefix) + difference = leaf.start_pos[0] - position[0] + prefix_start_pos = leaf.get_start_pos_of_prefix() + if difference == 0: + indent = leaf.start_pos[1] + elif position[0] == prefix_start_pos[0]: + indent = prefix_start_pos[1] + else: + indent = 0 + line = comment_lines[-difference - 1][:position[1] - indent] + return '#' in line + + +def _get_code_for_stack(code_lines, module_node, position): + leaf = module_node.get_leaf_for_position(position, include_prefixes=True) + # It might happen that we're on whitespace or on a comment. This means + # that we would not get the right leaf. + if leaf.start_pos >= position: + if _is_on_comment(leaf, position): + return u('') + + # If we're not on a comment simply get the previous leaf and proceed. + leaf = leaf.get_previous_leaf() + if leaf is None: + return u('') # At the beginning of the file. + + is_after_newline = leaf.type == 'newline' + while leaf.type == 'newline': + leaf = leaf.get_previous_leaf() + if leaf is None: + return u('') + + if leaf.type == 'error_leaf' or leaf.type == 'string': + if leaf.start_pos[0] < position[0]: + # On a different line, we just begin anew. + return u('') + + # Error leafs cannot be parsed, completion in strings is also + # impossible. + raise OnErrorLeaf(leaf) + else: + user_stmt = leaf + while True: + if user_stmt.parent.type in ('file_input', 'suite', 'simple_stmt'): + break + user_stmt = user_stmt.parent + + if is_after_newline: + if user_stmt.start_pos[1] > position[1]: + # This means that it's actually a dedent and that means that we + # start without context (part of a suite). + return u('') + + # This is basically getting the relevant lines. + return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position) + + +def get_stack_at_position(grammar, code_lines, module_node, pos): """ - Resolve the user statement, if it is an import. Only resolve the - parts until the user position. + Returns the possible node names (e.g. import_from, xor_test or yield_stmt). """ - name = user_stmt.name_for_position(user_context.position) - if name is None: - return None, None - - i = imports.ImportWrapper(evaluator, name) - return i, name - - -def check_error_statements(module, pos): - for error_statement in module.error_statement_stacks: - if error_statement.first_type in ('import_from', 'import_name') \ - and error_statement.first_pos < pos <= error_statement.next_start_pos: - return importer_from_error_statement(error_statement, pos) - return None, 0, False, False - - -def importer_from_error_statement(error_statement, pos): - def check_dotted(children): - for name in children[::2]: - if name.start_pos <= pos: - yield name - - names = [] - level = 0 - only_modules = True - unfinished_dotted = False - for typ, nodes in error_statement.stack: - if typ == 'dotted_name': - names += check_dotted(nodes) - if nodes[-1] == '.': - # An unfinished dotted_name - unfinished_dotted = True - elif typ == 'import_name': - if nodes[0].start_pos <= pos <= nodes[0].end_pos: - # We are on the import. 
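# The slicing done by _get_code above, restated stand-alone (1-based lines,
# 0-based columns) so the boundary handling is easy to verify:
def get_code(code_lines, start_pos, end_pos):
    lines = code_lines[start_pos[0] - 1:end_pos[0]]
    lines[-1] = lines[-1][:end_pos[1]]   # trim everything past the end column
    lines[0] = lines[0][start_pos[1]:]   # drop text before the start column
    return '\n'.join(lines)

print(get_code(['abcdef', 'ghijkl'], (1, 2), (2, 3)))  # 'cdef\nghi'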
- return None, 0, False, False - elif typ == 'import_from': + class EndMarkerReached(Exception): + pass + + def tokenize_without_endmarker(code): + # TODO This is for now not an official parso API that exists purely + # for Jedi. + tokens = grammar._tokenize(code) + for token_ in tokens: + if token_.string == safeword: + raise EndMarkerReached() + else: + yield token_ + + # The code might be indedented, just remove it. + code = dedent(_get_code_for_stack(code_lines, module_node, pos)) + # We use a word to tell Jedi when we have reached the start of the + # completion. + # Use Z as a prefix because it's not part of a number suffix. + safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI' + code = code + safeword + + p = Parser(grammar._pgen_grammar, error_recovery=True) + try: + p.parse(tokens=tokenize_without_endmarker(code)) + except EndMarkerReached: + return Stack(p.pgen_parser.stack) + raise SystemError("This really shouldn't happen. There's a bug in Jedi.") + + +class Stack(list): + def get_node_names(self, grammar): + for dfa, state, (node_number, nodes) in self: + yield grammar.number2symbol[node_number] + + def get_nodes(self): + for dfa, state, (node_number, nodes) in self: for node in nodes: - if node.start_pos >= pos: - break - elif isinstance(node, pt.Node) and node.type == 'dotted_name': - names += check_dotted(node.children) - elif node in ('.', '...'): - level += len(node.value) - elif isinstance(node, pt.Name): - names.append(node) - elif node == 'import': - only_modules = False - - return names, level, only_modules, unfinished_dotted + yield node + + +def get_possible_completion_types(pgen_grammar, stack): + def add_results(label_index): + try: + grammar_labels.append(inversed_tokens[label_index]) + except KeyError: + try: + keywords.append(inversed_keywords[label_index]) + except KeyError: + t, v = pgen_grammar.labels[label_index] + assert t >= 256 + # See if it's a symbol and if we're in its first set + inversed_keywords + itsdfa = pgen_grammar.dfas[t] + itsstates, itsfirst = itsdfa + for first_label_index in itsfirst.keys(): + add_results(first_label_index) + + inversed_keywords = dict((v, k) for k, v in pgen_grammar.keywords.items()) + inversed_tokens = dict((v, k) for k, v in pgen_grammar.tokens.items()) + + keywords = [] + grammar_labels = [] + + def scan_stack(index): + dfa, state, node = stack[index] + states, first = dfa + arcs = states[state] + + for label_index, new_state in arcs: + if label_index == 0: + # An accepting state, check the stack below. + scan_stack(index - 1) + else: + add_results(label_index) + + scan_stack(-1) + + return keywords, grammar_labels + + +def evaluate_goto_definition(evaluator, context, leaf): + if leaf.type == 'name': + # In case of a name we can just use goto_definition which does all the + # magic itself. + return evaluator.goto_definitions(context, leaf) + + parent = leaf.parent + if parent.type == 'atom': + return context.eval_node(leaf.parent) + elif parent.type == 'trailer': + return evaluate_call_of_leaf(context, leaf) + elif isinstance(leaf, tree.Literal): + return eval_atom(context, leaf) + return [] + + +CallSignatureDetails = namedtuple( + 'CallSignatureDetails', + ['bracket_leaf', 'call_index', 'keyword_name_str'] +) + + +def _get_index_and_key(nodes, position): + """ + Returns the amount of commas and the keyword argument string. 
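# The comma/keyword-name bookkeeping described above, observed through the
# public API; the call text is an illustrative assumption:
import jedi

src = 'def f(a, b=0):\n    pass\nf(1, b='
print(jedi.Script(src, 3, 7).call_signatures()[0].index)
# expected: 1 -- resolved from the keyword name 'b'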
+ """ + nodes_before = [c for c in nodes if c.start_pos < position] + if nodes_before[-1].type == 'arglist': + nodes_before = [c for c in nodes_before[-1].children if c.start_pos < position] + + key_str = None + + if nodes_before: + last = nodes_before[-1] + if last.type == 'argument' and last.children[1].end_pos <= position: + # Checked if the argument + key_str = last.children[0].value + elif last == '=': + key_str = nodes_before[-2].value + + return nodes_before.count(','), key_str + + +def _get_call_signature_details_from_error_node(node, position): + for index, element in reversed(list(enumerate(node.children))): + # `index > 0` means that it's a trailer and not an atom. + if element == '(' and element.end_pos <= position and index > 0: + # It's an error node, we don't want to match too much, just + # until the parentheses is enough. + children = node.children[index:] + name = element.get_previous_leaf() + if name is None: + continue + if name.type == 'name' or name.parent.type in ('trailer', 'atom'): + return CallSignatureDetails( + element, + *_get_index_and_key(children, position) + ) + + +def get_call_signature_details(module, position): + leaf = module.get_leaf_for_position(position, include_prefixes=True) + if leaf.start_pos >= position: + # Whitespace / comments after the leaf count towards the previous leaf. + leaf = leaf.get_previous_leaf() + if leaf is None: + return None + + if leaf == ')': + if leaf.end_pos == position: + leaf = leaf.get_next_leaf() + + # Now that we know where we are in the syntax tree, we start to look at + # parents for possible function definitions. + node = leaf.parent + while node is not None: + if node.type in ('funcdef', 'classdef'): + # Don't show call signatures if there's stuff before it that just + # makes it feel strange to have a call signature. + return None + + for n in node.children[::-1]: + if n.start_pos < position and n.type == 'error_node': + result = _get_call_signature_details_from_error_node(n, position) + if result is not None: + return result + + if node.type == 'trailer' and node.children[0] == '(': + leaf = node.get_previous_leaf() + if leaf is None: + return None + return CallSignatureDetails( + node.children[0], *_get_index_and_key(node.children, position)) + + node = node.parent + + return None + + +@time_cache("call_signatures_validity") +def cache_call_signatures(evaluator, context, bracket_leaf, code_lines, user_pos): + """This function calculates the cache key.""" + index = user_pos[0] - 1 + + before_cursor = code_lines[index][:user_pos[1]] + other_lines = code_lines[bracket_leaf.start_pos[0]:index] + whole = '\n'.join(other_lines + [before_cursor]) + before_bracket = re.match(r'.*\(', whole, re.DOTALL) + + module_path = context.get_root_context().py__file__() + if module_path is None: + yield None # Don't cache! + else: + yield (module_path, before_bracket, bracket_leaf.start_pos) + yield evaluate_goto_definition( + evaluator, + context, + bracket_leaf.get_previous_leaf() + ) diff --git a/pythonFiles/release/jedi/api/interpreter.py b/pythonFiles/release/jedi/api/interpreter.py index 595435c61589..202f345e94b9 100755 --- a/pythonFiles/release/jedi/api/interpreter.py +++ b/pythonFiles/release/jedi/api/interpreter.py @@ -1,109 +1,47 @@ """ TODO Some parts of this module are still not well documented. 
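# @time_cache expects a generator that first yields the cache key (or None to
# skip caching) and then the value, as cache_call_signatures does above; a
# minimal sketch reusing jedi's own decorator and settings name:
from jedi.cache import time_cache

@time_cache("call_signatures_validity")
def doubled(x):
    yield x      # the cache key
    yield x * 2  # the value that gets cached

print(doubled(3), doubled(3))  # 6 6 -- the second call is served from cache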
""" -import inspect -import re -from jedi._compatibility import builtins -from jedi import debug -from jedi.common import source_to_unicode -from jedi.cache import underscore_memoization +from jedi.evaluate.context import ModuleContext from jedi.evaluate import compiled -from jedi.evaluate.compiled.fake import get_module -from jedi.parser import tree as pt -from jedi.parser import load_grammar -from jedi.parser.fast import FastParser -from jedi.evaluate import helpers -from jedi.evaluate import iterable -from jedi.evaluate import representation as er +from jedi.evaluate.compiled import mixed +from jedi.evaluate.base_context import Context -def add_namespaces_to_parser(evaluator, namespaces, parser_module): - for namespace in namespaces: - for key, value in namespace.items(): - # Name lookups in an ast tree work by checking names_dict. - # Therefore we just add fake names to that and we're done. - arr = parser_module.names_dict.setdefault(key, []) - arr.append(LazyName(evaluator, parser_module, key, value)) +class NamespaceObject(object): + def __init__(self, dct): + self.__dict__ = dct -class LazyName(helpers.FakeName): - def __init__(self, evaluator, module, name, value): - super(LazyName, self).__init__(name) - self._module = module - self._evaluator = evaluator - self._value = value - self._name = name +class MixedModuleContext(Context): + resets_positions = True + type = 'mixed_module' - def is_definition(self): - return True + def __init__(self, evaluator, tree_module, namespaces, path): + self.evaluator = evaluator + self._namespaces = namespaces - @property - @underscore_memoization - def parent(self): - """ - Creating fake statements for the interpreter. - """ - obj = self._value - parser_path = [] - if inspect.ismodule(obj): - module = obj - else: - names = [] - try: - o = obj.__objclass__ - names.append(obj.__name__) - obj = o - except AttributeError: - pass + self._namespace_objects = [NamespaceObject(n) for n in namespaces] + self._module_context = ModuleContext(evaluator, tree_module, path=path) + self.tree_node = tree_module - try: - module_name = obj.__module__ - names.insert(0, obj.__name__) - except AttributeError: - # Unfortunately in some cases like `int` there's no __module__ - module = builtins - else: - # TODO this import is wrong. 
Yields x for x.y.z instead of z - module = __import__(module_name) - parser_path = names - raw_module = get_module(self._value) + def get_node(self): + return self.tree_node - found = [] - try: - path = module.__file__ - except AttributeError: - pass - else: - path = re.sub('c$', '', path) - if path.endswith('.py'): - # cut the `c` from `.pyc` - with open(path) as f: - source = source_to_unicode(f.read()) - mod = FastParser(load_grammar(), source, path[:-1]).module - if parser_path: - assert len(parser_path) == 1 - found = self._evaluator.find_types(mod, parser_path[0], search_global=True) - else: - found = [self._evaluator.wrap(mod)] + def get_filters(self, *args, **kwargs): + for filter in self._module_context.get_filters(*args, **kwargs): + yield filter - if not found: - debug.warning('Possibly an interpreter lookup for Python code failed %s', - parser_path) + for namespace_obj in self._namespace_objects: + compiled_object = compiled.create(self.evaluator, namespace_obj) + mixed_object = mixed.MixedObject( + self.evaluator, + parent_context=self, + compiled_object=compiled_object, + tree_context=self._module_context + ) + for filter in mixed_object.get_filters(*args, **kwargs): + yield filter - if not found: - evaluated = compiled.CompiledObject(obj) - if evaluated == builtins: - # The builtins module is special and always cached. - evaluated = compiled.builtin - found = [evaluated] - - content = iterable.AlreadyEvaluated(found) - stmt = pt.ExprStmt([self, pt.Operator(pt.zero_position_modifier, - '=', (0, 0), ''), content]) - stmt.parent = self._module - return stmt - - @parent.setter - def parent(self, value): - """Needed because the super class tries to set parent.""" + def __getattr__(self, name): + return getattr(self._module_context, name) diff --git a/pythonFiles/release/jedi/api/keywords.py b/pythonFiles/release/jedi/api/keywords.py index 2a54ba2d5817..a1bc4e7f8556 100755 --- a/pythonFiles/release/jedi/api/keywords.py +++ b/pythonFiles/release/jedi/api/keywords.py @@ -1,56 +1,111 @@ import pydoc import keyword -from jedi._compatibility import is_py3 -from jedi import common -from jedi.evaluate import compiled -from jedi.evaluate.helpers import FakeName +from jedi._compatibility import is_py3, is_py35 +from jedi.evaluate.utils import ignored +from jedi.evaluate.filters import AbstractNameDefinition +from parso.python.tree import Leaf try: from pydoc_data import topics as pydoc_topics except ImportError: - # Python 2.6 - import pydoc_topics + # Python 2 + try: + import pydoc_topics + except ImportError: + # This is for Python 3 embeddable version, which dont have + # pydoc_data module in its file python3x.zip. 
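# MixedModuleContext is what backs jedi.Interpreter, which mixes live objects
# into completion alongside parsed source; a hedged usage sketch:
import jedi

namespace = {'data': {'key': 1}}
print([c.name for c in jedi.Interpreter('data.ke', [namespace]).completions()])
# expected to include 'keys'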
+            pydoc_topics = None
 
 if is_py3:
-    keys = keyword.kwlist
+    if is_py35:
+        # In Python 3.5, async and await are not proper keywords, but for
+        # completion purposes they should act as though they are.
+        keys = keyword.kwlist + ["async", "await"]
+    else:
+        keys = keyword.kwlist
 else:
     keys = keyword.kwlist + ['None', 'False', 'True']
 
 
-def keywords(string='', pos=(0, 0), all=False):
-    if all:
-        return set([Keyword(k, pos) for k in keys])
+def has_inappropriate_leaf_keyword(pos, module):
+    relevant_errors = filter(
+        lambda error: error.first_pos[0] == pos[0],
+        module.error_statement_stacks)
+
+    for error in relevant_errors:
+        if error.next_token in keys:
+            return True
+
+    return False
+
+
+def completion_names(evaluator, stmt, pos, module):
+    keyword_list = all_keywords(evaluator)
+
+    if not isinstance(stmt, Leaf) or has_inappropriate_leaf_keyword(pos, module):
+        keyword_list = filter(
+            lambda keyword: not keyword.only_valid_as_leaf,
+            keyword_list
+        )
+    return [keyword.name for keyword in keyword_list]
+
+
+def all_keywords(evaluator, pos=(0, 0)):
+    return set([Keyword(evaluator, k, pos) for k in keys])
+
+
+def keyword(evaluator, string, pos=(0, 0)):
     if string in keys:
-        return set([Keyword(string, pos)])
-    return set()
+        return Keyword(evaluator, string, pos)
+    else:
+        return None
 
 
-def keyword_names(*args, **kwargs):
-    return [k.name for k in keywords(*args, **kwargs)]
+def get_operator(evaluator, string, pos):
+    return Keyword(evaluator, string, pos)
 
 
-def get_operator(string, pos):
-    return Keyword(string, pos)
+keywords_only_valid_as_leaf = (
+    'continue',
+    'break',
+)
+
+
+class KeywordName(AbstractNameDefinition):
+    api_type = 'keyword'
+
+    def __init__(self, evaluator, name):
+        self.evaluator = evaluator
+        self.string_name = name
+        self.parent_context = evaluator.BUILTINS
+
+    def eval(self):
+        return set()
+
+    def infer(self):
+        return [Keyword(self.evaluator, self.string_name, (0, 0))]
 
 
 class Keyword(object):
-    def __init__(self, name, pos):
-        self.name = FakeName(name, self, pos)
+    api_type = 'keyword'
+
+    def __init__(self, evaluator, name, pos):
+        self.name = KeywordName(evaluator, name)
         self.start_pos = pos
-        self.parent = compiled.builtin
+        self.parent = evaluator.BUILTINS
 
-    def get_parent_until(self):
-        return self.parent
+    @property
+    def only_valid_as_leaf(self):
+        return self.name.value in keywords_only_valid_as_leaf
 
     @property
     def names(self):
         """ For a `parsing.Name`-like comparison """
         return [self.name]
 
-    @property
-    def docstr(self):
-        return imitate_pydoc(self.name)
+    def py__doc__(self, include_call_signature=False):
+        return imitate_pydoc(self.name.string_name)
 
     def __repr__(self):
         return '<%s: %s>' % (type(self).__name__, self.name)
@@ -61,11 +116,14 @@ def imitate_pydoc(string):
     It's not possible to get the pydoc's without starting the annoying pager
     stuff.
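# Keyword completions flow through KeywordName/Keyword above, with docs coming
# from imitate_pydoc; a hedged check (exact output depends on the Python build):
import jedi

comps = jedi.Script('impo', 1, 4).completions()
print([c.name for c in comps])     # expected to include 'import'
print(bool(comps[0].docstring()))  # keyword docs come from pydoc_data, if present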
""" + if pydoc_topics is None: + return '' + # str needed because of possible unicode stuff in py2k (pydoc doesn't work # with unicode strings) string = str(string) h = pydoc.help - with common.ignored(KeyError): + with ignored(KeyError): # try to access symbols string = h.symbols[string] string, _, related = string.partition(' ') @@ -81,6 +139,6 @@ def imitate_pydoc(string): return '' try: - return pydoc_topics.topics[label] if pydoc_topics else '' + return pydoc_topics.topics[label].strip() if pydoc_topics else '' except KeyError: return '' diff --git a/pythonFiles/release/jedi/api/usages.py b/pythonFiles/release/jedi/api/usages.py deleted file mode 100755 index ecb885639032..000000000000 --- a/pythonFiles/release/jedi/api/usages.py +++ /dev/null @@ -1,49 +0,0 @@ -from jedi._compatibility import unicode -from jedi.api import classes -from jedi.parser import tree -from jedi.evaluate import imports - - -def usages(evaluator, definition_names, mods): - """ - :param definitions: list of Name - """ - def compare_array(definitions): - """ `definitions` are being compared by module/start_pos, because - sometimes the id's of the objects change (e.g. executions). - """ - result = [] - for d in definitions: - module = d.get_parent_until() - result.append((module, d.start_pos)) - return result - - search_name = unicode(list(definition_names)[0]) - compare_definitions = compare_array(definition_names) - mods |= set([d.get_parent_until() for d in definition_names]) - definitions = [] - for m in imports.get_modules_containing_name(evaluator, mods, search_name): - try: - check_names = m.used_names[search_name] - except KeyError: - continue - for name in check_names: - - result = evaluator.goto(name) - if [c for c in compare_array(result) if c in compare_definitions]: - definitions.append(classes.Definition(evaluator, name)) - # Previous definitions might be imports, so include them - # (because goto might return that import name). - compare_definitions += compare_array([name]) - return definitions - - -def usages_add_import_modules(evaluator, definitions): - """ Adds the modules of the imports """ - new = set() - for d in definitions: - imp_or_stmt = d.get_definition() - if isinstance(imp_or_stmt, tree.Import): - s = imports.ImportWrapper(evaluator, d) - new |= set(s.follow(is_goto=True)) - return set(definitions) | new diff --git a/pythonFiles/release/jedi/cache.py b/pythonFiles/release/jedi/cache.py index 56769d0d8953..01138e75a99b 100755 --- a/pythonFiles/release/jedi/cache.py +++ b/pythonFiles/release/jedi/cache.py @@ -3,8 +3,6 @@ nothing really spectacular, just some decorators. The following cache types are available: -- module caching (`load_parser` and `save_parser`), which uses pickle and is - really important to assure low load times of modules like ``numpy``. - ``time_cache`` can be used to cache something for just a limited time span, which can be useful if there's user interaction and the user cannot react faster than a certain time. @@ -14,35 +12,43 @@ these variables are being cleaned after every API usage. 
""" import time -import os -import sys -import json -import hashlib -import gc -import inspect -import shutil -import re -try: - import cPickle as pickle -except ImportError: - import pickle from jedi import settings -from jedi import common -from jedi import debug +from parso.cache import parser_cache _time_caches = {} -# for fast_parser, should not be deleted -parser_cache = {} +def underscore_memoization(func): + """ + Decorator for methods:: -class ParserCacheItem(object): - def __init__(self, parser, change_time=None): - self.parser = parser - if change_time is None: - change_time = time.time() - self.change_time = change_time + class A(object): + def x(self): + if self._x: + self._x = 10 + return self._x + + Becomes:: + + class A(object): + @underscore_memoization + def x(self): + return 10 + + A now has an attribute ``_x`` written by this decorator. + """ + name = '_' + func.__name__ + + def wrapper(self): + try: + return getattr(self, name) + except AttributeError: + result = func(self) + setattr(self, name, result) + return result + + return wrapper def clear_time_caches(delete_all=False): @@ -70,11 +76,12 @@ def clear_time_caches(delete_all=False): def time_cache(time_add_setting): """ - s This decorator works as follows: Call it with a setting and after that use the function with a callable that returns the key. But: This function is only called if the key is not available. After a certain amount of time (`time_add_setting`) the cache is invalid. + + If the given key is None, the function will not be cached. """ def _temp(key_func): dct = {} @@ -99,60 +106,11 @@ def wrapper(*args, **kwargs): return _temp -@time_cache("call_signatures_validity") -def cache_call_signatures(evaluator, call, source, user_pos): - """This function calculates the cache key.""" - index = user_pos[0] - 1 - lines = common.splitlines(source) - - before_cursor = lines[index][:user_pos[1]] - other_lines = lines[call.start_pos[0]:index] - whole = '\n'.join(other_lines + [before_cursor]) - before_bracket = re.match(r'.*\(', whole, re.DOTALL) - - module_path = call.get_parent_until().path - yield None if module_path is None else (module_path, before_bracket, call.start_pos) - yield evaluator.eval_element(call) - - -def underscore_memoization(func): - """ - Decorator for methods:: - - class A(object): - def x(self): - if self._x: - self._x = 10 - return self._x - - Becomes:: - - class A(object): - @underscore_memoization - def x(self): - return 10 - - A now has an attribute ``_x`` written by this decorator. 
- """ - name = '_' + func.__name__ - - def wrapper(self): - try: - return getattr(self, name) - except AttributeError: - result = func(self) - if inspect.isgenerator(result): - result = list(result) - setattr(self, name, result) - return result - - return wrapper - - def memoize_method(method): """A normal memoize function.""" def wrapper(self, *args, **kwargs): - dct = self.__dict__.setdefault('_memoize_method_dct', {}) + cache_dict = self.__dict__.setdefault('_memoize_method_dct', {}) + dct = cache_dict.setdefault(method, {}) key = (args, frozenset(kwargs.items())) try: return dct[key] @@ -161,176 +119,3 @@ def wrapper(self, *args, **kwargs): dct[key] = result return result return wrapper - - -def cache_star_import(func): - @time_cache("star_import_cache_validity") - def wrapper(self): - yield self.base # The cache key - yield func(self) - return wrapper - - -def _invalidate_star_import_cache_module(module, only_main=False): - """ Important if some new modules are being reparsed """ - try: - t, modules = _time_caches['star_import_cache_validity'][module] - except KeyError: - pass - else: - del _time_caches['star_import_cache_validity'][module] - - -def invalidate_star_import_cache(path): - """On success returns True.""" - try: - parser_cache_item = parser_cache[path] - except KeyError: - pass - else: - _invalidate_star_import_cache_module(parser_cache_item.parser.module) - - -def load_parser(path): - """ - Returns the module or None, if it fails. - """ - p_time = os.path.getmtime(path) if path else None - try: - parser_cache_item = parser_cache[path] - if not path or p_time <= parser_cache_item.change_time: - return parser_cache_item.parser - else: - # In case there is already a module cached and this module - # has to be reparsed, we also need to invalidate the import - # caches. - _invalidate_star_import_cache_module(parser_cache_item.parser.module) - except KeyError: - if settings.use_filesystem_cache: - return ParserPickling.load_parser(path, p_time) - - -def save_parser(path, parser, pickling=True): - try: - p_time = None if path is None else os.path.getmtime(path) - except OSError: - p_time = None - pickling = False - - item = ParserCacheItem(parser, p_time) - parser_cache[path] = item - if settings.use_filesystem_cache and pickling: - ParserPickling.save_parser(path, item) - - -class ParserPickling(object): - - version = 24 - """ - Version number (integer) for file system cache. - - Increment this number when there are any incompatible changes in - parser representation classes. For example, the following changes - are regarded as incompatible. - - - Class name is changed. - - Class is moved to another module. - - Defined slot of the class is changed. - """ - - def __init__(self): - self.__index = None - self.py_tag = 'cpython-%s%s' % sys.version_info[:2] - """ - Short name for distinguish Python implementations and versions. - - It's like `sys.implementation.cache_tag` but for Python < 3.3 - we generate something similar. See: - http://docs.python.org/3/library/sys.html#sys.implementation - - .. todo:: Detect interpreter (e.g., PyPy). 
- """ - - def load_parser(self, path, original_changed_time): - try: - pickle_changed_time = self._index[path] - except KeyError: - return None - if original_changed_time is not None \ - and pickle_changed_time < original_changed_time: - # the pickle file is outdated - return None - - with open(self._get_hashed_path(path), 'rb') as f: - try: - gc.disable() - parser_cache_item = pickle.load(f) - finally: - gc.enable() - - debug.dbg('pickle loaded: %s', path) - parser_cache[path] = parser_cache_item - return parser_cache_item.parser - - def save_parser(self, path, parser_cache_item): - self.__index = None - try: - files = self._index - except KeyError: - files = {} - self._index = files - - with open(self._get_hashed_path(path), 'wb') as f: - pickle.dump(parser_cache_item, f, pickle.HIGHEST_PROTOCOL) - files[path] = parser_cache_item.change_time - - self._flush_index() - - @property - def _index(self): - if self.__index is None: - try: - with open(self._get_path('index.json')) as f: - data = json.load(f) - except (IOError, ValueError): - self.__index = {} - else: - # 0 means version is not defined (= always delete cache): - if data.get('version', 0) != self.version: - self.clear_cache() - self.__index = {} - else: - self.__index = data['index'] - return self.__index - - def _remove_old_modules(self): - # TODO use - change = False - if change: - self._flush_index(self) - self._index # reload index - - def _flush_index(self): - data = {'version': self.version, 'index': self._index} - with open(self._get_path('index.json'), 'w') as f: - json.dump(data, f) - self.__index = None - - def clear_cache(self): - shutil.rmtree(self._cache_directory()) - - def _get_hashed_path(self, path): - return self._get_path('%s.pkl' % hashlib.md5(path.encode("utf-8")).hexdigest()) - - def _get_path(self, file): - dir = self._cache_directory() - if not os.path.exists(dir): - os.makedirs(dir) - return os.path.join(dir, file) - - def _cache_directory(self): - return os.path.join(settings.cache_directory, self.py_tag) - - -# is a singleton -ParserPickling = ParserPickling() diff --git a/pythonFiles/release/jedi/common/__init__.py b/pythonFiles/release/jedi/common/__init__.py new file mode 100644 index 000000000000..702a5e609985 --- /dev/null +++ b/pythonFiles/release/jedi/common/__init__.py @@ -0,0 +1 @@ +from jedi.common.context import BaseContextSet, BaseContext diff --git a/pythonFiles/release/jedi/common/context.py b/pythonFiles/release/jedi/common/context.py new file mode 100644 index 000000000000..dfcf5a9e17e6 --- /dev/null +++ b/pythonFiles/release/jedi/common/context.py @@ -0,0 +1,67 @@ +class BaseContext(object): + def __init__(self, evaluator, parent_context=None): + self.evaluator = evaluator + self.parent_context = parent_context + + def get_root_context(self): + context = self + while True: + if context.parent_context is None: + return context + context = context.parent_context + + +class BaseContextSet(object): + def __init__(self, *args): + self._set = set(args) + + @classmethod + def from_iterable(cls, iterable): + return cls.from_set(set(iterable)) + + @classmethod + def from_set(cls, set_): + self = cls() + self._set = set_ + return self + + @classmethod + def from_sets(cls, sets): + """ + Used to work with an iterable of set. 
+ """ + aggregated = set() + sets = list(sets) + for set_ in sets: + if isinstance(set_, BaseContextSet): + aggregated |= set_._set + else: + aggregated |= set_ + return cls.from_set(aggregated) + + def __or__(self, other): + return type(self).from_set(self._set | other._set) + + def __iter__(self): + for element in self._set: + yield element + + def __bool__(self): + return bool(self._set) + + def __len__(self): + return len(self._set) + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, ', '.join(str(s) for s in self._set)) + + def filter(self, filter_func): + return type(self).from_iterable(filter(filter_func, self._set)) + + def __getattr__(self, name): + def mapper(*args, **kwargs): + return type(self).from_sets( + getattr(context, name)(*args, **kwargs) + for context in self._set + ) + return mapper diff --git a/pythonFiles/release/jedi/debug.py b/pythonFiles/release/jedi/debug.py index 5c59a7b6de04..8caf1accb17f 100755 --- a/pythonFiles/release/jedi/debug.py +++ b/pythonFiles/release/jedi/debug.py @@ -1,25 +1,53 @@ from jedi._compatibility import encoding, is_py3, u -import inspect import os import time +def _lazy_colorama_init(): + """ + Lazily init colorama if necessary, not to screw up stdout is debug not + enabled. + + This version of the function does nothing. + """ + pass + +_inited=False + try: if os.name == 'nt': - # does not work on Windows, as pyreadline and colorama interfere + # Does not work on Windows, as pyreadline and colorama interfere raise ImportError else: # Use colorama for nicer console output. from colorama import Fore, init from colorama import initialise - # pytest resets the stream at the end - causes troubles. Since after - # every output the stream is reset automatically we don't need this. - initialise.atexit_done = True - init() + def _lazy_colorama_init(): + """ + Lazily init colorama if necessary, not to screw up stdout is + debug not enabled. + + This version of the function does init colorama. + """ + global _inited + if not _inited: + # pytest resets the stream at the end - causes troubles. Since + # after every output the stream is reset automatically we don't + # need this. + initialise.atexit_done = True + try: + init() + except Exception: + # Colorama fails with initializing under vim and is buggy in + # version 0.3.6. + pass + _inited = True + except ImportError: class Fore(object): RED = '' GREEN = '' YELLOW = '' + MAGENTA = '' RESET = '' NOTICE = object() @@ -32,15 +60,14 @@ class Fore(object): # callback, interface: level, str debug_function = None -ignored_modules = ['jedi.evaluate.builtin', 'jedi.parser'] -_debug_indent = -1 +_debug_indent = 0 _start_time = time.time() def reset_time(): global _start_time, _debug_indent _start_time = time.time() - _debug_indent = -1 + _debug_indent = 0 def increase_indent(func): @@ -49,44 +76,50 @@ def wrapper(*args, **kwargs): global _debug_indent _debug_indent += 1 try: - result = func(*args, **kwargs) + return func(*args, **kwargs) finally: _debug_indent -= 1 - return result return wrapper -def dbg(message, *args): +def dbg(message, *args, **kwargs): """ Looks at the stack, to see if a debug message should be printed. 
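# BaseContextSet semantics in isolation (import path per the new
# jedi/common/__init__.py above):
from jedi.common import BaseContextSet

a = BaseContextSet(1, 2)
b = BaseContextSet.from_iterable([2, 3])
print(len(a | b), sorted(a | b))  # 3 [1, 2, 3]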
""" + # Python 2 compatibility, because it doesn't understand default args + color = kwargs.pop('color', 'GREEN') + assert color + if debug_function and enable_notice: - frm = inspect.stack()[1] - mod = inspect.getmodule(frm[0]) - if not (mod.__name__ in ignored_modules): - i = ' ' * _debug_indent - debug_function(NOTICE, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args)) + i = ' ' * _debug_indent + _lazy_colorama_init() + debug_function(color, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args)) + +def warning(message, *args, **kwargs): + format = kwargs.pop('format', True) + assert not kwargs -def warning(message, *args): if debug_function and enable_warning: i = ' ' * _debug_indent - debug_function(WARNING, i + 'warning: ' + message % tuple(u(repr(a)) for a in args)) + if format: + message = message % tuple(u(repr(a)) for a in args) + debug_function('RED', i + 'warning: ' + message) def speed(name): if debug_function and enable_speed: now = time.time() i = ' ' * _debug_indent - debug_function(SPEED, i + 'speed: ' + '%s %s' % (name, now - _start_time)) + debug_function('YELLOW', i + 'speed: ' + '%s %s' % (name, now - _start_time)) -def print_to_stdout(level, str_out): - """ The default debug function """ - if level == NOTICE: - col = Fore.GREEN - elif level == WARNING: - col = Fore.RED - else: - col = Fore.YELLOW +def print_to_stdout(color, str_out): + """ + The default debug function that prints to standard out. + + :param str color: A string that is an attribute of ``colorama.Fore``. + """ + col = getattr(Fore, color) + _lazy_colorama_init() if not is_py3: str_out = str_out.encode(encoding, 'replace') print(col + str_out + Fore.RESET) diff --git a/pythonFiles/release/jedi/evaluate/__init__.py b/pythonFiles/release/jedi/evaluate/__init__.py index a959d05f1835..20461071abdb 100755 --- a/pythonFiles/release/jedi/evaluate/__init__.py +++ b/pythonFiles/release/jedi/evaluate/__init__.py @@ -12,29 +12,31 @@ * The programmer is not a total dick, e.g. like `this `_ :-) -The actual algorithm is based on a principle called lazy evaluation. If you -don't know about it, google it. That said, the typical entry point for static -analysis is calling ``eval_statement``. There's separate logic for -autocompletion in the API, the evaluator is all about evaluating an expression. +The actual algorithm is based on a principle called lazy evaluation. That +said, the typical entry point for static analysis is calling +``eval_expr_stmt``. There's separate logic for autocompletion in the API, the +evaluator is all about evaluating an expression. -Now you need to understand what follows after ``eval_statement``. Let's +TODO this paragraph is not what jedi does anymore. + +Now you need to understand what follows after ``eval_expr_stmt``. Let's make an example:: import datetime datetime.date.toda# <-- cursor here First of all, this module doesn't care about completion. It really just cares -about ``datetime.date``. At the end of the procedure ``eval_statement`` will +about ``datetime.date``. At the end of the procedure ``eval_expr_stmt`` will return the ``date`` class. To *visualize* this (simplified): -- ``Evaluator.eval_statement`` doesn't do much, because there's no assignment. -- ``Evaluator.eval_element`` cares for resolving the dotted path +- ``Evaluator.eval_expr_stmt`` doesn't do much, because there's no assignment. 
+- ``Context.eval_node`` cares for resolving the dotted path - ``Evaluator.find_types`` searches for global definitions of datetime, which it finds in the definition of an import, by scanning the syntax tree. - Using the import logic, the datetime module is found. -- Now ``find_types`` is called again by ``eval_element`` to find ``date`` +- Now ``find_types`` is called again by ``eval_node`` to find ``date`` inside the datetime module. Now what would happen if we wanted ``datetime.date.foo.bar``? Two more @@ -46,7 +48,7 @@ from foo import bar Date = bar.baz -Well... You get it. Just another ``eval_statement`` recursion. It's really +Well... You get it. Just another ``eval_expr_stmt`` recursion. It's really easy. Python can obviously get way more complicated then this. To understand tuple assignments, list comprehensions and everything else, a lot more code had to be written. @@ -60,320 +62,298 @@ that are not used are just being ignored. """ -import copy -from itertools import chain +import sys + +from parso.python import tree +import parso -from jedi.parser import tree from jedi import debug -from jedi.evaluate import representation as er +from jedi import parser_utils +from jedi.evaluate.utils import unite from jedi.evaluate import imports from jedi.evaluate import recursion -from jedi.evaluate import iterable -from jedi.evaluate.cache import memoize_default -from jedi.evaluate import stdlib -from jedi.evaluate import finder +from jedi.evaluate.cache import evaluator_function_cache from jedi.evaluate import compiled -from jedi.evaluate import precedence -from jedi.evaluate import param from jedi.evaluate import helpers +from jedi.evaluate.filters import TreeNameDefinition, ParamName +from jedi.evaluate.base_context import ContextualizedName, ContextualizedNode, \ + ContextSet, NO_CONTEXTS, iterate_contexts +from jedi.evaluate.context import ClassContext, FunctionContext, \ + AnonymousInstance, BoundMethod +from jedi.evaluate.context.iterable import CompForContext +from jedi.evaluate.syntax_tree import eval_trailer, eval_expr_stmt, \ + eval_node, check_tuple_assignments class Evaluator(object): - def __init__(self, grammar): + def __init__(self, grammar, project): self.grammar = grammar + self.latest_grammar = parso.load_grammar(version='3.6') self.memoize_cache = {} # for memoize decorators # To memorize modules -> equals `sys.modules`. self.modules = {} # like `sys.modules`. - self.compiled_cache = {} # see `compiled.create()` - self.recursion_detector = recursion.RecursionDetector() - self.execution_recursion_detector = recursion.ExecutionRecursionDetector() + self.compiled_cache = {} # see `evaluate.compiled.create()` + self.inferred_element_counts = {} + self.mixed_cache = {} # see `evaluate.compiled.mixed._create()` self.analysis = [] + self.dynamic_params_depth = 0 + self.is_analysis = False + self.python_version = sys.version_info[:2] + self.project = project + project.add_evaluator(self) - def wrap(self, element): - if isinstance(element, tree.Class): - return er.Class(self, element) - elif isinstance(element, tree.Function): - if isinstance(element, tree.Lambda): - return er.LambdaWrapper(self, element) - else: - return er.Function(self, element) - elif isinstance(element, (tree.Module)) \ - and not isinstance(element, er.ModuleWrapper): - return er.ModuleWrapper(self, element) - else: - return element - - def find_types(self, scope, name_str, position=None, search_global=False, - is_goto=False): - """ - This is the search function. The most important part to debug. 
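Seen from the public API, the walkthrough in this docstring is what powers an ordinary completion request. A sketch, assuming this era's `jedi.Script(source, line, column)` signature:

    import jedi

    source = 'import datetime\ndatetime.date.toda'
    script = jedi.Script(source, 2, len('datetime.date.toda'))
    print([c.name for c in script.completions()])   # ['today']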
- `remove_statements` and `filter_statements` really are the core part of - this completion. + self.reset_recursion_limitations() - :param position: Position of the last statement -> tuple of line, column - :return: List of Names. Their parents are the types. - """ - f = finder.NameFinder(self, scope, name_str, position) - scopes = f.scopes(search_global) - if is_goto: - return f.filter_name(scopes) - return f.find(scopes, search_global) - - @memoize_default(default=[], evaluator_is_first_arg=True) - @recursion.recursion_decorator - @debug.increase_indent - def eval_statement(self, stmt, seek_name=None): - """ - The starting point of the completion. A statement always owns a call - list, which are the calls, that a statement does. In case multiple - names are defined in the statement, `seek_name` returns the result for - this name. + # Constants + self.BUILTINS = compiled.get_special_object(self, 'BUILTINS') - :param stmt: A `tree.ExprStmt`. - """ - debug.dbg('eval_statement %s (%s)', stmt, seek_name) - types = self.eval_element(stmt.get_rhs()) - - if seek_name: - types = finder.check_tuple_assignments(types, seek_name) - - first_operation = stmt.first_operation() - if first_operation not in ('=', None) and not isinstance(stmt, er.InstanceElement): # TODO don't check for this. - # `=` is always the last character in aug assignments -> -1 - operator = copy.copy(first_operation) - operator.value = operator.value[:-1] - name = str(stmt.get_defined_names()[0]) - parent = self.wrap(stmt.get_parent_scope()) - left = self.find_types(parent, name, stmt.start_pos, search_global=True) - if isinstance(stmt.get_parent_until(tree.ForStmt), tree.ForStmt): - # Iterate through result and add the values, that's possible - # only in for loops without clutter, because they are - # predictable. - for r in types: - left = precedence.calculate(self, left, operator, [r]) - types = left - else: - types = precedence.calculate(self, left, operator, types) - debug.dbg('eval_statement result %s', types) - return types - - @memoize_default(evaluator_is_first_arg=True) - def eval_element(self, element): - if isinstance(element, iterable.AlreadyEvaluated): - return list(element) - elif isinstance(element, iterable.MergedNodes): - return iterable.unite(self.eval_element(e) for e in element) - - debug.dbg('eval_element %s@%s', element, element.start_pos) - if isinstance(element, (tree.Name, tree.Literal)) or tree.is_node(element, 'atom'): - return self._eval_atom(element) - elif isinstance(element, tree.Keyword): - # For False/True/None - if element.value in ('False', 'True', 'None'): - return [compiled.builtin.get_by_name(element.value)] + def reset_recursion_limitations(self): + self.recursion_detector = recursion.RecursionDetector() + self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self) + + def eval_element(self, context, element): + if isinstance(context, CompForContext): + return eval_node(context, element) + + if_stmt = element + while if_stmt is not None: + if_stmt = if_stmt.parent + if if_stmt.type in ('if_stmt', 'for_stmt'): + break + if parser_utils.is_scope(if_stmt): + if_stmt = None + break + predefined_if_name_dict = context.predefined_names.get(if_stmt) + if predefined_if_name_dict is None and if_stmt and if_stmt.type == 'if_stmt': + if_stmt_test = if_stmt.children[1] + name_dicts = [{}] + # If we already did a check, we don't want to do it again -> If + # context.predefined_names is filled, we stop. + # We don't want to check the if stmt itself, it's just about + # the content. 
+ if element.start_pos > if_stmt_test.end_pos: + # Now we need to check if the names in the if_stmt match the + # names in the suite. + if_names = helpers.get_names_of_node(if_stmt_test) + element_names = helpers.get_names_of_node(element) + str_element_names = [e.value for e in element_names] + if any(i.value in str_element_names for i in if_names): + for if_name in if_names: + definitions = self.goto_definitions(context, if_name) + # Every name that has multiple different definitions + # causes the complexity to rise. The complexity should + # never fall below 1. + if len(definitions) > 1: + if len(name_dicts) * len(definitions) > 16: + debug.dbg('Too many options for if branch evaluation %s.', if_stmt) + # There's only a certain amount of branches + # Jedi can evaluate, otherwise it will take to + # long. + name_dicts = [{}] + break + + original_name_dicts = list(name_dicts) + name_dicts = [] + for definition in definitions: + new_name_dicts = list(original_name_dicts) + for i, name_dict in enumerate(new_name_dicts): + new_name_dicts[i] = name_dict.copy() + new_name_dicts[i][if_name.value] = ContextSet(definition) + + name_dicts += new_name_dicts + else: + for name_dict in name_dicts: + name_dict[if_name.value] = definitions + if len(name_dicts) > 1: + result = ContextSet() + for name_dict in name_dicts: + with helpers.predefine_names(context, if_stmt, name_dict): + result |= eval_node(context, element) + return result else: - return [] - elif element.isinstance(tree.Lambda): - return [er.LambdaWrapper(self, element)] - elif element.isinstance(er.LambdaWrapper): - return [element] # TODO this is no real evaluation. - elif element.type == 'expr_stmt': - return self.eval_statement(element) - elif element.type == 'power': - types = self._eval_atom(element.children[0]) - for trailer in element.children[1:]: - if trailer == '**': # has a power operation. - raise NotImplementedError - types = self.eval_trailer(types, trailer) - - return types - elif element.type in ('testlist_star_expr', 'testlist',): - # The implicit tuple in statements. - return [iterable.ImplicitTuple(self, element)] - elif element.type in ('not_test', 'factor'): - types = self.eval_element(element.children[-1]) - for operator in element.children[:-1]: - types = list(precedence.factor_calculate(self, types, operator)) - return types - elif element.type == 'test': - # `x if foo else y` case. - return (self.eval_element(element.children[0]) + - self.eval_element(element.children[-1])) - elif element.type == 'operator': - # Must be an ellipsis, other operators are not evaluated. - return [] # Ignore for now. - elif element.type == 'dotted_name': - types = self._eval_atom(element.children[0]) - for next_name in element.children[2::2]: - types = list(chain.from_iterable(self.find_types(typ, next_name) - for typ in types)) - return types + return self._eval_element_if_evaluated(context, element) else: - return precedence.calculate_children(self, element.children) - - def _eval_atom(self, atom): - """ - Basically to process ``atom`` nodes. The parser sometimes doesn't - generate the node (because it has just one child). In that case an atom - might be a name or a literal as well. - """ - if isinstance(atom, tree.Name): - # This is the first global lookup. 
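The `name_dicts` bookkeeping above exists for user code like the following: when `x` has several possible definitions, each branch of the `if` can be evaluated with `x` pinned to one candidate at a time (up to the 16-combination cap) instead of unioning every definition everywhere:

    def f(x):
        if isinstance(x, str):
            return x.upper()    # evaluated with x predefined as str
        return x + 1            # evaluated with the remaining candidates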
- stmt = atom.get_definition() - scope = stmt.get_parent_until(tree.IsScope, include_current=True) - if isinstance(stmt, tree.CompFor): - stmt = stmt.get_parent_until((tree.ClassOrFunc, tree.ExprStmt)) - if stmt.type != 'expr_stmt': - # We only need to adjust the start_pos for statements, because - # there the name cannot be used. - stmt = atom - return self.find_types(scope, atom, stmt.start_pos, search_global=True) - elif isinstance(atom, tree.Literal): - return [compiled.create(self, atom.eval())] - else: - c = atom.children - # Parentheses without commas are not tuples. - if c[0] == '(' and not len(c) == 2 \ - and not(tree.is_node(c[1], 'testlist_comp') - and len(c[1].children) > 1): - return self.eval_element(c[1]) - try: - comp_for = c[1].children[1] - except (IndexError, AttributeError): - pass + if predefined_if_name_dict: + return eval_node(context, element) else: - if isinstance(comp_for, tree.CompFor): - return [iterable.Comprehension.from_atom(self, atom)] - return [iterable.Array(self, atom)] - - def eval_trailer(self, types, trailer): - trailer_op, node = trailer.children[:2] - if node == ')': # `arglist` is optional. - node = () - new_types = [] - for typ in types: - debug.dbg('eval_trailer: %s in scope %s', trailer, typ) - if trailer_op == '.': - new_types += self.find_types(typ, node) - elif trailer_op == '(': - new_types += self.execute(typ, node, trailer) - elif trailer_op == '[': - try: - get = typ.get_index_types - except AttributeError: - debug.warning("TypeError: '%s' object is not subscriptable" - % typ) - else: - new_types += get(self, node) - return new_types + return self._eval_element_if_evaluated(context, element) - def execute_evaluated(self, obj, *args): + def _eval_element_if_evaluated(self, context, element): """ - Execute a function with already executed arguments. + TODO This function is temporary: Merge with eval_element. """ - args = [iterable.AlreadyEvaluated([arg]) for arg in args] - return self.execute(obj, args) - - @debug.increase_indent - def execute(self, obj, arguments=(), trailer=None): - if not isinstance(arguments, param.Arguments): - arguments = param.Arguments(self, arguments, trailer) - - if obj.isinstance(er.Function): - obj = obj.get_decorated_func() - - debug.dbg('execute: %s %s', obj, arguments) - try: - # Some stdlib functions like super(), namedtuple(), etc. have been - # hard-coded in Jedi to support them. - return stdlib.execute(self, obj, arguments) - except stdlib.NotInStdLib: - pass - - try: - func = obj.py__call__ - except AttributeError: - debug.warning("no execution possible %s", obj) - return [] - else: - types = func(self, arguments) - debug.dbg('execute result: %s in %s', types, obj) - return types - - def goto_definition(self, name): - def_ = name.get_definition() - if def_.type == 'expr_stmt' and name in def_.get_defined_names(): - return self.eval_statement(def_, name) - call = helpers.call_of_name(name) - return self.eval_element(call) - - def goto(self, name): - def resolve_implicit_imports(names): - for name in names: - if isinstance(name.parent, helpers.FakeImport): - # Those are implicit imports. 
- s = imports.ImportWrapper(self, name) - for n in s.follow(is_goto=True): - yield n - else: - yield name + parent = element + while parent is not None: + parent = parent.parent + predefined_if_name_dict = context.predefined_names.get(parent) + if predefined_if_name_dict is not None: + return eval_node(context, element) + return self._eval_element_cached(context, element) + + @evaluator_function_cache(default=NO_CONTEXTS) + def _eval_element_cached(self, context, element): + return eval_node(context, element) + + def goto_definitions(self, context, name): + def_ = name.get_definition(import_name_always=True) + if def_ is not None: + type_ = def_.type + if type_ == 'classdef': + return [ClassContext(self, context, name.parent)] + elif type_ == 'funcdef': + return [FunctionContext(self, context, name.parent)] + + if type_ == 'expr_stmt': + is_simple_name = name.parent.type not in ('power', 'trailer') + if is_simple_name: + return eval_expr_stmt(context, def_, name) + if type_ == 'for_stmt': + container_types = context.eval_node(def_.children[3]) + cn = ContextualizedNode(context, def_.children[3]) + for_types = iterate_contexts(container_types, cn) + c_node = ContextualizedName(context, name) + return check_tuple_assignments(self, c_node, for_types) + if type_ in ('import_from', 'import_name'): + return imports.infer_import(context, name) + + return helpers.evaluate_call_of_leaf(context, name) + + def goto(self, context, name): + definition = name.get_definition(import_name_always=True) + if definition is not None: + type_ = definition.type + if type_ == 'expr_stmt': + # Only take the parent, because if it's more complicated than just + # a name it's something you can "goto" again. + is_simple_name = name.parent.type not in ('power', 'trailer') + if is_simple_name: + return [TreeNameDefinition(context, name)] + elif type_ == 'param': + return [ParamName(context, name)] + elif type_ in ('funcdef', 'classdef'): + return [TreeNameDefinition(context, name)] + elif type_ in ('import_from', 'import_name'): + module_names = imports.infer_import(context, name, is_goto=True) + return module_names - stmt = name.get_definition() par = name.parent - if par.type == 'argument' and par.children[1] == '=' and par.children[0] == name: + node_type = par.type + if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name: # Named param goto. 
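For reference, these are the definition shapes `goto_definitions` above dispatches on, one name per branch (illustrative source):

    class C(object): pass     # classdef     -> ClassContext
    def f(): pass             # funcdef      -> FunctionContext
    y = C()                   # expr_stmt    -> eval_expr_stmt
    for item in [1, 2]:       # for_stmt     -> iterate the container types
        item
    from os import path       # import_from  -> imports.infer_import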
trailer = par.parent if trailer.type == 'arglist': trailer = trailer.parent if trailer.type != 'classdef': if trailer.type == 'decorator': - types = self.eval_element(trailer.children[1]) + context_set = context.eval_node(trailer.children[1]) else: i = trailer.parent.children.index(trailer) to_evaluate = trailer.parent.children[:i] - types = self.eval_element(to_evaluate[0]) + if to_evaluate[0] == 'await': + to_evaluate.pop(0) + context_set = context.eval_node(to_evaluate[0]) for trailer in to_evaluate[1:]: - types = self.eval_trailer(types, trailer) + context_set = eval_trailer(context, context_set, trailer) param_names = [] - for typ in types: + for context in context_set: try: - params = typ.params + get_param_names = context.get_param_names except AttributeError: pass else: - param_names += [param.name for param in params - if param.name.value == name.value] + for param_name in get_param_names(): + if param_name.string_name == name.value: + param_names.append(param_name) return param_names - elif isinstance(par, tree.ExprStmt) and name in par.get_defined_names(): - # Only take the parent, because if it's more complicated than just - # a name it's something you can "goto" again. - return [name] - elif isinstance(par, (tree.Param, tree.Function, tree.Class)) and par.name is name: - return [name] - elif isinstance(stmt, tree.Import): - modules = imports.ImportWrapper(self, name).follow(is_goto=True) - return list(resolve_implicit_imports(modules)) - elif par.type == 'dotted_name': # Is a decorator. + elif node_type == 'dotted_name': # Is a decorator. index = par.children.index(name) if index > 0: new_dotted = helpers.deep_ast_copy(par) new_dotted.children[index - 1:] = [] - types = self.eval_element(new_dotted) - return resolve_implicit_imports(iterable.unite( - self.find_types(typ, name, is_goto=True) for typ in types - )) - - scope = name.get_parent_scope() - if tree.is_node(name.parent, 'trailer'): - call = helpers.call_of_name(name, cut_own_trailer=True) - types = self.eval_element(call) - return resolve_implicit_imports(iterable.unite( - self.find_types(typ, name, is_goto=True) for typ in types - )) + values = context.eval_node(new_dotted) + return unite( + value.py__getattribute__(name, name_context=context, is_goto=True) + for value in values + ) + + if node_type == 'trailer' and par.children[0] == '.': + values = helpers.evaluate_call_of_leaf(context, name, cut_own_trailer=True) + return unite( + value.py__getattribute__(name, name_context=context, is_goto=True) + for value in values + ) else: - if stmt.type != 'expr_stmt': - # We only need to adjust the start_pos for statements, because - # there the name cannot be used. + stmt = tree.search_ancestor( + name, 'expr_stmt', 'lambdef' + ) or name + if stmt.type == 'lambdef': stmt = name - return self.find_types(scope, name, stmt.start_pos, - search_global=True, is_goto=True) + return context.py__getattribute__( + name, + position=stmt.start_pos, + search_global=True, is_goto=True + ) + + def create_context(self, base_context, node, node_is_context=False, node_is_object=False): + def parent_scope(node): + while True: + node = node.parent + + if parser_utils.is_scope(node): + return node + elif node.type in ('argument', 'testlist_comp'): + if node.children[1].type == 'comp_for': + return node.children[1] + elif node.type == 'dictorsetmaker': + for n in node.children[1:4]: + # In dictionaries it can be pretty much anything. 
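The `parent_scope()` walk above has to tell ordinary scopes apart from comprehension scopes, which own their loop variables. Illustrative shapes:

    pairs = [(1, 'a')]

    def outer():                        # funcdef  -> FunctionContext
        class Inner(object):            # classdef -> ClassContext
            xs = [i for i in range(3)]  # comp_for -> CompForContext
        return Inner

    d = {k: v for k, v in pairs}        # dictorsetmaker comp_for, too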
+ if n.type == 'comp_for': + return n + + def from_scope_node(scope_node, child_is_funcdef=None, is_nested=True, node_is_object=False): + if scope_node == base_node: + return base_context + + is_funcdef = scope_node.type in ('funcdef', 'lambdef') + parent_scope = parser_utils.get_parent_scope(scope_node) + parent_context = from_scope_node(parent_scope, child_is_funcdef=is_funcdef) + + if is_funcdef: + if isinstance(parent_context, AnonymousInstance): + func = BoundMethod( + self, parent_context, parent_context.class_context, + parent_context.parent_context, scope_node + ) + else: + func = FunctionContext( + self, + parent_context, + scope_node + ) + if is_nested and not node_is_object: + return func.get_function_execution() + return func + elif scope_node.type == 'classdef': + class_context = ClassContext(self, parent_context, scope_node) + if child_is_funcdef: + # anonymous instance + return AnonymousInstance(self, parent_context, class_context) + else: + return class_context + elif scope_node.type == 'comp_for': + if node.start_pos >= scope_node.children[-1].start_pos: + return parent_context + return CompForContext.from_comp_for(parent_context, scope_node) + raise Exception("There's a scope that was not managed.") + + base_node = base_context.tree_node + + if node_is_context and parser_utils.is_scope(node): + scope_node = node + else: + if node.parent.type in ('funcdef', 'classdef') and node.parent.name == node: + # When we're on class/function names/leafs that define the + # object itself and not its contents. + node = node.parent + scope_node = parent_scope(node) + return from_scope_node(scope_node, is_nested=True, node_is_object=node_is_object) diff --git a/pythonFiles/release/jedi/evaluate/analysis.py b/pythonFiles/release/jedi/evaluate/analysis.py index d4a411f42b68..c825e5fef9e9 100755 --- a/pythonFiles/release/jedi/evaluate/analysis.py +++ b/pythonFiles/release/jedi/evaluate/analysis.py @@ -2,7 +2,7 @@ Module for statical analysis. 
""" from jedi import debug -from jedi.parser import tree +from parso.python import tree from jedi.evaluate.compiled import CompiledObject @@ -10,14 +10,18 @@ 'attribute-error': (1, AttributeError, 'Potential AttributeError.'), 'name-error': (2, NameError, 'Potential NameError.'), 'import-error': (3, ImportError, 'Potential ImportError.'), - 'type-error-generator': (4, TypeError, "TypeError: 'generator' object is not subscriptable."), - 'type-error-too-many-arguments': (5, TypeError, None), - 'type-error-too-few-arguments': (6, TypeError, None), - 'type-error-keyword-argument': (7, TypeError, None), - 'type-error-multiple-values': (8, TypeError, None), - 'type-error-star-star': (9, TypeError, None), - 'type-error-star': (10, TypeError, None), - 'type-error-operation': (11, TypeError, None), + 'type-error-too-many-arguments': (4, TypeError, None), + 'type-error-too-few-arguments': (5, TypeError, None), + 'type-error-keyword-argument': (6, TypeError, None), + 'type-error-multiple-values': (7, TypeError, None), + 'type-error-star-star': (8, TypeError, None), + 'type-error-star': (9, TypeError, None), + 'type-error-operation': (10, TypeError, None), + 'type-error-not-iterable': (11, TypeError, None), + 'type-error-isinstance': (12, TypeError, None), + 'type-error-not-subscriptable': (13, TypeError, None), + 'value-error-too-many-values': (14, ValueError, None), + 'value-error-too-few-values': (15, ValueError, None), } @@ -52,8 +56,8 @@ def __str__(self): return self.__unicode__() def __eq__(self, other): - return (self.path == other.path and self.name == other.name - and self._start_pos == other._start_pos) + return (self.path == other.path and self.name == other.name and + self._start_pos == other._start_pos) def __ne__(self, other): return not self.__eq__(other) @@ -71,61 +75,61 @@ class Warning(Error): pass -def add(evaluator, name, jedi_obj, message=None, typ=Error, payload=None): - from jedi.evaluate.iterable import MergedNodes - while isinstance(jedi_obj, MergedNodes): - if len(jedi_obj) != 1: - # TODO is this kosher? - return - jedi_obj = list(jedi_obj)[0] - - exception = CODES[name][1] - if _check_for_exception_catch(evaluator, jedi_obj, exception, payload): +def add(node_context, error_name, node, message=None, typ=Error, payload=None): + exception = CODES[error_name][1] + if _check_for_exception_catch(node_context, node, exception, payload): return - module_path = jedi_obj.get_parent_until().path - instance = typ(name, module_path, jedi_obj.start_pos, message) - debug.warning(str(instance)) - evaluator.analysis.append(instance) + # TODO this path is probably not right + module_context = node_context.get_root_context() + module_path = module_context.py__file__() + instance = typ(error_name, module_path, node.start_pos, message) + debug.warning(str(instance), format=False) + node_context.evaluator.analysis.append(instance) def _check_for_setattr(instance): """ Check if there's any setattr method inside an instance. If so, return True. 
""" - module = instance.get_parent_until() + from jedi.evaluate.context import ModuleContext + module = instance.get_root_context() + if not isinstance(module, ModuleContext): + return False + + node = module.tree_node try: - stmts = module.used_names['setattr'] + stmts = node.get_used_names()['setattr'] except KeyError: return False - return any(instance.start_pos < stmt.start_pos < instance.end_pos + return any(node.start_pos < stmt.start_pos < node.end_pos for stmt in stmts) -def add_attribute_error(evaluator, scope, name): - message = ('AttributeError: %s has no attribute %s.' % (scope, name)) - from jedi.evaluate.representation import Instance +def add_attribute_error(name_context, lookup_context, name): + message = ('AttributeError: %s has no attribute %s.' % (lookup_context, name)) + from jedi.evaluate.context.instance import AbstractInstanceContext, CompiledInstanceName # Check for __getattr__/__getattribute__ existance and issue a warning # instead of an error, if that happens. - if isinstance(scope, Instance): - typ = Warning - try: - scope.get_subscope_by_name('__getattr__') - except KeyError: - try: - scope.get_subscope_by_name('__getattribute__') - except KeyError: - if not _check_for_setattr(scope): - typ = Error - else: - typ = Error + typ = Error + if isinstance(lookup_context, AbstractInstanceContext): + slot_names = lookup_context.get_function_slot_names('__getattr__') + \ + lookup_context.get_function_slot_names('__getattribute__') + for n in slot_names: + if isinstance(name, CompiledInstanceName) and \ + n.parent_context.obj == object: + typ = Warning + break + + if _check_for_setattr(lookup_context): + typ = Warning - payload = scope, name - add(evaluator, 'attribute-error', name, message, typ, payload) + payload = lookup_context, name + add(name_context, 'attribute-error', name, message, typ, payload) -def _check_for_exception_catch(evaluator, jedi_obj, exception, payload=None): +def _check_for_exception_catch(node_context, jedi_name, exception, payload=None): """ Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and doesn't count as an error (if equal to `exception`). @@ -146,157 +150,65 @@ def check_try_for_except(obj, exception): colon = next(iterator) suite = next(iterator) if branch_type == 'try' \ - and not (branch_type.start_pos < jedi_obj.start_pos <= suite.end_pos): + and not (branch_type.start_pos < jedi_name.start_pos <= suite.end_pos): return False - for node in obj.except_clauses(): + for node in obj.get_except_clause_tests(): if node is None: return True # An exception block that catches everything. 
else: - except_classes = evaluator.eval_element(node) + except_classes = node_context.eval_node(node) for cls in except_classes: - from jedi.evaluate import iterable - if isinstance(cls, iterable.Array) and cls.type == 'tuple': + from jedi.evaluate.context import iterable + if isinstance(cls, iterable.AbstractIterable) and \ + cls.array_type == 'tuple': # multiple exceptions - for c in cls.values(): - if check_match(c, exception): - return True + for lazy_context in cls.py__iter__(): + for typ in lazy_context.infer(): + if check_match(typ, exception): + return True else: if check_match(cls, exception): return True def check_hasattr(node, suite): try: - assert suite.start_pos <= jedi_obj.start_pos < suite.end_pos - assert node.type == 'power' + assert suite.start_pos <= jedi_name.start_pos < suite.end_pos + assert node.type in ('power', 'atom_expr') base = node.children[0] assert base.type == 'name' and base.value == 'hasattr' trailer = node.children[1] assert trailer.type == 'trailer' arglist = trailer.children[1] assert arglist.type == 'arglist' - from jedi.evaluate.param import Arguments - args = list(Arguments(evaluator, arglist).unpack()) + from jedi.evaluate.arguments import TreeArguments + args = list(TreeArguments(node_context.evaluator, node_context, arglist).unpack()) # Arguments should be very simple assert len(args) == 2 # Check name - key, values = args[1] - assert len(values) == 1 - names = evaluator.eval_element(values[0]) + key, lazy_context = args[1] + names = list(lazy_context.infer()) assert len(names) == 1 and isinstance(names[0], CompiledObject) - assert names[0].obj == str(payload[1]) + assert names[0].obj == payload[1].value # Check objects - key, values = args[0] - assert len(values) == 1 - objects = evaluator.eval_element(values[0]) + key, lazy_context = args[0] + objects = lazy_context.infer() return payload[0] in objects except AssertionError: return False - obj = jedi_obj - while obj is not None and not obj.isinstance(tree.Function, tree.Class): - if obj.isinstance(tree.Flow): + obj = jedi_name + while obj is not None and not isinstance(obj, (tree.Function, tree.Class)): + if isinstance(obj, tree.Flow): # try/except catch check - if obj.isinstance(tree.TryStmt) and check_try_for_except(obj, exception): + if obj.type == 'try_stmt' and check_try_for_except(obj, exception): return True # hasattr check - if exception == AttributeError and obj.isinstance(tree.IfStmt, tree.WhileStmt): + if exception == AttributeError and obj.type in ('if_stmt', 'while_stmt'): if check_hasattr(obj.children[1], obj.children[3]): return True obj = obj.parent return False - - -def get_module_statements(module): - """ - Returns the statements used in a module. All these statements should be - evaluated to check for potential exceptions. 
- """ - def check_children(node): - try: - children = node.children - except AttributeError: - return [] - else: - nodes = [] - for child in children: - nodes += check_children(child) - if child.type == 'trailer': - c = child.children - if c[0] == '(' and c[1] != ')': - if c[1].type != 'arglist': - if c[1].type == 'argument': - nodes.append(c[1].children[-1]) - else: - nodes.append(c[1]) - else: - for argument in c[1].children: - if argument.type == 'argument': - nodes.append(argument.children[-1]) - elif argument.type != 'operator': - nodes.append(argument) - return nodes - - def add_nodes(nodes): - new = set() - for node in nodes: - if isinstance(node, tree.Flow): - children = node.children - if node.type == 'for_stmt': - children = children[2:] # Don't want to include the names. - # Pick the suite/simple_stmt. - new |= add_nodes(children) - elif node.type in ('simple_stmt', 'suite'): - new |= add_nodes(node.children) - elif node.type in ('return_stmt', 'yield_expr'): - try: - new.add(node.children[1]) - except IndexError: - pass - elif node.type not in ('whitespace', 'operator', 'keyword', - 'parameters', 'decorated', 'except_clause') \ - and not isinstance(node, (tree.ClassOrFunc, tree.Import)): - new.add(node) - - try: - children = node.children - except AttributeError: - pass - else: - for next_node in children: - new.update(check_children(node)) - if next_node.type != 'keyword' and node.type != 'expr_stmt': - new.add(node) - return new - - nodes = set() - import_names = set() - decorated_funcs = [] - for scope in module.walk(): - for imp in set(scope.imports): - import_names |= set(imp.get_defined_names()) - if imp.is_nested(): - import_names |= set(path[-1] for path in imp.paths()) - - children = scope.children - if isinstance(scope, tree.ClassOrFunc): - children = children[2:] # We don't want to include the class name. - nodes |= add_nodes(children) - - for flow in scope.flows: - if flow.type == 'for_stmt': - nodes.add(flow.children[3]) - elif flow.type == 'try_stmt': - nodes.update(e for e in flow.except_clauses() if e is not None) - - try: - decorators = scope.get_decorators() - except AttributeError: - pass - else: - if decorators: - decorated_funcs.append(scope) - return nodes, import_names, decorated_funcs diff --git a/pythonFiles/release/jedi/evaluate/arguments.py b/pythonFiles/release/jedi/evaluate/arguments.py new file mode 100644 index 000000000000..32b9238c6f4d --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/arguments.py @@ -0,0 +1,245 @@ +from parso.python import tree + +from jedi._compatibility import zip_longest +from jedi import debug +from jedi.evaluate import analysis +from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \ + LazyTreeContext, get_merged_lazy_context +from jedi.evaluate.filters import ParamName +from jedi.evaluate.base_context import NO_CONTEXTS +from jedi.evaluate.context import iterable +from jedi.evaluate.param import get_params, ExecutedParam + +def try_iter_content(types, depth=0): + """Helper method for static analysis.""" + if depth > 10: + # It's possible that a loop has references on itself (especially with + # CompiledObject). Therefore don't loop infinitely. 
+ return + + for typ in types: + try: + f = typ.py__iter__ + except AttributeError: + pass + else: + for lazy_context in f(): + try_iter_content(lazy_context.infer(), depth + 1) + + +class AbstractArguments(object): + context = None + + def eval_argument_clinic(self, parameters): + """Uses a list with argument clinic information (see PEP 436).""" + iterator = self.unpack() + for i, (name, optional, allow_kwargs) in enumerate(parameters): + key, argument = next(iterator, (None, None)) + if key is not None: + raise NotImplementedError + if argument is None and not optional: + debug.warning('TypeError: %s expected at least %s arguments, got %s', + name, len(parameters), i) + raise ValueError + values = NO_CONTEXTS if argument is None else argument.infer() + + if not values and not optional: + # For the stdlib we always want values. If we don't get them, + # that's ok, maybe something is too hard to resolve, however, + # we will not proceed with the evaluation of that function. + debug.warning('argument_clinic "%s" not resolvable.', name) + raise ValueError + yield values + + def eval_all(self, funcdef=None): + """ + Evaluates all arguments as a support for static analysis + (normally Jedi). + """ + for key, lazy_context in self.unpack(): + types = lazy_context.infer() + try_iter_content(types) + + def get_calling_nodes(self): + raise NotImplementedError + + def unpack(self, funcdef=None): + raise NotImplementedError + + def get_params(self, execution_context): + return get_params(execution_context, self) + + +class AnonymousArguments(AbstractArguments): + def get_params(self, execution_context): + from jedi.evaluate.dynamic import search_params + return search_params( + execution_context.evaluator, + execution_context, + execution_context.tree_node + ) + + +class TreeArguments(AbstractArguments): + def __init__(self, evaluator, context, argument_node, trailer=None): + """ + The argument_node is either a parser node or a list of evaluated + objects. Those evaluated objects may be lists of evaluated objects + themselves (one list for the first argument, one for the second, etc). + + :param argument_node: May be an argument_node or a list of nodes. + """ + self.argument_node = argument_node + self.context = context + self._evaluator = evaluator + self.trailer = trailer # Can be None, e.g. in a class definition. + + def _split(self): + if isinstance(self.argument_node, (tuple, list)): + for el in self.argument_node: + yield 0, el + else: + if not (self.argument_node.type == 'arglist' or ( + # in python 3.5 **arg is an argument, not arglist + (self.argument_node.type == 'argument') and + self.argument_node.children[0] in ('*', '**'))): + yield 0, self.argument_node + return + + iterator = iter(self.argument_node.children) + for child in iterator: + if child == ',': + continue + elif child in ('*', '**'): + yield len(child.value), next(iterator) + elif child.type == 'argument' and \ + child.children[0] in ('*', '**'): + assert len(child.children) == 2 + yield len(child.children[0].value), child.children[1] + else: + yield 0, child + + def unpack(self, funcdef=None): + named_args = [] + for star_count, el in self._split(): + if star_count == 1: + arrays = self.context.eval_node(el) + iterators = [_iterate_star_args(self.context, a, el, funcdef) + for a in arrays] + iterators = list(iterators) + for values in list(zip_longest(*iterators)): + # TODO zip_longest yields None, that means this would raise + # an exception? 
+ yield None, get_merged_lazy_context( + [v for v in values if v is not None] + ) + elif star_count == 2: + arrays = self._evaluator.eval_element(self.context, el) + for dct in arrays: + for key, values in _star_star_dict(self.context, dct, el, funcdef): + yield key, values + else: + if el.type == 'argument': + c = el.children + if len(c) == 3: # Keyword argument. + named_args.append((c[0].value, LazyTreeContext(self.context, c[2]),)) + else: # Generator comprehension. + # Include the brackets with the parent. + comp = iterable.GeneratorComprehension( + self._evaluator, self.context, self.argument_node.parent) + yield None, LazyKnownContext(comp) + else: + yield None, LazyTreeContext(self.context, el) + + # Reordering var_args is necessary, because star args sometimes appear + # after named argument, but in the actual order it's prepended. + for named_arg in named_args: + yield named_arg + + def as_tree_tuple_objects(self): + for star_count, argument in self._split(): + if argument.type == 'argument': + argument, default = argument.children[::2] + else: + default = None + yield argument, default, star_count + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.argument_node) + + def get_calling_nodes(self): + from jedi.evaluate.dynamic import MergedExecutedParams + old_arguments_list = [] + arguments = self + + while arguments not in old_arguments_list: + if not isinstance(arguments, TreeArguments): + break + + old_arguments_list.append(arguments) + for name, default, star_count in reversed(list(arguments.as_tree_tuple_objects())): + if not star_count or not isinstance(name, tree.Name): + continue + + names = self._evaluator.goto(arguments.context, name) + if len(names) != 1: + break + if not isinstance(names[0], ParamName): + break + param = names[0].get_param() + if isinstance(param, MergedExecutedParams): + # For dynamic searches we don't even want to see errors. + return [] + if not isinstance(param, ExecutedParam): + break + if param.var_args is None: + break + arguments = param.var_args + break + + return [arguments.argument_node or arguments.trailer] + + +class ValuesArguments(AbstractArguments): + def __init__(self, values_list): + self._values_list = values_list + + def unpack(self, funcdef=None): + for values in self._values_list: + yield None, LazyKnownContexts(values) + + def get_calling_nodes(self): + return [] + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._values_list) + + +def _iterate_star_args(context, array, input_node, funcdef=None): + try: + iter_ = array.py__iter__ + except AttributeError: + if funcdef is not None: + # TODO this funcdef should not be needed. + m = "TypeError: %s() argument after * must be a sequence, not %s" \ + % (funcdef.name.value, array) + analysis.add(context, 'type-error-star', input_node, message=m) + else: + for lazy_context in iter_(): + yield lazy_context + + +def _star_star_dict(context, array, input_node, funcdef): + from jedi.evaluate.context.instance import CompiledInstance + if isinstance(array, CompiledInstance) and array.name.string_name == 'dict': + # For now ignore this case. In the future add proper iterators and just + # make one call without crazy isinstance checks. 
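These are the call shapes that `_split()`/`unpack()` above decompose: star_count 0 for plain arguments, 1 for `*seq`, 2 for `**mapping` (illustrative):

    def f(x, y=0, **kw):
        return x, y, kw

    args = [1]
    kwargs = {'y': 2, 'z': 3}
    f(*args, **kwargs)   # -> (1, 2, {'z': 3})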
+ return {} + elif isinstance(array, iterable.AbstractIterable) and array.array_type == 'dict': + return array.exact_key_items() + else: + if funcdef is not None: + m = "TypeError: %s argument after ** must be a mapping, not %s" \ + % (funcdef.name.value, array) + analysis.add(context, 'type-error-star-star', input_node, message=m) + return {} diff --git a/pythonFiles/release/jedi/evaluate/base_context.py b/pythonFiles/release/jedi/evaluate/base_context.py new file mode 100644 index 000000000000..693a99aae7aa --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/base_context.py @@ -0,0 +1,260 @@ +from parso.python.tree import ExprStmt, CompFor + +from jedi import debug +from jedi._compatibility import Python3Method, zip_longest, unicode +from jedi.parser_utils import clean_scope_docstring, get_doc_with_call_signature +from jedi.common import BaseContextSet, BaseContext + + +class Context(BaseContext): + """ + Should be defined, otherwise the API returns empty types. + """ + + predefined_names = {} + tree_node = None + """ + To be defined by subclasses. + """ + + @property + def api_type(self): + # By default just lower name of the class. Can and should be + # overwritten. + return self.__class__.__name__.lower() + + @debug.increase_indent + def execute(self, arguments): + """ + In contrast to py__call__ this function is always available. + + `hasattr(x, py__call__)` can also be checked to see if a context is + executable. + """ + if self.evaluator.is_analysis: + arguments.eval_all() + + debug.dbg('execute: %s %s', self, arguments) + from jedi.evaluate import stdlib + try: + # Some stdlib functions like super(), namedtuple(), etc. have been + # hard-coded in Jedi to support them. + return stdlib.execute(self.evaluator, self, arguments) + except stdlib.NotInStdLib: + pass + + try: + func = self.py__call__ + except AttributeError: + debug.warning("no execution possible %s", self) + return NO_CONTEXTS + else: + context_set = func(arguments) + debug.dbg('execute result: %s in %s', context_set, self) + return context_set + + return self.evaluator.execute(self, arguments) + + def execute_evaluated(self, *value_list): + """ + Execute a function with already executed arguments. + """ + from jedi.evaluate.arguments import ValuesArguments + arguments = ValuesArguments([ContextSet(value) for value in value_list]) + return self.execute(arguments) + + def iterate(self, contextualized_node=None): + debug.dbg('iterate') + try: + iter_method = self.py__iter__ + except AttributeError: + if contextualized_node is not None: + from jedi.evaluate import analysis + analysis.add( + contextualized_node.context, + 'type-error-not-iterable', + contextualized_node.node, + message="TypeError: '%s' object is not iterable" % self) + return iter([]) + else: + return iter_method() + + def get_item(self, index_contexts, contextualized_node): + from jedi.evaluate.compiled import CompiledObject + from jedi.evaluate.context.iterable import Slice, AbstractIterable + result = ContextSet() + + for index in index_contexts: + if isinstance(index, (CompiledObject, Slice)): + index = index.obj + + if type(index) not in (float, int, str, unicode, slice, type(Ellipsis)): + # If the index is not clearly defined, we have to get all the + # possiblities. + if isinstance(self, AbstractIterable) and self.array_type == 'dict': + result |= self.dict_values() + else: + result |= iterate_contexts(ContextSet(self)) + continue + + # The actual getitem call. 
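`execute()` above is capability-style dispatch: anything exposing `py__call__` is executable, and stdlib specials get first refusal. Reduced to a sketch (only `py__call__` is jedi's name; the rest is illustrative):

    NO_RESULTS = frozenset()

    def execute(context, arguments):
        func = getattr(context, 'py__call__', None)
        if func is None:
            return NO_RESULTS   # "no execution possible"
        return func(arguments)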
+ try: + getitem = self.py__getitem__ + except AttributeError: + from jedi.evaluate import analysis + # TODO this context is probably not right. + analysis.add( + contextualized_node.context, + 'type-error-not-subscriptable', + contextualized_node.node, + message="TypeError: '%s' object is not subscriptable" % self + ) + else: + try: + result |= getitem(index) + except IndexError: + result |= iterate_contexts(ContextSet(self)) + except KeyError: + # Must be a dict. Lists don't raise KeyErrors. + result |= self.dict_values() + return result + + def eval_node(self, node): + return self.evaluator.eval_element(self, node) + + @Python3Method + def py__getattribute__(self, name_or_str, name_context=None, position=None, + search_global=False, is_goto=False, + analysis_errors=True): + """ + :param position: Position of the last statement -> tuple of line, column + """ + if name_context is None: + name_context = self + from jedi.evaluate import finder + f = finder.NameFinder(self.evaluator, self, name_context, name_or_str, + position, analysis_errors=analysis_errors) + filters = f.get_filters(search_global) + if is_goto: + return f.filter_name(filters) + return f.find(filters, attribute_lookup=not search_global) + + return self.evaluator.find_types( + self, name_or_str, name_context, position, search_global, is_goto, + analysis_errors) + + def create_context(self, node, node_is_context=False, node_is_object=False): + return self.evaluator.create_context(self, node, node_is_context, node_is_object) + + def is_class(self): + return False + + def py__bool__(self): + """ + Since Wrapper is a super class for classes, functions and modules, + the return value will always be true. + """ + return True + + def py__doc__(self, include_call_signature=False): + try: + self.tree_node.get_doc_node + except AttributeError: + return '' + else: + if include_call_signature: + return get_doc_with_call_signature(self.tree_node) + else: + return clean_scope_docstring(self.tree_node) + return None + + +def iterate_contexts(contexts, contextualized_node=None): + """ + Calls `iterate`, on all contexts but ignores the ordering and just returns + all contexts that the iterate functions yield. + """ + return ContextSet.from_sets( + lazy_context.infer() + for lazy_context in contexts.iterate(contextualized_node) + ) + + +class TreeContext(Context): + def __init__(self, evaluator, parent_context=None): + super(TreeContext, self).__init__(evaluator, parent_context) + self.predefined_names = {} + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.tree_node) + + +class ContextualizedNode(object): + def __init__(self, context, node): + self.context = context + self.node = node + + def get_root_context(self): + return self.context.get_root_context() + + def infer(self): + return self.context.eval_node(self.node) + + +class ContextualizedName(ContextualizedNode): + # TODO merge with TreeNameDefinition?! + @property + def name(self): + return self.node + + def assignment_indexes(self): + """ + Returns an array of tuple(int, node) of the indexes that are used in + tuple assignments. + + For example if the name is ``y`` in the following code:: + + x, (y, z) = 2, '' + + would result in ``[(1, xyz_node), (0, yz_node)]``. 
+ """ + indexes = [] + node = self.node.parent + compare = self.node + while node is not None: + if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'): + for i, child in enumerate(node.children): + if child == compare: + indexes.insert(0, (int(i / 2), node)) + break + else: + raise LookupError("Couldn't find the assignment.") + elif isinstance(node, (ExprStmt, CompFor)): + break + + compare = node + node = node.parent + return indexes + + +class ContextSet(BaseContextSet): + def py__class__(self): + return ContextSet.from_iterable(c.py__class__() for c in self._set) + + def iterate(self, contextualized_node=None): + from jedi.evaluate.lazy_context import get_merged_lazy_context + type_iters = [c.iterate(contextualized_node) for c in self._set] + for lazy_contexts in zip_longest(*type_iters): + yield get_merged_lazy_context( + [l for l in lazy_contexts if l is not None] + ) + + +NO_CONTEXTS = ContextSet() + + +def iterator_to_context_set(func): + def wrapper(*args, **kwargs): + return ContextSet.from_iterable(func(*args, **kwargs)) + + return wrapper diff --git a/pythonFiles/release/jedi/evaluate/cache.py b/pythonFiles/release/jedi/evaluate/cache.py index b44f812accd2..b7c7cd7e979f 100755 --- a/pythonFiles/release/jedi/evaluate/cache.py +++ b/pythonFiles/release/jedi/evaluate/cache.py @@ -1,15 +1,13 @@ """ -- the popular ``memoize_default`` works like a typical memoize and returns the +- the popular ``_memoize_default`` works like a typical memoize and returns the default otherwise. -- ``CachedMetaClass`` uses ``memoize_default`` to do the same with classes. +- ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes. """ -import inspect +_NO_DEFAULT = object() -NO_DEFAULT = object() - -def memoize_default(default=NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False): +def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False): """ This is a typical memoization decorator, BUT there is one difference: To prevent recursion it sets defaults. @@ -19,12 +17,13 @@ def memoize_default(default=NO_DEFAULT, evaluator_is_first_arg=False, second_arg """ def func(function): def wrapper(obj, *args, **kwargs): + # TODO These checks are kind of ugly and slow. if evaluator_is_first_arg: cache = obj.memoize_cache - elif second_arg_is_evaluator: # needed for meta classes - cache = args[0].memoize_cache + elif second_arg_is_evaluator: + cache = args[0].memoize_cache # needed for meta classes else: - cache = obj._evaluator.memoize_cache + cache = obj.evaluator.memoize_cache try: memo = cache[function] @@ -36,23 +35,43 @@ def wrapper(obj, *args, **kwargs): if key in memo: return memo[key] else: - if default is not NO_DEFAULT: + if default is not _NO_DEFAULT: memo[key] = default rv = function(obj, *args, **kwargs) - if inspect.isgenerator(rv): - rv = list(rv) memo[key] = rv return rv return wrapper + return func +def evaluator_function_cache(default=_NO_DEFAULT): + def decorator(func): + return _memoize_default(default=default, evaluator_is_first_arg=True)(func) + + return decorator + + +def evaluator_method_cache(default=_NO_DEFAULT): + def decorator(func): + return _memoize_default(default=default)(func) + + return decorator + + +def _memoize_meta_class(): + def decorator(call): + return _memoize_default(second_arg_is_evaluator=True)(call) + + return decorator + + class CachedMetaClass(type): """ This is basically almost the same than the decorator above, it just caches class initializations. 
Either you do it this way or with decorators, but with decorators you lose class access (isinstance, etc). """ - @memoize_default(None, second_arg_is_evaluator=True) + @_memoize_meta_class() def __call__(self, *args, **kwargs): return super(CachedMetaClass, self).__call__(*args, **kwargs) diff --git a/pythonFiles/release/jedi/evaluate/compiled/__init__.py b/pythonFiles/release/jedi/evaluate/compiled/__init__.py index 67d3f37119d3..f9f2e0781e13 100755 --- a/pythonFiles/release/jedi/evaluate/compiled/__init__.py +++ b/pythonFiles/release/jedi/evaluate/compiled/__init__.py @@ -5,14 +5,17 @@ import re import sys import os +import types from functools import partial -from jedi._compatibility import builtins as _builtins, unicode +from jedi._compatibility import builtins as _builtins, unicode, py_version from jedi import debug from jedi.cache import underscore_memoization, memoize_method -from jedi.evaluate.sys_path import get_sys_path -from jedi.parser.tree import Param, Base, Operator, zero_position_modifier -from jedi.evaluate.helpers import FakeName +from jedi.evaluate.filters import AbstractFilter, AbstractNameDefinition, \ + ContextNameMixin +from jedi.evaluate.base_context import Context, ContextSet +from jedi.evaluate.lazy_context import LazyKnownContext +from jedi.evaluate.compiled.getattr_static import getattr_static from . import fake @@ -22,6 +25,23 @@ _path_re = re.compile('(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep))) del _sep +# Those types don't exist in typing. +MethodDescriptorType = type(str.replace) +WrapperDescriptorType = type(set.__iter__) +# `object.__subclasshook__` is an already executed descriptor. +object_class_dict = type.__dict__["__dict__"].__get__(object) +ClassMethodDescriptorType = type(object_class_dict['__subclasshook__']) + +ALLOWED_DESCRIPTOR_ACCESS = ( + types.FunctionType, + types.GetSetDescriptorType, + types.MemberDescriptorType, + MethodDescriptorType, + WrapperDescriptorType, + ClassMethodDescriptorType, + staticmethod, + classmethod, +) class CheckAttribute(object): """Raises an AttributeError if the attribute X isn't available.""" @@ -32,212 +52,225 @@ def __init__(self, func): def __get__(self, instance, owner): # This might raise an AttributeError. That's wanted. - getattr(instance.obj, self.check_name) + if self.check_name == '__iter__': + # Python iterators are a bit strange, because there's no need for + # the __iter__ function as long as __getitem__ is defined (it will + # just start with __getitem__(0). This is especially true for + # Python 2 strings, where `str.__iter__` is not even defined. + try: + iter(instance.obj) + except TypeError: + raise AttributeError + else: + getattr(instance.obj, self.check_name) return partial(self.func, instance) -class CompiledObject(Base): - # comply with the parser - start_pos = 0, 0 +class CompiledObject(Context): path = None # modules have this attribute - set it to None. - used_names = {} # To be consistent with modules. + used_names = lambda self: {} # To be consistent with modules. - def __init__(self, obj, parent=None): + def __init__(self, evaluator, obj, parent_context=None, faked_class=None): + super(CompiledObject, self).__init__(evaluator, parent_context) self.obj = obj - self.parent = parent + # This attribute will not be set for most classes, except for fakes. 
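A re-creation of the `CheckAttribute` idea, for orientation: accessing the wrapped method probes the underlying object first, so `hasattr()`-style capability checks on the wrapper mirror the real object. The prefix stripping assumes the `py__call__` -> `__call__` naming convention visible above; class names here are illustrative:

    from functools import partial

    class CheckAttr(object):
        def __init__(self, func):
            self.func = func
            self.check_name = func.__name__[2:]   # 'py__iter__' -> '__iter__'

        def __get__(self, instance, owner):
            # May raise AttributeError, which is exactly the point.
            getattr(instance.obj, self.check_name)
            return partial(self.func, instance)

    class Wrapper(object):
        def __init__(self, obj):
            self.obj = obj

        @CheckAttr
        def py__iter__(self):
            return iter(self.obj)

    hasattr(Wrapper([1, 2]), 'py__iter__')   # True: lists are iterable
    hasattr(Wrapper(42), 'py__iter__')       # False: ints are not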
+ self.tree_node = faked_class - @property - def py__call__(self): - def actual(evaluator, params): - if inspect.isclass(self.obj): - from jedi.evaluate.representation import Instance - return [Instance(evaluator, self, params)] - else: - return list(self._execute_function(evaluator, params)) + def get_root_node(self): + # To make things a bit easier with filters we add this method here. + return self.get_root_context() - # Might raise an AttributeError, which is intentional. - self.obj.__call__ - return actual + @CheckAttribute + def py__call__(self, params): + if inspect.isclass(self.obj): + from jedi.evaluate.context import CompiledInstance + return ContextSet(CompiledInstance(self.evaluator, self.parent_context, self, params)) + else: + return ContextSet.from_iterable(self._execute_function(params)) @CheckAttribute - def py__class__(self, evaluator): - return CompiledObject(self.obj.__class__, parent=self.parent) + def py__class__(self): + return create(self.evaluator, self.obj.__class__) @CheckAttribute - def py__mro__(self, evaluator): - return tuple(create(evaluator, cls, self.parent) for cls in self.obj.__mro__) + def py__mro__(self): + return (self,) + tuple(create(self.evaluator, cls) for cls in self.obj.__mro__[1:]) @CheckAttribute - def py__bases__(self, evaluator): - return tuple(create(evaluator, cls) for cls in self.obj.__bases__) + def py__bases__(self): + return tuple(create(self.evaluator, cls) for cls in self.obj.__bases__) def py__bool__(self): return bool(self.obj) def py__file__(self): - return self.obj.__file__ + try: + return self.obj.__file__ + except AttributeError: + return None def is_class(self): return inspect.isclass(self.obj) - @property - def doc(self): + def py__doc__(self, include_call_signature=False): return inspect.getdoc(self.obj) or '' - @property - def params(self): - params_str, ret = self._parse_function_doc() - tokens = params_str.split(',') - if inspect.ismethoddescriptor(self._cls().obj): - tokens.insert(0, 'self') - params = [] - for p in tokens: - parts = [FakeName(part) for part in p.strip().split('=')] - if len(parts) > 1: - parts.insert(1, Operator(zero_position_modifier, '=', (0, 0))) - params.append(Param(parts, self)) - return params + def get_param_names(self): + obj = self.obj + try: + if py_version < 33: + raise ValueError("inspect.signature was introduced in 3.3") + if py_version == 34: + # In 3.4 inspect.signature are wrong for str and int. This has + # been fixed in 3.5. The signature of object is returned, + # because no signature was found for str. Here we imitate 3.5 + # logic and just ignore the signature if the magic methods + # don't match object. + # 3.3 doesn't even have the logic and returns nothing for str + # and classes that inherit from object. 
+ user_def = inspect._signature_get_user_defined_method + if (inspect.isclass(obj) + and not user_def(type(obj), '__init__') + and not user_def(type(obj), '__new__') + and (obj.__init__ != object.__init__ + or obj.__new__ != object.__new__)): + raise ValueError + + signature = inspect.signature(obj) + except ValueError: # Has no signature + params_str, ret = self._parse_function_doc() + tokens = params_str.split(',') + if inspect.ismethoddescriptor(obj): + tokens.insert(0, 'self') + for p in tokens: + parts = p.strip().split('=') + yield UnresolvableParamName(self, parts[0]) + else: + for signature_param in signature.parameters.values(): + yield SignatureParamName(self, signature_param) def __repr__(self): - return '<%s: %s>' % (type(self).__name__, repr(self.obj)) + return '<%s: %s>' % (self.__class__.__name__, repr(self.obj)) @underscore_memoization def _parse_function_doc(self): - if self.doc is None: + doc = self.py__doc__() + if doc is None: return '', '' - return _parse_function_doc(self.doc) + return _parse_function_doc(doc) + @property def api_type(self): - if fake.is_class_instance(self.obj): - return 'instance' - - cls = self._cls().obj - if inspect.isclass(cls): + obj = self.obj + if inspect.isclass(obj): return 'class' - elif inspect.ismodule(cls): + elif inspect.ismodule(obj): return 'module' - elif inspect.isbuiltin(cls) or inspect.ismethod(cls) \ - or inspect.ismethoddescriptor(cls): + elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \ + or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj): return 'function' + # Everything else... + return 'instance' @property def type(self): """Imitate the tree.Node.type values.""" - cls = self._cls().obj + cls = self._get_class() if inspect.isclass(cls): return 'classdef' elif inspect.ismodule(cls): return 'file_input' - elif inspect.isbuiltin(cls) or inspect.ismethod(cls) \ - or inspect.ismethoddescriptor(cls): + elif inspect.isbuiltin(cls) or inspect.ismethod(cls) or \ + inspect.ismethoddescriptor(cls): return 'funcdef' @underscore_memoization def _cls(self): + """ + We used to limit the lookups for instantiated objects like list(), but + this is not the case anymore. Python itself + """ # Ensures that a CompiledObject is returned that is not an instance (like list) - if fake.is_class_instance(self.obj): - try: - c = self.obj.__class__ - except AttributeError: - # happens with numpy.core.umath._UFUNC_API (you get it - # automatically by doing `import numpy`. - c = type(None) - return CompiledObject(c, self.parent) return self - @property - def names_dict(self): - # For compatibility with `representation.Class`. - return self.names_dicts(False)[0] + def _get_class(self): + if not fake.is_class_instance(self.obj) or \ + inspect.ismethoddescriptor(self.obj): # slots + return self.obj + + try: + return self.obj.__class__ + except AttributeError: + # happens with numpy.core.umath._UFUNC_API (you get it + # automatically by doing `import numpy`. + return type - def names_dicts(self, search_global, is_instance=False): - return self._names_dict_ensure_one_dict(is_instance) + def get_filters(self, search_global=False, is_instance=False, + until_position=None, origin_scope=None): + yield self._ensure_one_filter(is_instance) @memoize_method - def _names_dict_ensure_one_dict(self, is_instance): + def _ensure_one_filter(self, is_instance): """ search_global shouldn't change the fact that there's one dict, this way there's only one `object`. 
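The signature-or-docstring fallback of `get_param_names` above, in isolation: many C builtins raise ValueError from `inspect.signature`, in which case the parameter list is scraped from the doc text (a hedged sketch, not jedi's exact doc parser):

    import inspect

    def param_names(obj):
        try:
            sig = inspect.signature(obj)
        except (ValueError, TypeError):
            # No retrievable signature: fall back to doc parsing.
            doc = inspect.getdoc(obj) or ''
            head = doc[doc.find('(') + 1:doc.find(')')]
            return [p.strip().split('=')[0] for p in head.split(',') if p.strip()]
        return list(sig.parameters)

    param_names(str.replace)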
""" - return [LazyNamesDict(self._cls(), is_instance)] + return CompiledObjectFilter(self.evaluator, self, is_instance) - def get_subscope_by_name(self, name): - if name in dir(self._cls().obj): - return CompiledName(self._cls(), name).parent - else: - raise KeyError("CompiledObject doesn't have an attribute '%s'." % name) + @CheckAttribute + def py__getitem__(self, index): + if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): + # Get rid of side effects, we won't call custom `__getitem__`s. + return ContextSet() - def get_index_types(self, evaluator, index_array=()): - # If the object doesn't have `__getitem__`, just raise the - # AttributeError. - if not hasattr(self.obj, '__getitem__'): - debug.warning('Tried to call __getitem__ on non-iterable.') - return [] + return ContextSet(create(self.evaluator, self.obj[index])) + + @CheckAttribute + def py__iter__(self): if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): # Get rid of side effects, we won't call custom `__getitem__`s. - return [] + return - result = [] - from jedi.evaluate.iterable import create_indexes_or_slices - for typ in create_indexes_or_slices(evaluator, index_array): - index = None - try: - index = typ.obj - new = self.obj[index] - except (KeyError, IndexError, TypeError, AttributeError): - # Just try, we don't care if it fails, except for slices. - if isinstance(index, slice): - result.append(self) - else: - result.append(CompiledObject(new)) - if not result: - try: - for obj in self.obj: - result.append(CompiledObject(obj)) - except TypeError: - pass # self.obj maynot have an __iter__ method. - return result + for i, part in enumerate(self.obj): + if i > 20: + # Should not go crazy with large iterators + break + yield LazyKnownContext(create(self.evaluator, part)) + + def py__name__(self): + try: + return self._get_class().__name__ + except AttributeError: + return None @property def name(self): - # might not exist sometimes (raises AttributeError) - return FakeName(self._cls().obj.__name__, self) + try: + name = self._get_class().__name__ + except AttributeError: + name = repr(self.obj) + return CompiledContextName(self, name) - def _execute_function(self, evaluator, params): + def _execute_function(self, params): + from jedi.evaluate import docstrings if self.type != 'funcdef': return - for name in self._parse_function_doc()[1].split(): try: - bltn_obj = _create_from_name(builtin, builtin, name) + bltn_obj = getattr(_builtins, name) except AttributeError: continue else: - if isinstance(bltn_obj, CompiledObject) and bltn_obj.obj is None: - # We want everything except None. + if bltn_obj is None: + # We want to evaluate everything except None. + # TODO do we? continue - for result in evaluator.execute(bltn_obj, params): + bltn_obj = create(self.evaluator, bltn_obj) + for result in bltn_obj.execute(params): yield result - - @property - @underscore_memoization - def subscopes(self): - """ - Returns only the faked scopes - the other ones are not important for - internal analysis. 
- """ - module = self.get_parent_until() - faked_subscopes = [] - for name in dir(self._cls().obj): - f = fake.get_faked(module.obj, self.obj, name) - if f: - f.parent = self - faked_subscopes.append(f) - return faked_subscopes - - def is_scope(self): - return True + for type_ in docstrings.infer_return_types(self): + yield type_ def get_self_attributes(self): return [] # Instance compatibility @@ -245,79 +278,142 @@ def get_self_attributes(self): def get_imports(self): return [] # Builtins don't have imports + def dict_values(self): + return ContextSet.from_iterable( + create(self.evaluator, v) for v in self.obj.values() + ) -class LazyNamesDict(object): - """ - A names_dict instance for compiled objects, resembles the parser.tree. - """ - def __init__(self, compiled_obj, is_instance): - self._compiled_obj = compiled_obj - self._is_instance = is_instance - def __iter__(self): - return (v[0].value for v in self.values()) +class CompiledName(AbstractNameDefinition): + def __init__(self, evaluator, parent_context, name): + self._evaluator = evaluator + self.parent_context = parent_context + self.string_name = name - @memoize_method - def __getitem__(self, name): + def __repr__(self): try: - getattr(self._compiled_obj.obj, name) + name = self.parent_context.name # __name__ is not defined all the time except AttributeError: - raise KeyError('%s in %s not found.' % (name, self._compiled_obj)) - return [CompiledName(self._compiled_obj, name)] + name = None + return '<%s: (%s).%s>' % (self.__class__.__name__, name, self.string_name) - def values(self): - obj = self._compiled_obj.obj + @property + def api_type(self): + return next(iter(self.infer())).api_type - values = [] - for name in dir(obj): - try: - values.append(self[name]) - except KeyError: - # The dir function can be wrong. - pass + @underscore_memoization + def infer(self): + module = self.parent_context.get_root_context() + return ContextSet(_create_from_name( + self._evaluator, module, self.parent_context, self.string_name + )) - # dir doesn't include the type names. - if not inspect.ismodule(obj) and obj != type and not self._is_instance: - values += _type_names_dict.values() - return values +class SignatureParamName(AbstractNameDefinition): + api_type = 'param' -class CompiledName(FakeName): - def __init__(self, obj, name): - super(CompiledName, self).__init__(name) - self._obj = obj - self.name = name + def __init__(self, compiled_obj, signature_param): + self.parent_context = compiled_obj.parent_context + self._signature_param = signature_param - def __repr__(self): + @property + def string_name(self): + return self._signature_param.name + + def infer(self): + p = self._signature_param + evaluator = self.parent_context.evaluator + contexts = ContextSet() + if p.default is not p.empty: + contexts = ContextSet(create(evaluator, p.default)) + if p.annotation is not p.empty: + annotation = create(evaluator, p.annotation) + contexts |= annotation.execute_evaluated() + return contexts + + +class UnresolvableParamName(AbstractNameDefinition): + api_type = 'param' + + def __init__(self, compiled_obj, name): + self.parent_context = compiled_obj.parent_context + self.string_name = name + + def infer(self): + return ContextSet() + + +class CompiledContextName(ContextNameMixin, AbstractNameDefinition): + def __init__(self, context, name): + self.string_name = name + self._context = context + self.parent_context = context.parent_context + + +class EmptyCompiledName(AbstractNameDefinition): + """ + Accessing some names will raise an exception. 
To avoid losing
+    completions entirely in such cases, just give Jedi the option to return
+    this object. It infers to nothing.
+    """
+    def __init__(self, evaluator, name):
+        self.parent_context = evaluator.BUILTINS
+        self.string_name = name
+
+    def infer(self):
+        return ContextSet()
+
+
+class CompiledObjectFilter(AbstractFilter):
+    name_class = CompiledName
+
+    def __init__(self, evaluator, compiled_object, is_instance=False):
+        self._evaluator = evaluator
+        self._compiled_object = compiled_object
+        self._is_instance = is_instance
+
+    @memoize_method
+    def get(self, name):
+        name = str(name)
+        obj = self._compiled_object.obj
         try:
-            name = self._obj.name  # __name__ is not defined all the time
+            attr, is_get_descriptor = getattr_static(obj, name)
         except AttributeError:
-            name = None
-        return '<%s: (%s).%s>' % (type(self).__name__, name, self.name)
+            return []
+        else:
+            if is_get_descriptor \
+                    and not type(attr) in ALLOWED_DESCRIPTOR_ACCESS:
+                # In case of descriptors that have get methods we cannot return
+                # its value, because that would mean code execution.
+                return [EmptyCompiledName(self._evaluator, name)]
+        if self._is_instance and name not in dir(obj):
+            return []
+        return [self._create_name(name)]
 
-    def is_definition(self):
-        return True
+    def values(self):
+        obj = self._compiled_object.obj
 
-    @property
-    @underscore_memoization
-    def parent(self):
-        module = self._obj.get_parent_until()
-        return _create_from_name(module, self._obj, self.name)
+        names = []
+        for name in dir(obj):
+            names += self.get(name)
 
-    @parent.setter
-    def parent(self, value):
-        pass  # Just ignore this, FakeName tries to overwrite the parent attribute.
+        is_instance = self._is_instance or fake.is_class_instance(obj)
+        # ``dir`` doesn't include the type names.
+        if not inspect.ismodule(obj) and (obj is not type) and not is_instance:
+            for filter in create(self._evaluator, type).get_filters():
+                names += filter.values()
+        return names
 
+    def _create_name(self, name):
+        return self.name_class(self._evaluator, self._compiled_object, name)
 
-def dotted_from_fs_path(fs_path, sys_path=None):
+
+def dotted_from_fs_path(fs_path, sys_path):
     """
     Changes `/usr/lib/python3.4/email/utils.py` to `email.utils`.  I.e.
     compares the path with sys.path and then returns the dotted_path. If the
     path is not in the sys.path, just returns None.
     """
-    if sys_path is None:
-        sys_path = get_sys_path()
-
     if os.path.basename(fs_path).startswith('__init__.'):
         # We are calculating the path. __init__ files are not interesting.
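+        # For illustration: /usr/lib/python3.4/email/__init__.py is reduced
+        # to /usr/lib/python3.4/email here, so the package resolves to
+        # `email` rather than `email.__init__`.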
         fs_path = os.path.dirname(fs_path)
@@ -338,20 +434,22 @@ def dotted_from_fs_path(fs_path, sys_path=None):
     for s in sys_path:
         if (fs_path.startswith(s) and len(path) < len(s)):
             path = s
-    return _path_re.sub('', fs_path[len(path):].lstrip(os.path.sep)).replace(os.path.sep, '.')
+    # - Windows
+    #   X:\path\to\lib-dynload/datetime.pyd => datetime
+    module_path = fs_path[len(path):].lstrip(os.path.sep).lstrip('/')
+    # - Windows
+    #   Replace like X:\path\to\something/foo/bar.py
+    return _path_re.sub('', module_path).replace(os.path.sep, '.').replace('/', '.')
 
 
-def load_module(path=None, name=None):
+def load_module(evaluator, path=None, name=None):
+    sys_path = list(evaluator.project.sys_path)
     if path is not None:
-        dotted_path = dotted_from_fs_path(path)
+        dotted_path = dotted_from_fs_path(path, sys_path=sys_path)
     else:
         dotted_path = name
 
-    sys_path = get_sys_path()
-
-    if dotted_path is None:
-        p, _, dotted_path = path.partition(os.path.sep)
-        sys_path.insert(0, p)
-
     temp, sys.path = sys.path, sys_path
     try:
         __import__(dotted_path)
@@ -364,7 +462,7 @@ def load_module(path=None, name=None):
         raise
     except ImportError:
         # If a module is "corrupt" or not really a Python module or whatever.
-        debug.warning('Module %s not importable.', path)
+        debug.warning('Module %s not importable in path %s.', dotted_path, path)
         return None
     finally:
         sys.path = temp
@@ -373,7 +471,7 @@ def load_module(path=None, name=None):
     # complicated import structure of Python.
     module = sys.modules[dotted_path]
 
-    return CompiledObject(module)
+    return create(evaluator, module)
 
 
 docstr_defaults = {
@@ -445,10 +543,30 @@ def change_options(m):
     return param_str, ret
 
 
-class Builtin(CompiledObject):
-    @memoize_method
-    def get_by_name(self, name):
-        return self.names_dict[name][0].parent
+def _create_from_name(evaluator, module, compiled_object, name):
+    obj = compiled_object.obj
+    faked = None
+    try:
+        faked = fake.get_faked(evaluator, module, obj, parent_context=compiled_object, name=name)
+        if faked.type == 'funcdef':
+            from jedi.evaluate.context.function import FunctionContext
+            return FunctionContext(evaluator, compiled_object, faked)
+    except fake.FakeDoesNotExist:
+        pass
+
+    try:
+        obj = getattr(obj, name)
+    except AttributeError:
+        # Happens e.g. in properties of
+        # PyQt4.QtGui.QStyleOptionComboBox.currentText
+        # -> just set it to None
+        obj = None
+    return create(evaluator, obj, parent_context=compiled_object, faked=faked)
+
+
+def builtin_from_name(evaluator, string):
+    bltn_obj = getattr(_builtins, string)
+    return create(evaluator, bltn_obj)
 
 
 def _a_generator(foo):
@@ -457,75 +575,64 @@ def _a_generator(foo):
     yield foo
 
 
-def _create_from_name(module, parent, name):
-    faked = fake.get_faked(module.obj, parent.obj, name)
-    # only functions are necessary.
-    if faked is not None:
-        faked.parent = parent
-        return faked
+_SPECIAL_OBJECTS = {
+    'FUNCTION_CLASS': type(load_module),
+    'METHOD_CLASS': type(CompiledObject.is_class),
+    'MODULE_CLASS': type(os),
+    'GENERATOR_OBJECT': _a_generator(1.0),
+    'BUILTINS': _builtins,
+}
 
-    try:
-        obj = getattr(parent.obj, name)
-    except AttributeError:
-        # happens e.g. 
in properties of
-        # PyQt4.QtGui.QStyleOptionComboBox.currentText
-        # -> just set it to None
-        obj = None
-    return CompiledObject(obj, parent)
-
-
-builtin = Builtin(_builtins)
-magic_function_class = CompiledObject(type(load_module), parent=builtin)
-generator_obj = CompiledObject(_a_generator(1.0))
-_type_names_dict = builtin.get_by_name('type').names_dict
-none_obj = builtin.get_by_name('None')
-false_obj = builtin.get_by_name('False')
-true_obj = builtin.get_by_name('True')
-object_obj = builtin.get_by_name('object')
-
-
-def keyword_from_value(obj):
-    if obj is None:
-        return none_obj
-    elif obj is False:
-        return false_obj
-    elif obj is True:
-        return true_obj
-    else:
-        raise NotImplementedError
+def get_special_object(evaluator, identifier):
+    obj = _SPECIAL_OBJECTS[identifier]
+    return create(evaluator, obj, parent_context=create(evaluator, _builtins))
 
 
-def compiled_objects_cache(func):
-    def wrapper(evaluator, obj, parent=builtin, module=None):
-        # Do a very cheap form of caching here.
-        key = id(obj), id(parent), id(module)
-        try:
-            return evaluator.compiled_cache[key][0]
-        except KeyError:
-            result = func(evaluator, obj, parent, module)
-            # Need to cache all of them, otherwise the id could be overwritten.
-            evaluator.compiled_cache[key] = result, obj, parent, module
-            return result
-    return wrapper
+def compiled_objects_cache(attribute_name):
+    def decorator(func):
+        """
+        This decorator caches just the ids, as opposed to caching the object
+        itself. Caching the id has the advantage that an object doesn't need
+        to be hashable.
+        """
+        def wrapper(evaluator, obj, parent_context=None, module=None, faked=None):
+            cache = getattr(evaluator, attribute_name)
+            # Do a very cheap form of caching here.
+            key = id(obj), id(parent_context)
+            try:
+                return cache[key][0]
+            except KeyError:
+                # TODO this whole decorator is way too ugly
+                result = func(evaluator, obj, parent_context, module, faked)
+                # Need to cache all of them, otherwise the id could be overwritten.
+                cache[key] = result, obj, parent_context, module, faked
+                return result
+        return wrapper
 
+    return decorator
 
-@compiled_objects_cache
-def create(evaluator, obj, parent=builtin, module=None):
+
+@compiled_objects_cache('compiled_cache')
+def create(evaluator, obj, parent_context=None, module=None, faked=None):
     """
     A very weird interface class to this module. The more options provided,
     the more accurate the loading of compiled objects is.
     """
+    if inspect.ismodule(obj):
+        if parent_context is not None:
+            # Modules don't have parents, be careful with caching: recurse.
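+            # For illustration: a hypothetical call like
+            # create(evaluator, some_module, parent_context=ctx) recurses
+            # once into create(evaluator, some_module), so every module ends
+            # up cached under a single key, independent of the parent_context
+            # it was first requested with.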
+ return create(evaluator, obj) + else: + if parent_context is None and obj is not _builtins: + return create(evaluator, obj, create(evaluator, _builtins)) - if not inspect.ismodule(obj): - faked = fake.get_faked(module and module.obj, obj) - if faked is not None: - faked.parent = parent - return faked - - try: - if parent == builtin and obj.__module__ in ('builtins', '__builtin__'): - return builtin.get_by_name(obj.__name__) - except AttributeError: - pass - - return CompiledObject(obj, parent) + try: + faked = fake.get_faked(evaluator, module, obj, parent_context=parent_context) + if faked.type == 'funcdef': + from jedi.evaluate.context.function import FunctionContext + return FunctionContext(evaluator, parent_context, faked) + except fake.FakeDoesNotExist: + pass + + return CompiledObject(evaluator, obj, parent_context, faked) diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake.py b/pythonFiles/release/jedi/evaluate/compiled/fake.py index 0037cfc77a80..60dbefe4acac 100755 --- a/pythonFiles/release/jedi/evaluate/compiled/fake.py +++ b/pythonFiles/release/jedi/evaluate/compiled/fake.py @@ -6,16 +6,48 @@ import os import inspect +import types +from itertools import chain -from jedi._compatibility import is_py3, builtins, unicode -from jedi.parser import Parser, load_grammar -from jedi.parser import tree as pt -from jedi.evaluate.helpers import FakeName +from parso.python import tree + +from jedi._compatibility import is_py3, builtins, unicode, is_py34 modules = {} -def _load_faked_module(module): +MethodDescriptorType = type(str.replace) +# These are not considered classes and access is granted even though they have +# a __class__ attribute. +NOT_CLASS_TYPES = ( + types.BuiltinFunctionType, + types.CodeType, + types.FrameType, + types.FunctionType, + types.GeneratorType, + types.GetSetDescriptorType, + types.LambdaType, + types.MemberDescriptorType, + types.MethodType, + types.ModuleType, + types.TracebackType, + MethodDescriptorType +) + +if is_py3: + NOT_CLASS_TYPES += ( + types.MappingProxyType, + types.SimpleNamespace + ) + if is_py34: + NOT_CLASS_TYPES += (types.DynamicClassAttribute,) + + +class FakeDoesNotExist(Exception): + pass + + +def _load_faked_module(grammar, module): module_name = module.__name__ if module_name == '__builtin__' and not is_py3: module_name = 'builtins' @@ -30,23 +62,21 @@ def _load_faked_module(module): except IOError: modules[module_name] = None return - grammar = load_grammar('grammar3.4') - module = Parser(grammar, unicode(source), module_name).module - modules[module_name] = module + modules[module_name] = m = grammar.parse(unicode(source)) if module_name == 'builtins' and not is_py3: # There are two implementations of `open` for either python 2/3. # -> Rename the python2 version (`look at fake/builtins.pym`). 
-        open_func = search_scope(module, 'open')
-        open_func.children[1] = FakeName('open_python3')
-        open_func = search_scope(module, 'open_python2')
-        open_func.children[1] = FakeName('open')
-    return module
+        open_func = _search_scope(m, 'open')
+        open_func.children[1].value = 'open_python3'
+        open_func = _search_scope(m, 'open_python2')
+        open_func.children[1].value = 'open'
+    return m
 
 
-def search_scope(scope, obj_name):
-    for s in scope.subscopes:
-        if str(s.name) == obj_name:
+def _search_scope(scope, obj_name):
+    for s in chain(scope.iter_classdefs(), scope.iter_funcdefs()):
+        if s.name.value == obj_name:
             return s
 
 
@@ -64,60 +94,120 @@ def get_module(obj):
         # Unfortunately in some cases like `int` there's no __module__
         return builtins
     else:
-        return __import__(imp_plz)
+        if imp_plz is None:
+            # Happens for example in `(_ for _ in []).send.__module__`.
+            return builtins
+        else:
+            try:
+                return __import__(imp_plz)
+            except ImportError:
+                # __module__ can be something arbitrary that doesn't exist.
+                return builtins
 
 
-def _faked(module, obj, name):
+def _faked(grammar, module, obj, name):
     # Crazy underscore actions to try to escape all the internal madness.
     if module is None:
         module = get_module(obj)
 
-    faked_mod = _load_faked_module(module)
+    faked_mod = _load_faked_module(grammar, module)
     if faked_mod is None:
-        return
+        return None, None
 
-    # Having the module as a `parser.representation.module`, we need to scan
+    # Having the module as a `parser.python.tree.Module`, we need to scan
     # for methods.
     if name is None:
-        if inspect.isbuiltin(obj):
-            return search_scope(faked_mod, obj.__name__)
+        if inspect.isbuiltin(obj) or inspect.isclass(obj):
+            return _search_scope(faked_mod, obj.__name__), faked_mod
         elif not inspect.isclass(obj):
             # object is a method or descriptor
-            cls = search_scope(faked_mod, obj.__objclass__.__name__)
-            if cls is None:
-                return
-            return search_scope(cls, obj.__name__)
+            try:
+                objclass = obj.__objclass__
+            except AttributeError:
+                return None, None
+            else:
+                cls = _search_scope(faked_mod, objclass.__name__)
+                if cls is None:
+                    return None, None
+                return _search_scope(cls, obj.__name__), faked_mod
     else:
-        if obj == module:
-            return search_scope(faked_mod, name)
+        if obj is module:
+            return _search_scope(faked_mod, name), faked_mod
         else:
-            cls = search_scope(faked_mod, obj.__name__)
+            try:
+                cls_name = obj.__name__
+            except AttributeError:
+                return None, None
+            cls = _search_scope(faked_mod, cls_name)
             if cls is None:
-                return
-            return search_scope(cls, name)
+                return None, None
+            return _search_scope(cls, name), faked_mod
+    return None, None
+
+
+def memoize_faked(obj):
+    """
+    A typical memoize function that ignores issues with non-hashable results.
+    """
+    cache = obj.cache = {}
+
+    def memoizer(*args, **kwargs):
+        key = (obj, args, frozenset(kwargs.items()))
+        try:
+            result = cache[key]
+        except (TypeError, ValueError):
+            return obj(*args, **kwargs)
+        except KeyError:
+            result = obj(*args, **kwargs)
+            if result is not None:
+                # Cache the result we already computed; calling obj again
+                # would recompute it (and re-apply side effects).
+                cache[key] = result
+            return result
+        else:
+            return result
+    return memoizer
 
 
-def get_faked(module, obj, name=None):
-    obj = obj.__class__ if is_class_instance(obj) else obj
-    result = _faked(module, obj, name)
-    if result is None or isinstance(result, pt.Class):
+@memoize_faked
+def _get_faked(grammar, module, obj, name=None):
+    result, fake_module = _faked(grammar, module, obj, name)
+    if result is None:
         # We're not interested in classes. What we want is functions. 
- return None + raise FakeDoesNotExist + elif result.type == 'classdef': + return result, fake_module else: # Set the docstr which was previously not set (faked modules don't # contain it). + assert result.type == 'funcdef' doc = '"""%s"""' % obj.__doc__ # TODO need escapes. suite = result.children[-1] - string = pt.String(pt.zero_position_modifier, doc, (0, 0), '') - new_line = pt.Whitespace('\n', (0, 0), '') - docstr_node = pt.Node('simple_stmt', [string, new_line]) - suite.children.insert(2, docstr_node) - return result + string = tree.String(doc, (0, 0), '') + new_line = tree.Newline('\n', (0, 0)) + docstr_node = tree.PythonNode('simple_stmt', [string, new_line]) + suite.children.insert(1, docstr_node) + return result, fake_module + + +def get_faked(evaluator, module, obj, name=None, parent_context=None): + if parent_context and parent_context.tree_node is not None: + # Try to search in already clearly defined stuff. + found = _search_scope(parent_context.tree_node, name) + if found is not None: + return found + else: + raise FakeDoesNotExist + + faked, fake_module = _get_faked(evaluator.latest_grammar, module and module.obj, obj, name) + if module is not None: + module.get_used_names = fake_module.get_used_names + return faked def is_class_instance(obj): """Like inspect.* methods.""" - return not (inspect.isclass(obj) or inspect.ismodule(obj) - or inspect.isbuiltin(obj) or inspect.ismethod(obj) - or inspect.ismethoddescriptor(obj) or inspect.iscode(obj) - or inspect.isgenerator(obj)) + try: + cls = obj.__class__ + except AttributeError: + return False + else: + return cls != type and not issubclass(cls, NOT_CLASS_TYPES) diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/_weakref.pym b/pythonFiles/release/jedi/evaluate/compiled/fake/_weakref.pym index 8d21a2c4a7c6..298d0b0dba88 100755 --- a/pythonFiles/release/jedi/evaluate/compiled/fake/_weakref.pym +++ b/pythonFiles/release/jedi/evaluate/compiled/fake/_weakref.pym @@ -1,8 +1,9 @@ def proxy(object, callback=None): return object -class weakref(): +class ref(): def __init__(self, object, callback=None): self.__object = object + def __call__(self): return self.__object diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/builtins.pym b/pythonFiles/release/jedi/evaluate/compiled/fake/builtins.pym index 1283de00317b..1225929c2cb7 100755 --- a/pythonFiles/release/jedi/evaluate/compiled/fake/builtins.pym +++ b/pythonFiles/release/jedi/evaluate/compiled/fake/builtins.pym @@ -32,9 +32,16 @@ def range(start, stop=None, step=1): class file(): def __iter__(self): yield '' + def next(self): return '' + def readlines(self): + return [''] + + def __enter__(self): + return self + class xrange(): # Attention: this function doesn't exist in Py3k (there it is range). 
@@ -124,7 +131,7 @@ class list(): return self.__iterable[y] def pop(self): - return self.__iterable[-1] + return self.__iterable[int()] class tuple(): @@ -202,11 +209,29 @@ class dict(): except KeyError: return d + def values(self): + return self.__elements.values() + def setdefault(self, k, d): # TODO maybe also return the content return d +class enumerate(): + def __init__(self, sequence, start=0): + self.__sequence = sequence + + def __iter__(self): + for i in self.__sequence: + yield 1, i + + def __next__(self): + return next(self.__iter__()) + + def next(self): + return next(self.__iter__()) + + class reversed(): def __init__(self, sequence): self.__sequence = sequence @@ -238,6 +263,11 @@ class str(): def __init__(self, obj): pass + def strip(self): + return str() + + def split(self): + return [str()] class type(): def mro(): diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/io.pym b/pythonFiles/release/jedi/evaluate/compiled/fake/io.pym index 87b02eed4321..c1f4fc011662 100755 --- a/pythonFiles/release/jedi/evaluate/compiled/fake/io.pym +++ b/pythonFiles/release/jedi/evaluate/compiled/fake/io.pym @@ -4,3 +4,9 @@ class TextIOWrapper(): def __iter__(self): yield str() + + def readlines(self): + return [''] + + def __enter__(self): + return self diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/operator.pym b/pythonFiles/release/jedi/evaluate/compiled/fake/operator.pym new file mode 100644 index 000000000000..d40d468179aa --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/compiled/fake/operator.pym @@ -0,0 +1,33 @@ +# Just copied this code from Python 3.6. + +class itemgetter: + """ + Return a callable object that fetches the given item(s) from its operand. + After f = itemgetter(2), the call f(r) returns r[2]. + After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]) + """ + __slots__ = ('_items', '_call') + + def __init__(self, item, *items): + if not items: + self._items = (item,) + def func(obj): + return obj[item] + self._call = func + else: + self._items = items = (item,) + items + def func(obj): + return tuple(obj[i] for i in items) + self._call = func + + def __call__(self, obj): + return self._call(obj) + + def __repr__(self): + return '%s.%s(%s)' % (self.__class__.__module__, + self.__class__.__name__, + ', '.join(map(repr, self._items))) + + def __reduce__(self): + return self.__class__, self._items + diff --git a/pythonFiles/release/jedi/evaluate/compiled/getattr_static.py b/pythonFiles/release/jedi/evaluate/compiled/getattr_static.py new file mode 100644 index 000000000000..9f8cd8a838cf --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/compiled/getattr_static.py @@ -0,0 +1,175 @@ +""" +A static version of getattr. +This is a backport of the Python 3 code with a little bit of additional +information returned to enable Jedi to make decisions. 
+""" + +import types + +from jedi._compatibility import py_version + +_sentinel = object() + +def _check_instance(obj, attr): + instance_dict = {} + try: + instance_dict = object.__getattribute__(obj, "__dict__") + except AttributeError: + pass + return dict.get(instance_dict, attr, _sentinel) + + +def _check_class(klass, attr): + for entry in _static_getmro(klass): + if _shadowed_dict(type(entry)) is _sentinel: + try: + return entry.__dict__[attr] + except KeyError: + pass + return _sentinel + +def _is_type(obj): + try: + _static_getmro(obj) + except TypeError: + return False + return True + + +def _shadowed_dict_newstyle(klass): + dict_attr = type.__dict__["__dict__"] + for entry in _static_getmro(klass): + try: + class_dict = dict_attr.__get__(entry)["__dict__"] + except KeyError: + pass + else: + if not (type(class_dict) is types.GetSetDescriptorType and + class_dict.__name__ == "__dict__" and + class_dict.__objclass__ is entry): + return class_dict + return _sentinel + + +def _static_getmro_newstyle(klass): + return type.__dict__['__mro__'].__get__(klass) + + +if py_version >= 30: + _shadowed_dict = _shadowed_dict_newstyle + _get_type = type + _static_getmro = _static_getmro_newstyle +else: + def _shadowed_dict(klass): + """ + In Python 2 __dict__ is not overwritable: + + class Foo(object): pass + setattr(Foo, '__dict__', 4) + + Traceback (most recent call last): + File "", line 1, in + TypeError: __dict__ must be a dictionary object + + It applies to both newstyle and oldstyle classes: + + class Foo(object): pass + setattr(Foo, '__dict__', 4) + Traceback (most recent call last): + File "", line 1, in + AttributeError: attribute '__dict__' of 'type' objects is not writable + + It also applies to instances of those objects. However to keep things + straight forward, newstyle classes always use the complicated way of + accessing it while oldstyle classes just use getattr. + """ + if type(klass) is _oldstyle_class_type: + return getattr(klass, '__dict__', _sentinel) + return _shadowed_dict_newstyle(klass) + + class _OldStyleClass(): + pass + + _oldstyle_instance_type = type(_OldStyleClass()) + _oldstyle_class_type = type(_OldStyleClass) + + def _get_type(obj): + type_ = object.__getattribute__(obj, '__class__') + if type_ is _oldstyle_instance_type: + # Somehow for old style classes we need to access it directly. + return obj.__class__ + return type_ + + def _static_getmro(klass): + if type(klass) is _oldstyle_class_type: + def oldstyle_mro(klass): + """ + Oldstyle mro is a really simplistic way of look up mro: + https://stackoverflow.com/questions/54867/what-is-the-difference-between-old-style-and-new-style-classes-in-python + """ + yield klass + for base in klass.__bases__: + for yield_from in oldstyle_mro(base): + yield yield_from + + return oldstyle_mro(klass) + + return _static_getmro_newstyle(klass) + + +def _safe_hasattr(obj, name): + return _check_class(_get_type(obj), name) is not _sentinel + + +def _safe_is_data_descriptor(obj): + return (_safe_hasattr(obj, '__set__') or _safe_hasattr(obj, '__delete__')) + + +def getattr_static(obj, attr, default=_sentinel): + """Retrieve attributes without triggering dynamic lookup via the + descriptor protocol, __getattr__ or __getattribute__. + + Note: this function may not be able to retrieve all attributes + that getattr can fetch (like dynamically created attributes) + and may find attributes that getattr can't (like descriptors + that raise AttributeError). 
It can also return descriptor objects + instead of instance members in some cases. See the + documentation for details. + + Returns a tuple `(attr, is_get_descriptor)`. is_get_descripter means that + the attribute is a descriptor that has a `__get__` attribute. + """ + instance_result = _sentinel + if not _is_type(obj): + klass = _get_type(obj) + dict_attr = _shadowed_dict(klass) + if (dict_attr is _sentinel or + type(dict_attr) is types.MemberDescriptorType): + instance_result = _check_instance(obj, attr) + else: + klass = obj + + klass_result = _check_class(klass, attr) + + if instance_result is not _sentinel and klass_result is not _sentinel: + if _safe_hasattr(klass_result, '__get__') \ + and _safe_is_data_descriptor(klass_result): + # A get/set descriptor has priority over everything. + return klass_result, True + + if instance_result is not _sentinel: + return instance_result, False + if klass_result is not _sentinel: + return klass_result, _safe_hasattr(klass_result, '__get__') + + if obj is klass: + # for types we check the metaclass too + for entry in _static_getmro(type(klass)): + if _shadowed_dict(type(entry)) is _sentinel: + try: + return entry.__dict__[attr], False + except KeyError: + pass + if default is not _sentinel: + return default, False + raise AttributeError(attr) diff --git a/pythonFiles/release/jedi/evaluate/compiled/mixed.py b/pythonFiles/release/jedi/evaluate/compiled/mixed.py new file mode 100644 index 000000000000..ac0f6dd614db --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/compiled/mixed.py @@ -0,0 +1,231 @@ +""" +Used only for REPL Completion. +""" + +import inspect +import os + +from jedi import settings +from jedi.evaluate import compiled +from jedi.cache import underscore_memoization +from jedi.evaluate import imports +from jedi.evaluate.base_context import Context, ContextSet +from jedi.evaluate.context import ModuleContext +from jedi.evaluate.cache import evaluator_function_cache +from jedi.evaluate.compiled.getattr_static import getattr_static + + +class MixedObject(object): + """ + A ``MixedObject`` is used in two ways: + + 1. It uses the default logic of ``parser.python.tree`` objects, + 2. except for getattr calls. The names dicts are generated in a fashion + like ``CompiledObject``. + + This combined logic makes it possible to provide more powerful REPL + completion. It allows side effects that are not noticable with the default + parser structure to still be completeable. + + The biggest difference from CompiledObject to MixedObject is that we are + generally dealing with Python code and not with C code. This will generate + fewer special cases, because we in Python you don't have the same freedoms + to modify the runtime. + """ + def __init__(self, evaluator, parent_context, compiled_object, tree_context): + self.evaluator = evaluator + self.parent_context = parent_context + self.compiled_object = compiled_object + self._context = tree_context + self.obj = compiled_object.obj + + # We have to overwrite everything that has to do with trailers, name + # lookups and filters to make it possible to route name lookups towards + # compiled objects and the rest towards tree node contexts. 
+    def py__getattribute__(*args, **kwargs):
+        return Context.py__getattribute__(*args, **kwargs)
+
+    def get_filters(self, *args, **kwargs):
+        yield MixedObjectFilter(self.evaluator, self)
+
+    def __repr__(self):
+        return '<%s: %s>' % (type(self).__name__, repr(self.obj))
+
+    def __getattr__(self, name):
+        return getattr(self._context, name)
+
+
+class MixedName(compiled.CompiledName):
+    """
+    The ``CompiledName._compiled_object`` is our MixedObject.
+    """
+    @property
+    def start_pos(self):
+        contexts = list(self.infer())
+        if not contexts:
+            # This means a start_pos that doesn't exist (compiled objects).
+            return (0, 0)
+        return contexts[0].name.start_pos
+
+    @start_pos.setter
+    def start_pos(self, value):
+        # Ignore the __init__'s start_pos setter call.
+        pass
+
+    @underscore_memoization
+    def infer(self):
+        obj = self.parent_context.obj
+        try:
+            # TODO use logic from compiled.CompiledObjectFilter
+            obj = getattr(obj, self.string_name)
+        except AttributeError:
+            # Happens e.g. in properties of
+            # PyQt4.QtGui.QStyleOptionComboBox.currentText
+            # -> just set it to None
+            obj = None
+        return ContextSet(
+            _create(self._evaluator, obj, parent_context=self.parent_context)
+        )
+
+    @property
+    def api_type(self):
+        return next(iter(self.infer())).api_type
+
+
+class MixedObjectFilter(compiled.CompiledObjectFilter):
+    name_class = MixedName
+
+    def __init__(self, evaluator, mixed_object, is_instance=False):
+        super(MixedObjectFilter, self).__init__(
+            evaluator, mixed_object, is_instance)
+        self._mixed_object = mixed_object
+
+    #def _create(self, name):
+        #return MixedName(self._evaluator, self._compiled_object, name)
+
+
+@evaluator_function_cache()
+def _load_module(evaluator, path, python_object):
+    module = evaluator.grammar.parse(
+        path=path,
+        cache=True,
+        diff_cache=True,
+        cache_path=settings.cache_directory
+    ).get_root_node()
+    python_module = inspect.getmodule(python_object)
+
+    evaluator.modules[python_module.__name__] = module
+    return module
+
+
+def _get_object_to_check(python_object):
+    """Check if inspect.getfile has a chance to find the source."""
+    if (inspect.ismodule(python_object) or
+            inspect.isclass(python_object) or
+            inspect.ismethod(python_object) or
+            inspect.isfunction(python_object) or
+            inspect.istraceback(python_object) or
+            inspect.isframe(python_object) or
+            inspect.iscode(python_object)):
+        return python_object
+
+    try:
+        return python_object.__class__
+    except AttributeError:
+        raise TypeError  # Prevents computation of `repr` within inspect.
+
+
+def find_syntax_node_name(evaluator, python_object):
+    try:
+        python_object = _get_object_to_check(python_object)
+        path = inspect.getsourcefile(python_object)
+    except TypeError:
+        # The type might not be known (e.g. class_with_dict.__weakref__)
+        return None, None
+    if path is None or not os.path.exists(path):
+        # The path might not exist or be e.g. <stdin>.
+        return None, None
+
+    module = _load_module(evaluator, path, python_object)
+
+    if inspect.ismodule(python_object):
+        # We don't need to check names for modules, because there's not really
+        # a way to write a module in a module in Python (and also __name__ can
+        # be something like ``email.utils``).
+        return module, path
+
+    try:
+        name_str = python_object.__name__
+    except AttributeError:
+        # Stuff like python_function.__code__.
+        return None, None
+
+    if name_str == '<lambda>':
+        return None, None  # It's too hard to find lambdas.
+
+    # Doesn't always work (e.g. 
os.stat_result) + try: + names = module.get_used_names()[name_str] + except KeyError: + return None, None + names = [n for n in names if n.is_definition()] + + try: + code = python_object.__code__ + # By using the line number of a code object we make the lookup in a + # file pretty easy. There's still a possibility of people defining + # stuff like ``a = 3; foo(a); a = 4`` on the same line, but if people + # do so we just don't care. + line_nr = code.co_firstlineno + except AttributeError: + pass + else: + line_names = [name for name in names if name.start_pos[0] == line_nr] + # There's a chance that the object is not available anymore, because + # the code has changed in the background. + if line_names: + return line_names[-1].parent, path + + # It's really hard to actually get the right definition, here as a last + # resort we just return the last one. This chance might lead to odd + # completions at some points but will lead to mostly correct type + # inference, because people tend to define a public name in a module only + # once. + return names[-1].parent, path + + +@compiled.compiled_objects_cache('mixed_cache') +def _create(evaluator, obj, parent_context=None, *args): + tree_node, path = find_syntax_node_name(evaluator, obj) + + compiled_object = compiled.create( + evaluator, obj, parent_context=parent_context.compiled_object) + if tree_node is None: + return compiled_object + + module_node = tree_node.get_root_node() + if parent_context.tree_node.get_root_node() == module_node: + module_context = parent_context.get_root_context() + else: + module_context = ModuleContext(evaluator, module_node, path=path) + # TODO this __name__ is probably wrong. + name = compiled_object.get_root_context().py__name__() + imports.add_module(evaluator, name, module_context) + + tree_context = module_context.create_context( + tree_node, + node_is_context=True, + node_is_object=True + ) + if tree_node.type == 'classdef': + if not inspect.isclass(obj): + # Is an instance, not a class. 
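+            # For illustration (hypothetical REPL session): completing on `x`
+            # after `x = Foo()` reaches this branch; the syntax node found is
+            # Foo's classdef, so the class is executed once to get an
+            # instance context that matches the runtime object.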
+            tree_context, = tree_context.execute_evaluated()
+
+    return MixedObject(
+        evaluator,
+        parent_context,
+        compiled_object,
+        tree_context=tree_context
+    )
+
diff --git a/pythonFiles/release/jedi/evaluate/context/__init__.py b/pythonFiles/release/jedi/evaluate/context/__init__.py
new file mode 100644
index 000000000000..4e7ce4d6c831
--- /dev/null
+++ b/pythonFiles/release/jedi/evaluate/context/__init__.py
@@ -0,0 +1,5 @@
+from jedi.evaluate.context.module import ModuleContext
+from jedi.evaluate.context.klass import ClassContext
+from jedi.evaluate.context.function import FunctionContext, FunctionExecutionContext
+from jedi.evaluate.context.instance import AnonymousInstance, BoundMethod, \
+    CompiledInstance, AbstractInstanceContext, TreeInstance
diff --git a/pythonFiles/release/jedi/evaluate/context/function.py b/pythonFiles/release/jedi/evaluate/context/function.py
new file mode 100644
index 000000000000..0dba9c91d707
--- /dev/null
+++ b/pythonFiles/release/jedi/evaluate/context/function.py
@@ -0,0 +1,226 @@
+from parso.python import tree
+
+from jedi._compatibility import use_metaclass
+from jedi import debug
+from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass
+from jedi.evaluate import compiled
+from jedi.evaluate import recursion
+from jedi.evaluate import docstrings
+from jedi.evaluate import pep0484
+from jedi.evaluate import flow_analysis
+from jedi.evaluate import helpers
+from jedi.evaluate.arguments import AnonymousArguments
+from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter, \
+    ContextName, AbstractNameDefinition, ParamName
+from jedi.evaluate.base_context import ContextualizedNode, NO_CONTEXTS, \
+    ContextSet, TreeContext
+from jedi.evaluate.lazy_context import LazyKnownContexts, LazyKnownContext, \
+    LazyTreeContext
+from jedi.evaluate.context import iterable
+from jedi import parser_utils
+from jedi.evaluate.parser_cache import get_yield_exprs
+
+
+class LambdaName(AbstractNameDefinition):
+    string_name = '<lambda>'
+
+    def __init__(self, lambda_context):
+        self._lambda_context = lambda_context
+        self.parent_context = lambda_context.parent_context
+
+    def start_pos(self):
+        return self._lambda_context.tree_node.start_pos
+
+    def infer(self):
+        return ContextSet(self._lambda_context)
+
+
+class FunctionContext(use_metaclass(CachedMetaClass, TreeContext)):
+    """
+    Needed because of decorators. Decorators are evaluated here.
+    """
+    api_type = 'function'
+
+    def __init__(self, evaluator, parent_context, funcdef):
+        """ This should not be called directly """
+        super(FunctionContext, self).__init__(evaluator, parent_context)
+        self.tree_node = funcdef
+
+    def get_filters(self, search_global, until_position=None, origin_scope=None):
+        if search_global:
+            yield ParserTreeFilter(
+                self.evaluator,
+                context=self,
+                until_position=until_position,
+                origin_scope=origin_scope
+            )
+        else:
+            scope = self.py__class__()
+            for filter in scope.get_filters(search_global=False, origin_scope=origin_scope):
+                yield filter
+
+    def infer_function_execution(self, function_execution):
+        """
+        Created to be used by inheritance.
+ """ + yield_exprs = get_yield_exprs(self.evaluator, self.tree_node) + if yield_exprs: + return ContextSet(iterable.Generator(self.evaluator, function_execution)) + else: + return function_execution.get_return_values() + + def get_function_execution(self, arguments=None): + if arguments is None: + arguments = AnonymousArguments() + + return FunctionExecutionContext(self.evaluator, self.parent_context, self, arguments) + + def py__call__(self, arguments): + function_execution = self.get_function_execution(arguments) + return self.infer_function_execution(function_execution) + + def py__class__(self): + # This differentiation is only necessary for Python2. Python3 does not + # use a different method class. + if isinstance(parser_utils.get_parent_scope(self.tree_node), tree.Class): + name = 'METHOD_CLASS' + else: + name = 'FUNCTION_CLASS' + return compiled.get_special_object(self.evaluator, name) + + @property + def name(self): + if self.tree_node.type == 'lambdef': + return LambdaName(self) + return ContextName(self, self.tree_node.name) + + def get_param_names(self): + function_execution = self.get_function_execution() + return [ParamName(function_execution, param.name) + for param in self.tree_node.get_params()] + + +class FunctionExecutionContext(TreeContext): + """ + This class is used to evaluate functions and their returns. + + This is the most complicated class, because it contains the logic to + transfer parameters. It is even more complicated, because there may be + multiple calls to functions and recursion has to be avoided. But this is + responsibility of the decorators. + """ + function_execution_filter = FunctionExecutionFilter + + def __init__(self, evaluator, parent_context, function_context, var_args): + super(FunctionExecutionContext, self).__init__(evaluator, parent_context) + self.function_context = function_context + self.tree_node = function_context.tree_node + self.var_args = var_args + + @evaluator_method_cache(default=NO_CONTEXTS) + @recursion.execution_recursion_decorator() + def get_return_values(self, check_yields=False): + funcdef = self.tree_node + if funcdef.type == 'lambdef': + return self.evaluator.eval_element(self, funcdef.children[-1]) + + if check_yields: + context_set = NO_CONTEXTS + returns = get_yield_exprs(self.evaluator, funcdef) + else: + returns = funcdef.iter_return_stmts() + context_set = docstrings.infer_return_types(self.function_context) + context_set |= pep0484.infer_return_types(self.function_context) + + for r in returns: + check = flow_analysis.reachability_check(self, funcdef, r) + if check is flow_analysis.UNREACHABLE: + debug.dbg('Return unreachable: %s', r) + else: + if check_yields: + context_set |= ContextSet.from_sets( + lazy_context.infer() + for lazy_context in self._eval_yield(r) + ) + else: + try: + children = r.children + except AttributeError: + context_set |= ContextSet(compiled.create(self.evaluator, None)) + else: + context_set |= self.eval_node(children[1]) + if check is flow_analysis.REACHABLE: + debug.dbg('Return reachable: %s', r) + break + return context_set + + def _eval_yield(self, yield_expr): + if yield_expr.type == 'keyword': + # `yield` just yields None. + yield LazyKnownContext(compiled.create(self.evaluator, None)) + return + + node = yield_expr.children[1] + if node.type == 'yield_arg': # It must be a yield from. 
+ cn = ContextualizedNode(self, node.children[1]) + for lazy_context in cn.infer().iterate(cn): + yield lazy_context + else: + yield LazyTreeContext(self, node) + + @recursion.execution_recursion_decorator(default=iter([])) + def get_yield_values(self): + for_parents = [(y, tree.search_ancestor(y, 'for_stmt', 'funcdef', + 'while_stmt', 'if_stmt')) + for y in get_yield_exprs(self.evaluator, self.tree_node)] + + # Calculate if the yields are placed within the same for loop. + yields_order = [] + last_for_stmt = None + for yield_, for_stmt in for_parents: + # For really simple for loops we can predict the order. Otherwise + # we just ignore it. + parent = for_stmt.parent + if parent.type == 'suite': + parent = parent.parent + if for_stmt.type == 'for_stmt' and parent == self.tree_node \ + and parser_utils.for_stmt_defines_one_name(for_stmt): # Simplicity for now. + if for_stmt == last_for_stmt: + yields_order[-1][1].append(yield_) + else: + yields_order.append((for_stmt, [yield_])) + elif for_stmt == self.tree_node: + yields_order.append((None, [yield_])) + else: + types = self.get_return_values(check_yields=True) + if types: + yield LazyKnownContexts(types) + return + last_for_stmt = for_stmt + + for for_stmt, yields in yields_order: + if for_stmt is None: + # No for_stmt, just normal yields. + for yield_ in yields: + for result in self._eval_yield(yield_): + yield result + else: + input_node = for_stmt.get_testlist() + cn = ContextualizedNode(self, input_node) + ordered = cn.infer().iterate(cn) + ordered = list(ordered) + for lazy_context in ordered: + dct = {str(for_stmt.children[1].value): lazy_context.infer()} + with helpers.predefine_names(self, for_stmt, dct): + for yield_in_same_for_stmt in yields: + for result in self._eval_yield(yield_in_same_for_stmt): + yield result + + def get_filters(self, search_global, until_position=None, origin_scope=None): + yield self.function_execution_filter(self.evaluator, self, + until_position=until_position, + origin_scope=origin_scope) + + @evaluator_method_cache() + def get_params(self): + return self.var_args.get_params(self) diff --git a/pythonFiles/release/jedi/evaluate/context/instance.py b/pythonFiles/release/jedi/evaluate/context/instance.py new file mode 100644 index 000000000000..2c8d796c9c6d --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/context/instance.py @@ -0,0 +1,435 @@ +from abc import abstractproperty + +from jedi._compatibility import is_py3 +from jedi import debug +from jedi.evaluate import compiled +from jedi.evaluate import filters +from jedi.evaluate.base_context import Context, NO_CONTEXTS, ContextSet, \ + iterator_to_context_set +from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts +from jedi.evaluate.cache import evaluator_method_cache +from jedi.evaluate.arguments import AbstractArguments, AnonymousArguments +from jedi.cache import memoize_method +from jedi.evaluate.context.function import FunctionExecutionContext, FunctionContext +from jedi.evaluate.context.klass import ClassContext, apply_py__get__ +from jedi.evaluate.context import iterable +from jedi.parser_utils import get_parent_scope + + + +class InstanceFunctionExecution(FunctionExecutionContext): + def __init__(self, instance, parent_context, function_context, var_args): + self.instance = instance + var_args = InstanceVarArgs(self, var_args) + + super(InstanceFunctionExecution, self).__init__( + instance.evaluator, parent_context, function_context, var_args) + + +class AnonymousInstanceFunctionExecution(FunctionExecutionContext): + 
function_execution_filter = filters.AnonymousInstanceFunctionExecutionFilter + + def __init__(self, instance, parent_context, function_context, var_args): + self.instance = instance + super(AnonymousInstanceFunctionExecution, self).__init__( + instance.evaluator, parent_context, function_context, var_args) + + +class AbstractInstanceContext(Context): + """ + This class is used to evaluate instances. + """ + api_type = 'instance' + function_execution_cls = InstanceFunctionExecution + + def __init__(self, evaluator, parent_context, class_context, var_args): + super(AbstractInstanceContext, self).__init__(evaluator, parent_context) + # Generated instances are classes that are just generated by self + # (No var_args) used. + self.class_context = class_context + self.var_args = var_args + + def is_class(self): + return False + + @property + def py__call__(self): + names = self.get_function_slot_names('__call__') + if not names: + # Means the Instance is not callable. + raise AttributeError + + def execute(arguments): + return ContextSet.from_sets(name.execute(arguments) for name in names) + + return execute + + def py__class__(self): + return self.class_context + + def py__bool__(self): + # Signalize that we don't know about the bool type. + return None + + def get_function_slot_names(self, name): + # Python classes don't look at the dictionary of the instance when + # looking up `__call__`. This is something that has to do with Python's + # internal slot system (note: not __slots__, but C slots). + for filter in self.get_filters(include_self_names=False): + names = filter.get(name) + if names: + return names + return [] + + def execute_function_slots(self, names, *evaluated_args): + return ContextSet.from_sets( + name.execute_evaluated(*evaluated_args) + for name in names + ) + + def py__get__(self, obj): + # Arguments in __get__ descriptors are obj, class. + # `method` is the new parent of the array, don't know if that's good. + names = self.get_function_slot_names('__get__') + if names: + if isinstance(obj, AbstractInstanceContext): + return self.execute_function_slots(names, obj, obj.class_context) + else: + none_obj = compiled.create(self.evaluator, None) + return self.execute_function_slots(names, none_obj, obj) + else: + return ContextSet(self) + + def get_filters(self, search_global=None, until_position=None, + origin_scope=None, include_self_names=True): + if include_self_names: + for cls in self.class_context.py__mro__(): + if isinstance(cls, compiled.CompiledObject): + if cls.tree_node is not None: + # In this case we're talking about a fake object, it + # doesn't make sense for normal compiled objects to + # search for self variables. + yield SelfNameFilter(self.evaluator, self, cls, origin_scope) + else: + yield SelfNameFilter(self.evaluator, self, cls, origin_scope) + + for cls in self.class_context.py__mro__(): + if isinstance(cls, compiled.CompiledObject): + yield CompiledInstanceClassFilter(self.evaluator, self, cls) + else: + yield InstanceClassFilter(self.evaluator, self, cls, origin_scope) + + def py__getitem__(self, index): + try: + names = self.get_function_slot_names('__getitem__') + except KeyError: + debug.warning('No __getitem__, cannot access the array.') + return NO_CONTEXTS + else: + index_obj = compiled.create(self.evaluator, index) + return self.execute_function_slots(names, index_obj) + + def py__iter__(self): + iter_slot_names = self.get_function_slot_names('__iter__') + if not iter_slot_names: + debug.warning('No __iter__ on %s.' 
% self) + return + + for generator in self.execute_function_slots(iter_slot_names): + if isinstance(generator, AbstractInstanceContext): + # `__next__` logic. + name = '__next__' if is_py3 else 'next' + iter_slot_names = generator.get_function_slot_names(name) + if iter_slot_names: + yield LazyKnownContexts( + generator.execute_function_slots(iter_slot_names) + ) + else: + debug.warning('Instance has no __next__ function in %s.', generator) + else: + for lazy_context in generator.py__iter__(): + yield lazy_context + + @abstractproperty + def name(self): + pass + + def _create_init_execution(self, class_context, func_node): + bound_method = BoundMethod( + self.evaluator, self, class_context, self.parent_context, func_node + ) + return self.function_execution_cls( + self, + class_context.parent_context, + bound_method, + self.var_args + ) + + def create_init_executions(self): + for name in self.get_function_slot_names('__init__'): + if isinstance(name, LazyInstanceName): + yield self._create_init_execution(name.class_context, name.tree_name.parent) + + @evaluator_method_cache() + def create_instance_context(self, class_context, node): + if node.parent.type in ('funcdef', 'classdef'): + node = node.parent + scope = get_parent_scope(node) + if scope == class_context.tree_node: + return class_context + else: + parent_context = self.create_instance_context(class_context, scope) + if scope.type == 'funcdef': + if scope.name.value == '__init__' and parent_context == class_context: + return self._create_init_execution(class_context, scope) + else: + bound_method = BoundMethod( + self.evaluator, self, class_context, + parent_context, scope + ) + return bound_method.get_function_execution() + elif scope.type == 'classdef': + class_context = ClassContext(self.evaluator, scope, parent_context) + return class_context + elif scope.type == 'comp_for': + # Comprehensions currently don't have a special scope in Jedi. + return self.create_instance_context(class_context, scope) + else: + raise NotImplementedError + return class_context + + def __repr__(self): + return "<%s of %s(%s)>" % (self.__class__.__name__, self.class_context, + self.var_args) + + +class CompiledInstance(AbstractInstanceContext): + def __init__(self, *args, **kwargs): + super(CompiledInstance, self).__init__(*args, **kwargs) + # I don't think that dynamic append lookups should happen here. That + # sounds more like something that should go to py__iter__. + if self.class_context.name.string_name in ['list', 'set'] \ + and self.parent_context.get_root_context() == self.evaluator.BUILTINS: + # compare the module path with the builtin name. 
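+            # For illustration (hypothetical user code): after `x = list()`,
+            # calls like `x.append(3)` elsewhere in the module are merged in
+            # here, so iterating over `x` later infers `int`.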
+ self.var_args = iterable.get_dynamic_array_instance(self) + + @property + def name(self): + return compiled.CompiledContextName(self, self.class_context.name.string_name) + + def create_instance_context(self, class_context, node): + if get_parent_scope(node).type == 'classdef': + return class_context + else: + return super(CompiledInstance, self).create_instance_context(class_context, node) + + +class TreeInstance(AbstractInstanceContext): + def __init__(self, evaluator, parent_context, class_context, var_args): + super(TreeInstance, self).__init__(evaluator, parent_context, + class_context, var_args) + self.tree_node = class_context.tree_node + + @property + def name(self): + return filters.ContextName(self, self.class_context.name.tree_name) + + +class AnonymousInstance(TreeInstance): + function_execution_cls = AnonymousInstanceFunctionExecution + + def __init__(self, evaluator, parent_context, class_context): + super(AnonymousInstance, self).__init__( + evaluator, + parent_context, + class_context, + var_args=AnonymousArguments(), + ) + + +class CompiledInstanceName(compiled.CompiledName): + def __init__(self, evaluator, instance, parent_context, name): + super(CompiledInstanceName, self).__init__(evaluator, parent_context, name) + self._instance = instance + + @iterator_to_context_set + def infer(self): + for result_context in super(CompiledInstanceName, self).infer(): + if isinstance(result_context, FunctionContext): + parent_context = result_context.parent_context + while parent_context.is_class(): + parent_context = parent_context.parent_context + + yield BoundMethod( + result_context.evaluator, self._instance, self.parent_context, + parent_context, result_context.tree_node + ) + else: + if result_context.api_type == 'function': + yield CompiledBoundMethod(result_context) + else: + yield result_context + + +class CompiledInstanceClassFilter(compiled.CompiledObjectFilter): + name_class = CompiledInstanceName + + def __init__(self, evaluator, instance, compiled_object): + super(CompiledInstanceClassFilter, self).__init__( + evaluator, + compiled_object, + is_instance=True, + ) + self._instance = instance + + def _create_name(self, name): + return self.name_class( + self._evaluator, self._instance, self._compiled_object, name) + + +class BoundMethod(FunctionContext): + def __init__(self, evaluator, instance, class_context, *args, **kwargs): + super(BoundMethod, self).__init__(evaluator, *args, **kwargs) + self._instance = instance + self._class_context = class_context + + def get_function_execution(self, arguments=None): + if arguments is None: + arguments = AnonymousArguments() + return AnonymousInstanceFunctionExecution( + self._instance, self.parent_context, self, arguments) + else: + return InstanceFunctionExecution( + self._instance, self.parent_context, self, arguments) + + +class CompiledBoundMethod(compiled.CompiledObject): + def __init__(self, func): + super(CompiledBoundMethod, self).__init__( + func.evaluator, func.obj, func.parent_context, func.tree_node) + + def get_param_names(self): + return list(super(CompiledBoundMethod, self).get_param_names())[1:] + + +class InstanceNameDefinition(filters.TreeNameDefinition): + def infer(self): + return super(InstanceNameDefinition, self).infer() + + +class LazyInstanceName(filters.TreeNameDefinition): + """ + This name calculates the parent_context lazily. 
+ """ + def __init__(self, instance, class_context, tree_name): + self._instance = instance + self.class_context = class_context + self.tree_name = tree_name + + @property + def parent_context(self): + return self._instance.create_instance_context(self.class_context, self.tree_name) + + +class LazyInstanceClassName(LazyInstanceName): + @iterator_to_context_set + def infer(self): + for result_context in super(LazyInstanceClassName, self).infer(): + if isinstance(result_context, FunctionContext): + # Classes are never used to resolve anything within the + # functions. Only other functions and modules will resolve + # those things. + parent_context = result_context.parent_context + while parent_context.is_class(): + parent_context = parent_context.parent_context + + yield BoundMethod( + result_context.evaluator, self._instance, self.class_context, + parent_context, result_context.tree_node + ) + else: + for c in apply_py__get__(result_context, self._instance): + yield c + + +class InstanceClassFilter(filters.ParserTreeFilter): + name_class = LazyInstanceClassName + + def __init__(self, evaluator, context, class_context, origin_scope): + super(InstanceClassFilter, self).__init__( + evaluator=evaluator, + context=context, + node_context=class_context, + origin_scope=origin_scope + ) + self._class_context = class_context + + def _equals_origin_scope(self): + node = self._origin_scope + while node is not None: + if node == self._parser_scope or node == self.context: + return True + node = get_parent_scope(node) + return False + + def _access_possible(self, name): + return not name.value.startswith('__') or name.value.endswith('__') \ + or self._equals_origin_scope() + + def _filter(self, names): + names = super(InstanceClassFilter, self)._filter(names) + return [name for name in names if self._access_possible(name)] + + def _convert_names(self, names): + return [self.name_class(self.context, self._class_context, name) for name in names] + + +class SelfNameFilter(InstanceClassFilter): + name_class = LazyInstanceName + + def _filter(self, names): + names = self._filter_self_names(names) + if isinstance(self._parser_scope, compiled.CompiledObject) and False: + # This would be for builtin skeletons, which are not yet supported. 
+            return list(names)
+        else:
+            start, end = self._parser_scope.start_pos, self._parser_scope.end_pos
+            return [n for n in names if start < n.start_pos < end]
+
+    def _filter_self_names(self, names):
+        for name in names:
+            trailer = name.parent
+            if trailer.type == 'trailer' \
+                    and len(trailer.children) == 2 \
+                    and trailer.children[0] == '.':
+                if name.is_definition() and self._access_possible(name):
+                    yield name
+
+    def _check_flows(self, names):
+        return names
+
+
+class InstanceVarArgs(AbstractArguments):
+    def __init__(self, execution_context, var_args):
+        self._execution_context = execution_context
+        self._var_args = var_args
+
+    @memoize_method
+    def _get_var_args(self):
+        return self._var_args
+
+    @property
+    def argument_node(self):
+        return self._var_args.argument_node
+
+    @property
+    def trailer(self):
+        return self._var_args.trailer
+
+    def unpack(self, func=None):
+        yield None, LazyKnownContext(self._execution_context.instance)
+        for values in self._get_var_args().unpack(func):
+            yield values
+
+    def get_calling_nodes(self):
+        return self._get_var_args().get_calling_nodes()
diff --git a/pythonFiles/release/jedi/evaluate/context/iterable.py b/pythonFiles/release/jedi/evaluate/context/iterable.py
new file mode 100644
index 000000000000..d0f468e43425
--- /dev/null
+++ b/pythonFiles/release/jedi/evaluate/context/iterable.py
@@ -0,0 +1,691 @@
+"""
+Contains all classes and functions to deal with lists, dicts, generators and
+iterators in general.
+
+Array modifications
+*******************
+
+If the content of an array (``set``/``list``) is requested somewhere, the
+current module will be checked for appearances of ``arr.append``,
+``arr.insert``, etc. If the ``arr`` name points to an actual array, the
+content will be added.
+
+This can be really CPU intensive, as you can imagine, because |jedi| has to
+follow **every** ``append`` and check whether it's the right array. However,
+this works pretty well, because in *slow* cases, the recursion detector and
+other settings will stop this process.
+
+It is important to note that:
+
+1. Array modifications work only in the current module.
+2. Jedi only checks Array additions; ``list.pop``, etc. are ignored.
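+
+For example, with dynamic array additions enabled, both ``int`` and ``str``
+end up being inferred for the subscript below (a small added illustration,
+assuming the default ``dynamic_array_additions`` setting is on):
+
+    >>> a = [1]
+    >>> a.append('x')
+    >>> a[0]  # both int and str are possible here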
+""" +from jedi import debug +from jedi import settings +from jedi.evaluate import compiled +from jedi.evaluate import analysis +from jedi.evaluate import recursion +from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \ + LazyTreeContext +from jedi.evaluate.helpers import is_string, predefine_names, evaluate_call_of_leaf +from jedi.evaluate.utils import safe_property +from jedi.evaluate.utils import to_list +from jedi.evaluate.cache import evaluator_method_cache +from jedi.evaluate.filters import ParserTreeFilter, has_builtin_methods, \ + register_builtin_method, SpecialMethodFilter +from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, Context, \ + TreeContext, ContextualizedNode +from jedi.parser_utils import get_comp_fors + + +class AbstractIterable(Context): + builtin_methods = {} + api_type = 'instance' + + def __init__(self, evaluator): + super(AbstractIterable, self).__init__(evaluator, evaluator.BUILTINS) + + def get_filters(self, search_global, until_position=None, origin_scope=None): + raise NotImplementedError + + @property + def name(self): + return compiled.CompiledContextName(self, self.array_type) + + +@has_builtin_methods +class GeneratorMixin(object): + array_type = None + + @register_builtin_method('send') + @register_builtin_method('next', python_version_match=2) + @register_builtin_method('__next__', python_version_match=3) + def py__next__(self): + # TODO add TypeError if params are given. + return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__()) + + def get_filters(self, search_global, until_position=None, origin_scope=None): + gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT') + yield SpecialMethodFilter(self, self.builtin_methods, gen_obj) + for filter in gen_obj.get_filters(search_global): + yield filter + + def py__bool__(self): + return True + + def py__class__(self): + gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT') + return gen_obj.py__class__() + + @property + def name(self): + return compiled.CompiledContextName(self, 'generator') + + +class Generator(GeneratorMixin, Context): + """Handling of `yield` functions.""" + def __init__(self, evaluator, func_execution_context): + super(Generator, self).__init__(evaluator, parent_context=evaluator.BUILTINS) + self._func_execution_context = func_execution_context + + def py__iter__(self): + return self._func_execution_context.get_yield_values() + + def __repr__(self): + return "<%s of %s>" % (type(self).__name__, self._func_execution_context) + + +class CompForContext(TreeContext): + @classmethod + def from_comp_for(cls, parent_context, comp_for): + return cls(parent_context.evaluator, parent_context, comp_for) + + def __init__(self, evaluator, parent_context, comp_for): + super(CompForContext, self).__init__(evaluator, parent_context) + self.tree_node = comp_for + + def get_node(self): + return self.tree_node + + def get_filters(self, search_global, until_position=None, origin_scope=None): + yield ParserTreeFilter(self.evaluator, self) + + +class Comprehension(AbstractIterable): + @staticmethod + def from_atom(evaluator, context, atom): + bracket = atom.children[0] + if bracket == '{': + if atom.children[1].children[1] == ':': + cls = DictComprehension + else: + cls = SetComprehension + elif bracket == '(': + cls = GeneratorComprehension + elif bracket == '[': + cls = ListComprehension + return cls(evaluator, context, atom) + + def __init__(self, evaluator, defining_context, atom): + super(Comprehension, 
self).__init__(evaluator) + self._defining_context = defining_context + self._atom = atom + + def _get_comprehension(self): + # The atom contains a testlist_comp + return self._atom.children[1] + + def _get_comp_for(self): + # The atom contains a testlist_comp + return self._get_comprehension().children[1] + + def _eval_node(self, index=0): + """ + The first part `x + 1` of the list comprehension: + + [x + 1 for x in foo] + """ + return self._get_comprehension().children[index] + + @evaluator_method_cache() + def _get_comp_for_context(self, parent_context, comp_for): + # TODO shouldn't this be part of create_context? + return CompForContext.from_comp_for(parent_context, comp_for) + + def _nested(self, comp_fors, parent_context=None): + comp_for = comp_fors[0] + input_node = comp_for.children[3] + parent_context = parent_context or self._defining_context + input_types = parent_context.eval_node(input_node) + + cn = ContextualizedNode(parent_context, input_node) + iterated = input_types.iterate(cn) + exprlist = comp_for.children[1] + for i, lazy_context in enumerate(iterated): + types = lazy_context.infer() + dct = unpack_tuple_to_dict(parent_context, types, exprlist) + context_ = self._get_comp_for_context( + parent_context, + comp_for, + ) + with predefine_names(context_, comp_for, dct): + try: + for result in self._nested(comp_fors[1:], context_): + yield result + except IndexError: + iterated = context_.eval_node(self._eval_node()) + if self.array_type == 'dict': + yield iterated, context_.eval_node(self._eval_node(2)) + else: + yield iterated + + @evaluator_method_cache(default=[]) + @to_list + def _iterate(self): + comp_fors = tuple(get_comp_fors(self._get_comp_for())) + for result in self._nested(comp_fors): + yield result + + def py__iter__(self): + for set_ in self._iterate(): + yield LazyKnownContexts(set_) + + def __repr__(self): + return "<%s of %s>" % (type(self).__name__, self._atom) + + +class ArrayMixin(object): + def get_filters(self, search_global, until_position=None, origin_scope=None): + # `array.type` is a string with the type, e.g. 'list'. + compiled_obj = compiled.builtin_from_name(self.evaluator, self.array_type) + yield SpecialMethodFilter(self, self.builtin_methods, compiled_obj) + for typ in compiled_obj.execute_evaluated(self): + for filter in typ.get_filters(): + yield filter + + def py__bool__(self): + return None # We don't know the length, because of appends. 
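+
+    # A rough picture of what ``get_filters`` above enables (an illustrative
+    # doctest-style sketch; the exact completion list is an assumption):
+    # completing on a list literal first goes through the special method
+    # filter and then through the compiled ``list`` object:
+    #
+    #     >>> import jedi
+    #     >>> comps = jedi.Script('[1, 2].').completions()
+    #     >>> 'append' in [c.name for c in comps]
+    #     True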
+
+    def py__class__(self):
+        return compiled.builtin_from_name(self.evaluator, self.array_type)
+
+    @safe_property
+    def parent(self):
+        return self.evaluator.BUILTINS
+
+    def dict_values(self):
+        return ContextSet.from_sets(
+            self._defining_context.eval_node(v)
+            for k, v in self._items()
+        )
+
+
+class ListComprehension(ArrayMixin, Comprehension):
+    array_type = 'list'
+
+    def py__getitem__(self, index):
+        if isinstance(index, slice):
+            return ContextSet(self)
+
+        all_types = list(self.py__iter__())
+        return all_types[index].infer()
+
+
+class SetComprehension(ArrayMixin, Comprehension):
+    array_type = 'set'
+
+
+@has_builtin_methods
+class DictComprehension(ArrayMixin, Comprehension):
+    array_type = 'dict'
+
+    def _get_comp_for(self):
+        return self._get_comprehension().children[3]
+
+    def py__iter__(self):
+        for keys, values in self._iterate():
+            yield LazyKnownContexts(keys)
+
+    def py__getitem__(self, index):
+        for keys, values in self._iterate():
+            for k in keys:
+                if isinstance(k, compiled.CompiledObject):
+                    if k.obj == index:
+                        return values
+        return self.dict_values()
+
+    def dict_values(self):
+        return ContextSet.from_sets(values for keys, values in self._iterate())
+
+    @register_builtin_method('values')
+    def _imitate_values(self):
+        lazy_context = LazyKnownContexts(self.dict_values())
+        return ContextSet(FakeSequence(self.evaluator, 'list', [lazy_context]))
+
+    @register_builtin_method('items')
+    def _imitate_items(self):
+        lazy_contexts = [
+            LazyKnownContext(FakeSequence(
+                self.evaluator, 'tuple',
+                (LazyKnownContexts(keys), LazyKnownContexts(values))
+            )) for keys, values in self._iterate()
+        ]
+
+        return ContextSet(FakeSequence(self.evaluator, 'list', lazy_contexts))
+
+
+class GeneratorComprehension(GeneratorMixin, Comprehension):
+    pass
+
+
+class SequenceLiteralContext(ArrayMixin, AbstractIterable):
+    mapping = {'(': 'tuple',
+               '[': 'list',
+               '{': 'set'}
+
+    def __init__(self, evaluator, defining_context, atom):
+        super(SequenceLiteralContext, self).__init__(evaluator)
+        self.atom = atom
+        self._defining_context = defining_context
+
+        if self.atom.type in ('testlist_star_expr', 'testlist'):
+            self.array_type = 'tuple'
+        else:
+            self.array_type = SequenceLiteralContext.mapping[atom.children[0]]
+            """The builtin name of the array (list, set, tuple or dict)."""
+
+    def py__getitem__(self, index):
+        """Here the index is an int/str. Raises IndexError/KeyError."""
+        if self.array_type == 'dict':
+            for key, value in self._items():
+                for k in self._defining_context.eval_node(key):
+                    if isinstance(k, compiled.CompiledObject) \
+                            and index == k.obj:
+                        return self._defining_context.eval_node(value)
+            raise KeyError('No key found in dictionary %s.' % self)
+
+        # Can raise an IndexError
+        if isinstance(index, slice):
+            return ContextSet(self)
+        else:
+            return self._defining_context.eval_node(self._items()[index])
+
+    def py__iter__(self):
+        """
+        While ``py__getitem__`` returns the value(s) for a certain index,
+        this function yields one lazy context per item of the literal.
+        """
+        if self.array_type == 'dict':
+            # Get keys.
+            types = ContextSet()
+            for k, _ in self._items():
+                types |= self._defining_context.eval_node(k)
+            # We don't know which dict index comes first, therefore always
+            # yield all the types.
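+            # Illustration (hypothetical literal): for ``{1: 'a', 'b': 2}``
+            # both key types, ``int`` and ``str``, are yielded at every
+            # position, since key order isn't tracked here.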
+ for _ in types: + yield LazyKnownContexts(types) + else: + for node in self._items(): + yield LazyTreeContext(self._defining_context, node) + + for addition in check_array_additions(self._defining_context, self): + yield addition + + def _values(self): + """Returns a list of a list of node.""" + if self.array_type == 'dict': + return ContextSet.from_sets(v for k, v in self._items()) + else: + return self._items() + + def _items(self): + c = self.atom.children + + if self.atom.type in ('testlist_star_expr', 'testlist'): + return c[::2] + + array_node = c[1] + if array_node in (']', '}', ')'): + return [] # Direct closing bracket, doesn't contain items. + + if array_node.type == 'testlist_comp': + return array_node.children[::2] + elif array_node.type == 'dictorsetmaker': + kv = [] + iterator = iter(array_node.children) + for key in iterator: + op = next(iterator, None) + if op is None or op == ',': + kv.append(key) # A set. + else: + assert op == ':' # A dict. + kv.append((key, next(iterator))) + next(iterator, None) # Possible comma. + return kv + else: + return [array_node] + + def exact_key_items(self): + """ + Returns a generator of tuples like dict.items(), where the key is + resolved (as a string) and the values are still lazy contexts. + """ + for key_node, value in self._items(): + for key in self._defining_context.eval_node(key_node): + if is_string(key): + yield key.obj, LazyTreeContext(self._defining_context, value) + + def __repr__(self): + return "<%s of %s>" % (self.__class__.__name__, self.atom) + + +@has_builtin_methods +class DictLiteralContext(SequenceLiteralContext): + array_type = 'dict' + + def __init__(self, evaluator, defining_context, atom): + super(SequenceLiteralContext, self).__init__(evaluator) + self._defining_context = defining_context + self.atom = atom + + @register_builtin_method('values') + def _imitate_values(self): + lazy_context = LazyKnownContexts(self.dict_values()) + return ContextSet(FakeSequence(self.evaluator, 'list', [lazy_context])) + + @register_builtin_method('items') + def _imitate_items(self): + lazy_contexts = [ + LazyKnownContext(FakeSequence( + self.evaluator, 'tuple', + (LazyTreeContext(self._defining_context, key_node), + LazyTreeContext(self._defining_context, value_node)) + )) for key_node, value_node in self._items() + ] + + return ContextSet(FakeSequence(self.evaluator, 'list', lazy_contexts)) + + +class _FakeArray(SequenceLiteralContext): + def __init__(self, evaluator, container, type): + super(SequenceLiteralContext, self).__init__(evaluator) + self.array_type = type + self.atom = container + # TODO is this class really needed? 
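+
+
+# How these fakes get used (a sketch; ``str_set`` and ``int_set`` stand in
+# for ContextSets and are hypothetical names): the docstring evaluation code
+# later in this patch models a documented ``(str, int)`` return value
+# roughly as
+#
+#     FakeSequence(evaluator, 'tuple',
+#                  [LazyKnownContexts(str_set), LazyKnownContexts(int_set)])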
+ + +class FakeSequence(_FakeArray): + def __init__(self, evaluator, array_type, lazy_context_list): + """ + type should be one of "tuple", "list" + """ + super(FakeSequence, self).__init__(evaluator, None, array_type) + self._lazy_context_list = lazy_context_list + + def py__getitem__(self, index): + return self._lazy_context_list[index].infer() + + def py__iter__(self): + return self._lazy_context_list + + def py__bool__(self): + return bool(len(self._lazy_context_list)) + + def __repr__(self): + return "<%s of %s>" % (type(self).__name__, self._lazy_context_list) + + +class FakeDict(_FakeArray): + def __init__(self, evaluator, dct): + super(FakeDict, self).__init__(evaluator, dct, 'dict') + self._dct = dct + + def py__iter__(self): + for key in self._dct: + yield LazyKnownContext(compiled.create(self.evaluator, key)) + + def py__getitem__(self, index): + return self._dct[index].infer() + + def dict_values(self): + return ContextSet.from_sets(lazy_context.infer() for lazy_context in self._dct.values()) + + def exact_key_items(self): + return self._dct.items() + + +class MergedArray(_FakeArray): + def __init__(self, evaluator, arrays): + super(MergedArray, self).__init__(evaluator, arrays, arrays[-1].array_type) + self._arrays = arrays + + def py__iter__(self): + for array in self._arrays: + for lazy_context in array.py__iter__(): + yield lazy_context + + def py__getitem__(self, index): + return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__()) + + def _items(self): + for array in self._arrays: + for a in array._items(): + yield a + + def __len__(self): + return sum(len(a) for a in self._arrays) + + +def unpack_tuple_to_dict(context, types, exprlist): + """ + Unpacking tuple assignments in for statements and expr_stmts. + """ + if exprlist.type == 'name': + return {exprlist.value: types} + elif exprlist.type == 'atom' and exprlist.children[0] in '([': + return unpack_tuple_to_dict(context, types, exprlist.children[1]) + elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist', + 'testlist_star_expr'): + dct = {} + parts = iter(exprlist.children[::2]) + n = 0 + for lazy_context in types.iterate(exprlist): + n += 1 + try: + part = next(parts) + except StopIteration: + # TODO this context is probably not right. + analysis.add(context, 'value-error-too-many-values', part, + message="ValueError: too many values to unpack (expected %s)" % n) + else: + dct.update(unpack_tuple_to_dict(context, lazy_context.infer(), part)) + has_parts = next(parts, None) + if types and has_parts is not None: + # TODO this context is probably not right. + analysis.add(context, 'value-error-too-few-values', has_parts, + message="ValueError: need more than %s values to unpack" % n) + return dct + elif exprlist.type == 'power' or exprlist.type == 'atom_expr': + # Something like ``arr[x], var = ...``. + # This is something that is not yet supported, would also be difficult + # to write into a dict. + return {} + elif exprlist.type == 'star_expr': # `a, *b, c = x` type unpackings + # Currently we're not supporting them. 
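+        # Worked example for the testlist branch above (illustrative): for
+        # ``a, b = 1, 'x'`` the recursion produces roughly
+        # ``{'a': ContextSet(int), 'b': ContextSet(str)}``; the reprs are
+        # approximations.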
+ return {} + raise NotImplementedError + + +def check_array_additions(context, sequence): + """ Just a mapper function for the internal _check_array_additions """ + if sequence.array_type not in ('list', 'set'): + # TODO also check for dict updates + return NO_CONTEXTS + + return _check_array_additions(context, sequence) + + +@evaluator_method_cache(default=NO_CONTEXTS) +@debug.increase_indent +def _check_array_additions(context, sequence): + """ + Checks if a `Array` has "add" (append, insert, extend) statements: + + >>> a = [""] + >>> a.append(1) + """ + from jedi.evaluate import arguments + + debug.dbg('Dynamic array search for %s' % sequence, color='MAGENTA') + module_context = context.get_root_context() + if not settings.dynamic_array_additions or isinstance(module_context, compiled.CompiledObject): + debug.dbg('Dynamic array search aborted.', color='MAGENTA') + return ContextSet() + + def find_additions(context, arglist, add_name): + params = list(arguments.TreeArguments(context.evaluator, context, arglist).unpack()) + result = set() + if add_name in ['insert']: + params = params[1:] + if add_name in ['append', 'add', 'insert']: + for key, whatever in params: + result.add(whatever) + elif add_name in ['extend', 'update']: + for key, lazy_context in params: + result |= set(lazy_context.infer().iterate()) + return result + + temp_param_add, settings.dynamic_params_for_other_modules = \ + settings.dynamic_params_for_other_modules, False + + is_list = sequence.name.string_name == 'list' + search_names = (['append', 'extend', 'insert'] if is_list else ['add', 'update']) + + added_types = set() + for add_name in search_names: + try: + possible_names = module_context.tree_node.get_used_names()[add_name] + except KeyError: + continue + else: + for name in possible_names: + context_node = context.tree_node + if not (context_node.start_pos < name.start_pos < context_node.end_pos): + continue + trailer = name.parent + power = trailer.parent + trailer_pos = power.children.index(trailer) + try: + execution_trailer = power.children[trailer_pos + 1] + except IndexError: + continue + else: + if execution_trailer.type != 'trailer' \ + or execution_trailer.children[0] != '(' \ + or execution_trailer.children[1] == ')': + continue + + random_context = context.create_context(name) + + with recursion.execution_allowed(context.evaluator, power) as allowed: + if allowed: + found = evaluate_call_of_leaf( + random_context, + name, + cut_own_trailer=True + ) + if sequence in found: + # The arrays match. Now add the results + added_types |= find_additions( + random_context, + execution_trailer.children[1], + add_name + ) + + # reset settings + settings.dynamic_params_for_other_modules = temp_param_add + debug.dbg('Dynamic array result %s' % added_types, color='MAGENTA') + return added_types + + +def get_dynamic_array_instance(instance): + """Used for set() and list() instances.""" + if not settings.dynamic_array_additions: + return instance.var_args + + ai = _ArrayInstance(instance) + from jedi.evaluate import arguments + return arguments.ValuesArguments([ContextSet(ai)]) + + +class _ArrayInstance(object): + """ + Used for the usage of set() and list(). + This is definitely a hack, but a good one :-) + It makes it possible to use set/list conversions. 
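+
+    For example (a small illustration), element types survive a conversion
+    like ``lst = list(x for x in [1])``, so ``lst[0]`` can still be inferred
+    as ``int``.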
+
+    In contrast to Array, ListComprehension and all other iterable types,
+    this is something that is only used inside `evaluate/compiled/fake/builtins.py`
+    and therefore doesn't need filters, `py__bool__` and so on, because
+    we don't use these operations in `builtins.py`.
+    """
+    def __init__(self, instance):
+        self.instance = instance
+        self.var_args = instance.var_args
+
+    def py__iter__(self):
+        var_args = self.var_args
+        try:
+            _, lazy_context = next(var_args.unpack())
+        except StopIteration:
+            pass
+        else:
+            for lazy in lazy_context.infer().iterate():
+                yield lazy
+
+        from jedi.evaluate import arguments
+        if isinstance(var_args, arguments.TreeArguments):
+            additions = _check_array_additions(var_args.context, self.instance)
+            for addition in additions:
+                yield addition
+
+    def iterate(self, contextualized_node=None):
+        return self.py__iter__()
+
+
+class Slice(Context):
+    def __init__(self, context, start, stop, step):
+        super(Slice, self).__init__(
+            context.evaluator,
+            parent_context=context.evaluator.BUILTINS
+        )
+        self._context = context
+        # All of them are either a Precedence or None.
+        self._start = start
+        self._stop = stop
+        self._step = step
+
+    @property
+    def obj(self):
+        """
+        Imitate CompiledObject.obj behavior and return a ``builtin.slice()``
+        object.
+        """
+        def get(element):
+            if element is None:
+                return None
+
+            result = self._context.eval_node(element)
+            if len(result) != 1:
+                # For simplicity, we want slices to be clearly defined with
+                # just one type. Otherwise we will return an empty slice
+                # object.
+                raise IndexError
+            try:
+                return list(result)[0].obj
+            except AttributeError:
+                return None
+
+        try:
+            return slice(get(self._start), get(self._stop), get(self._step))
+        except IndexError:
+            return slice(None, None, None)
diff --git a/pythonFiles/release/jedi/evaluate/context/klass.py b/pythonFiles/release/jedi/evaluate/context/klass.py
new file mode 100644
index 000000000000..b7d61d3e16bf
--- /dev/null
+++ b/pythonFiles/release/jedi/evaluate/context/klass.py
@@ -0,0 +1,197 @@
+"""
+As described in the :mod:`parso.python.tree` module, there's a need for an
+AST-like module to represent the states of parsed modules.
+
+But now there are also structures in Python that need a little bit more than
+that. An ``Instance`` for example is only a ``Class`` before it is
+instantiated. This class represents these cases.
+
+So, why is there also a ``Class`` class here? Well, there are decorators and
+they change classes in Python 3.
+
+Representation modules also define "magic methods". Those methods look like
+``py__foo__`` and are typically mappable to the Python equivalents ``__call__``
+and others. Here's a list:
+
+====================================== ========================================
+**Method**                             **Description**
+-------------------------------------- ----------------------------------------
+py__call__(params: Array)              On callable objects, returns types.
+py__bool__()                           Returns True/False/None; None means that
+                                       there's no certainty.
+py__bases__()                          Returns a list of base classes.
+py__mro__()                            Returns a list of classes (the mro).
+py__iter__()                           Returns a generator of a set of types.
+py__class__()                          Returns the class of an instance.
+py__getitem__(index: int/str)          Returns a set of types of the index.
+                                       Can raise an IndexError/KeyError.
+py__file__()                           Only on modules. Returns None if it
+                                       does not exist.
+py__package__()                        Only on modules. For the import system.
+py__path__()                           Only on modules. For the import system.
+py__get__(call_object)                 Only on instances. Simulates
+                                       descriptors.
+py__doc__(include_call_signature:      Returns the docstring for a context.
+          bool)
+====================================== ========================================
+
+"""
+from jedi._compatibility import use_metaclass
+from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass
+from jedi.evaluate import compiled
+from jedi.evaluate.lazy_context import LazyKnownContext
+from jedi.evaluate.filters import ParserTreeFilter, TreeNameDefinition, \
+    ContextName, AnonymousInstanceParamName
+from jedi.evaluate.base_context import ContextSet, iterator_to_context_set, \
+    TreeContext
+
+
+def apply_py__get__(context, base_context):
+    try:
+        method = context.py__get__
+    except AttributeError:
+        yield context
+    else:
+        for descriptor_context in method(base_context):
+            yield descriptor_context
+
+
+class ClassName(TreeNameDefinition):
+    def __init__(self, parent_context, tree_name, name_context):
+        super(ClassName, self).__init__(parent_context, tree_name)
+        self._name_context = name_context
+
+    @iterator_to_context_set
+    def infer(self):
+        # TODO this _name_to_types might get refactored and be a part of the
+        # parent class. Once it is, we can probably just overwrite the method
+        # to achieve this.
+        from jedi.evaluate.syntax_tree import tree_name_to_contexts
+        inferred = tree_name_to_contexts(
+            self.parent_context.evaluator, self._name_context, self.tree_name)
+
+        for result_context in inferred:
+            for c in apply_py__get__(result_context, self.parent_context):
+                yield c
+
+
+class ClassFilter(ParserTreeFilter):
+    name_class = ClassName
+
+    def _convert_names(self, names):
+        return [self.name_class(self.context, name, self._node_context)
+                for name in names]
+
+
+class ClassContext(use_metaclass(CachedMetaClass, TreeContext)):
+    """
+    This class is not only important to extend `tree.Class`, it is also
+    important for descriptors (whether the descriptor methods are evaluated
+    or not).
+    """
+    api_type = 'class'
+
+    def __init__(self, evaluator, parent_context, classdef):
+        super(ClassContext, self).__init__(evaluator, parent_context=parent_context)
+        self.tree_node = classdef
+
+    @evaluator_method_cache(default=())
+    def py__mro__(self):
+        def add(cls):
+            if cls not in mro:
+                mro.append(cls)
+
+        mro = [self]
+        # TODO Do a proper mro resolution. Currently we are just listing
+        # classes. However, it's a complicated algorithm.
+        for lazy_cls in self.py__bases__():
+            # TODO there's multiple different mro paths possible if this yields
+            # multiple possibilities. Could be changed to be more correct.
+            for cls in lazy_cls.infer():
+                # TODO detect for TypeError: duplicate base class str,
+                # e.g. `class X(str, str): pass`
+                try:
+                    mro_method = cls.py__mro__
+                except AttributeError:
+                    # TODO add a TypeError like:
+                    """
+                    >>> class Y(lambda: test): pass
+                    Traceback (most recent call last):
+                      File "<stdin>", line 1, in <module>
+                    TypeError: function() argument 1 must be code, not str
+                    >>> class Y(1): pass
+                    Traceback (most recent call last):
+                      File "<stdin>", line 1, in <module>
+                    TypeError: int() takes at most 2 arguments (3 given)
+                    """
+                    pass
+                else:
+                    add(cls)
+                    for cls_new in mro_method():
+                        add(cls_new)
+        return tuple(mro)
+
+    @evaluator_method_cache(default=())
+    def py__bases__(self):
+        arglist = self.tree_node.get_super_arglist()
+        if arglist:
+            from jedi.evaluate import arguments
+            args = arguments.TreeArguments(self.evaluator, self, arglist)
+            return [value for key, value in args.unpack() if key is None]
+        else:
+            return [LazyKnownContext(compiled.create(self.evaluator, object))]
+
+    def py__call__(self, params):
+        from jedi.evaluate.context import TreeInstance
+        return ContextSet(TreeInstance(self.evaluator, self.parent_context, self, params))
+
+    def py__class__(self):
+        return compiled.create(self.evaluator, type)
+
+    def get_params(self):
+        from jedi.evaluate.context import AnonymousInstance
+        anon = AnonymousInstance(self.evaluator, self.parent_context, self)
+        return [AnonymousInstanceParamName(anon, param.name) for param in self.funcdef.get_params()]
+
+    def get_filters(self, search_global, until_position=None, origin_scope=None, is_instance=False):
+        if search_global:
+            yield ParserTreeFilter(
+                self.evaluator,
+                context=self,
+                until_position=until_position,
+                origin_scope=origin_scope
+            )
+        else:
+            for cls in self.py__mro__():
+                if isinstance(cls, compiled.CompiledObject):
+                    for filter in cls.get_filters(is_instance=is_instance):
+                        yield filter
+                else:
+                    yield ClassFilter(
+                        self.evaluator, self, node_context=cls,
+                        origin_scope=origin_scope)
+
+    def is_class(self):
+        return True
+
+    def get_function_slot_names(self, name):
+        for filter in self.get_filters(search_global=False):
+            names = filter.get(name)
+            if names:
+                return names
+        return []
+
+    def get_param_names(self):
+        for name in self.get_function_slot_names('__init__'):
+            for context_ in name.infer():
+                try:
+                    method = context_.get_param_names
+                except AttributeError:
+                    pass
+                else:
+                    return list(method())[1:]
+        return []
+
+    @property
+    def name(self):
+        return ContextName(self, self.tree_node.name)
diff --git a/pythonFiles/release/jedi/evaluate/context/module.py b/pythonFiles/release/jedi/evaluate/context/module.py
new file mode 100644
index 000000000000..5ba92cdb1c3e
--- /dev/null
+++ b/pythonFiles/release/jedi/evaluate/context/module.py
@@ -0,0 +1,213 @@
+import pkgutil
+import imp
+import re
+import os
+
+from parso import python_bytes_to_unicode
+
+from jedi._compatibility import use_metaclass
+from jedi.evaluate.cache import CachedMetaClass, evaluator_method_cache
+from jedi.evaluate.filters import GlobalNameFilter, ContextNameMixin, \
+    AbstractNameDefinition, ParserTreeFilter, DictFilter
+from jedi.evaluate import compiled
+from jedi.evaluate.base_context import TreeContext
+from jedi.evaluate.imports import SubModuleName, infer_import
+
+
+class _ModuleAttributeName(AbstractNameDefinition):
+    """
+    For module attributes like __file__, __str__ and so on.
+ """ + api_type = 'instance' + + def __init__(self, parent_module, string_name): + self.parent_context = parent_module + self.string_name = string_name + + def infer(self): + return compiled.create(self.parent_context.evaluator, str).execute_evaluated() + + +class ModuleName(ContextNameMixin, AbstractNameDefinition): + start_pos = 1, 0 + + def __init__(self, context, name): + self._context = context + self._name = name + + @property + def string_name(self): + return self._name + + +class ModuleContext(use_metaclass(CachedMetaClass, TreeContext)): + api_type = 'module' + parent_context = None + + def __init__(self, evaluator, module_node, path): + super(ModuleContext, self).__init__(evaluator, parent_context=None) + self.tree_node = module_node + self._path = path + + def get_filters(self, search_global, until_position=None, origin_scope=None): + yield ParserTreeFilter( + self.evaluator, + context=self, + until_position=until_position, + origin_scope=origin_scope + ) + yield GlobalNameFilter(self, self.tree_node) + yield DictFilter(self._sub_modules_dict()) + yield DictFilter(self._module_attributes_dict()) + for star_module in self.star_imports(): + yield next(star_module.get_filters(search_global)) + + # I'm not sure if the star import cache is really that effective anymore + # with all the other really fast import caches. Recheck. Also we would need + # to push the star imports into Evaluator.modules, if we reenable this. + @evaluator_method_cache([]) + def star_imports(self): + modules = [] + for i in self.tree_node.iter_imports(): + if i.is_star_import(): + name = i.get_paths()[-1][-1] + new = infer_import(self, name) + for module in new: + if isinstance(module, ModuleContext): + modules += module.star_imports() + modules += new + return modules + + @evaluator_method_cache() + def _module_attributes_dict(self): + names = ['__file__', '__package__', '__doc__', '__name__'] + # All the additional module attributes are strings. + return dict((n, _ModuleAttributeName(self, n)) for n in names) + + @property + def _string_name(self): + """ This is used for the goto functions. """ + if self._path is None: + return '' # no path -> empty name + else: + sep = (re.escape(os.path.sep),) * 2 + r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self._path) + # Remove PEP 3149 names + return re.sub('\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1)) + + @property + @evaluator_method_cache() + def name(self): + return ModuleName(self, self._string_name) + + def _get_init_directory(self): + """ + :return: The path to the directory of a package. None in case it's not + a package. + """ + for suffix, _, _ in imp.get_suffixes(): + ending = '__init__' + suffix + py__file__ = self.py__file__() + if py__file__ is not None and py__file__.endswith(ending): + # Remove the ending, including the separator. + return self.py__file__()[:-len(ending) - 1] + return None + + def py__name__(self): + for name, module in self.evaluator.modules.items(): + if module == self and name != '': + return name + + return '__main__' + + def py__file__(self): + """ + In contrast to Python's __file__ can be None. 
+ """ + if self._path is None: + return None + + return os.path.abspath(self._path) + + def py__package__(self): + if self._get_init_directory() is None: + return re.sub(r'\.?[^\.]+$', '', self.py__name__()) + else: + return self.py__name__() + + def _py__path__(self): + search_path = self.evaluator.project.sys_path + init_path = self.py__file__() + if os.path.basename(init_path) == '__init__.py': + with open(init_path, 'rb') as f: + content = python_bytes_to_unicode(f.read(), errors='replace') + # these are strings that need to be used for namespace packages, + # the first one is ``pkgutil``, the second ``pkg_resources``. + options = ('declare_namespace(__name__)', 'extend_path(__path__') + if options[0] in content or options[1] in content: + # It is a namespace, now try to find the rest of the + # modules on sys_path or whatever the search_path is. + paths = set() + for s in search_path: + other = os.path.join(s, self.name.string_name) + if os.path.isdir(other): + paths.add(other) + if paths: + return list(paths) + # TODO I'm not sure if this is how nested namespace + # packages work. The tests are not really good enough to + # show that. + # Default to this. + return [self._get_init_directory()] + + @property + def py__path__(self): + """ + Not seen here, since it's a property. The callback actually uses a + variable, so use it like:: + + foo.py__path__(sys_path) + + In case of a package, this returns Python's __path__ attribute, which + is a list of paths (strings). + Raises an AttributeError if the module is not a package. + """ + path = self._get_init_directory() + + if path is None: + raise AttributeError('Only packages have __path__ attributes.') + else: + return self._py__path__ + + @evaluator_method_cache() + def _sub_modules_dict(self): + """ + Lists modules in the directory of this module (if this module is a + package). + """ + path = self._path + names = {} + if path is not None and path.endswith(os.path.sep + '__init__.py'): + mods = pkgutil.iter_modules([os.path.dirname(path)]) + for module_loader, name, is_pkg in mods: + # It's obviously a relative import to the current module. + names[name] = SubModuleName(self, name) + + # TODO add something like this in the future, its cleaner than the + # import hacks. + # ``os.path`` is a hardcoded exception, because it's a + # ``sys.modules`` modification. + # if str(self.name) == 'os': + # names.append(Name('path', parent_context=self)) + + return names + + def py__class__(self): + return compiled.get_special_object(self.evaluator, 'MODULE_CLASS') + + def __repr__(self): + return "<%s: %s@%s-%s>" % ( + self.__class__.__name__, self._string_name, + self.tree_node.start_pos[0], self.tree_node.end_pos[0]) + + diff --git a/pythonFiles/release/jedi/evaluate/context/namespace.py b/pythonFiles/release/jedi/evaluate/context/namespace.py new file mode 100644 index 000000000000..e40c23a5e8ca --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/context/namespace.py @@ -0,0 +1,74 @@ +import os +from itertools import chain + +from jedi._compatibility import use_metaclass +from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass +from jedi.evaluate import imports +from jedi.evaluate.filters import DictFilter, AbstractNameDefinition +from jedi.evaluate.base_context import NO_CONTEXTS, TreeContext + + +class ImplicitNSName(AbstractNameDefinition): + """ + Accessing names for implicit namespace packages should infer to nothing. 
+ This object will prevent Jedi from raising exceptions + """ + def __init__(self, implicit_ns_context, string_name): + self.implicit_ns_context = implicit_ns_context + self.string_name = string_name + + def infer(self): + return NO_CONTEXTS + + def get_root_context(self): + return self.implicit_ns_context + + +class ImplicitNamespaceContext(use_metaclass(CachedMetaClass, TreeContext)): + """ + Provides support for implicit namespace packages + """ + api_type = 'module' + parent_context = None + + def __init__(self, evaluator, fullname): + super(ImplicitNamespaceContext, self).__init__(evaluator, parent_context=None) + self.evaluator = evaluator + self.fullname = fullname + + def get_filters(self, search_global, until_position=None, origin_scope=None): + yield DictFilter(self._sub_modules_dict()) + + @property + @evaluator_method_cache() + def name(self): + string_name = self.py__package__().rpartition('.')[-1] + return ImplicitNSName(self, string_name) + + def py__file__(self): + return None + + def py__package__(self): + """Return the fullname + """ + return self.fullname + + @property + def py__path__(self): + return lambda: [self.paths] + + @evaluator_method_cache() + def _sub_modules_dict(self): + names = {} + + paths = self.paths + file_names = chain.from_iterable(os.listdir(path) for path in paths) + mods = [ + file_name.rpartition('.')[0] if '.' in file_name else file_name + for file_name in file_names + if file_name != '__pycache__' + ] + + for name in mods: + names[name] = imports.SubModuleName(self, name) + return names diff --git a/pythonFiles/release/jedi/evaluate/docstrings.py b/pythonFiles/release/jedi/evaluate/docstrings.py index 84137de59742..f9c1141226e9 100755 --- a/pythonFiles/release/jedi/evaluate/docstrings.py +++ b/pythonFiles/release/jedi/evaluate/docstrings.py @@ -1,11 +1,12 @@ """ Docstrings are another source of information for functions and classes. :mod:`jedi.evaluate.dynamic` tries to find all executions of functions, while -the docstring parsing is much easier. There are two different types of +the docstring parsing is much easier. There are three different types of docstrings that |jedi| understands: - `Sphinx `_ - `Epydoc `_ +- `Numpydoc `_ For example, the sphinx annotation ``:type foo: str`` clearly states that the type of ``foo`` is ``str``. @@ -14,20 +15,22 @@ annotations. 
""" -from ast import literal_eval import re -from itertools import chain from textwrap import dedent -from jedi.evaluate.cache import memoize_default -from jedi.parser import Parser, load_grammar -from jedi.common import indent_block -from jedi.evaluate.iterable import Array, FakeSequence, AlreadyEvaluated +from parso import parse + +from jedi._compatibility import u +from jedi.evaluate.utils import indent_block +from jedi.evaluate.cache import evaluator_method_cache +from jedi.evaluate.base_context import iterator_to_context_set, ContextSet, \ + NO_CONTEXTS +from jedi.evaluate.lazy_context import LazyKnownContexts DOCSTRING_PARAM_PATTERNS = [ r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx - r'\s*:param\s+(\w+)\s+%s:[^\n]+', # Sphinx param with type + r'\s*:param\s+(\w+)\s+%s:[^\n]*', # Sphinx param with type r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc ] @@ -44,23 +47,78 @@ except ImportError: def _search_param_in_numpydocstr(docstr, param_str): return [] + + def _search_return_in_numpydocstr(docstr): + return [] else: def _search_param_in_numpydocstr(docstr, param_str): """Search `docstr` (in numpydoc format) for type(-s) of `param_str`.""" - params = NumpyDocString(docstr)._parsed_data['Parameters'] + try: + # This is a non-public API. If it ever changes we should be + # prepared and return gracefully. + params = NumpyDocString(docstr)._parsed_data['Parameters'] + except (KeyError, AttributeError): + return [] for p_name, p_type, p_descr in params: if p_name == param_str: m = re.match('([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type) if m: p_type = m.group(1) - - if p_type.startswith('{'): - types = set(type(x).__name__ for x in literal_eval(p_type)) - return list(types) - else: - return [p_type] + return list(_expand_typestr(p_type)) return [] + def _search_return_in_numpydocstr(docstr): + """ + Search `docstr` (in numpydoc format) for type(-s) of function returns. + """ + doc = NumpyDocString(docstr) + try: + # This is a non-public API. If it ever changes we should be + # prepared and return gracefully. + returns = doc._parsed_data['Returns'] + returns += doc._parsed_data['Yields'] + except (KeyError, AttributeError): + raise StopIteration + for r_name, r_type, r_descr in returns: + #Return names are optional and if so the type is in the name + if not r_type: + r_type = r_name + for type_ in _expand_typestr(r_type): + yield type_ + + +def _expand_typestr(type_str): + """ + Attempts to interpret the possible types in `type_str` + """ + # Check if alternative types are specified with 'or' + if re.search('\\bor\\b', type_str): + for t in type_str.split('or'): + yield t.split('of')[0].strip() + # Check if like "list of `type`" and set type to list + elif re.search('\\bof\\b', type_str): + yield type_str.split('of')[0] + # Check if type has is a set of valid literal values eg: {'C', 'F', 'A'} + elif type_str.startswith('{'): + node = parse(type_str, version='3.6').children[0] + if node.type == 'atom': + for leaf in node.children[1].children: + if leaf.type == 'number': + if '.' in leaf.value: + yield 'float' + else: + yield 'int' + elif leaf.type == 'string': + if 'b' in leaf.string_prefix.lower(): + yield 'bytes' + else: + yield 'str' + # Ignore everything else. + + # Otherwise just work with what we have. 
+ else: + yield type_str + def _search_param_in_docstr(docstr, param_str): """ @@ -113,12 +171,16 @@ def _strip_rst_role(type_str): return type_str -def _evaluate_for_statement_string(evaluator, string, module): - code = dedent(""" +def _evaluate_for_statement_string(module_context, string): + code = dedent(u(""" def pseudo_docstring_stuff(): - # Create a pseudo function for docstring statements. - %s - """) + ''' + Create a pseudo function for docstring statements. + Need this docstring so that if the below part is not valid Python this + is still a function. + ''' + {0} + """)) if string is None: return [] @@ -130,31 +192,41 @@ def pseudo_docstring_stuff(): # Take the default grammar here, if we load the Python 2.7 grammar here, it # will be impossible to use `...` (Ellipsis) as a token. Docstring types # don't need to conform with the current grammar. - p = Parser(load_grammar(), code % indent_block(string)) + grammar = module_context.evaluator.latest_grammar + module = grammar.parse(code.format(indent_block(string))) try: - pseudo_cls = p.module.subscopes[0] - # First pick suite, then simple_stmt (-2 for DEDENT) and then the node, + funcdef = next(module.iter_funcdefs()) + # First pick suite, then simple_stmt and then the node, # which is also not the last item, because there's a newline. - stmt = pseudo_cls.children[-1].children[-2].children[-2] + stmt = funcdef.children[-1].children[-1].children[-2] except (AttributeError, IndexError): return [] + from jedi.evaluate.context import FunctionContext + function_context = FunctionContext( + module_context.evaluator, + module_context, + funcdef + ) + func_execution_context = function_context.get_function_execution() # Use the module of the param. # TODO this module is not the module of the param in case of a function # call. In that case it's the module of the function call. # stuffed with content from a function call. - pseudo_cls.parent = module - return list(_execute_types_in_stmt(evaluator, stmt)) + return list(_execute_types_in_stmt(func_execution_context, stmt)) -def _execute_types_in_stmt(evaluator, stmt): +def _execute_types_in_stmt(module_context, stmt): """ Executing all types or general elements that we find in a statement. This doesn't include tuple, list and dict literals, because the stuff they contain is executed. (Used as type information). """ - definitions = evaluator.eval_element(stmt) - return chain.from_iterable(_execute_array_values(evaluator, d) for d in definitions) + definitions = module_context.eval_node(stmt) + return ContextSet.from_sets( + _execute_array_values(module_context.evaluator, d) + for d in definitions + ) def _execute_array_values(evaluator, array): @@ -162,34 +234,56 @@ def _execute_array_values(evaluator, array): Tuples indicate that there's not just one return value, but the listed ones. `(str, int)` means that it returns a tuple with both types. 
""" - if isinstance(array, Array): + from jedi.evaluate.context.iterable import SequenceLiteralContext, FakeSequence + if isinstance(array, SequenceLiteralContext): values = [] - for typ in array.values(): - objects = _execute_array_values(evaluator, typ) - values.append(AlreadyEvaluated(objects)) - return [FakeSequence(evaluator, values, array.type)] + for lazy_context in array.py__iter__(): + objects = ContextSet.from_sets( + _execute_array_values(evaluator, typ) + for typ in lazy_context.infer() + ) + values.append(LazyKnownContexts(objects)) + return set([FakeSequence(evaluator, array.array_type, values)]) else: - return evaluator.execute(array) + return array.execute_evaluated() + +@evaluator_method_cache() +def infer_param(execution_context, param): + from jedi.evaluate.context.instance import AnonymousInstanceFunctionExecution -@memoize_default(None, evaluator_is_first_arg=True) -def follow_param(evaluator, param): - func = param.parent_function + def eval_docstring(docstring): + return ContextSet.from_iterable( + p + for param_str in _search_param_in_docstr(docstring, param.name.value) + for p in _evaluate_for_statement_string(module_context, param_str) + ) + module_context = execution_context.get_root_context() + func = param.get_parent_function() + if func.type == 'lambdef': + return NO_CONTEXTS - return [p - for param_str in _search_param_in_docstr(func.raw_doc, - str(param.name)) - for p in _evaluate_for_statement_string(evaluator, param_str, - param.get_parent_until())] + types = eval_docstring(execution_context.py__doc__()) + if isinstance(execution_context, AnonymousInstanceFunctionExecution) and \ + execution_context.function_context.name.string_name == '__init__': + class_context = execution_context.instance.class_context + types |= eval_docstring(class_context.py__doc__()) + return types -@memoize_default(None, evaluator_is_first_arg=True) -def find_return_types(evaluator, func): + +@evaluator_method_cache() +@iterator_to_context_set +def infer_return_types(function_context): def search_return_in_docstr(code): for p in DOCSTRING_RETURN_PATTERNS: match = p.search(code) if match: - return _strip_rst_role(match.group(1)) - - type_str = search_return_in_docstr(func.raw_doc) - return _evaluate_for_statement_string(evaluator, type_str, func.get_parent_until()) + yield _strip_rst_role(match.group(1)) + # Check for numpy style return hint + for type_ in _search_return_in_numpydocstr(code): + yield type_ + + for type_str in search_return_in_docstr(function_context.py__doc__()): + for type_eval in _evaluate_for_statement_string(function_context.get_root_context(), type_str): + yield type_eval diff --git a/pythonFiles/release/jedi/evaluate/dynamic.py b/pythonFiles/release/jedi/evaluate/dynamic.py index 04ed909a1949..7d05000dc9d5 100755 --- a/pythonFiles/release/jedi/evaluate/dynamic.py +++ b/pythonFiles/release/jedi/evaluate/dynamic.py @@ -14,31 +14,40 @@ def foo(bar): - |Jedi| sees a param - search for function calls named ``foo`` -- execute these calls and check the input. This work with a ``ParamListener``. +- execute these calls and check the input. 
""" -from itertools import chain -from jedi._compatibility import unicode -from jedi.parser import tree +from parso.python import tree from jedi import settings from jedi import debug -from jedi.evaluate.cache import memoize_default +from jedi.evaluate.cache import evaluator_function_cache from jedi.evaluate import imports +from jedi.evaluate.arguments import TreeArguments +from jedi.evaluate.param import create_default_params +from jedi.evaluate.helpers import is_stdlib_path +from jedi.evaluate.utils import to_list +from jedi.parser_utils import get_parent_scope +from jedi.evaluate.context import ModuleContext, instance +from jedi.evaluate.base_context import ContextSet -class ParamListener(object): + +MAX_PARAM_SEARCHES = 20 + + +class MergedExecutedParams(object): """ - This listener is used to get the params for a function. + Simulates being a parameter while actually just being multiple params. """ - def __init__(self): - self.param_possibilities = [] + def __init__(self, executed_params): + self._executed_params = executed_params - def execute(self, params): - self.param_possibilities += params + def infer(self): + return ContextSet.from_sets(p.infer() for p in self._executed_params) @debug.increase_indent -def search_params(evaluator, param): +def search_params(evaluator, execution_context, funcdef): """ A dynamic search for param values. If you try to complete a type: @@ -52,95 +61,143 @@ def search_params(evaluator, param): is. """ if not settings.dynamic_params: - return [] + return create_default_params(execution_context, funcdef) - func = param.get_parent_until(tree.Function) - debug.dbg('Dynamic param search for %s in %s.', param, str(func.name)) - # Compare the param names. - names = [n for n in search_function_call(evaluator, func) - if n.value == param.name.value] - # Evaluate the ExecutedParams to types. - result = list(chain.from_iterable(n.parent.eval(evaluator) for n in names)) - debug.dbg('Dynamic param result %s', result) - return result + evaluator.dynamic_params_depth += 1 + try: + path = execution_context.get_root_context().py__file__() + if path is not None and is_stdlib_path(path): + # We don't want to search for usages in the stdlib. Usually people + # don't work with it (except if you are a core maintainer, sorry). + # This makes everything slower. Just disable it and run the tests, + # you will see the slowdown, especially in 3.6. + return create_default_params(execution_context, funcdef) + + debug.dbg('Dynamic param search in %s.', funcdef.name.value, color='MAGENTA') + + module_context = execution_context.get_root_context() + function_executions = _search_function_executions( + evaluator, + module_context, + funcdef + ) + if function_executions: + zipped_params = zip(*list( + function_execution.get_params() + for function_execution in function_executions + )) + params = [MergedExecutedParams(executed_params) for executed_params in zipped_params] + # Evaluate the ExecutedParams to types. + else: + return create_default_params(execution_context, funcdef) + debug.dbg('Dynamic param result finished', color='MAGENTA') + return params + finally: + evaluator.dynamic_params_depth -= 1 -@memoize_default([], evaluator_is_first_arg=True) -def search_function_call(evaluator, func): +@evaluator_function_cache(default=None) +@to_list +def _search_function_executions(evaluator, module_context, funcdef): """ Returns a list of param names. 
""" - from jedi.evaluate import representation as er - - def get_params_for_module(module): - """ - Returns the values of a param, or an empty array. - """ - @memoize_default([], evaluator_is_first_arg=True) - def get_posibilities(evaluator, module, func_name): - try: - names = module.used_names[func_name] - except KeyError: - return [] - - for name in names: - parent = name.parent - if tree.is_node(parent, 'trailer'): - parent = parent.parent - - trailer = None - if tree.is_node(parent, 'power'): - for t in parent.children[1:]: - if t == '**': - break - if t.start_pos > name.start_pos and t.children[0] == '(': - trailer = t - break - if trailer is not None: - types = evaluator.goto_definition(name) - - # We have to remove decorators, because they are not the - # "original" functions, this way we can easily compare. - # At the same time we also have to remove InstanceElements. - undec = [] - for escope in types: - if escope.isinstance(er.Function, er.Instance) \ - and escope.decorates is not None: - undec.append(escope.decorates) - elif isinstance(escope, er.InstanceElement): - undec.append(escope.var) - else: - undec.append(escope) - - if evaluator.wrap(compare) in undec: - # Only if we have the correct function we execute - # it, otherwise just ignore it. - evaluator.eval_trailer(types, trailer) - return listener.param_possibilities - return get_posibilities(evaluator, module, func_name) - - current_module = func.get_parent_until() - func_name = unicode(func.name) - compare = func - if func_name == '__init__': - cls = func.get_parent_scope() + func_string_name = funcdef.name.value + compare_node = funcdef + if func_string_name == '__init__': + cls = get_parent_scope(funcdef) if isinstance(cls, tree.Class): - func_name = unicode(cls.name) - compare = cls - - # add the listener - listener = ParamListener() - func.listeners.add(listener) - + func_string_name = cls.name.value + compare_node = cls + + found_executions = False + i = 0 + for for_mod_context in imports.get_modules_containing_name( + evaluator, [module_context], func_string_name): + if not isinstance(module_context, ModuleContext): + return + for name, trailer in _get_possible_nodes(for_mod_context, func_string_name): + i += 1 + + # This is a simple way to stop Jedi's dynamic param recursion + # from going wild: The deeper Jedi's in the recursion, the less + # code should be evaluated. + if i * evaluator.dynamic_params_depth > MAX_PARAM_SEARCHES: + return + + random_context = evaluator.create_context(for_mod_context, name) + for function_execution in _check_name_for_execution( + evaluator, random_context, compare_node, name, trailer): + found_executions = True + yield function_execution + + # If there are results after processing a module, we're probably + # good to process. This is a speed optimization. + if found_executions: + return + + +def _get_possible_nodes(module_context, func_string_name): try: - result = [] - # This is like backtracking: Get the first possible result. - for mod in imports.get_modules_containing_name(evaluator, [current_module], func_name): - result = get_params_for_module(mod) - if result: - break - finally: - # cleanup: remove the listener; important: should not stick. 
- func.listeners.remove(listener) - - return result + names = module_context.tree_node.get_used_names()[func_string_name] + except KeyError: + return + + for name in names: + bracket = name.get_next_leaf() + trailer = bracket.parent + if trailer.type == 'trailer' and bracket == '(': + yield name, trailer + + +def _check_name_for_execution(evaluator, context, compare_node, name, trailer): + from jedi.evaluate.context.function import FunctionExecutionContext + + def create_func_excs(): + arglist = trailer.children[1] + if arglist == ')': + arglist = () + args = TreeArguments(evaluator, context, arglist, trailer) + if value_node.type == 'funcdef': + yield value.get_function_execution(args) + else: + created_instance = instance.TreeInstance( + evaluator, + value.parent_context, + value, + args + ) + for execution in created_instance.create_init_executions(): + yield execution + + for value in evaluator.goto_definitions(context, name): + value_node = value.tree_node + if compare_node == value_node: + for func_execution in create_func_excs(): + yield func_execution + elif isinstance(value.parent_context, FunctionExecutionContext) and \ + compare_node.type == 'funcdef': + # Here we're trying to find decorators by checking the first + # parameter. It's not very generic though. Should find a better + # solution that also applies to nested decorators. + params = value.parent_context.get_params() + if len(params) != 1: + continue + values = params[0].infer() + nodes = [v.tree_node for v in values] + if nodes == [compare_node]: + # Found a decorator. + module_context = context.get_root_context() + execution_context = next(create_func_excs()) + for name, trailer in _get_possible_nodes(module_context, params[0].string_name): + if value_node.start_pos < name.start_pos < value_node.end_pos: + random_context = evaluator.create_context(execution_context, name) + iterator = _check_name_for_execution( + evaluator, + random_context, + compare_node, + name, + trailer + ) + for function_execution in iterator: + yield function_execution diff --git a/pythonFiles/release/jedi/evaluate/filters.py b/pythonFiles/release/jedi/evaluate/filters.py new file mode 100644 index 000000000000..35dff9dace65 --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/filters.py @@ -0,0 +1,434 @@ +""" +Filters are objects that you can use to filter names in different scopes. They +are needed for name resolution. +""" +from abc import abstractmethod + +from parso.tree import search_ancestor + +from jedi._compatibility import is_py3 +from jedi.evaluate import flow_analysis +from jedi.evaluate.base_context import ContextSet, Context +from jedi.parser_utils import get_parent_scope +from jedi.evaluate.utils import to_list + + +class AbstractNameDefinition(object): + start_pos = None + string_name = None + parent_context = None + tree_name = None + + @abstractmethod + def infer(self): + raise NotImplementedError + + @abstractmethod + def goto(self): + # Typically names are already definitions and therefore a goto on that + # name will always result on itself. 
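+        # E.g. a goto on the ``x`` in ``x = 3`` already sits on the
+        # definition, so the name itself is returned unchanged.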
+ return set([self]) + + def get_root_context(self): + return self.parent_context.get_root_context() + + def __repr__(self): + if self.start_pos is None: + return '<%s: %s>' % (self.__class__.__name__, self.string_name) + return '<%s: %s@%s>' % (self.__class__.__name__, self.string_name, self.start_pos) + + def execute(self, arguments): + return self.infer().execute(arguments) + + def execute_evaluated(self, *args, **kwargs): + return self.infer().execute_evaluated(*args, **kwargs) + + @property + def api_type(self): + return self.parent_context.api_type + + +class AbstractTreeName(AbstractNameDefinition): + def __init__(self, parent_context, tree_name): + self.parent_context = parent_context + self.tree_name = tree_name + + def goto(self): + return self.parent_context.evaluator.goto(self.parent_context, self.tree_name) + + @property + def string_name(self): + return self.tree_name.value + + @property + def start_pos(self): + return self.tree_name.start_pos + + +class ContextNameMixin(object): + def infer(self): + return ContextSet(self._context) + + def get_root_context(self): + if self.parent_context is None: + return self._context + return super(ContextNameMixin, self).get_root_context() + + @property + def api_type(self): + return self._context.api_type + + +class ContextName(ContextNameMixin, AbstractTreeName): + def __init__(self, context, tree_name): + super(ContextName, self).__init__(context.parent_context, tree_name) + self._context = context + + +class TreeNameDefinition(AbstractTreeName): + _API_TYPES = dict( + import_name='module', + import_from='module', + funcdef='function', + param='param', + classdef='class', + ) + + def infer(self): + # Refactor this, should probably be here. + from jedi.evaluate.syntax_tree import tree_name_to_contexts + return tree_name_to_contexts(self.parent_context.evaluator, self.parent_context, self.tree_name) + + @property + def api_type(self): + definition = self.tree_name.get_definition(import_name_always=True) + if definition is None: + return 'statement' + return self._API_TYPES.get(definition.type, 'statement') + + +class ParamName(AbstractTreeName): + api_type = 'param' + + def __init__(self, parent_context, tree_name): + self.parent_context = parent_context + self.tree_name = tree_name + + def infer(self): + return self.get_param().infer() + + def get_param(self): + params = self.parent_context.get_params() + param_node = search_ancestor(self.tree_name, 'param') + return params[param_node.position_index] + + +class AnonymousInstanceParamName(ParamName): + def infer(self): + param_node = search_ancestor(self.tree_name, 'param') + # TODO I think this should not belong here. It's not even really true, + # because classmethod and other descriptors can change it. + if param_node.position_index == 0: + # This is a speed optimization, to return the self param (because + # it's known). This only affects anonymous instances. 
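+            # Roughly: for ``class Foo:`` with ``def method(self): ...``,
+            # inferring ``self`` inside ``method`` can return the ``Foo``
+            # instance directly instead of inferring the parameter again.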
+ return ContextSet(self.parent_context.instance) + else: + return self.get_param().infer() + + +class AbstractFilter(object): + _until_position = None + + def _filter(self, names): + if self._until_position is not None: + return [n for n in names if n.start_pos < self._until_position] + return names + + @abstractmethod + def get(self, name): + raise NotImplementedError + + @abstractmethod + def values(self): + raise NotImplementedError + + +class AbstractUsedNamesFilter(AbstractFilter): + name_class = TreeNameDefinition + + def __init__(self, context, parser_scope): + self._parser_scope = parser_scope + self._used_names = self._parser_scope.get_root_node().get_used_names() + self.context = context + + def get(self, name): + try: + names = self._used_names[str(name)] + except KeyError: + return [] + + return self._convert_names(self._filter(names)) + + def _convert_names(self, names): + return [self.name_class(self.context, name) for name in names] + + def values(self): + return self._convert_names(name for name_list in self._used_names.values() + for name in self._filter(name_list)) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.context) + + +class ParserTreeFilter(AbstractUsedNamesFilter): + def __init__(self, evaluator, context, node_context=None, until_position=None, + origin_scope=None): + """ + node_context is an option to specify a second context for use cases + like the class mro where the parent class of a new name would be the + context, but for some type inference it's important to have a local + context of the other classes. + """ + if node_context is None: + node_context = context + super(ParserTreeFilter, self).__init__(context, node_context.tree_node) + self._node_context = node_context + self._origin_scope = origin_scope + self._until_position = until_position + + def _filter(self, names): + names = super(ParserTreeFilter, self)._filter(names) + names = [n for n in names if self._is_name_reachable(n)] + return list(self._check_flows(names)) + + def _is_name_reachable(self, name): + if not name.is_definition(): + return False + parent = name.parent + if parent.type == 'trailer': + return False + base_node = parent if parent.type in ('classdef', 'funcdef') else name + return get_parent_scope(base_node) == self._parser_scope + + def _check_flows(self, names): + for name in sorted(names, key=lambda name: name.start_pos, reverse=True): + check = flow_analysis.reachability_check( + self._node_context, self._parser_scope, name, self._origin_scope + ) + if check is not flow_analysis.UNREACHABLE: + yield name + + if check is flow_analysis.REACHABLE: + break + + +class FunctionExecutionFilter(ParserTreeFilter): + param_name = ParamName + + def __init__(self, evaluator, context, node_context=None, + until_position=None, origin_scope=None): + super(FunctionExecutionFilter, self).__init__( + evaluator, + context, + node_context, + until_position, + origin_scope + ) + + @to_list + def _convert_names(self, names): + for name in names: + param = search_ancestor(name, 'param') + if param: + yield self.param_name(self.context, name) + else: + yield TreeNameDefinition(self.context, name) + + +class AnonymousInstanceFunctionExecutionFilter(FunctionExecutionFilter): + param_name = AnonymousInstanceParamName + + +class GlobalNameFilter(AbstractUsedNamesFilter): + def __init__(self, context, parser_scope): + super(GlobalNameFilter, self).__init__(context, parser_scope) + + @to_list + def _filter(self, names): + for name in names: + if name.parent.type == 
'global_stmt': + yield name + + +class DictFilter(AbstractFilter): + def __init__(self, dct): + self._dct = dct + + def get(self, name): + try: + value = self._convert(name, self._dct[str(name)]) + except KeyError: + return [] + + return list(self._filter([value])) + + def values(self): + return self._filter(self._convert(*item) for item in self._dct.items()) + + def _convert(self, name, value): + return value + + +class _BuiltinMappedMethod(Context): + """``Generator.__next__`` ``dict.values`` methods and so on.""" + api_type = 'function' + + def __init__(self, builtin_context, method, builtin_func): + super(_BuiltinMappedMethod, self).__init__( + builtin_context.evaluator, + parent_context=builtin_context + ) + self._method = method + self._builtin_func = builtin_func + + def py__call__(self, params): + return self._method(self.parent_context) + + def __getattr__(self, name): + return getattr(self._builtin_func, name) + + +class SpecialMethodFilter(DictFilter): + """ + A filter for methods that are defined in this module on the corresponding + classes like Generator (for __next__, etc). + """ + class SpecialMethodName(AbstractNameDefinition): + api_type = 'function' + + def __init__(self, parent_context, string_name, callable_, builtin_context): + self.parent_context = parent_context + self.string_name = string_name + self._callable = callable_ + self._builtin_context = builtin_context + + def infer(self): + filter = next(self._builtin_context.get_filters()) + # We can take the first index, because on builtin methods there's + # always only going to be one name. The same is true for the + # inferred values. + builtin_func = next(iter(filter.get(self.string_name)[0].infer())) + return ContextSet(_BuiltinMappedMethod(self.parent_context, self._callable, builtin_func)) + + def __init__(self, context, dct, builtin_context): + super(SpecialMethodFilter, self).__init__(dct) + self.context = context + self._builtin_context = builtin_context + """ + This context is what will be used to introspect the name, where as the + other context will be used to execute the function. + + We distinguish, because we have to. + """ + + def _convert(self, name, value): + return self.SpecialMethodName(self.context, name, value, self._builtin_context) + + +def has_builtin_methods(cls): + base_dct = {} + # Need to care properly about inheritance. Builtin Methods should not get + # lost, just because they are not mentioned in a class. + for base_cls in reversed(cls.__bases__): + try: + base_dct.update(base_cls.builtin_methods) + except AttributeError: + pass + + cls.builtin_methods = base_dct + for func in cls.__dict__.values(): + try: + cls.builtin_methods.update(func.registered_builtin_methods) + except AttributeError: + pass + return cls + + +def register_builtin_method(method_name, python_version_match=None): + def wrapper(func): + if python_version_match and python_version_match != 2 + int(is_py3): + # Some functions do only apply to certain versions. + return func + dct = func.__dict__.setdefault('registered_builtin_methods', {}) + dct[method_name] = func + return func + return wrapper + + +def get_global_filters(evaluator, context, until_position, origin_scope): + """ + Returns all filters in order of priority for name resolution. + + For global name lookups. The filters will handle name resolution + themselves, but here we gather possible filters downwards. + + >>> from jedi._compatibility import u, no_unicode_pprint + >>> from jedi import Script + >>> script = Script(u(''' + ... x = ['a', 'b', 'c'] + ... 
def func(): + ... y = None + ... ''')) + >>> module_node = script._get_module_node() + >>> scope = next(module_node.iter_funcdefs()) + >>> scope + + >>> context = script._get_module().create_context(scope) + >>> filters = list(get_global_filters(context.evaluator, context, (4, 0), None)) + + First we get the names names from the function scope. + + >>> no_unicode_pprint(filters[0]) + > + >>> sorted(str(n) for n in filters[0].values()) + ['', ''] + >>> filters[0]._until_position + (4, 0) + + Then it yields the names from one level "lower". In this example, this is + the module scope. As a side note, you can see, that the position in the + filter is now None, because typically the whole module is loaded before the + function is called. + + >>> filters[1].values() # global names -> there are none in our example. + [] + >>> list(filters[2].values()) # package modules -> Also empty. + [] + >>> sorted(name.string_name for name in filters[3].values()) # Module attributes + ['__doc__', '__file__', '__name__', '__package__'] + >>> print(filters[1]._until_position) + None + + Finally, it yields the builtin filter, if `include_builtin` is + true (default). + + >>> filters[4].values() #doctest: +ELLIPSIS + [, ...] + """ + from jedi.evaluate.context.function import FunctionExecutionContext + while context is not None: + # Names in methods cannot be resolved within the class. + for filter in context.get_filters( + search_global=True, + until_position=until_position, + origin_scope=origin_scope): + yield filter + if isinstance(context, FunctionExecutionContext): + # The position should be reset if the current scope is a function. + until_position = None + + context = context.parent_context + + # Add builtins to the global scope. + for filter in evaluator.BUILTINS.get_filters(search_global=True): + yield filter diff --git a/pythonFiles/release/jedi/evaluate/finder.py b/pythonFiles/release/jedi/evaluate/finder.py index c112f971b277..96032ae9b792 100755 --- a/pythonFiles/release/jedi/evaluate/finder.py +++ b/pythonFiles/release/jedi/evaluate/finder.py @@ -3,6 +3,9 @@ Python. The name resolution is quite complicated with descripter, ``__getattribute__``, ``__getattr__``, ``global``, etc. +If you want to understand name resolution, please read the first few chapters +in http://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/. + Flow checks +++++++++++ @@ -11,371 +14,172 @@ Unfortunately every other thing is being ignored (e.g. a == '' would be easy to check for -> a is a string). There's big potential in these checks. """ -from itertools import chain -from jedi._compatibility import unicode, u -from jedi.parser import tree +from parso.python import tree +from parso.tree import search_ancestor from jedi import debug -from jedi import common from jedi import settings -from jedi.evaluate import representation as er -from jedi.evaluate import dynamic +from jedi.evaluate.context import AbstractInstanceContext from jedi.evaluate import compiled -from jedi.evaluate import docstrings -from jedi.evaluate import iterable -from jedi.evaluate import imports from jedi.evaluate import analysis from jedi.evaluate import flow_analysis -from jedi.evaluate import param +from jedi.evaluate.arguments import TreeArguments from jedi.evaluate import helpers -from jedi.evaluate.cache import memoize_default - - -def filter_after_position(names, position): - """ - Removes all names after a certain position. If position is None, just - returns the names list. 
- """ - if position is None: - return names - - names_new = [] - for n in names: - # Filter positions and also allow list comprehensions and lambdas. - if n.start_pos[0] is not None and n.start_pos < position \ - or isinstance(n.get_definition(), (tree.CompFor, tree.Lambda)): - names_new.append(n) - return names_new - - -def filter_definition_names(names, origin, position=None): - """ - Filter names that are actual definitions in a scope. Names that are just - used will be ignored. - """ - # Just calculate the scope from the first - stmt = names[0].get_definition() - scope = stmt.get_parent_scope() - - if not (isinstance(scope, er.FunctionExecution) - and isinstance(scope.base, er.LambdaWrapper)): - names = filter_after_position(names, position) - names = [name for name in names if name.is_definition()] - - # Private name mangling (compile.c) disallows access on names - # preceeded by two underscores `__` if used outside of the class. Names - # that also end with two underscores (e.g. __id__) are not affected. - for name in list(names): - if name.value.startswith('__') and not name.value.endswith('__'): - if filter_private_variable(scope, origin): - names.remove(name) - return names +from jedi.evaluate.context import iterable +from jedi.evaluate.filters import get_global_filters, TreeNameDefinition +from jedi.evaluate.base_context import ContextSet +from jedi.parser_utils import is_scope, get_parent_scope class NameFinder(object): - def __init__(self, evaluator, scope, name_str, position=None): + def __init__(self, evaluator, context, name_context, name_or_str, + position=None, analysis_errors=True): self._evaluator = evaluator # Make sure that it's not just a syntax tree node. - self.scope = evaluator.wrap(scope) - self.name_str = name_str - self.position = position + self._context = context + self._name_context = name_context + self._name = name_or_str + if isinstance(name_or_str, tree.Name): + self._string_name = name_or_str.value + else: + self._string_name = name_or_str + self._position = position + self._found_predefined_types = None + self._analysis_errors = analysis_errors @debug.increase_indent - def find(self, scopes, search_global=False): - # TODO rename scopes to names_dicts - names = self.filter_name(scopes) - types = self._names_to_types(names, search_global) - - if not names and not types \ - and not (isinstance(self.name_str, tree.Name) - and isinstance(self.name_str.parent.parent, tree.Param)): - if not isinstance(self.name_str, (str, unicode)): # TODO Remove? - if search_global: - message = ("NameError: name '%s' is not defined." - % self.name_str) - analysis.add(self._evaluator, 'name-error', self.name_str, - message) + def find(self, filters, attribute_lookup): + """ + :params bool attribute_lookup: Tell to logic if we're accessing the + attribute or the contents of e.g. a function. 
+ """ + names = self.filter_name(filters) + if self._found_predefined_types is not None and names: + check = flow_analysis.reachability_check( + self._context, self._context.tree_node, self._name) + if check is flow_analysis.UNREACHABLE: + return ContextSet() + return self._found_predefined_types + + types = self._names_to_types(names, attribute_lookup) + + if not names and self._analysis_errors and not types \ + and not (isinstance(self._name, tree.Name) and + isinstance(self._name.parent.parent, tree.Param)): + if isinstance(self._name, tree.Name): + if attribute_lookup: + analysis.add_attribute_error( + self._name_context, self._context, self._name) else: - analysis.add_attribute_error(self._evaluator, - self.scope, self.name_str) + message = ("NameError: name '%s' is not defined." + % self._string_name) + analysis.add(self._name_context, 'name-error', self._name, message) - debug.dbg('finder._names_to_types: %s -> %s', names, types) return types - def scopes(self, search_global=False): - if search_global: - return global_names_dict_generator(self._evaluator, self.scope, self.position) + def _get_origin_scope(self): + if isinstance(self._name, tree.Name): + scope = self._name + while scope.parent is not None: + # TODO why if classes? + if not isinstance(scope, tree.Scope): + break + scope = scope.parent + return scope else: - return ((n, None) for n in self.scope.names_dicts(search_global)) - - def names_dict_lookup(self, names_dict, position): - def get_param(scope, el): - if isinstance(el.get_parent_until(tree.Param), tree.Param): - return scope.param_by_name(str(el)) - return el - - search_str = str(self.name_str) - try: - names = names_dict[search_str] - if not names: # We want names, otherwise stop. - return [] - except KeyError: - return [] + return None - names = filter_definition_names(names, self.name_str, position) - - name_scope = None - # Only the names defined in the last position are valid definitions. - last_names = [] - for name in reversed(sorted(names, key=lambda name: name.start_pos)): - stmt = name.get_definition() - name_scope = self._evaluator.wrap(stmt.get_parent_scope()) - - if isinstance(self.scope, er.Instance) and not isinstance(name_scope, er.Instance): - # Instances should not be checked for positioning, because we - # don't know in which order the functions are called. - last_names.append(name) - continue - - if isinstance(name_scope, compiled.CompiledObject): - # Let's test this. TODO need comment. shouldn't this be - # filtered before? - last_names.append(name) - continue - - if isinstance(name, compiled.CompiledName) \ - or isinstance(name, er.InstanceName) and isinstance(name._origin_name, compiled.CompiledName): - last_names.append(name) - continue - - if isinstance(self.name_str, tree.Name): - origin_scope = self.name_str.get_parent_until(tree.Scope, reverse=True) - else: - origin_scope = None - if isinstance(stmt.parent, compiled.CompiledObject): - # TODO seriously? this is stupid. 
- continue - check = flow_analysis.break_check(self._evaluator, name_scope, - stmt, origin_scope) - if check is not flow_analysis.UNREACHABLE: - last_names.append(name) - if check is flow_analysis.REACHABLE: - break - - if isinstance(name_scope, er.FunctionExecution): - # Replace params - return [get_param(name_scope, n) for n in last_names] - return last_names + def get_filters(self, search_global=False): + origin_scope = self._get_origin_scope() + if search_global: + return get_global_filters(self._evaluator, self._context, self._position, origin_scope) + else: + return self._context.get_filters(search_global, self._position, origin_scope=origin_scope) - def filter_name(self, names_dicts): + def filter_name(self, filters): """ Searches names that are defined in a scope (the different - `names_dicts`), until a name fits. + ``filters``), until a name fits. """ names = [] - for names_dict, position in names_dicts: - names = self.names_dict_lookup(names_dict, position) + if self._context.predefined_names: + # TODO is this ok? node might not always be a tree.Name + node = self._name + while node is not None and not is_scope(node): + node = node.parent + if node.type in ("if_stmt", "for_stmt", "comp_for"): + try: + name_dict = self._context.predefined_names[node] + types = name_dict[self._string_name] + except KeyError: + continue + else: + self._found_predefined_types = types + break + + for filter in filters: + names = filter.get(self._string_name) if names: + if len(names) == 1: + n, = names + if isinstance(n, TreeNameDefinition): + # Something somewhere went terribly wrong. This + # typically happens when using goto on an import in an + # __init__ file. I think we need a better solution, but + # it's kind of hard, because for Jedi it's not clear + # that that name has not been defined, yet. + if n.tree_name == self._name: + if self._name.get_definition().type == 'import_from': + continue break - debug.dbg('finder.filter_name "%s" in (%s): %s@%s', self.name_str, - self.scope, u(names), self.position) - return list(self._clean_names(names)) - - def _clean_names(self, names): - """ - ``NameFinder.filter_name`` should only output names with correct - wrapper parents. We don't want to see AST classes out in the - evaluation, so remove them already here! - """ - for n in names: - definition = n.parent - if isinstance(definition, (tree.Function, tree.Class, tree.Module)): - yield self._evaluator.wrap(definition).name - else: - yield n + debug.dbg('finder.filter_name "%s" in (%s): %s@%s', self._string_name, + self._context, names, self._position) + return list(names) def _check_getattr(self, inst): """Checks for both __getattr__ and __getattribute__ methods""" - result = [] # str is important, because it shouldn't be `Name`! - name = compiled.create(self._evaluator, str(self.name_str)) - with common.ignored(KeyError): - result = inst.execute_subscope_by_name('__getattr__', name) - if not result: - # this is a little bit special. `__getattribute__` is executed - # before anything else. But: I know no use case, where this - # could be practical and the jedi would return wrong types. If - # you ever have something, let me know! - with common.ignored(KeyError): - result = inst.execute_subscope_by_name('__getattribute__', name) - return result - - def _names_to_types(self, names, search_global): - types = [] + name = compiled.create(self._evaluator, self._string_name) + + # This is a little bit special. `__getattribute__` is in Python + # executed before `__getattr__`. 
But: I know no use case, where + # this could be practical and where Jedi would return wrong types. + # If you ever find something, let me know! + # We are inversing this, because a hand-crafted `__getattribute__` + # could still call another hand-crafted `__getattr__`, but not the + # other way around. + names = (inst.get_function_slot_names('__getattr__') or + inst.get_function_slot_names('__getattribute__')) + return inst.execute_function_slots(names, name) + + def _names_to_types(self, names, attribute_lookup): + contexts = ContextSet.from_sets(name.infer() for name in names) + + debug.dbg('finder._names_to_types: %s -> %s', names, contexts) + if not names and isinstance(self._context, AbstractInstanceContext): + # handling __getattr__ / __getattribute__ + return self._check_getattr(self._context) # Add isinstance and other if/assert knowledge. - if isinstance(self.name_str, tree.Name): - # Ignore FunctionExecution parents for now. - flow_scope = self.name_str - until = flow_scope.get_parent_until(er.FunctionExecution) - while not isinstance(until, er.FunctionExecution): - flow_scope = flow_scope.get_parent_scope(include_flows=True) - if flow_scope is None: - break - # TODO check if result is in scope -> no evaluation necessary - n = check_flow_information(self._evaluator, flow_scope, - self.name_str, self.position) - if n: + if not contexts and isinstance(self._name, tree.Name) and \ + not isinstance(self._name_context, AbstractInstanceContext): + flow_scope = self._name + base_node = self._name_context.tree_node + if base_node.type == 'comp_for': + return contexts + while True: + flow_scope = get_parent_scope(flow_scope, include_flows=True) + n = _check_flow_information(self._name_context, flow_scope, + self._name, self._position) + if n is not None: return n - - for name in names: - new_types = _name_to_types(self._evaluator, name, self.scope) - if isinstance(self.scope, (er.Class, er.Instance)) and not search_global: - types += self._resolve_descriptors(name, new_types) - else: - types += new_types - if not names and isinstance(self.scope, er.Instance): - # handling __getattr__ / __getattribute__ - types = self._check_getattr(self.scope) - - return types - - def _resolve_descriptors(self, name, types): - # The name must not be in the dictionary, but part of the class - # definition. __get__ is only called if the descriptor is defined in - # the class dictionary. 
- name_scope = name.get_definition().get_parent_scope() - if not isinstance(name_scope, (er.Instance, tree.Class)): - return types - - result = [] - for r in types: - try: - desc_return = r.get_descriptor_returns - except AttributeError: - result.append(r) - else: - result += desc_return(self.scope) - return result - - -@memoize_default([], evaluator_is_first_arg=True) -def _name_to_types(evaluator, name, scope): - types = [] - typ = name.get_definition() - if typ.isinstance(tree.ForStmt): - for_types = evaluator.eval_element(typ.children[3]) - for_types = iterable.get_iterator_types(for_types) - types += check_tuple_assignments(for_types, name) - elif typ.isinstance(tree.CompFor): - for_types = evaluator.eval_element(typ.children[3]) - for_types = iterable.get_iterator_types(for_types) - types += check_tuple_assignments(for_types, name) - elif isinstance(typ, tree.Param): - types += _eval_param(evaluator, typ, scope) - elif typ.isinstance(tree.ExprStmt): - types += _remove_statements(evaluator, typ, name) - elif typ.isinstance(tree.WithStmt): - types += evaluator.eval_element(typ.node_from_name(name)) - elif isinstance(typ, tree.Import): - types += imports.ImportWrapper(evaluator, name).follow() - elif isinstance(typ, tree.GlobalStmt): - # TODO theoretically we shouldn't be using search_global here, it - # doesn't make sense, because it's a local search (for that name)! - # However, globals are not that important and resolving them doesn't - # guarantee correctness in any way, because we don't check for when - # something is executed. - types += evaluator.find_types(typ.get_parent_scope(), str(name), - search_global=True) - elif isinstance(typ, tree.TryStmt): - # TODO an exception can also be a tuple. Check for those. - # TODO check for types that are not classes and add it to - # the static analysis report. - exceptions = evaluator.eval_element(name.prev_sibling().prev_sibling()) - types = list(chain.from_iterable( - evaluator.execute(t) for t in exceptions)) - else: - if typ.isinstance(er.Function): - typ = typ.get_decorated_func() - types.append(typ) - return types - - -def _remove_statements(evaluator, stmt, name): - """ - This is the part where statements are being stripped. - - Due to lazy evaluation, statements like a = func; b = a; b() have to be - evaluated. - """ - types = [] - # Remove the statement docstr stuff for now, that has to be - # implemented with the evaluator class. - #if stmt.docstr: - #res_new.append(stmt) - - check_instance = None - if isinstance(stmt, er.InstanceElement) and stmt.is_class_var: - check_instance = stmt.instance - stmt = stmt.var - - types += evaluator.eval_statement(stmt, seek_name=name) - - if check_instance is not None: - # class renames - types = [er.get_instance_el(evaluator, check_instance, a, True) - if isinstance(a, (er.Function, tree.Function)) - else a for a in types] - return types - - -def _eval_param(evaluator, param, scope): - res_new = [] - func = param.get_parent_scope() - - cls = func.parent.get_parent_until((tree.Class, tree.Function)) - - from jedi.evaluate.param import ExecutedParam, Arguments - if isinstance(cls, tree.Class) and param.position_nr == 0 \ - and not isinstance(param, ExecutedParam): - # This is where we add self - if it has never been - # instantiated. 
- if isinstance(scope, er.InstanceElement): - res_new.append(scope.instance) - else: - inst = er.Instance(evaluator, evaluator.wrap(cls), - Arguments(evaluator, ()), is_generated=True) - res_new.append(inst) - return res_new - - # Instances are typically faked, if the instance is not called from - # outside. Here we check it for __init__ functions and return. - if isinstance(func, er.InstanceElement) \ - and func.instance.is_generated and str(func.name) == '__init__': - param = func.var.params[param.position_nr] - - # Add docstring knowledge. - doc_params = docstrings.follow_param(evaluator, param) - if doc_params: - return doc_params - - if isinstance(param, ExecutedParam): - return res_new + param.eval(evaluator) - else: - # Param owns no information itself. - res_new += dynamic.search_params(evaluator, param) - if not res_new: - if param.stars: - t = 'tuple' if param.stars == 1 else 'dict' - typ = evaluator.find_types(compiled.builtin, t)[0] - res_new = evaluator.execute(typ) - if param.default: - res_new += evaluator.eval_element(param.default) - return res_new + if flow_scope == base_node: + break + return contexts -def check_flow_information(evaluator, flow, search_name, pos): +def _check_flow_information(context, flow, search_name, pos): """ Try to find out the type of a variable just with the information that is given by the flows: e.g. It is also responsible for assert checks.:: @@ -387,161 +191,68 @@ def check_flow_information(evaluator, flow, search_name, pos): if not settings.dynamic_flow_information: return None - result = [] - if flow.is_scope(): + result = None + if is_scope(flow): # Check for asserts. + module_node = flow.get_root_node() try: - names = reversed(flow.names_dict[search_name.value]) - except (KeyError, AttributeError): - names = [] + names = module_node.get_used_names()[search_name.value] + except KeyError: + return None + names = reversed([ + n for n in names + if flow.start_pos <= n.start_pos < (pos or flow.end_pos) + ]) for name in names: - ass = name.get_parent_until(tree.AssertStmt) - if isinstance(ass, tree.AssertStmt) and pos is not None and ass.start_pos < pos: - result = _check_isinstance_type(evaluator, ass.assertion(), search_name) - if result: - break - - if isinstance(flow, (tree.IfStmt, tree.WhileStmt)): - element = flow.children[1] - result = _check_isinstance_type(evaluator, element, search_name) + ass = search_ancestor(name, 'assert_stmt') + if ass is not None: + result = _check_isinstance_type(context, ass.assertion, search_name) + if result is not None: + return result + + if flow.type in ('if_stmt', 'while_stmt'): + potential_ifs = [c for c in flow.children[1::4] if c != ':'] + for if_test in reversed(potential_ifs): + if search_name.start_pos > if_test.end_pos: + return _check_isinstance_type(context, if_test, search_name) return result -def _check_isinstance_type(evaluator, element, search_name): +def _check_isinstance_type(context, element, search_name): try: - assert element.type == 'power' + assert element.type in ('power', 'atom_expr') # this might be removed if we analyze and, etc assert len(element.children) == 2 first, trailer = element.children - assert isinstance(first, tree.Name) and first.value == 'isinstance' + assert first.type == 'name' and first.value == 'isinstance' assert trailer.type == 'trailer' and trailer.children[0] == '(' assert len(trailer.children) == 3 # arglist stuff arglist = trailer.children[1] - args = param.Arguments(evaluator, arglist, trailer) - lst = list(args.unpack()) + args = 
TreeArguments(context.evaluator, context, arglist, trailer) + param_list = list(args.unpack()) # Disallow keyword arguments - assert len(lst) == 2 and lst[0][0] is None and lst[1][0] is None - name = lst[0][1][0] # first argument, values, first value + assert len(param_list) == 2 + (key1, lazy_context_object), (key2, lazy_context_cls) = param_list + assert key1 is None and key2 is None + call = helpers.call_of_leaf(search_name) + is_instance_call = helpers.call_of_leaf(lazy_context_object.data) # Do a simple get_code comparison. They should just have the same code, # and everything will be all right. - classes = lst[1][1][0] - call = helpers.call_of_name(search_name) - assert name.get_code() == call.get_code() + normalize = context.evaluator.grammar._normalize + assert normalize(is_instance_call) == normalize(call) except AssertionError: - return [] - - result = [] - for typ in evaluator.eval_element(classes): - for typ in (typ.values() if isinstance(typ, iterable.Array) else [typ]): - result += evaluator.execute(typ) - return result - - -def global_names_dict_generator(evaluator, scope, position): - """ - For global name lookups. Yields tuples of (names_dict, position). If the - position is None, the position does not matter anymore in that scope. - - This function is used to include names from outer scopes. For example, when - the current scope is function: - - >>> from jedi._compatibility import u, no_unicode_pprint - >>> from jedi.parser import Parser, load_grammar - >>> parser = Parser(load_grammar(), u(''' - ... x = ['a', 'b', 'c'] - ... def func(): - ... y = None - ... ''')) - >>> scope = parser.module.subscopes[0] - >>> scope - - - `global_names_dict_generator` is a generator. First it yields names from - most inner scope. - - >>> from jedi.evaluate import Evaluator - >>> evaluator = Evaluator(load_grammar()) - >>> scope = evaluator.wrap(scope) - >>> pairs = list(global_names_dict_generator(evaluator, scope, (4, 0))) - >>> no_unicode_pprint(pairs[0]) - ({'func': [], 'y': []}, (4, 0)) - - Then it yields the names from one level "lower". In this example, this - is the most outer scope. As you can see, the position in the tuple is now - None, because typically the whole module is loaded before the function is - called. - - >>> no_unicode_pprint(pairs[1]) - ({'func': [], 'x': []}, None) - - After that we have a few underscore names that are part of the module. - - >>> sorted(pairs[2][0].keys()) - ['__doc__', '__file__', '__name__', '__package__'] - >>> pairs[3] # global names -> there are none in our example. - ({}, None) - >>> pairs[4] # package modules -> Also none. - ({}, None) - - Finally, it yields names from builtin, if `include_builtin` is - true (default). - - >>> pairs[5][0].values() #doctest: +ELLIPSIS - [[], ...] - """ - in_func = False - while scope is not None: - if not (scope.type == 'classdef' and in_func): - # Names in methods cannot be resolved within the class. - - for names_dict in scope.names_dicts(True): - yield names_dict, position - if scope.type == 'funcdef': - # The position should be reset if the current scope is a function. - in_func = True - position = None - scope = evaluator.wrap(scope.get_parent_scope()) - - # Add builtins to the global scope. - for names_dict in compiled.builtin.names_dicts(True): - yield names_dict, None - - -def check_tuple_assignments(types, name): - """ - Checks if tuples are assigned. 
- """ - for index in name.assignment_indexes(): - new_types = [] - for r in types: - try: - func = r.get_exact_index_types - except AttributeError: - debug.warning("Invalid tuple lookup #%s of result %s in %s", - index, types, name) - else: - try: - new_types += func(index) - except IndexError: - pass - types = new_types - return types - - -def filter_private_variable(scope, origin_node): - """Check if a variable is defined inside the same class or outside.""" - instance = scope.get_parent_scope() - coming_from = origin_node - while coming_from is not None \ - and not isinstance(coming_from, (tree.Class, compiled.CompiledObject)): - coming_from = coming_from.get_parent_scope() + return None - # CompiledObjects don't have double underscore attributes, but Jedi abuses - # those for fakes (builtins.pym -> list). - if isinstance(instance, compiled.CompiledObject): - return instance != coming_from - else: - return isinstance(instance, er.Instance) and instance.base.base != coming_from + context_set = ContextSet() + for cls_or_tup in lazy_context_cls.infer(): + if isinstance(cls_or_tup, iterable.AbstractIterable) and \ + cls_or_tup.array_type == 'tuple': + for lazy_context in cls_or_tup.py__iter__(): + for context in lazy_context.infer(): + context_set |= context.execute_evaluated() + else: + context_set |= cls_or_tup.execute_evaluated() + return context_set diff --git a/pythonFiles/release/jedi/evaluate/flow_analysis.py b/pythonFiles/release/jedi/evaluate/flow_analysis.py index cd3df554fa7e..670b7a71934c 100755 --- a/pythonFiles/release/jedi/evaluate/flow_analysis.py +++ b/pythonFiles/release/jedi/evaluate/flow_analysis.py @@ -1,4 +1,4 @@ -from jedi.parser import tree +from jedi.parser_utils import get_flow_branch_keyword, is_scope, get_parent_scope class Status(object): @@ -32,51 +32,79 @@ def __repr__(self): UNSURE = Status(None, 'unsure') -def break_check(evaluator, base_scope, stmt, origin_scope=None): - element_scope = evaluator.wrap(stmt.get_parent_scope(include_flows=True)) - # Direct parents get resolved, we filter scopes that are separate branches. - # This makes sense for autocompletion and static analysis. For actual - # Python it doesn't matter, because we're talking about potentially - # unreachable code. - # e.g. `if 0:` would cause all name lookup within the flow make - # unaccessible. This is not a "problem" in Python, because the code is - # never called. In Jedi though, we still want to infer types. 
- while origin_scope is not None: - if element_scope == origin_scope: - return REACHABLE - origin_scope = origin_scope.parent - return _break_check(evaluator, stmt, base_scope, element_scope) - - -def _break_check(evaluator, stmt, base_scope, element_scope): - element_scope = evaluator.wrap(element_scope) - base_scope = evaluator.wrap(base_scope) - +def _get_flow_scopes(node): + while True: + node = get_parent_scope(node, include_flows=True) + if node is None or is_scope(node): + return + yield node + + +def reachability_check(context, context_scope, node, origin_scope=None): + first_flow_scope = get_parent_scope(node, include_flows=True) + if origin_scope is not None: + origin_flow_scopes = list(_get_flow_scopes(origin_scope)) + node_flow_scopes = list(_get_flow_scopes(node)) + + branch_matches = True + for flow_scope in origin_flow_scopes: + if flow_scope in node_flow_scopes: + node_keyword = get_flow_branch_keyword(flow_scope, node) + origin_keyword = get_flow_branch_keyword(flow_scope, origin_scope) + branch_matches = node_keyword == origin_keyword + if flow_scope.type == 'if_stmt': + if not branch_matches: + return UNREACHABLE + elif flow_scope.type == 'try_stmt': + if not branch_matches and origin_keyword == 'else' \ + and node_keyword == 'except': + return UNREACHABLE + break + + # Direct parents get resolved, we filter scopes that are separate + # branches. This makes sense for autocompletion and static analysis. + # For actual Python it doesn't matter, because we're talking about + # potentially unreachable code. + # e.g. `if 0:` would cause all name lookup within the flow make + # unaccessible. This is not a "problem" in Python, because the code is + # never called. In Jedi though, we still want to infer types. + while origin_scope is not None: + if first_flow_scope == origin_scope and branch_matches: + return REACHABLE + origin_scope = origin_scope.parent + + return _break_check(context, context_scope, first_flow_scope, node) + + +def _break_check(context, context_scope, flow_scope, node): reachable = REACHABLE - if isinstance(element_scope, tree.IfStmt): - if element_scope.node_after_else(stmt): - for check_node in element_scope.check_nodes(): - reachable = _check_if(evaluator, check_node) + if flow_scope.type == 'if_stmt': + if flow_scope.is_node_after_else(node): + for check_node in flow_scope.get_test_nodes(): + reachable = _check_if(context, check_node) if reachable in (REACHABLE, UNSURE): break reachable = reachable.invert() else: - node = element_scope.node_in_which_check_node(stmt) - reachable = _check_if(evaluator, node) - elif isinstance(element_scope, (tree.TryStmt, tree.WhileStmt)): + flow_node = flow_scope.get_corresponding_test_node(node) + if flow_node is not None: + reachable = _check_if(context, flow_node) + elif flow_scope.type in ('try_stmt', 'while_stmt'): return UNSURE # Only reachable branches need to be examined further. 
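+    # For example, ``if 0:`` is evaluated to UNREACHABLE and a truthy
+    # literal to REACHABLE, while a test jedi cannot decide (an unknown
+    # flag, say) comes back as UNSURE.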
if reachable in (UNREACHABLE, UNSURE): return reachable - if base_scope != element_scope and base_scope != element_scope.parent: - return reachable & _break_check(evaluator, stmt, base_scope, element_scope.parent) - return reachable + if context_scope != flow_scope and context_scope != flow_scope.parent: + flow_scope = get_parent_scope(flow_scope, include_flows=True) + return reachable & _break_check(context, context_scope, flow_scope, node) + else: + return reachable -def _check_if(evaluator, node): - types = evaluator.eval_element(node) +def _check_if(context, node): + types = context.eval_node(node) values = set(x.py__bool__() for x in types) if len(values) == 1: return Status.lookup_table[values.pop()] diff --git a/pythonFiles/release/jedi/evaluate/helpers.py b/pythonFiles/release/jedi/evaluate/helpers.py index 4802bee02afe..3b21e01bda9e 100755 --- a/pythonFiles/release/jedi/evaluate/helpers.py +++ b/pythonFiles/release/jedi/evaluate/helpers.py @@ -1,77 +1,51 @@ import copy +import sys +import re +import os from itertools import chain +from contextlib import contextmanager -from jedi.parser import tree +from parso.python import tree +from jedi._compatibility import unicode +from jedi.parser_utils import get_parent_scope +from jedi.evaluate.compiled import CompiledObject -def deep_ast_copy(obj, parent=None, new_elements=None): + +def is_stdlib_path(path): + # Python standard library paths look like this: + # /usr/lib/python3.5/... + # TODO The implementation below is probably incorrect and not complete. + if 'dist-packages' in path or 'site-packages' in path: + return False + + base_path = os.path.join(sys.prefix, 'lib', 'python') + return bool(re.match(re.escape(base_path) + '\d.\d', path)) + + +def deep_ast_copy(obj): """ - Much, much faster than copy.deepcopy, but just for Parser elements (Doesn't - copy parents). + Much, much faster than copy.deepcopy, but just for parser tree nodes. """ - - if new_elements is None: - new_elements = {} - - def copy_node(obj): - # If it's already in the cache, just return it. - try: - return new_elements[obj] - except KeyError: - # Actually copy and set attributes. - new_obj = copy.copy(obj) - new_elements[obj] = new_obj - - # Copy children - new_children = [] - for child in obj.children: - typ = child.type - if typ in ('whitespace', 'operator', 'keyword', 'number', 'string'): - # At the moment we're not actually copying those primitive - # elements, because there's really no need to. The parents are - # obviously wrong, but that's not an issue. - new_child = child - elif typ == 'name': - new_elements[child] = new_child = copy.copy(child) - new_child.parent = new_obj - else: # Is a BaseNode. - new_child = copy_node(child) - new_child.parent = new_obj - new_children.append(new_child) - new_obj.children = new_children - - # Copy the names_dict (if there is one). - try: - names_dict = obj.names_dict - except AttributeError: - pass + # If it's already in the cache, just return it. + new_obj = copy.copy(obj) + + # Copy children + new_children = [] + for child in obj.children: + if isinstance(child, tree.Leaf): + new_child = copy.copy(child) + new_child.parent = new_obj else: - try: - new_obj.names_dict = new_names_dict = {} - except AttributeError: # Impossible to set CompFor.names_dict - pass - else: - for string, names in names_dict.items(): - new_names_dict[string] = [new_elements[n] for n in names] - return new_obj - - if obj.type == 'name': - # Special case of a Name object. 
- new_elements[obj] = new_obj = copy.copy(obj) - if parent is not None: - new_obj.parent = parent - elif isinstance(obj, tree.BaseNode): - new_obj = copy_node(obj) - if parent is not None: - for child in new_obj.children: - if isinstance(child, (tree.Name, tree.BaseNode)): - child.parent = parent - else: # String literals and so on. - new_obj = obj # Good enough, don't need to copy anything. + new_child = deep_ast_copy(child) + new_child.parent = new_obj + new_children.append(new_child) + new_obj.children = new_children + return new_obj -def call_of_name(name, cut_own_trailer=False): +def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False): """ Creates a "call" node that consist of all ``trailer`` and ``power`` objects. E.g. if you call it with ``append``:: @@ -81,99 +55,147 @@ def call_of_name(name, cut_own_trailer=False): You would get a node with the content ``list([]).append`` back. This generates a copy of the original ast node. - """ - par = name - if tree.is_node(par.parent, 'trailer'): - par = par.parent - - power = par.parent - if tree.is_node(power, 'power') and power.children[0] != name \ - and not (power.children[-2] == '**' and - name.start_pos > power.children[-1].start_pos): - par = power - # Now the name must be part of a trailer - index = par.children.index(name.parent) - if index != len(par.children) - 1 or cut_own_trailer: - # Now we have to cut the other trailers away. - par = deep_ast_copy(par) - if not cut_own_trailer: - # Normally we would remove just the stuff after the index, but - # if the option is set remove the index as well. (for goto) - index = index + 1 - par.children[index:] = [] - - return par + If you're using the leaf, e.g. the bracket `)` it will return ``list([])``. -def get_module_names(module, all_scopes): + We use this function for two purposes. Given an expression ``bar.foo``, + we may want to + - infer the type of ``foo`` to offer completions after foo + - infer the type of ``bar`` to be able to jump to the definition of foo + The option ``cut_own_trailer`` must be set to true for the second purpose. """ - Returns a dictionary with name parts as keys and their call paths as - values. - """ - if all_scopes: - dct = module.used_names + trailer = leaf.parent + # The leaf may not be the last or first child, because there exist three + # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples + # we should not match anything more than x. 
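+    # E.g. for the leaf ``x`` in ``foo[x]`` only ``x`` itself is evaluated,
+    # while for the leaf ``append`` in ``list([]).append`` the whole power
+    # node up to ``.append`` is.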
+ if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]): + if trailer.type == 'atom': + return context.eval_node(trailer) + return context.eval_node(leaf) + + power = trailer.parent + index = power.children.index(trailer) + if cut_own_trailer: + cut = index + else: + cut = index + 1 + + if power.type == 'error_node': + start = index + while True: + start -= 1 + base = power.children[start] + if base.type != 'trailer': + break + trailers = power.children[start + 1: index + 1] else: - dct = module.names_dict - return chain.from_iterable(dct.values()) + base = power.children[0] + trailers = power.children[1:cut] + if base == 'await': + base = trailers[0] + trailers = trailers[1:] -class FakeImport(tree.ImportName): - def __init__(self, name, parent, level=0): - super(FakeImport, self).__init__([]) - self.parent = parent - self._level = level - self.name = name + values = context.eval_node(base) + from jedi.evaluate.syntax_tree import eval_trailer + for trailer in trailers: + values = eval_trailer(context, values, trailer) + return values - def get_defined_names(self): - return [self.name] - def aliases(self): - return {} +def call_of_leaf(leaf): + """ + Creates a "call" node that consist of all ``trailer`` and ``power`` + objects. E.g. if you call it with ``append``:: - @property - def level(self): - return self._level + list([]).append(3) or None - @property - def start_pos(self): - return 0, 0 + You would get a node with the content ``list([]).append`` back. - def paths(self): - return [[self.name]] + This generates a copy of the original ast node. - def is_definition(self): - return True + If you're using the leaf, e.g. the bracket `)` it will return ``list([])``. + """ + # TODO this is the old version of this call. Try to remove it. + trailer = leaf.parent + # The leaf may not be the last or first child, because there exist three + # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples + # we should not match anything more than x. + if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]): + if trailer.type == 'atom': + return trailer + return leaf + + power = trailer.parent + index = power.children.index(trailer) + + new_power = copy.copy(power) + new_power.children = list(new_power.children) + new_power.children[index + 1:] = [] + + if power.type == 'error_node': + start = index + while True: + start -= 1 + if power.children[start].type != 'trailer': + break + transformed = tree.Node('power', power.children[start:]) + transformed.parent = power.parent + return transformed + + return power + + +def get_names_of_node(node): + try: + children = node.children + except AttributeError: + if node.type == 'name': + return [node] + else: + return [] + else: + return list(chain.from_iterable(get_names_of_node(c) for c in children)) -class FakeName(tree.Name): - def __init__(self, name_str, parent=None, start_pos=(0, 0), is_definition=None): - """ - In case is_definition is defined (not None), that bool value will be - returned. - """ - super(FakeName, self).__init__(tree.zero_position_modifier, name_str, start_pos) - self.parent = parent - self._is_definition = is_definition +def get_module_names(module, all_scopes): + """ + Returns a dictionary with name parts as keys and their call paths as + values. + """ + names = chain.from_iterable(module.get_used_names().values()) + if not all_scopes: + # We have to filter all the names that don't have the module as a + # parent_scope. 
There's None as a parent, because nodes in the module + # node have the parent module and not suite as all the others. + # Therefore it's important to catch that case. + names = [n for n in names if get_parent_scope(n).parent in (module, None)] + return names - def get_definition(self): - return self.parent - def is_definition(self): - if self._is_definition is None: - return super(FakeName, self).is_definition() - else: - return self._is_definition +@contextmanager +def predefine_names(context, flow_scope, dct): + predefined = context.predefined_names + if flow_scope in predefined: + raise NotImplementedError('Why does this happen?') + predefined[flow_scope] = dct + try: + yield + finally: + del predefined[flow_scope] + + +def is_compiled(context): + return isinstance(context, CompiledObject) + + +def is_string(context): + return is_compiled(context) and isinstance(context.obj, (str, unicode)) -class LazyName(FakeName): - def __init__(self, name, parent_callback, is_definition=None): - super(LazyName, self).__init__(name, is_definition=is_definition) - self._parent_callback = parent_callback +def is_literal(context): + return is_number(context) or is_string(context) - @property - def parent(self): - return self._parent_callback() - @parent.setter - def parent(self, value): - pass # Do nothing, super classes can try to set the parent. +def is_number(context): + return is_compiled(context) and isinstance(context.obj, (int, float)) diff --git a/pythonFiles/release/jedi/evaluate/imports.py b/pythonFiles/release/jedi/evaluate/imports.py index c14a50c0dce0..ecf656b1a676 100755 --- a/pythonFiles/release/jedi/evaluate/imports.py +++ b/pythonFiles/release/jedi/evaluate/imports.py @@ -15,110 +15,85 @@ import os import pkgutil import sys -from itertools import chain -from jedi._compatibility import find_module, unicode -from jedi import common +from parso.python import tree +from parso.tree import search_ancestor +from parso.cache import parser_cache +from parso import python_bytes_to_unicode + +from jedi._compatibility import find_module, unicode, ImplicitNSInfo from jedi import debug -from jedi import cache -from jedi.parser import fast -from jedi.parser import tree +from jedi import settings from jedi.evaluate import sys_path from jedi.evaluate import helpers -from jedi import settings -from jedi.common import source_to_unicode from jedi.evaluate import compiled from jedi.evaluate import analysis -from jedi.evaluate.cache import memoize_default, NO_DEFAULT - - -def completion_names(evaluator, imp, pos): - name = imp.name_for_position(pos) - module = evaluator.wrap(imp.get_parent_until()) - if name is None: - level = 0 - for node in imp.children: - if node.end_pos <= pos: - if node in ('.', '...'): - level += len(node.value) - import_path = [] +from jedi.evaluate.utils import unite +from jedi.evaluate.cache import evaluator_method_cache +from jedi.evaluate.filters import AbstractNameDefinition +from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS + + +# This memoization is needed, because otherwise we will infinitely loop on +# certain imports. 
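+# A hypothetical example of such a loop: package ``a`` doing
+# ``from . import b`` while ``b`` imports ``a`` again.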
+@evaluator_method_cache(default=NO_CONTEXTS) +def infer_import(context, tree_name, is_goto=False): + module_context = context.get_root_context() + import_node = search_ancestor(tree_name, 'import_name', 'import_from') + import_path = import_node.get_path_for_name(tree_name) + from_import_name = None + evaluator = context.evaluator + try: + from_names = import_node.get_from_names() + except AttributeError: + # Is an import_name + pass else: - # Completion on an existing name. - - # The import path needs to be reduced by one, because we're completing. - import_path = imp.path_for_name(name)[:-1] - level = imp.level - - importer = Importer(evaluator, tuple(import_path), module, level) - if isinstance(imp, tree.ImportFrom): - c = imp.children - only_modules = c[c.index('import')].start_pos >= pos - else: - only_modules = True - return importer.completion_names(evaluator, only_modules) - - -class ImportWrapper(tree.Base): - def __init__(self, evaluator, name): - self._evaluator = evaluator - self._name = name - - self._import = name.get_parent_until(tree.Import) - self.import_path = self._import.path_for_name(name) - - @memoize_default() - def follow(self, is_goto=False): - if self._evaluator.recursion_detector.push_stmt(self._import): - # check recursion - return [] - - try: - module = self._evaluator.wrap(self._import.get_parent_until()) - import_path = self._import.path_for_name(self._name) - from_import_name = None - try: - from_names = self._import.get_from_names() - except AttributeError: - # Is an import_name - pass - else: - if len(from_names) + 1 == len(import_path): - # We have to fetch the from_names part first and then check - # if from_names exists in the modules. - from_import_name = import_path[-1] - import_path = from_names - - importer = Importer(self._evaluator, tuple(import_path), - module, self._import.level) - + if len(from_names) + 1 == len(import_path): + # We have to fetch the from_names part first and then check + # if from_names exists in the modules. 
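+            # E.g. for ``from json import decoder`` the path is
+            # ('json', 'decoder'): ('json',) is followed first and
+            # ``decoder`` is then looked up on the result (with a retry as
+            # a submodule further down).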
+ from_import_name = import_path[-1] + import_path = from_names + + importer = Importer(evaluator, tuple(import_path), + module_context, import_node.level) + + types = importer.follow() + + #if import_node.is_nested() and not self.nested_resolve: + # scopes = [NestedImportModule(module, import_node)] + + if not types: + return NO_CONTEXTS + + if from_import_name is not None: + types = unite( + t.py__getattribute__( + from_import_name, + name_context=context, + is_goto=is_goto, + analysis_errors=False + ) + for t in types + ) + if not is_goto: + types = ContextSet.from_set(types) + + if not types: + path = import_path + [from_import_name] + importer = Importer(evaluator, tuple(path), + module_context, import_node.level) types = importer.follow() + # goto only accepts `Name` + if is_goto: + types = set(s.name for s in types) + else: + # goto only accepts `Name` + if is_goto: + types = set(s.name for s in types) - #if self._import.is_nested() and not self.nested_resolve: - # scopes = [NestedImportModule(module, self._import)] - - if from_import_name is not None: - types = list(chain.from_iterable( - self._evaluator.find_types(t, unicode(from_import_name), - is_goto=is_goto) - for t in types)) - - if not types: - path = import_path + [from_import_name] - importer = Importer(self._evaluator, tuple(path), - module, self._import.level) - types = importer.follow() - # goto only accepts `Name` - if is_goto: - types = [s.name for s in types] - else: - # goto only accepts `Name` - if is_goto: - types = [s.name for s in types] - - debug.dbg('after import: %s', types) - finally: - self._evaluator.recursion_detector.pop_stmt() - return types + debug.dbg('after import: %s', types) + return types class NestedImportModule(tree.Module): @@ -153,10 +128,10 @@ def __repr__(self): self._nested_import) -def _add_error(evaluator, name, message=None): +def _add_error(context, name, message=None): + # Should be a name, not a string! if hasattr(name, 'parent'): - # Should be a name, not a string! - analysis.add(evaluator, 'import-error', name, message) + analysis.add(context, 'import-error', name, message) def get_init_path(directory_path): @@ -171,8 +146,40 @@ def get_init_path(directory_path): return None +class ImportName(AbstractNameDefinition): + start_pos = (1, 0) + _level = 0 + + def __init__(self, parent_context, string_name): + self.parent_context = parent_context + self.string_name = string_name + + def infer(self): + return Importer( + self.parent_context.evaluator, + [self.string_name], + self.parent_context, + level=self._level, + ).follow() + + def goto(self): + return [m.name for m in self.infer()] + + def get_root_context(self): + # Not sure if this is correct. + return self.parent_context.get_root_context() + + @property + def api_type(self): + return 'module' + + +class SubModuleName(ImportName): + _level = 1 + + class Importer(object): - def __init__(self, evaluator, import_path, module, level=0): + def __init__(self, evaluator, import_path, module_context, level=0): """ An implementation similar to ``__import__``. Use `follow` to actually follow the imports. @@ -188,47 +195,65 @@ def __init__(self, evaluator, import_path, module, level=0): debug.speed('import %s' % (import_path,)) self._evaluator = evaluator self.level = level - self.module = module + self.module_context = module_context try: - self.file_path = module.py__file__() + self.file_path = module_context.py__file__() except AttributeError: # Can be None for certain compiled modules like 'builtins'. 
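That is ordinary CPython behaviour, easy to verify at a REPL (standard library only, nothing jedi-specific):

    import builtins, os
    print(getattr(os, '__file__', None))        # a real filesystem path
    print(getattr(builtins, '__file__', None))  # None -- built-ins have no source file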
self.file_path = None if level: - base = module.py__package__().split('.') + base = module_context.py__package__().split('.') if base == ['']: base = [] if level > len(base): - path = module.py__file__() - import_path = list(import_path) - for i in range(level): - path = os.path.dirname(path) - dir_name = os.path.basename(path) - # This is not the proper way to do relative imports. However, since - # Jedi cannot be sure about the entry point, we just calculate an - # absolute path here. - if dir_name: - import_path.insert(0, dir_name) - else: - _add_error(self._evaluator, import_path[-1]) - import_path = [] - # TODO add import error. - debug.warning('Attempted relative import beyond top-level package.') + path = module_context.py__file__() + if path is not None: + import_path = list(import_path) + p = path + for i in range(level): + p = os.path.dirname(p) + dir_name = os.path.basename(p) + # This is not the proper way to do relative imports. However, since + # Jedi cannot be sure about the entry point, we just calculate an + # absolute path here. + if dir_name: + # TODO those sys.modules modifications are getting + # really stupid. this is the 3rd time that we're using + # this. We should probably refactor. + if path.endswith(os.path.sep + 'os.py'): + import_path.insert(0, 'os') + else: + import_path.insert(0, dir_name) + else: + _add_error(module_context, import_path[-1]) + import_path = [] + # TODO add import error. + debug.warning('Attempted relative import beyond top-level package.') + # If no path is defined in the module we have no ideas where we + # are in the file system. Therefore we cannot know what to do. + # In this case we just let the path there and ignore that it's + # a relative path. Not sure if that's a good idea. else: # Here we basically rewrite the level to 0. - import_path = tuple(base) + import_path + base = tuple(base) + if level > 1: + base = base[:-level + 1] + + import_path = base + tuple(import_path) self.import_path = import_path @property def str_import_path(self): """Returns the import path as pure strings instead of `Name`.""" - return tuple(str(name) for name in self.import_path) + return tuple( + name.value if isinstance(name, tree.Name) else name + for name in self.import_path) - @memoize_default() def sys_path_with_modifications(self): in_path = [] - sys_path_mod = list(sys_path.sys_path_with_modifications(self._evaluator, self.module)) + sys_path_mod = self._evaluator.project.sys_path \ + + sys_path.check_sys_path_modifications(self.module_context) if self.file_path is not None: # If you edit e.g. gunicorn, there will be imports like this: # `from gunicorn import something`. But gunicorn is not in the @@ -245,17 +270,19 @@ def sys_path_with_modifications(self): return in_path + sys_path_mod - @memoize_default(NO_DEFAULT) def follow(self): if not self.import_path: - return [] + return NO_CONTEXTS return self._do_import(self.import_path, self.sys_path_with_modifications()) def _do_import(self, import_path, sys_path): """ This method is very similar to importlib's `_gcd_import`. """ - import_parts = [str(i) for i in import_path] + import_parts = [ + i.value if isinstance(i, tree.Name) else i + for i in import_path + ] # Handle "magic" Flask extension imports: # ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``. 
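The hunk elided here performs that rewrite; schematically it amounts to the following (an illustration of the mapping, not the exact code in the file):

    def flask_ext_candidates(import_parts):
        # ['flask', 'ext', 'foo'] -> the two layouts that may really exist
        if import_parts[:2] == ['flask', 'ext'] and len(import_parts) >= 3:
            return [
                ['flask_' + import_parts[2]] + import_parts[3:],
                ['flaskext'] + import_parts[2:],
            ]
        return [import_parts]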
@@ -271,7 +298,7 @@ def _do_import(self, import_path, sys_path): module_name = '.'.join(import_parts) try: - return [self._evaluator.modules[module_name]] + return ContextSet(self._evaluator.modules[module_name]) except KeyError: pass @@ -280,43 +307,44 @@ def _do_import(self, import_path, sys_path): # the module cache. bases = self._do_import(import_path[:-1], sys_path) if not bases: - return [] + return NO_CONTEXTS # We can take the first element, because only the os special # case yields multiple modules, which is not important for # further imports. - base = bases[0] + parent_module = list(bases)[0] # This is a huge exception, we follow a nested import # ``os.path``, because it's a very important one in Python # that is being achieved by messing with ``sys.modules`` in # ``os``. - if [str(i) for i in import_path] == ['os', 'path']: - return self._evaluator.find_types(base, 'path') + if import_parts == ['os', 'path']: + return parent_module.py__getattribute__('path') try: - # It's possible that by giving it always the sys path (and not - # the __path__ attribute of the parent, we get wrong results - # and nested namespace packages don't work. But I'm not sure. - paths = base.py__path__(sys_path) + method = parent_module.py__path__ except AttributeError: # The module is not a package. - _add_error(self._evaluator, import_path[-1]) - return [] + _add_error(self.module_context, import_path[-1]) + return NO_CONTEXTS else: + paths = method() debug.dbg('search_module %s in paths %s', module_name, paths) for path in paths: # At the moment we are only using one path. So this is # not important to be correct. try: + if not isinstance(path, list): + path = [path] module_file, module_path, is_pkg = \ - find_module(import_parts[-1], [path]) + find_module(import_parts[-1], path, fullname=module_name) break except ImportError: module_path = None if module_path is None: - _add_error(self._evaluator, import_path[-1]) - return [] + _add_error(self.module_context, import_path[-1]) + return NO_CONTEXTS else: + parent_module = None try: debug.dbg('search_module %s in %s', import_parts[-1], self.file_path) # Override the sys.path. It works only good that way. @@ -324,35 +352,51 @@ def _do_import(self, import_path, sys_path): sys.path, temp = sys_path, sys.path try: module_file, module_path, is_pkg = \ - find_module(import_parts[-1]) + find_module(import_parts[-1], fullname=module_name) finally: sys.path = temp except ImportError: # The module is not a package. - _add_error(self._evaluator, import_path[-1]) - return [] + _add_error(self.module_context, import_path[-1]) + return NO_CONTEXTS - source = None + code = None if is_pkg: # In this case, we don't have a file yet. Search for the # __init__ file. 
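For a plain package that search is roughly the following (a simplified sketch; the `get_init_path` helper shown earlier in this file is the real entry point and also returns None when nothing matches):

    import os

    def probe_init(directory_path):
        path = os.path.join(directory_path, '__init__.py')
        return path if os.path.isfile(path) else None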
- module_path = get_init_path(module_path) + if module_path.endswith(('.zip', '.egg')): + code = module_file.loader.get_source(module_name) + else: + module_path = get_init_path(module_path) elif module_file: - source = module_file.read() + code = module_file.read() module_file.close() - if module_file is None and not module_path.endswith('.py'): - module = compiled.load_module(module_path) + if isinstance(module_path, ImplicitNSInfo): + from jedi.evaluate.context.namespace import ImplicitNamespaceContext + fullname, paths = module_path.name, module_path.paths + module = ImplicitNamespaceContext(self._evaluator, fullname=fullname) + module.paths = paths + elif module_file is None and not module_path.endswith(('.py', '.zip', '.egg')): + module = compiled.load_module(self._evaluator, module_path) else: - module = _load_module(self._evaluator, module_path, source, sys_path) + module = _load_module(self._evaluator, module_path, code, sys_path, parent_module) + + if module is None: + # The file might raise an ImportError e.g. and therefore not be + # importable. + return NO_CONTEXTS self._evaluator.modules[module_name] = module - return [module] + return ContextSet(module) - def _generate_name(self, name): - return helpers.FakeName(name, parent=self.module) + def _generate_name(self, name, in_module=None): + # Create a pseudo import to be able to follow them. + if in_module is None: + return ImportName(self.module_context, name) + return SubModuleName(in_module, name) - def _get_module_names(self, search_path=None): + def _get_module_names(self, search_path=None, in_module=None): """ Get the names of all modules in the search_path. This means file names and not names defined in the files. @@ -360,13 +404,13 @@ def _get_module_names(self, search_path=None): names = [] # add builtin module names - if search_path is None: + if search_path is None and in_module is None: names += [self._generate_name(name) for name in sys.builtin_module_names] if search_path is None: search_path = self.sys_path_with_modifications() for module_loader, name, is_pkg in pkgutil.iter_modules(search_path): - names.append(self._generate_name(name)) + names.append(self._generate_name(name, in_module=in_module)) return names def completion_names(self, evaluator, only_modules=False): @@ -374,14 +418,15 @@ def completion_names(self, evaluator, only_modules=False): :param only_modules: Indicates wheter it's possible to import a definition that is not defined in a module. """ - from jedi.evaluate import finder + from jedi.evaluate.context import ModuleContext + from jedi.evaluate.context.namespace import ImplicitNamespaceContext names = [] if self.import_path: # flask if self.str_import_path == ('flask', 'ext'): # List Flask extensions like ``flask_foo`` for mod in self._get_module_names(): - modname = str(mod) + modname = mod.string_name if modname.startswith('flask_'): extname = modname[len('flask_'):] names.append(self._generate_name(extname)) @@ -391,14 +436,18 @@ def completion_names(self, evaluator, only_modules=False): if os.path.isdir(flaskext): names += self._get_module_names([flaskext]) - for scope in self.follow(): + for context in self.follow(): # Non-modules are not completable. 
- if not scope.type == 'file_input': # not a module + if context.api_type != 'module': # not a module continue - # namespace packages - if isinstance(scope, tree.Module) and scope.path.endswith('__init__.py'): - paths = scope.py__path__(self.sys_path_with_modifications()) + if isinstance(context, ModuleContext) and context.py__file__().endswith('__init__.py'): + paths = context.py__path__() + names += self._get_module_names(paths, in_module=context) + + # implicit namespace packages + elif isinstance(context, ImplicitNamespaceContext): + paths = context.paths names += self._get_module_names(paths) if only_modules: @@ -407,16 +456,12 @@ def completion_names(self, evaluator, only_modules=False): if ('os',) == self.str_import_path and not self.level: # os.path is a hardcoded exception, because it's a # ``sys.modules`` modification. - names.append(self._generate_name('path')) + names.append(self._generate_name('path', context)) continue - for names_dict in scope.names_dicts(search_global=False): - _names = list(chain.from_iterable(names_dict.values())) - if not _names: - continue - _names = finder.filter_definition_names(_names, scope) - names += _names + for filter in context.get_filters(search_global=False): + names += filter.values() else: # Empty import path=completion after import if not self.level: @@ -431,25 +476,22 @@ def completion_names(self, evaluator, only_modules=False): return names -def _load_module(evaluator, path=None, source=None, sys_path=None): - def load(source): - dotted_path = path and compiled.dotted_from_fs_path(path, sys_path) - if path is not None and path.endswith('.py') \ - and not dotted_path in settings.auto_import_modules: - if source is None: - with open(path, 'rb') as f: - source = f.read() - else: - return compiled.load_module(path) - p = path - p = fast.FastParser(evaluator.grammar, common.source_to_unicode(source), p) - cache.save_parser(path, p) - return p.module +def _load_module(evaluator, path=None, code=None, sys_path=None, parent_module=None): + if sys_path is None: + sys_path = evaluator.project.sys_path + + dotted_path = path and compiled.dotted_from_fs_path(path, sys_path) + if path is not None and path.endswith(('.py', '.zip', '.egg')) \ + and dotted_path not in settings.auto_import_modules: - cached = cache.load_parser(path) - module = load(source) if cached is None else cached.module - module = evaluator.wrap(module) - return module + module_node = evaluator.grammar.parse( + code=code, path=path, cache=True, diff_cache=True, + cache_path=settings.cache_directory) + + from jedi.evaluate.context import ModuleContext + return ModuleContext(evaluator, module_node, path=path) + else: + return compiled.load_module(evaluator, path) def add_module(evaluator, module_name, module): @@ -461,47 +503,68 @@ def add_module(evaluator, module_name, module): evaluator.modules[module_name] = module -def get_modules_containing_name(evaluator, mods, name): +def get_modules_containing_name(evaluator, modules, name): """ Search a name in the directories of modules. """ + from jedi.evaluate.context import ModuleContext + def check_directories(paths): + for p in paths: + if p is not None: + # We need abspath, because the seetings paths might not already + # have been converted to absolute paths. 
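A quick illustration of why the `abspath` call below is needed for relative entries:

    import os
    print(os.path.dirname('bar.py'))                   # '' -- useless for os.listdir
    print(os.path.dirname(os.path.abspath('bar.py')))  # an absolute directory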
+ d = os.path.dirname(os.path.abspath(p)) + for file_name in os.listdir(d): + path = os.path.join(d, file_name) + if file_name.endswith('.py'): + yield path + def check_python_file(path): try: - return cache.parser_cache[path].parser.module + # TODO I don't think we should use the cache here?! + node_cache_item = parser_cache[evaluator.grammar._hashed][path] except KeyError: try: return check_fs(path) except IOError: return None + else: + module_node = node_cache_item.node + return ModuleContext(evaluator, module_node, path=path) def check_fs(path): with open(path, 'rb') as f: - source = source_to_unicode(f.read()) - if name in source: - module_name = os.path.basename(path)[:-3] # Remove `.py`. - module = _load_module(evaluator, path, source) - add_module(evaluator, module_name, module) + code = python_bytes_to_unicode(f.read(), errors='replace') + if name in code: + module = _load_module(evaluator, path, code) + + module_name = sys_path.dotted_path_in_sys_path(evaluator.project.sys_path, path) + if module_name is not None: + add_module(evaluator, module_name, module) return module # skip non python modules - mods = set(m for m in mods if not isinstance(m, compiled.CompiledObject)) - mod_paths = set() - for m in mods: - mod_paths.add(m.path) + used_mod_paths = set() + for m in modules: + try: + path = m.py__file__() + except AttributeError: + pass + else: + used_mod_paths.add(path) yield m - if settings.dynamic_params_for_other_modules: - paths = set(settings.additional_dynamic_modules) - for p in mod_paths: - if p is not None: - d = os.path.dirname(p) - for entry in os.listdir(d): - if entry not in mod_paths: - if entry.endswith('.py'): - paths.add(d + os.path.sep + entry) - - for p in sorted(paths): - # make testing easier, sort it - same results on every interpreter - c = check_python_file(p) - if c is not None and c not in mods and not isinstance(c, compiled.CompiledObject): - yield c + if not settings.dynamic_params_for_other_modules: + return + + additional = set(os.path.abspath(p) for p in settings.additional_dynamic_modules) + # Check the directories of used modules. + paths = (additional | set(check_directories(used_mod_paths))) \ + - used_mod_paths + + # Sort here to make issues less random. + for p in sorted(paths): + # make testing easier, sort it - same results on every interpreter + m = check_python_file(p) + if m is not None and not isinstance(m, compiled.CompiledObject): + yield m diff --git a/pythonFiles/release/jedi/evaluate/iterable.py b/pythonFiles/release/jedi/evaluate/iterable.py deleted file mode 100755 index d14a4c5fd2fb..000000000000 --- a/pythonFiles/release/jedi/evaluate/iterable.py +++ /dev/null @@ -1,631 +0,0 @@ -""" -Contains all classes and functions to deal with lists, dicts, generators and -iterators in general. - -Array modifications -******************* - -If the content of an array (``set``/``list``) is requested somewhere, the -current module will be checked for appearances of ``arr.append``, -``arr.insert``, etc. If the ``arr`` name points to an actual array, the -content will be added - -This can be really cpu intensive, as you can imagine. Because |jedi| has to -follow **every** ``append`` and check wheter it's the right array. However this -works pretty good, because in *slow* cases, the recursion detector and other -settings will stop this process. - -It is important to note that: - -1. Array modfications work only in the current module. -2. Jedi only checks Array additions; ``list.pop``, etc are ignored. 
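The behaviour that docstring documents is what makes completions like the following work (user-level code, not jedi internals):

    arr = []
    arr.append(3.14)
    x = arr[0]   # jedi follows the append() and infers x as float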
-""" -from itertools import chain - -from jedi import common -from jedi import debug -from jedi import settings -from jedi._compatibility import use_metaclass, is_py3, unicode -from jedi.parser import tree -from jedi.evaluate import compiled -from jedi.evaluate import helpers -from jedi.evaluate.cache import CachedMetaClass, memoize_default -from jedi.evaluate import analysis - - -def unite(iterable): - """Turns a two dimensional array into a one dimensional.""" - return list(chain.from_iterable(iterable)) - - -class IterableWrapper(tree.Base): - def is_class(self): - return False - - -class GeneratorMixin(object): - @memoize_default() - def names_dicts(self, search_global=False): # is always False - dct = {} - executes_generator = '__next__', 'send', 'next' - for names in compiled.generator_obj.names_dict.values(): - for name in names: - if name.value in executes_generator: - parent = GeneratorMethod(self, name.parent) - dct[name.value] = [helpers.FakeName(name.name, parent, is_definition=True)] - else: - dct[name.value] = [name] - yield dct - - def get_index_types(self, evaluator, index_array): - #debug.warning('Tried to get array access on a generator: %s', self) - analysis.add(self._evaluator, 'type-error-generator', index_array) - return [] - - def get_exact_index_types(self, index): - """ - Exact lookups are used for tuple lookups, which are perfectly fine if - used with generators. - """ - return [self.iter_content()[index]] - - def py__bool__(self): - return True - - -class Generator(use_metaclass(CachedMetaClass, IterableWrapper, GeneratorMixin)): - """Handling of `yield` functions.""" - def __init__(self, evaluator, func, var_args): - super(Generator, self).__init__() - self._evaluator = evaluator - self.func = func - self.var_args = var_args - - def iter_content(self): - """ returns the content of __iter__ """ - # Directly execute it, because with a normal call to py__call__ a - # Generator will be returned. - from jedi.evaluate.representation import FunctionExecution - f = FunctionExecution(self._evaluator, self.func, self.var_args) - return f.get_return_types(check_yields=True) - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'parent', 'get_imports', - 'doc', 'docstr', 'get_parent_until', - 'get_code', 'subscopes']: - raise AttributeError("Accessing %s of %s is not allowed." - % (self, name)) - return getattr(self.func, name) - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.func) - - -class GeneratorMethod(IterableWrapper): - """``__next__`` and ``send`` methods.""" - def __init__(self, generator, builtin_func): - self._builtin_func = builtin_func - self._generator = generator - - def py__call__(self, evaluator, params): - # TODO add TypeError if params are given. - return self._generator.iter_content() - - def __getattr__(self, name): - return getattr(self._builtin_func, name) - - -class Comprehension(IterableWrapper): - @staticmethod - def from_atom(evaluator, atom): - mapping = { - '(': GeneratorComprehension, - '[': ListComprehension - } - return mapping[atom.children[0]](evaluator, atom) - - def __init__(self, evaluator, atom): - self._evaluator = evaluator - self._atom = atom - - @memoize_default() - def eval_node(self): - """ - The first part `x + 1` of the list comprehension: - - [x + 1 for x in foo] - """ - comprehension = self._atom.children[1] - # For nested comprehensions we need to search the last one. 
- last = comprehension.children[-1] - last_comp = comprehension.children[1] - while True: - if isinstance(last, tree.CompFor): - last_comp = last - elif not tree.is_node(last, 'comp_if'): - break - last = last.children[-1] - - return helpers.deep_ast_copy(comprehension.children[0], parent=last_comp) - - def get_exact_index_types(self, index): - return [self._evaluator.eval_element(self.eval_node())[index]] - - def __repr__(self): - return "" % (type(self).__name__, self._atom) - - -class ArrayMixin(object): - @memoize_default() - def names_dicts(self, search_global=False): # Always False. - # `array.type` is a string with the type, e.g. 'list'. - scope = self._evaluator.find_types(compiled.builtin, self.type)[0] - # builtins only have one class -> [0] - scope = self._evaluator.execute(scope, (AlreadyEvaluated((self,)),))[0] - return scope.names_dicts(search_global) - - def py__bool__(self): - return None # We don't know the length, because of appends. - - -class ListComprehension(Comprehension, ArrayMixin): - type = 'list' - - def get_index_types(self, evaluator, index): - return self.iter_content() - - def iter_content(self): - return self._evaluator.eval_element(self.eval_node()) - - @property - def name(self): - return FakeSequence(self._evaluator, [], 'list').name - - -class GeneratorComprehension(Comprehension, GeneratorMixin): - def iter_content(self): - return self._evaluator.eval_element(self.eval_node()) - - -class Array(IterableWrapper, ArrayMixin): - mapping = {'(': 'tuple', - '[': 'list', - '{': 'dict'} - - def __init__(self, evaluator, atom): - self._evaluator = evaluator - self.atom = atom - self.type = Array.mapping[atom.children[0]] - """The builtin name of the array (list, set, tuple or dict).""" - - c = self.atom.children - array_node = c[1] - if self.type == 'dict' and array_node != '}' \ - and (not hasattr(array_node, 'children') - or ':' not in array_node.children): - self.type = 'set' - - @property - def name(self): - return helpers.FakeName(self.type, parent=self) - - @memoize_default() - def get_index_types(self, evaluator, index=()): - """ - Get the types of a specific index or all, if not given. - - :param index: A subscriptlist node (or subnode). - """ - indexes = create_indexes_or_slices(evaluator, index) - lookup_done = False - types = [] - for index in indexes: - if isinstance(index, Slice): - types += [self] - lookup_done = True - elif isinstance(index, compiled.CompiledObject) \ - and isinstance(index.obj, (int, str, unicode)): - with common.ignored(KeyError, IndexError, TypeError): - types += self.get_exact_index_types(index.obj) - lookup_done = True - - return types if lookup_done else self.values() - - @memoize_default() - def values(self): - result = unite(self._evaluator.eval_element(v) for v in self._values()) - result += check_array_additions(self._evaluator, self) - return result - - def get_exact_index_types(self, mixed_index): - """ Here the index is an int/str. Raises IndexError/KeyError """ - if self.type == 'dict': - for key, values in self._items(): - # Because we only want the key to be a string. - keys = self._evaluator.eval_element(key) - - for k in keys: - if isinstance(k, compiled.CompiledObject) \ - and mixed_index == k.obj: - for value in values: - return self._evaluator.eval_element(value) - raise KeyError('No key found in dictionary %s.' 
% self) - - # Can raise an IndexError - return self._evaluator.eval_element(self._items()[mixed_index]) - - def iter_content(self): - return self.values() - - @common.safe_property - def parent(self): - return compiled.builtin - - def get_parent_until(self): - return compiled.builtin - - def __getattr__(self, name): - if name not in ['start_pos', 'get_only_subelement', 'parent', - 'get_parent_until', 'items']: - raise AttributeError('Strange access on %s: %s.' % (self, name)) - return getattr(self.atom, name) - - def _values(self): - """Returns a list of a list of node.""" - if self.type == 'dict': - return list(chain.from_iterable(v for k, v in self._items())) - else: - return self._items() - - def _items(self): - c = self.atom.children - array_node = c[1] - if array_node in (']', '}', ')'): - return [] # Direct closing bracket, doesn't contain items. - - if tree.is_node(array_node, 'testlist_comp'): - return array_node.children[::2] - elif tree.is_node(array_node, 'dictorsetmaker'): - kv = [] - iterator = iter(array_node.children) - for key in iterator: - op = next(iterator, None) - if op is None or op == ',': - kv.append(key) # A set. - elif op == ':': # A dict. - kv.append((key, [next(iterator)])) - next(iterator, None) # Possible comma. - else: - raise NotImplementedError('dict/set comprehensions') - return kv - else: - return [array_node] - - def __iter__(self): - return iter(self._items()) - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.atom) - - -class _FakeArray(Array): - def __init__(self, evaluator, container, type): - self.type = type - self._evaluator = evaluator - self.atom = container - - -class ImplicitTuple(_FakeArray): - def __init__(self, evaluator, testlist): - super(ImplicitTuple, self).__init__(evaluator, testlist, 'tuple') - self._testlist = testlist - - def _items(self): - return self._testlist.children[::2] - - -class FakeSequence(_FakeArray): - def __init__(self, evaluator, sequence_values, type): - super(FakeSequence, self).__init__(evaluator, sequence_values, type) - self._sequence_values = sequence_values - - def _items(self): - return self._sequence_values - - def get_exact_index_types(self, index): - value = self._sequence_values[index] - return self._evaluator.eval_element(value) - - -class AlreadyEvaluated(frozenset): - """A simple container to add already evaluated objects to an array.""" - def get_code(self): - # For debugging purposes. - return str(self) - - -class MergedNodes(frozenset): - pass - - -class FakeDict(_FakeArray): - def __init__(self, evaluator, dct): - super(FakeDict, self).__init__(evaluator, dct, 'dict') - self._dct = dct - - def get_exact_index_types(self, index): - return list(chain.from_iterable(self._evaluator.eval_element(v) - for v in self._dct[index])) - - def _items(self): - return self._dct.items() - - -class MergedArray(_FakeArray): - def __init__(self, evaluator, arrays): - super(MergedArray, self).__init__(evaluator, arrays, arrays[-1].type) - self._arrays = arrays - - def get_exact_index_types(self, mixed_index): - raise IndexError - - def values(self): - return list(chain(*(a.values() for a in self._arrays))) - - def __iter__(self): - for array in self._arrays: - for a in array: - yield a - - def __len__(self): - return sum(len(a) for a in self._arrays) - - -def get_iterator_types(inputs): - """Returns the types of any iterator (arrays, yields, __iter__, etc).""" - iterators = [] - # Take the first statement (for has always only - # one, remember `in`). And follow it. 
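The branches below mirror Python's own iteration protocol, which is worth recalling (plain Python, independent of jedi):

    class Ones:
        def __iter__(self):
            return self      # the `__iter__` lookup handled below
        def __next__(self):
            return 1         # the `__next__`/`next` lookup handled below

    assert next(iter(Ones())) == 1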
- for it in inputs: - if isinstance(it, (Generator, Array, ArrayInstance, Comprehension)): - iterators.append(it) - else: - if not hasattr(it, 'execute_subscope_by_name'): - debug.warning('iterator/for loop input wrong: %s', it) - continue - try: - iterators += it.execute_subscope_by_name('__iter__') - except KeyError: - debug.warning('iterators: No __iter__ method found.') - - result = [] - from jedi.evaluate.representation import Instance - for it in iterators: - if isinstance(it, Array): - # Array is a little bit special, since this is an internal array, - # but there's also the list builtin, which is another thing. - result += it.values() - elif isinstance(it, Instance): - # __iter__ returned an instance. - name = '__next__' if is_py3 else 'next' - try: - result += it.execute_subscope_by_name(name) - except KeyError: - debug.warning('Instance has no __next__ function in %s.', it) - else: - # TODO this is not correct, __iter__ can return arbitrary input! - # Is a generator. - result += it.iter_content() - return result - - -def check_array_additions(evaluator, array): - """ Just a mapper function for the internal _check_array_additions """ - if array.type not in ('list', 'set'): - # TODO also check for dict updates - return [] - - is_list = array.type == 'list' - try: - current_module = array.atom.get_parent_until() - except AttributeError: - # If there's no get_parent_until, it's a FakeSequence or another Fake - # type. Those fake types are used inside Jedi's engine. No values may - # be added to those after their creation. - return [] - return _check_array_additions(evaluator, array, current_module, is_list) - - -@memoize_default([], evaluator_is_first_arg=True) -def _check_array_additions(evaluator, compare_array, module, is_list): - """ - Checks if a `Array` has "add" (append, insert, extend) statements: - - >>> a = [""] - >>> a.append(1) - """ - if not settings.dynamic_array_additions or isinstance(module, compiled.CompiledObject): - return [] - - def check_additions(arglist, add_name): - params = list(param.Arguments(evaluator, arglist).unpack()) - result = [] - if add_name in ['insert']: - params = params[1:] - if add_name in ['append', 'add', 'insert']: - for key, nodes in params: - result += unite(evaluator.eval_element(node) for node in nodes) - elif add_name in ['extend', 'update']: - for key, nodes in params: - iterators = unite(evaluator.eval_element(node) for node in nodes) - result += get_iterator_types(iterators) - return result - - from jedi.evaluate import representation as er, param - - def get_execution_parent(element): - """ Used to get an Instance/FunctionExecution parent """ - if isinstance(element, Array): - node = element.atom - else: - # Is an Instance with an - # Arguments([AlreadyEvaluated([ArrayInstance])]) inside - # Yeah... I know... It's complicated ;-) - node = list(element.var_args.argument_node[0])[0].var_args.trailer - if isinstance(node, er.InstanceElement): - return node - return node.get_parent_until(er.FunctionExecution) - - temp_param_add, settings.dynamic_params_for_other_modules = \ - settings.dynamic_params_for_other_modules, False - - search_names = ['append', 'extend', 'insert'] if is_list else ['add', 'update'] - comp_arr_parent = get_execution_parent(compare_array) - - added_types = [] - for add_name in search_names: - try: - possible_names = module.used_names[add_name] - except KeyError: - continue - else: - for name in possible_names: - # Check if the original scope is an execution. 
If it is, one - # can search for the same statement, that is in the module - # dict. Executions are somewhat special in jedi, since they - # literally copy the contents of a function. - if isinstance(comp_arr_parent, er.FunctionExecution): - if comp_arr_parent.start_pos < name.start_pos < comp_arr_parent.end_pos: - name = comp_arr_parent.name_for_position(name.start_pos) - else: - # Don't check definitions that are not defined in the - # same function. This is not "proper" anyway. It also - # improves Jedi's speed for array lookups, since we - # don't have to check the whole source tree anymore. - continue - trailer = name.parent - power = trailer.parent - trailer_pos = power.children.index(trailer) - try: - execution_trailer = power.children[trailer_pos + 1] - except IndexError: - continue - else: - if execution_trailer.type != 'trailer' \ - or execution_trailer.children[0] != '(' \ - or execution_trailer.children[1] == ')': - continue - power = helpers.call_of_name(name, cut_own_trailer=True) - # InstanceElements are special, because they don't get copied, - # but have this wrapper around them. - if isinstance(comp_arr_parent, er.InstanceElement): - power = er.get_instance_el(evaluator, comp_arr_parent.instance, power) - - if evaluator.recursion_detector.push_stmt(power): - # Check for recursion. Possible by using 'extend' in - # combination with function calls. - continue - if compare_array in evaluator.eval_element(power): - # The arrays match. Now add the results - added_types += check_additions(execution_trailer.children[1], add_name) - - evaluator.recursion_detector.pop_stmt() - # reset settings - settings.dynamic_params_for_other_modules = temp_param_add - return added_types - - -def check_array_instances(evaluator, instance): - """Used for set() and list() instances.""" - if not settings.dynamic_array_additions: - return instance.var_args - - ai = ArrayInstance(evaluator, instance) - from jedi.evaluate import param - return param.Arguments(evaluator, [AlreadyEvaluated([ai])]) - - -class ArrayInstance(IterableWrapper): - """ - Used for the usage of set() and list(). - This is definitely a hack, but a good one :-) - It makes it possible to use set/list conversions. - - In contrast to Array, ListComprehension and all other iterable types, this - is something that is only used inside `evaluate/compiled/fake/builtins.py` - and therefore doesn't need `names_dicts`, `py__bool__` and so on, because - we don't use these operations in `builtins.py`. - """ - def __init__(self, evaluator, instance): - self._evaluator = evaluator - self.instance = instance - self.var_args = instance.var_args - - def iter_content(self): - """ - The index is here just ignored, because of all the appends, etc. - lists/sets are too complicated too handle that. - """ - items = [] - for key, nodes in self.var_args.unpack(): - for node in nodes: - for typ in self._evaluator.eval_element(node): - items += get_iterator_types([typ]) - - module = self.var_args.get_parent_until() - is_list = str(self.instance.name) == 'list' - items += _check_array_additions(self._evaluator, self.instance, module, is_list) - return items - - -class Slice(object): - def __init__(self, evaluator, start, stop, step): - self._evaluator = evaluator - # all of them are either a Precedence or None. - self._start = start - self._stop = stop - self._step = step - - @property - def obj(self): - """ - Imitate CompiledObject.obj behavior and return a ``builtin.slice()`` - object. 
- """ - def get(element): - if element is None: - return None - - result = self._evaluator.eval_element(element) - if len(result) != 1: - # We want slices to be clear defined with just one type. - # Otherwise we will return an empty slice object. - raise IndexError - try: - return result[0].obj - except AttributeError: - return None - - try: - return slice(get(self._start), get(self._stop), get(self._step)) - except IndexError: - return slice(None, None, None) - - -def create_indexes_or_slices(evaluator, index): - if tree.is_node(index, 'subscript'): # subscript is a slice operation. - start, stop, step = None, None, None - result = [] - for el in index.children: - if el == ':': - if not result: - result.append(None) - elif tree.is_node(el, 'sliceop'): - if len(el.children) == 2: - result.append(el.children[1]) - else: - result.append(el) - result += [None] * (3 - len(result)) - - return (Slice(evaluator, *result),) - return evaluator.eval_element(index) diff --git a/pythonFiles/release/jedi/evaluate/jedi_typing.py b/pythonFiles/release/jedi/evaluate/jedi_typing.py new file mode 100644 index 000000000000..f48a567327eb --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/jedi_typing.py @@ -0,0 +1,100 @@ +""" +This module is not intended to be used in jedi, rather it will be fed to the +jedi-parser to replace classes in the typing module +""" + +try: + from collections import abc +except ImportError: + # python 2 + import collections as abc + + +def factory(typing_name, indextypes): + class Iterable(abc.Iterable): + def __iter__(self): + while True: + yield indextypes[0]() + + class Iterator(Iterable, abc.Iterator): + def next(self): + """ needed for python 2 """ + return self.__next__() + + def __next__(self): + return indextypes[0]() + + class Sequence(abc.Sequence): + def __getitem__(self, index): + return indextypes[0]() + + class MutableSequence(Sequence, abc.MutableSequence): + pass + + class List(MutableSequence, list): + pass + + class Tuple(Sequence, tuple): + def __getitem__(self, index): + if indextypes[1] == Ellipsis: + # https://www.python.org/dev/peps/pep-0484/#the-typing-module + # Tuple[int, ...] 
means a tuple of ints of indetermined length + return indextypes[0]() + else: + return indextypes[index]() + + class AbstractSet(Iterable, abc.Set): + pass + + class MutableSet(AbstractSet, abc.MutableSet): + pass + + class KeysView(Iterable, abc.KeysView): + pass + + class ValuesView(abc.ValuesView): + def __iter__(self): + while True: + yield indextypes[1]() + + class ItemsView(abc.ItemsView): + def __iter__(self): + while True: + yield (indextypes[0](), indextypes[1]()) + + class Mapping(Iterable, abc.Mapping): + def __getitem__(self, item): + return indextypes[1]() + + def keys(self): + return KeysView() + + def values(self): + return ValuesView() + + def items(self): + return ItemsView() + + class MutableMapping(Mapping, abc.MutableMapping): + pass + + class Dict(MutableMapping, dict): + pass + + dct = { + "Sequence": Sequence, + "MutableSequence": MutableSequence, + "List": List, + "Iterable": Iterable, + "Iterator": Iterator, + "AbstractSet": AbstractSet, + "MutableSet": MutableSet, + "Mapping": Mapping, + "MutableMapping": MutableMapping, + "Tuple": Tuple, + "KeysView": KeysView, + "ItemsView": ItemsView, + "ValuesView": ValuesView, + "Dict": Dict, + } + return dct[typing_name] diff --git a/pythonFiles/release/jedi/evaluate/lazy_context.py b/pythonFiles/release/jedi/evaluate/lazy_context.py new file mode 100644 index 000000000000..9380212e7ce7 --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/lazy_context.py @@ -0,0 +1,61 @@ +from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS + +class AbstractLazyContext(object): + def __init__(self, data): + self.data = data + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.data) + + def infer(self): + raise NotImplementedError + + +class LazyKnownContext(AbstractLazyContext): + """data is a context.""" + def infer(self): + return ContextSet(self.data) + + +class LazyKnownContexts(AbstractLazyContext): + """data is a ContextSet.""" + def infer(self): + return self.data + + +class LazyUnknownContext(AbstractLazyContext): + def __init__(self): + super(LazyUnknownContext, self).__init__(None) + + def infer(self): + return NO_CONTEXTS + + +class LazyTreeContext(AbstractLazyContext): + def __init__(self, context, node): + super(LazyTreeContext, self).__init__(node) + self._context = context + # We need to save the predefined names. It's an unfortunate side effect + # that needs to be tracked otherwise results will be wrong. 
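In isolation, the snapshot-and-swap idiom that `infer()` below relies on looks like this (a generic illustration with made-up state, not jedi code):

    state = {'flow': 'outer'}
    snapshot = dict(state)        # copied when the lazy context is created
    old, state = state, snapshot  # swapped in around evaluation
    try:
        pass                      # ... evaluate the node here ...
    finally:
        state = old               # always restored, even on error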
+ self._predefined_names = dict(context.predefined_names) + + def infer(self): + old, self._context.predefined_names = \ + self._context.predefined_names, self._predefined_names + try: + return self._context.eval_node(self.data) + finally: + self._context.predefined_names = old + + +def get_merged_lazy_context(lazy_contexts): + if len(lazy_contexts) > 1: + return MergedLazyContexts(lazy_contexts) + else: + return lazy_contexts[0] + + +class MergedLazyContexts(AbstractLazyContext): + """data is a list of lazy contexts.""" + def infer(self): + return ContextSet.from_sets(l.infer() for l in self.data) diff --git a/pythonFiles/release/jedi/evaluate/param.py b/pythonFiles/release/jedi/evaluate/param.py index 8524bf958582..a46394ce8a81 100755 --- a/pythonFiles/release/jedi/evaluate/param.py +++ b/pythonFiles/release/jedi/evaluate/param.py @@ -1,303 +1,128 @@ from collections import defaultdict -from itertools import chain -from jedi._compatibility import unicode, zip_longest -from jedi import debug -from jedi import common -from jedi.parser import tree -from jedi.evaluate import iterable +from jedi.evaluate.utils import PushBackIterator from jedi.evaluate import analysis -from jedi.evaluate import precedence -from jedi.evaluate.helpers import FakeName -from jedi.cache import underscore_memoization +from jedi.evaluate.lazy_context import LazyKnownContext, \ + LazyTreeContext, LazyUnknownContext +from jedi.evaluate import docstrings +from jedi.evaluate import pep0484 +from jedi.evaluate.context import iterable -class Arguments(tree.Base): - def __init__(self, evaluator, argument_node, trailer=None): - """ - The argument_node is either a parser node or a list of evaluated - objects. Those evaluated objects may be lists of evaluated objects - themselves (one list for the first argument, one for the second, etc). +def _add_argument_issue(parent_context, error_name, lazy_context, message): + if isinstance(lazy_context, LazyTreeContext): + node = lazy_context.data + if node.parent.type == 'argument': + node = node.parent + analysis.add(parent_context, error_name, node, message) - :param argument_node: May be an argument_node or a list of nodes. - """ - self.argument_node = argument_node - self._evaluator = evaluator - self.trailer = trailer # Can be None, e.g. in a class definition. 
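A "trailer" is the `(...)` part attached to the callee; the stdlib `ast` module shows the same call/argument split (used purely for illustration here -- jedi has its own parse tree):

    import ast
    call = ast.parse('f(a, b=1)', mode='eval').body
    print(type(call).__name__)                 # Call
    print(len(call.args), len(call.keywords))  # 1 1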
- def _split(self): - if isinstance(self.argument_node, (tuple, list)): - for el in self.argument_node: - yield 0, el - else: - if not tree.is_node(self.argument_node, 'arglist'): - yield 0, self.argument_node - return - - iterator = iter(self.argument_node.children) - for child in iterator: - if child == ',': - continue - elif child in ('*', '**'): - yield len(child.value), next(iterator) - else: - yield 0, child - - def get_parent_until(self, *args, **kwargs): - if self.trailer is None: - try: - element = self.argument_node[0] - from jedi.evaluate.iterable import AlreadyEvaluated - if isinstance(element, AlreadyEvaluated): - element = self._evaluator.eval_element(element)[0] - except IndexError: - return None - else: - return element.get_parent_until(*args, **kwargs) - else: - return self.trailer.get_parent_until(*args, **kwargs) - - def as_tuple(self): - for stars, argument in self._split(): - if tree.is_node(argument, 'argument'): - argument, default = argument.children[::2] - else: - default = None - yield argument, default, stars - - def unpack(self, func=None): - named_args = [] - for stars, el in self._split(): - if stars == 1: - arrays = self._evaluator.eval_element(el) - iterators = [_iterate_star_args(self._evaluator, a, el, func) - for a in arrays] - iterators = list(iterators) - for values in list(zip_longest(*iterators)): - yield None, [v for v in values if v is not None] - elif stars == 2: - arrays = self._evaluator.eval_element(el) - dicts = [_star_star_dict(self._evaluator, a, el, func) - for a in arrays] - for dct in dicts: - for key, values in dct.items(): - yield key, values - else: - if tree.is_node(el, 'argument'): - c = el.children - if len(c) == 3: # Keyword argument. - named_args.append((c[0].value, (c[2],))) - else: # Generator comprehension. - # Include the brackets with the parent. - comp = iterable.GeneratorComprehension( - self._evaluator, self.argument_node.parent) - yield None, (iterable.AlreadyEvaluated([comp]),) - elif isinstance(el, (list, tuple)): - yield None, el - else: - yield None, (el,) - - # Reordering var_args is necessary, because star args sometimes appear - # after named argument, but in the actual order it's prepended. - for key_arg in named_args: - yield key_arg - - def _reorder_var_args(var_args): - named_index = None - new_args = [] - for i, stmt in enumerate(var_args): - if isinstance(stmt, tree.ExprStmt): - if named_index is None and stmt.assignment_details: - named_index = i - - if named_index is not None: - expression_list = stmt.expression_list() - if expression_list and expression_list[0] == '*': - new_args.insert(named_index, stmt) - named_index += 1 - continue - - new_args.append(stmt) - return new_args - - def eval_argument_clinic(self, arguments): - """Uses a list with argument clinic information (see PEP 436).""" - iterator = self.unpack() - for i, (name, optional, allow_kwargs) in enumerate(arguments): - key, va_values = next(iterator, (None, [])) - if key is not None: - raise NotImplementedError - if not va_values and not optional: - debug.warning('TypeError: %s expected at least %s arguments, got %s', - name, len(arguments), i) - raise ValueError - values = list(chain.from_iterable(self._evaluator.eval_element(el) - for el in va_values)) - if not values and not optional: - # For the stdlib we always want values. If we don't get them, - # that's ok, maybe something is too hard to resolve, however, - # we will not proceed with the evaluation of that function. 
- debug.warning('argument_clinic "%s" not resolvable.', name) - raise ValueError - yield values - - def scope(self): - # Returns the scope in which the arguments are used. - return (self.trailer or self.argument_node).get_parent_until(tree.IsScope) - - def eval_args(self): - # TODO this method doesn't work with named args and a lot of other - # things. Use unpack. - return [self._evaluator.eval_element(el) for stars, el in self._split()] - - def __repr__(self): - return '<%s: %s>' % (type(self).__name__, self.argument_node) - - def get_calling_var_args(self): - if tree.is_node(self.argument_node, 'arglist', 'argument') \ - or self.argument_node == () and self.trailer is not None: - return _get_calling_var_args(self._evaluator, self) - else: - return None - - -class ExecutedParam(tree.Param): +class ExecutedParam(object): """Fake a param and give it values.""" - def __init__(self, original_param, var_args, values): - self._original_param = original_param - self.var_args = var_args - self._values = values + def __init__(self, execution_context, param_node, lazy_context): + self._execution_context = execution_context + self._param_node = param_node + self._lazy_context = lazy_context + self.string_name = param_node.name.value - def eval(self, evaluator): - types = [] - for v in self._values: - types += evaluator.eval_element(v) - return types + def infer(self): + pep0484_hints = pep0484.infer_param(self._execution_context, self._param_node) + doc_params = docstrings.infer_param(self._execution_context, self._param_node) + if pep0484_hints or doc_params: + return pep0484_hints | doc_params - @property - def position_nr(self): - # Need to use the original logic here, because it uses the parent. - return self._original_param.position_nr + return self._lazy_context.infer() @property - @underscore_memoization - def name(self): - return FakeName(str(self._original_param.name), self, self.start_pos) - - def __getattr__(self, name): - return getattr(self._original_param, name) - - -def _get_calling_var_args(evaluator, var_args): - old_var_args = None - while var_args != old_var_args: - old_var_args = var_args - for name, default, stars in reversed(list(var_args.as_tuple())): - if not stars or not isinstance(name, tree.Name): - continue + def var_args(self): + return self._execution_context.var_args - names = evaluator.goto(name) - if len(names) != 1: - break - param = names[0].get_definition() - if not isinstance(param, ExecutedParam): - if isinstance(param, tree.Param): - # There is no calling var_args in this case - there's just - # a param without any input. - return None - break - # We never want var_args to be a tuple. This should be enough for - # now, we can change it later, if we need to. - if isinstance(param.var_args, Arguments): - var_args = param.var_args - return var_args.argument_node or var_args.trailer + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.string_name) -def get_params(evaluator, func, var_args): - param_names = [] +def get_params(execution_context, var_args): + result_params = [] param_dict = {} - for param in func.params: - param_dict[str(param.name)] = param - unpacked_va = list(var_args.unpack(func)) - from jedi.evaluate.representation import InstanceElement - if isinstance(func, InstanceElement): - # Include self at this place. 
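The `self` insertion removed here models ordinary bound-method behaviour (plain Python):

    class A:
        def m(self, x):
            return x

    a = A()
    assert a.m(1) == A.m(a, 1)   # the instance is prepended as the first argument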
- unpacked_va.insert(0, (None, [iterable.AlreadyEvaluated([func.instance])])) - var_arg_iterator = common.PushBackIterator(iter(unpacked_va)) + funcdef = execution_context.tree_node + parent_context = execution_context.parent_context + + for param in funcdef.get_params(): + param_dict[param.name.value] = param + unpacked_va = list(var_args.unpack(funcdef)) + var_arg_iterator = PushBackIterator(iter(unpacked_va)) non_matching_keys = defaultdict(lambda: []) keys_used = {} keys_only = False had_multiple_value_error = False - for param in func.params: + for param in funcdef.get_params(): # The value and key can both be null. There, the defaults apply. # args / kwargs will just be empty arrays / dicts, respectively. # Wrong value count is just ignored. If you try to test cases that are # not allowed in Python, Jedi will maybe not show any completions. - default = [] if param.default is None else [param.default] - key, va_values = next(var_arg_iterator, (None, default)) + key, argument = next(var_arg_iterator, (None, None)) while key is not None: keys_only = True - k = unicode(key) try: - key_param = param_dict[unicode(key)] + key_param = param_dict[key] except KeyError: - non_matching_keys[key] += va_values + non_matching_keys[key] = argument else: - param_names.append(ExecutedParam(key_param, var_args, va_values).name) + if key in keys_used: + had_multiple_value_error = True + m = ("TypeError: %s() got multiple values for keyword argument '%s'." + % (funcdef.name, key)) + for node in var_args.get_calling_nodes(): + analysis.add(parent_context, 'type-error-multiple-values', + node, message=m) + else: + keys_used[key] = ExecutedParam(execution_context, key_param, argument) + key, argument = next(var_arg_iterator, (None, None)) - if k in keys_used: - had_multiple_value_error = True - m = ("TypeError: %s() got multiple values for keyword argument '%s'." - % (func.name, k)) - calling_va = _get_calling_var_args(evaluator, var_args) - if calling_va is not None: - analysis.add(evaluator, 'type-error-multiple-values', - calling_va, message=m) - else: - try: - keys_used[k] = param_names[-1] - except IndexError: - # TODO this is wrong stupid and whatever. - pass - key, va_values = next(var_arg_iterator, (None, ())) + try: + result_params.append(keys_used[param.name.value]) + continue + except KeyError: + pass - values = [] - if param.stars == 1: + if param.star_count == 1: # *args param - lst_values = [iterable.MergedNodes(va_values)] if va_values else [] - for key, va_values in var_arg_iterator: - # Iterate until a key argument is found. - if key: - var_arg_iterator.push_back((key, va_values)) - break - if va_values: - lst_values.append(iterable.MergedNodes(va_values)) - seq = iterable.FakeSequence(evaluator, lst_values, 'tuple') - values = [iterable.AlreadyEvaluated([seq])] - elif param.stars == 2: + lazy_context_list = [] + if argument is not None: + lazy_context_list.append(argument) + for key, argument in var_arg_iterator: + # Iterate until a key argument is found. 
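What the `star_count == 1` branch reproduces is plain tuple packing, shown here for comparison:

    def f(a, *args, **kwargs):
        return a, args, kwargs

    assert f(1, 2, 3, x=4) == (1, (2, 3), {'x': 4})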
+ if key: + var_arg_iterator.push_back((key, argument)) + break + lazy_context_list.append(argument) + seq = iterable.FakeSequence(execution_context.evaluator, 'tuple', lazy_context_list) + result_arg = LazyKnownContext(seq) + elif param.star_count == 2: # **kwargs param - dct = iterable.FakeDict(evaluator, dict(non_matching_keys)) - values = [iterable.AlreadyEvaluated([dct])] + dct = iterable.FakeDict(execution_context.evaluator, dict(non_matching_keys)) + result_arg = LazyKnownContext(dct) non_matching_keys = {} else: # normal param - if va_values: - values = va_values - else: + if argument is None: # No value: Return an empty container - values = [] - if not keys_only: - calling_va = var_args.get_calling_var_args() - if calling_va is not None: - m = _error_argument_count(func, len(unpacked_va)) - analysis.add(evaluator, 'type-error-too-few-arguments', - calling_va, message=m) + if param.default is None: + result_arg = LazyUnknownContext() + if not keys_only: + for node in var_args.get_calling_nodes(): + m = _error_argument_count(funcdef, len(unpacked_va)) + analysis.add(parent_context, 'type-error-too-few-arguments', + node, message=m) + else: + result_arg = LazyTreeContext(parent_context, param.default) + else: + result_arg = argument - # Now add to result if it's not one of the previously covered cases. - if (not keys_only or param.stars == 2): - param_names.append(ExecutedParam(param, var_args, values).name) - keys_used[unicode(param.name)] = param_names[-1] + result_params.append(ExecutedParam(execution_context, param, result_arg)) + if not isinstance(result_arg, LazyUnknownContext): + keys_used[param.name.value] = result_params[-1] if keys_only: # All arguments should be handed over to the next function. It's not @@ -305,99 +130,66 @@ def get_params(evaluator, func, var_args): # there's nothing to find for certain names. for k in set(param_dict) - set(keys_used): param = param_dict[k] - values = [] if param.default is None else [param.default] - param_names.append(ExecutedParam(param, var_args, values).name) - if not (non_matching_keys or had_multiple_value_error - or param.stars or param.default): + if not (non_matching_keys or had_multiple_value_error or + param.star_count or param.default): # add a warning only if there's not another one. - calling_va = _get_calling_var_args(evaluator, var_args) - if calling_va is not None: - m = _error_argument_count(func, len(unpacked_va)) - analysis.add(evaluator, 'type-error-too-few-arguments', - calling_va, message=m) + for node in var_args.get_calling_nodes(): + m = _error_argument_count(funcdef, len(unpacked_va)) + analysis.add(parent_context, 'type-error-too-few-arguments', + node, message=m) - for key, va_values in non_matching_keys.items(): + for key, lazy_context in non_matching_keys.items(): m = "TypeError: %s() got an unexpected keyword argument '%s'." \ - % (func.name, key) - for value in va_values: - analysis.add(evaluator, 'type-error-keyword-argument', value.parent, message=m) - - remaining_params = list(var_arg_iterator) - if remaining_params: - m = _error_argument_count(func, len(unpacked_va)) + % (funcdef.name, key) + _add_argument_issue( + parent_context, + 'type-error-keyword-argument', + lazy_context, + message=m + ) + + remaining_arguments = list(var_arg_iterator) + if remaining_arguments: + m = _error_argument_count(funcdef, len(unpacked_va)) # Just report an error for the first param that is not needed (like # cPython). 
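CPython's corresponding diagnostic, which this error report mirrors:

    def g(a):
        pass

    try:
        g(1, 2)
    except TypeError as e:
        print(e)   # g() takes 1 positional argument but 2 were given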
- first_key, first_values = remaining_params[0] - for v in first_values: - if first_key is not None: - # Is a keyword argument, return the whole thing instead of just - # the value node. - v = v.parent - try: - non_kw_param = keys_used[first_key] - except KeyError: - pass - else: - origin_args = non_kw_param.parent.var_args.argument_node - # TODO calculate the var_args tree and check if it's in - # the tree (if not continue). - # print('\t\tnonkw', non_kw_param.parent.var_args.argument_node, ) - if origin_args not in [f.parent.parent for f in first_values]: - continue - analysis.add(evaluator, 'type-error-too-many-arguments', - v, message=m) - return param_names + first_key, lazy_context = remaining_arguments[0] + if var_args.get_calling_nodes(): + # There might not be a valid calling node so check for that first. + _add_argument_issue(parent_context, 'type-error-too-many-arguments', lazy_context, message=m) + return result_params -def _iterate_star_args(evaluator, array, input_node, func=None): - from jedi.evaluate.representation import Instance - if isinstance(array, iterable.Array): - for field_stmt in array: # yield from plz! - yield field_stmt - elif isinstance(array, iterable.Generator): - for field_stmt in array.iter_content(): - yield iterable.AlreadyEvaluated([field_stmt]) - elif isinstance(array, Instance) and array.name.get_code() == 'tuple': - debug.warning('Ignored a tuple *args input %s' % array) - else: - if func is not None: - m = "TypeError: %s() argument after * must be a sequence, not %s" \ - % (func.name.value, array) - analysis.add(evaluator, 'type-error-star', input_node, message=m) - - -def _star_star_dict(evaluator, array, input_node, func): - dct = defaultdict(lambda: []) - from jedi.evaluate.representation import Instance - if isinstance(array, Instance) and array.name.get_code() == 'dict': - # For now ignore this case. In the future add proper iterators and just - # make one call without crazy isinstance checks. - return {} - - if isinstance(array, iterable.FakeDict): - return array._dct - elif isinstance(array, iterable.Array) and array.type == 'dict': - # TODO bad call to non-public API - for key_node, values in array._items(): - for key in evaluator.eval_element(key_node): - if precedence.is_string(key): - dct[key.obj] += values - - else: - if func is not None: - m = "TypeError: %s argument after ** must be a mapping, not %s" \ - % (func.name.value, array) - analysis.add(evaluator, 'type-error-star-star', input_node, message=m) - return dict(dct) - - -def _error_argument_count(func, actual_count): - default_arguments = sum(1 for p in func.params if p.default or p.stars) +def _error_argument_count(funcdef, actual_count): + params = funcdef.get_params() + default_arguments = sum(1 for p in params if p.default or p.star_count) if default_arguments == 0: before = 'exactly ' else: - before = 'from %s to ' % (len(func.params) - default_arguments) + before = 'from %s to ' % (len(params) - default_arguments) return ('TypeError: %s() takes %s%s arguments (%s given).' 
- % (func.name, before, len(func.params), actual_count)) + % (funcdef.name, before, len(params), actual_count)) + + +def _create_default_param(execution_context, param): + if param.star_count == 1: + result_arg = LazyKnownContext( + iterable.FakeSequence(execution_context.evaluator, 'tuple', []) + ) + elif param.star_count == 2: + result_arg = LazyKnownContext( + iterable.FakeDict(execution_context.evaluator, {}) + ) + elif param.default is None: + result_arg = LazyUnknownContext() + else: + result_arg = LazyTreeContext(execution_context.parent_context, param.default) + return ExecutedParam(execution_context, param, result_arg) + + +def create_default_params(execution_context, funcdef): + return [_create_default_param(execution_context, p) + for p in funcdef.get_params()] + diff --git a/pythonFiles/release/jedi/evaluate/parser_cache.py b/pythonFiles/release/jedi/evaluate/parser_cache.py new file mode 100644 index 000000000000..84fe52d977f8 --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/parser_cache.py @@ -0,0 +1,6 @@ +from jedi.evaluate.cache import evaluator_function_cache + + +@evaluator_function_cache() +def get_yield_exprs(evaluator, funcdef): + return list(funcdef.iter_yield_exprs()) diff --git a/pythonFiles/release/jedi/evaluate/pep0484.py b/pythonFiles/release/jedi/evaluate/pep0484.py new file mode 100644 index 000000000000..820f112c54e0 --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/pep0484.py @@ -0,0 +1,222 @@ +""" +PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints +through function annotations. There is a strong suggestion in this document +that only the type of type hinting defined in PEP0484 should be allowed +as annotations in future python versions. + +The (initial / probably incomplete) implementation todo list for pep-0484: +v Function parameter annotations with builtin/custom type classes +v Function returntype annotations with builtin/custom type classes +v Function parameter annotations with strings (forward reference) +v Function return type annotations with strings (forward reference) +v Local variable type hints +v Assigned types: `Url = str\ndef get(url:Url) -> str:` +v Type hints in `with` statements +x Stub files support +x support `@no_type_check` and `@no_type_check_decorator` +x support for typing.cast() operator +x support for type hint comments for functions, `# type: (int, str) -> int`. 
+ See comment from Guido https://github.com/davidhalter/jedi/issues/662 +""" + +import os +import re + +from parso import ParserSyntaxError +from parso.python import tree + +from jedi.evaluate.cache import evaluator_method_cache +from jedi.evaluate import compiled +from jedi.evaluate.base_context import NO_CONTEXTS, ContextSet +from jedi.evaluate.lazy_context import LazyTreeContext +from jedi.evaluate.context import ModuleContext +from jedi import debug +from jedi import _compatibility +from jedi import parser_utils + + +def _evaluate_for_annotation(context, annotation, index=None): + """ + Evaluates a string-node, looking for an annotation + If index is not None, the annotation is expected to be a tuple + and we're interested in that index + """ + if annotation is not None: + context_set = context.eval_node(_fix_forward_reference(context, annotation)) + if index is not None: + context_set = context_set.filter( + lambda context: context.array_type == 'tuple' \ + and len(list(context.py__iter__())) >= index + ).py__getitem__(index) + return context_set.execute_evaluated() + else: + return NO_CONTEXTS + + +def _fix_forward_reference(context, node): + evaled_nodes = context.eval_node(node) + if len(evaled_nodes) != 1: + debug.warning("Eval'ed typing index %s should lead to 1 object, " + " not %s" % (node, evaled_nodes)) + return node + evaled_node = list(evaled_nodes)[0] + if isinstance(evaled_node, compiled.CompiledObject) and \ + isinstance(evaled_node.obj, str): + try: + new_node = context.evaluator.grammar.parse( + _compatibility.unicode(evaled_node.obj), + start_symbol='eval_input', + error_recovery=False + ) + except ParserSyntaxError: + debug.warning('Annotation not parsed: %s' % evaled_node.obj) + return node + else: + module = node.get_root_node() + parser_utils.move(new_node, module.end_pos[0]) + new_node.parent = context.tree_node + return new_node + else: + return node + + +@evaluator_method_cache() +def infer_param(execution_context, param): + annotation = param.annotation + module_context = execution_context.get_root_context() + return _evaluate_for_annotation(module_context, annotation) + + +def py__annotations__(funcdef): + return_annotation = funcdef.annotation + if return_annotation: + dct = {'return': return_annotation} + else: + dct = {} + for function_param in funcdef.get_params(): + param_annotation = function_param.annotation + if param_annotation is not None: + dct[function_param.name.value] = param_annotation + return dct + + +@evaluator_method_cache() +def infer_return_types(function_context): + annotation = py__annotations__(function_context.tree_node).get("return", None) + module_context = function_context.get_root_context() + return _evaluate_for_annotation(module_context, annotation) + + +_typing_module = None + + +def _get_typing_replacement_module(grammar): + """ + The idea is to return our jedi replacement for the PEP-0484 typing module + as discussed at https://github.com/davidhalter/jedi/issues/663 + """ + global _typing_module + if _typing_module is None: + typing_path = \ + os.path.abspath(os.path.join(__file__, "../jedi_typing.py")) + with open(typing_path) as f: + code = _compatibility.unicode(f.read()) + _typing_module = grammar.parse(code) + return _typing_module + + +def py__getitem__(context, typ, node): + if not typ.get_root_context().name.string_name == "typing": + return None + # we assume that any class using [] in a module called + # "typing" with a name for which we have a replacement + # should be replaced by that class. 
This is not 100% + # airtight but I don't have a better idea to check that it's + # actually the PEP-0484 typing module and not some other + if node.type == "subscriptlist": + nodes = node.children[::2] # skip the commas + else: + nodes = [node] + del node + + nodes = [_fix_forward_reference(context, node) for node in nodes] + type_name = typ.name.string_name + + # hacked in Union and Optional, since it's hard to do nicely in parsed code + if type_name in ("Union", '_Union'): + # In Python 3.6 it's still called typing.Union but it's an instance + # called _Union. + return ContextSet.from_sets(context.eval_node(node) for node in nodes) + if type_name in ("Optional", '_Optional'): + # Here we have the same issue like in Union. Therefore we also need to + # check for the instance typing._Optional (Python 3.6). + return context.eval_node(nodes[0]) + + typing = ModuleContext( + context.evaluator, + module_node=_get_typing_replacement_module(context.evaluator.latest_grammar), + path=None + ) + factories = typing.py__getattribute__("factory") + assert len(factories) == 1 + factory = list(factories)[0] + assert factory + function_body_nodes = factory.tree_node.children[4].children + valid_classnames = set(child.name.value + for child in function_body_nodes + if isinstance(child, tree.Class)) + if type_name not in valid_classnames: + return None + compiled_classname = compiled.create(context.evaluator, type_name) + + from jedi.evaluate.context.iterable import FakeSequence + args = FakeSequence( + context.evaluator, + "tuple", + [LazyTreeContext(context, n) for n in nodes] + ) + + result = factory.execute_evaluated(compiled_classname, args) + return result + + +def find_type_from_comment_hint_for(context, node, name): + return _find_type_from_comment_hint(context, node, node.children[1], name) + + +def find_type_from_comment_hint_with(context, node, name): + assert len(node.children[1].children) == 3, \ + "Can only be here when children[1] is 'foo() as f'" + varlist = node.children[1].children[2] + return _find_type_from_comment_hint(context, node, varlist, name) + + +def find_type_from_comment_hint_assign(context, node, name): + return _find_type_from_comment_hint(context, node, node.children[0], name) + + +def _find_type_from_comment_hint(context, node, varlist, name): + index = None + if varlist.type in ("testlist_star_expr", "exprlist", "testlist"): + # something like "a, b = 1, 2" + index = 0 + for child in varlist.children: + if child == name: + break + if child.type == "operator": + continue + index += 1 + else: + return [] + + comment = parser_utils.get_following_comment_same_line(node) + if comment is None: + return [] + match = re.match(r"^#\s*type:\s*([^#]*)", comment) + if not match: + return [] + annotation = tree.String( + repr(str(match.group(1).strip())), + node.start_pos) + annotation.parent = node.parent + return _evaluate_for_annotation(context, annotation, index) diff --git a/pythonFiles/release/jedi/evaluate/precedence.py b/pythonFiles/release/jedi/evaluate/precedence.py deleted file mode 100755 index 7a2ee6d2763b..000000000000 --- a/pythonFiles/release/jedi/evaluate/precedence.py +++ /dev/null @@ -1,174 +0,0 @@ -""" -Handles operator precedence. -""" -import operator - -from jedi._compatibility import unicode -from jedi.parser import tree -from jedi import debug -from jedi.evaluate.compiled import (CompiledObject, create, builtin, - keyword_from_value, true_obj, false_obj) -from jedi.evaluate import analysis - -# Maps Python syntax to the operator module. 
-COMPARISON_OPERATORS = { - '==': operator.eq, - '!=': operator.ne, - 'is': operator.is_, - 'is not': operator.is_not, - '<': operator.lt, - '<=': operator.le, - '>': operator.gt, - '>=': operator.ge, -} - - -def _literals_to_types(evaluator, result): - # Changes literals ('a', 1, 1.0, etc) to its type instances (str(), - # int(), float(), etc). - for i, r in enumerate(result): - if is_literal(r): - # Literals are only valid as long as the operations are - # correct. Otherwise add a value-free instance. - cls = builtin.get_by_name(r.name.get_code()) - result[i] = evaluator.execute(cls)[0] - return list(set(result)) - - -def calculate_children(evaluator, children): - """ - Calculate a list of children with operators. - """ - iterator = iter(children) - types = evaluator.eval_element(next(iterator)) - for operator in iterator: - right = next(iterator) - if tree.is_node(operator, 'comp_op'): # not in / is not - operator = ' '.join(str(c.value) for c in operator.children) - - # handle lazy evaluation of and/or here. - if operator in ('and', 'or'): - left_bools = set([left.py__bool__() for left in types]) - if left_bools == set([True]): - if operator == 'and': - types = evaluator.eval_element(right) - elif left_bools == set([False]): - if operator != 'and': - types = evaluator.eval_element(right) - # Otherwise continue, because of uncertainty. - else: - types = calculate(evaluator, types, operator, - evaluator.eval_element(right)) - debug.dbg('calculate_children types %s', types) - return types - - -def calculate(evaluator, left_result, operator, right_result): - result = [] - if not left_result or not right_result: - # illegal slices e.g. cause left/right_result to be None - result = (left_result or []) + (right_result or []) - result = _literals_to_types(evaluator, result) - else: - # I don't think there's a reasonable chance that a string - # operation is still correct, once we pass something like six - # objects. - if len(left_result) * len(right_result) > 6: - result = _literals_to_types(evaluator, left_result + right_result) - else: - for left in left_result: - for right in right_result: - result += _element_calculate(evaluator, left, operator, right) - return result - - -def factor_calculate(evaluator, types, operator): - """ - Calculates `+`, `-`, `~` and `not` prefixes. - """ - for typ in types: - if operator == '-': - if _is_number(typ): - yield create(evaluator, -typ.obj) - elif operator == 'not': - value = typ.py__bool__() - if value is None: # Uncertainty. 
- return - yield keyword_from_value(not value) - else: - yield typ - - -def _is_number(obj): - return isinstance(obj, CompiledObject) \ - and isinstance(obj.obj, (int, float)) - - -def is_string(obj): - return isinstance(obj, CompiledObject) \ - and isinstance(obj.obj, (str, unicode)) - - -def is_literal(obj): - return _is_number(obj) or is_string(obj) - - -def _is_tuple(obj): - from jedi.evaluate import iterable - return isinstance(obj, iterable.Array) and obj.type == 'tuple' - - -def _is_list(obj): - from jedi.evaluate import iterable - return isinstance(obj, iterable.Array) and obj.type == 'list' - - -def _element_calculate(evaluator, left, operator, right): - from jedi.evaluate import iterable, representation as er - l_is_num = _is_number(left) - r_is_num = _is_number(right) - if operator == '*': - # for iterables, ignore * operations - if isinstance(left, iterable.Array) or is_string(left): - return [left] - elif isinstance(right, iterable.Array) or is_string(right): - return [right] - elif operator == '+': - if l_is_num and r_is_num or is_string(left) and is_string(right): - return [create(evaluator, left.obj + right.obj)] - elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right): - return [iterable.MergedArray(evaluator, (left, right))] - elif operator == '-': - if l_is_num and r_is_num: - return [create(evaluator, left.obj - right.obj)] - elif operator == '%': - # With strings and numbers the left type typically remains. Except for - # `int() % float()`. - return [left] - elif operator in COMPARISON_OPERATORS: - operation = COMPARISON_OPERATORS[operator] - if isinstance(left, CompiledObject) and isinstance(right, CompiledObject): - # Possible, because the return is not an option. Just compare. - left = left.obj - right = right.obj - - try: - return [keyword_from_value(operation(left, right))] - except TypeError: - # Could be True or False. - return [true_obj, false_obj] - elif operator == 'in': - return [] - - def check(obj): - """Checks if a Jedi object is either a float or an int.""" - return isinstance(obj, er.Instance) and obj.name.get_code() in ('int', 'float') - - # Static analysis, one is a number, the other one is not. 
-    if operator in ('+', '-') and l_is_num != r_is_num \
-            and not (check(left) or check(right)):
-        message = "TypeError: unsupported operand type(s) for +: %s and %s"
-        analysis.add(evaluator, 'type-error-operation', operator,
-                     message % (left, right))
-
-    return [left, right]
diff --git a/pythonFiles/release/jedi/evaluate/project.py b/pythonFiles/release/jedi/evaluate/project.py
new file mode 100644
index 000000000000..b90f0f0c3be8
--- /dev/null
+++ b/pythonFiles/release/jedi/evaluate/project.py
@@ -0,0 +1,40 @@
+import os
+import sys
+
+from jedi.evaluate.sys_path import get_venv_path, detect_additional_paths
+from jedi.cache import underscore_memoization
+
+
+class Project(object):
+    def __init__(self, sys_path=None):
+        if sys_path is not None:
+            self._sys_path = sys_path
+
+        venv = os.getenv('VIRTUAL_ENV')
+        if venv:
+            sys_path = get_venv_path(venv)
+
+        if sys_path is None:
+            sys_path = sys.path
+
+        base_sys_path = list(sys_path)
+        try:
+            base_sys_path.remove('')
+        except ValueError:
+            pass
+
+        self._base_sys_path = base_sys_path
+
+    def add_script_path(self, script_path):
+        self._script_path = script_path
+
+    def add_evaluator(self, evaluator):
+        self._evaluator = evaluator
+
+    @property
+    @underscore_memoization
+    def sys_path(self):
+        if self._script_path is None:
+            return self._base_sys_path
+
+        return self._base_sys_path + detect_additional_paths(self._evaluator, self._script_path)
diff --git a/pythonFiles/release/jedi/evaluate/recursion.py b/pythonFiles/release/jedi/evaluate/recursion.py
index a4f5fbc33fc3..e2f34a4a9a06 100755
--- a/pythonFiles/release/jedi/evaluate/recursion.py
+++ b/pythonFiles/release/jedi/evaluate/recursion.py
@@ -6,152 +6,130 @@
 Next to :mod:`jedi.evaluate.cache` this module also makes |jedi| not
 thread-safe. Why? ``execution_recursion_decorator`` uses class variables to
 count the function calls.
+
+.. _settings-recursion:
+
+Settings
+~~~~~~~~~~
+
+Recursion settings are important if you don't want extremely
+recursive Python code to go absolutely crazy.
+
+The default values are based on experiments while completing the |jedi| library
+itself (inception!). But I don't think there's any other Python library that
+uses recursion in a similarly extreme way. Completion should also be fast and
+therefore the quality might not always be maximal.
+
+.. autodata:: recursion_limit
+.. autodata:: total_function_execution_limit
+.. autodata:: per_function_execution_limit
+.. autodata:: per_function_recursion_limit
 """
-from jedi import debug
-from jedi import settings
-from jedi.evaluate import compiled
-from jedi.evaluate import iterable
+from contextlib import contextmanager
+
+from jedi import debug
+from jedi.evaluate.base_context import NO_CONTEXTS


-def recursion_decorator(func):
-    def run(evaluator, stmt, *args, **kwargs):
-        rec_detect = evaluator.recursion_detector
-        # print stmt, len(self.node_statements())
-        if rec_detect.push_stmt(stmt):
-            return []
-        else:
-            result = func(evaluator, stmt, *args, **kwargs)
-            rec_detect.pop_stmt()
-            return result
-    return run
+recursion_limit = 15
+"""
+Like ``sys.getrecursionlimit()``, just for |jedi|.
+"""
+total_function_execution_limit = 200
+"""
+This is a hard limit of how many non-builtin functions can be executed.
+"""
+per_function_execution_limit = 6
+"""
+The maximal amount of times a specific function may be executed.
+"""
+per_function_recursion_limit = 2
+"""
+A function may not be executed more than this number of times recursively.
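+(With the default of 2 a function that calls itself is only followed about
+two levels deep, after which the execution is skipped.)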
+""" class RecursionDetector(object): + def __init__(self): + self.pushed_nodes = [] + + +@contextmanager +def execution_allowed(evaluator, node): """ A decorator to detect recursions in statements. In a recursion a statement at the same place, in the same module may not be executed two times. """ - def __init__(self): - self.top = None - self.current = None - - def push_stmt(self, stmt): - self.current = _RecursionNode(stmt, self.current) - check = self._check_recursion() - if check: - debug.warning('catched stmt recursion: %s against %s @%s', stmt, - check.stmt, stmt.start_pos) - self.pop_stmt() - return True - return False - - def pop_stmt(self): - if self.current is not None: - # I don't know how current can be None, but sometimes it happens - # with Python3. - self.current = self.current.parent - - def _check_recursion(self): - test = self.current - while True: - test = test.parent - if self.current == test: - return test - if not test: - return False - - def node_statements(self): - result = [] - n = self.current - while n: - result.insert(0, n.stmt) - n = n.parent - return result - - -class _RecursionNode(object): - """ A node of the RecursionDecorator. """ - def __init__(self, stmt, parent): - self.script = stmt.get_parent_until() - self.position = stmt.start_pos - self.parent = parent - self.stmt = stmt - - # Don't check param instances, they are not causing recursions - # The same's true for the builtins, because the builtins are really - # simple. - self.is_ignored = self.script == compiled.builtin - - def __eq__(self, other): - if not other: - return None - - return self.script == other.script \ - and self.position == other.position \ - and not self.is_ignored and not other.is_ignored - - -def execution_recursion_decorator(func): - def run(execution, **kwargs): - detector = execution._evaluator.execution_recursion_detector - if detector.push_execution(execution): - result = [] - else: - result = func(execution, **kwargs) - detector.pop_execution() - return result - - return run + pushed_nodes = evaluator.recursion_detector.pushed_nodes + + if node in pushed_nodes: + debug.warning('catched stmt recursion: %s @%s', node, + node.start_pos) + yield False + else: + pushed_nodes.append(node) + yield True + pushed_nodes.pop() + + +def execution_recursion_decorator(default=NO_CONTEXTS): + def decorator(func): + def wrapper(execution, **kwargs): + detector = execution.evaluator.execution_recursion_detector + allowed = detector.push_execution(execution) + try: + if allowed: + result = default + else: + result = func(execution, **kwargs) + finally: + detector.pop_execution() + return result + return wrapper + return decorator class ExecutionRecursionDetector(object): """ Catches recursions of executions. - It is designed like a Singelton. Only one instance should exist. 
""" - def __init__(self): - self.recursion_level = 0 - self.parent_execution_funcs = [] - self.execution_funcs = set() - self.execution_count = 0 - - def __call__(self, execution): - debug.dbg('Execution recursions: %s', execution, self.recursion_level, - self.execution_count, len(self.execution_funcs)) - if self.check_recursion(execution): - result = [] - else: - result = self.func(execution) - self.pop_execution() - return result - - def pop_execution(cls): - cls.parent_execution_funcs.pop() - cls.recursion_level -= 1 - - def push_execution(cls, execution): - in_par_execution_funcs = execution.base in cls.parent_execution_funcs - in_execution_funcs = execution.base in cls.execution_funcs - cls.recursion_level += 1 - cls.execution_count += 1 - cls.execution_funcs.add(execution.base) - cls.parent_execution_funcs.append(execution.base) - - if cls.execution_count > settings.max_executions: + def __init__(self, evaluator): + self._evaluator = evaluator + + self._recursion_level = 0 + self._parent_execution_funcs = [] + self._funcdef_execution_counts = {} + self._execution_count = 0 + + def pop_execution(self): + self._parent_execution_funcs.pop() + self._recursion_level -= 1 + + def push_execution(self, execution): + funcdef = execution.tree_node + + # These two will be undone in pop_execution. + self._recursion_level += 1 + self._parent_execution_funcs.append(funcdef) + + module = execution.get_root_context() + if module == self._evaluator.BUILTINS: + # We have control over builtins so we know they are not recursing + # like crazy. Therefore we just let them execute always, because + # they usually just help a lot with getting good results. + return False + + if self._recursion_level > recursion_limit: return True - if isinstance(execution.base, (iterable.Array, iterable.Generator)): - return False - module = execution.get_parent_until() - if module == compiled.builtin: - return False + if self._execution_count >= total_function_execution_limit: + return True + self._execution_count += 1 - if in_par_execution_funcs: - if cls.recursion_level > settings.max_function_recursion_level: - return True - if in_execution_funcs and \ - len(cls.execution_funcs) > settings.max_until_execution_unique: + if self._funcdef_execution_counts.setdefault(funcdef, 0) >= per_function_execution_limit: return True - if cls.execution_count > settings.max_executions_without_builtins: + self._funcdef_execution_counts[funcdef] += 1 + + if self._parent_execution_funcs.count(funcdef) > per_function_recursion_limit: return True return False diff --git a/pythonFiles/release/jedi/evaluate/representation.py b/pythonFiles/release/jedi/evaluate/representation.py deleted file mode 100755 index 3cfcaa9b5564..000000000000 --- a/pythonFiles/release/jedi/evaluate/representation.py +++ /dev/null @@ -1,857 +0,0 @@ -""" -Like described in the :mod:`jedi.parser.tree` module, -there's a need for an ast like module to represent the states of parsed -modules. - -But now there are also structures in Python that need a little bit more than -that. An ``Instance`` for example is only a ``Class`` before it is -instantiated. This class represents these cases. - -So, why is there also a ``Class`` class here? Well, there are decorators and -they change classes in Python 3. - -Representation modules also define "magic methods". Those methods look like -``py__foo__`` and are typically mappable to the Python equivalents ``__call__`` -and others. 
Here's a list: - -====================================== ======================================== -**Method** **Description** --------------------------------------- ---------------------------------------- -py__call__(evaluator, params: Array) On callable objects, returns types. -py__bool__() Returns True/False/None; None means that - there's no certainty. -py__bases__(evaluator) Returns a list of base classes. -py__mro__(evaluator) Returns a list of classes (the mro). -py__getattribute__(evaluator, name) Returns a list of attribute values. The - name can be str or Name. -====================================== ======================================== - -__ -""" -import os -import pkgutil -import imp -import re -from itertools import chain - -from jedi._compatibility import use_metaclass, unicode, Python3Method -from jedi.parser import tree -from jedi import debug -from jedi import common -from jedi.cache import underscore_memoization, cache_star_import -from jedi.evaluate.cache import memoize_default, CachedMetaClass, NO_DEFAULT -from jedi.evaluate import compiled -from jedi.evaluate import recursion -from jedi.evaluate import iterable -from jedi.evaluate import docstrings -from jedi.evaluate import helpers -from jedi.evaluate import param -from jedi.evaluate import flow_analysis -from jedi.evaluate import imports - - -class Executed(tree.Base): - """ - An instance is also an executable - because __init__ is called - :param var_args: The param input array, consist of a parser node or a list. - """ - def __init__(self, evaluator, base, var_args=()): - self._evaluator = evaluator - self.base = base - self.var_args = var_args - - def is_scope(self): - return True - - def get_parent_until(self, *args, **kwargs): - return tree.Base.get_parent_until(self, *args, **kwargs) - - @common.safe_property - def parent(self): - return self.base.parent - - -class Instance(use_metaclass(CachedMetaClass, Executed)): - """ - This class is used to evaluate instances. - """ - def __init__(self, evaluator, base, var_args, is_generated=False): - super(Instance, self).__init__(evaluator, base, var_args) - self.decorates = None - # Generated instances are classes that are just generated by self - # (No var_args) used. - self.is_generated = is_generated - - if base.name.get_code() in ['list', 'set'] \ - and compiled.builtin == base.get_parent_until(): - # compare the module path with the builtin name. - self.var_args = iterable.check_array_instances(evaluator, self) - elif not is_generated: - # Need to execute the __init__ function, because the dynamic param - # searching needs it. - try: - method = self.get_subscope_by_name('__init__') - except KeyError: - pass - else: - evaluator.execute(method, self.var_args) - - @property - def py__call__(self): - def actual(evaluator, params): - return evaluator.execute(method, params) - - try: - method = self.get_subscope_by_name('__call__') - except KeyError: - # Means the Instance is not callable. - raise AttributeError - - return actual - - def py__class__(self, evaluator): - return self.base - - def py__bool__(self): - # Signalize that we don't know about the bool type. - return None - - @memoize_default() - def _get_method_execution(self, func): - func = get_instance_el(self._evaluator, self, func, True) - return FunctionExecution(self._evaluator, func, self.var_args) - - def _get_func_self_name(self, func): - """ - Returns the name of the first param in a class method (which is - normally self. 
- """ - try: - return str(func.params[0].name) - except IndexError: - return None - - def _self_names_dict(self, add_mro=True): - names = {} - # This loop adds the names of the self object, copies them and removes - # the self. - for sub in self.base.subscopes: - if isinstance(sub, tree.Class): - continue - # Get the self name, if there's one. - self_name = self._get_func_self_name(sub) - if self_name is None: - continue - - if sub.name.value == '__init__' and not self.is_generated: - # ``__init__`` is special because the params need are injected - # this way. Therefore an execution is necessary. - if not sub.get_decorators(): - # __init__ decorators should generally just be ignored, - # because to follow them and their self variables is too - # complicated. - sub = self._get_method_execution(sub) - for name_list in sub.names_dict.values(): - for name in name_list: - if name.value == self_name and name.prev_sibling() is None: - trailer = name.next_sibling() - if tree.is_node(trailer, 'trailer') \ - and len(trailer.children) == 2 \ - and trailer.children[0] == '.': - name = trailer.children[1] # After dot. - if name.is_definition(): - arr = names.setdefault(name.value, []) - arr.append(get_instance_el(self._evaluator, self, name)) - return names - - def get_subscope_by_name(self, name): - sub = self.base.get_subscope_by_name(name) - return get_instance_el(self._evaluator, self, sub, True) - - def execute_subscope_by_name(self, name, *args): - method = self.get_subscope_by_name(name) - return self._evaluator.execute_evaluated(method, *args) - - def get_descriptor_returns(self, obj): - """ Throws a KeyError if there's no method. """ - # Arguments in __get__ descriptors are obj, class. - # `method` is the new parent of the array, don't know if that's good. - args = [obj, obj.base] if isinstance(obj, Instance) else [compiled.none_obj, obj] - try: - return self.execute_subscope_by_name('__get__', *args) - except KeyError: - return [self] - - @memoize_default() - def names_dicts(self, search_global): - yield self._self_names_dict() - - for s in self.base.py__mro__(self._evaluator)[1:]: - if not isinstance(s, compiled.CompiledObject): - # Compiled objects don't have `self.` names. - for inst in self._evaluator.execute(s): - yield inst._self_names_dict(add_mro=False) - - for names_dict in self.base.names_dicts(search_global=False, is_instance=True): - yield LazyInstanceDict(self._evaluator, self, names_dict) - - def get_index_types(self, evaluator, index_array): - indexes = iterable.create_indexes_or_slices(self._evaluator, index_array) - if any([isinstance(i, iterable.Slice) for i in indexes]): - # Slice support in Jedi is very marginal, at the moment, so just - # ignore them in case of __getitem__. - # TODO support slices in a more general way. - indexes = [] - - try: - method = self.get_subscope_by_name('__getitem__') - except KeyError: - debug.warning('No __getitem__, cannot access the array.') - return [] - else: - return self._evaluator.execute(method, [iterable.AlreadyEvaluated(indexes)]) - - @property - @underscore_memoization - def name(self): - name = self.base.name - return helpers.FakeName(unicode(name), self, name.start_pos) - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'get_imports', 'type', - 'doc', 'raw_doc']: - raise AttributeError("Instance %s: Don't touch this (%s)!" 
- % (self, name)) - return getattr(self.base, name) - - def __repr__(self): - dec = '' - if self.decorates is not None: - dec = " decorates " + repr(self.decorates) - return "" % (type(self).__name__, self.base, - self.var_args, dec) - - -class LazyInstanceDict(object): - def __init__(self, evaluator, instance, dct): - self._evaluator = evaluator - self._instance = instance - self._dct = dct - - def __getitem__(self, name): - return [get_instance_el(self._evaluator, self._instance, var, True) - for var in self._dct[name]] - - def values(self): - return [self[key] for key in self._dct] - - -class InstanceName(tree.Name): - def __init__(self, origin_name, parent): - super(InstanceName, self).__init__(tree.zero_position_modifier, - origin_name.value, - origin_name.start_pos) - self._origin_name = origin_name - self.parent = parent - - def is_definition(self): - return self._origin_name.is_definition() - - -def get_instance_el(evaluator, instance, var, is_class_var=False): - """ - Returns an InstanceElement if it makes sense, otherwise leaves the object - untouched. - - Basically having an InstanceElement is context information. That is needed - in quite a lot of cases, which includes Nodes like ``power``, that need to - know where a self name comes from for example. - """ - if isinstance(var, tree.Name): - parent = get_instance_el(evaluator, instance, var.parent, is_class_var) - return InstanceName(var, parent) - elif var.type != 'funcdef' \ - and isinstance(var, (Instance, compiled.CompiledObject, tree.Leaf, - tree.Module, FunctionExecution)): - return var - - var = evaluator.wrap(var) - return InstanceElement(evaluator, instance, var, is_class_var) - - -class InstanceElement(use_metaclass(CachedMetaClass, tree.Base)): - """ - InstanceElement is a wrapper for any object, that is used as an instance - variable (e.g. self.variable or class methods). - """ - def __init__(self, evaluator, instance, var, is_class_var): - self._evaluator = evaluator - self.instance = instance - self.var = var - self.is_class_var = is_class_var - - @common.safe_property - @memoize_default() - def parent(self): - par = self.var.parent - if isinstance(par, Class) and par == self.instance.base \ - or isinstance(par, tree.Class) \ - and par == self.instance.base.base: - par = self.instance - else: - par = get_instance_el(self._evaluator, self.instance, par, - self.is_class_var) - return par - - def get_parent_until(self, *args, **kwargs): - return tree.BaseNode.get_parent_until(self, *args, **kwargs) - - def get_definition(self): - return self.get_parent_until((tree.ExprStmt, tree.IsScope, tree.Import)) - - def get_decorated_func(self): - """ Needed because the InstanceElement should not be stripped """ - func = self.var.get_decorated_func() - func = get_instance_el(self._evaluator, self.instance, func) - return func - - def get_rhs(self): - return get_instance_el(self._evaluator, self.instance, - self.var.get_rhs(), self.is_class_var) - - def is_definition(self): - return self.var.is_definition() - - @property - def children(self): - # Copy and modify the array. 
- return [get_instance_el(self._evaluator, self.instance, command, self.is_class_var) - for command in self.var.children] - - @property - @memoize_default() - def name(self): - name = self.var.name - return helpers.FakeName(unicode(name), self, name.start_pos) - - def __iter__(self): - for el in self.var.__iter__(): - yield get_instance_el(self._evaluator, self.instance, el, - self.is_class_var) - - def __getitem__(self, index): - return get_instance_el(self._evaluator, self.instance, self.var[index], - self.is_class_var) - - def __getattr__(self, name): - return getattr(self.var, name) - - def isinstance(self, *cls): - return isinstance(self.var, cls) - - def is_scope(self): - """ - Since we inherit from Base, it would overwrite the action we want here. - """ - return self.var.is_scope() - - def py__call__(self, evaluator, params): - if isinstance(self.var, compiled.CompiledObject): - # This check is a bit strange, but CompiledObject itself is a bit - # more complicated than we would it actually like to be. - return self.var.py__call__(evaluator, params) - else: - return Function.py__call__(self, evaluator, params) - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.var) - - -class Wrapper(tree.Base): - def is_scope(self): - return True - - def is_class(self): - return False - - def py__bool__(self): - """ - Since Wrapper is a super class for classes, functions and modules, - the return value will always be true. - """ - return True - - @property - @underscore_memoization - def name(self): - name = self.base.name - return helpers.FakeName(unicode(name), self, name.start_pos) - - -class Class(use_metaclass(CachedMetaClass, Wrapper)): - """ - This class is not only important to extend `tree.Class`, it is also a - important for descriptors (if the descriptor methods are evaluated or not). - """ - def __init__(self, evaluator, base): - self._evaluator = evaluator - self.base = base - - @memoize_default(default=()) - def py__mro__(self, evaluator): - def add(cls): - if cls not in mro: - mro.append(cls) - - mro = [self] - # TODO Do a proper mro resolution. Currently we are just listing - # classes. However, it's a complicated algorithm. - for cls in self.py__bases__(self._evaluator): - # TODO detect for TypeError: duplicate base class str, - # e.g. 
`class X(str, str): pass` - try: - mro_method = cls.py__mro__ - except AttributeError: - # TODO add a TypeError like: - """ - >>> class Y(lambda: test): pass - Traceback (most recent call last): - File "", line 1, in - TypeError: function() argument 1 must be code, not str - >>> class Y(1): pass - Traceback (most recent call last): - File "", line 1, in - TypeError: int() takes at most 2 arguments (3 given) - """ - pass - else: - add(cls) - for cls_new in mro_method(evaluator): - add(cls_new) - return tuple(mro) - - @memoize_default(default=()) - def py__bases__(self, evaluator): - arglist = self.base.get_super_arglist() - if arglist: - args = param.Arguments(self._evaluator, arglist) - return list(chain.from_iterable(args.eval_args())) - else: - return [compiled.object_obj] - - def py__call__(self, evaluator, params): - return [Instance(evaluator, self, params)] - - def py__getattribute__(self, name): - return self._evaluator.find_types(self, name) - - @property - def params(self): - return self.get_subscope_by_name('__init__').params - - def names_dicts(self, search_global, is_instance=False): - if search_global: - yield self.names_dict - else: - for scope in self.py__mro__(self._evaluator): - if isinstance(scope, compiled.CompiledObject): - yield scope.names_dicts(False, is_instance)[0] - else: - yield scope.names_dict - - def is_class(self): - return True - - def get_subscope_by_name(self, name): - for s in self.py__mro__(self._evaluator): - for sub in reversed(s.subscopes): - if sub.name.value == name: - return sub - raise KeyError("Couldn't find subscope.") - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'parent', 'raw_doc', - 'doc', 'get_imports', 'get_parent_until', 'get_code', - 'subscopes', 'names_dict', 'type']: - raise AttributeError("Don't touch this: %s of %s !" % (name, self)) - return getattr(self.base, name) - - def __repr__(self): - return "" % (type(self).__name__, self.base) - - -class Function(use_metaclass(CachedMetaClass, Wrapper)): - """ - Needed because of decorators. Decorators are evaluated here. - """ - def __init__(self, evaluator, func, is_decorated=False): - """ This should not be called directly """ - self._evaluator = evaluator - self.base = self.base_func = func - self.is_decorated = is_decorated - # A property that is set by the decorator resolution. - self.decorates = None - - @memoize_default() - def get_decorated_func(self): - """ - Returns the function, that should to be executed in the end. - This is also the places where the decorators are processed. - """ - f = self.base_func - decorators = self.base_func.get_decorators() - - if not decorators or self.is_decorated: - return self - - # Only enter it, if has not already been processed. - if not self.is_decorated: - for dec in reversed(decorators): - debug.dbg('decorator: %s %s', dec, f) - dec_results = self._evaluator.eval_element(dec.children[1]) - trailer = dec.children[2:-1] - if trailer: - # Create a trailer and evaluate it. - trailer = tree.Node('trailer', trailer) - trailer.parent = dec - dec_results = self._evaluator.eval_trailer(dec_results, trailer) - - if not len(dec_results): - debug.warning('decorator not found: %s on %s', dec, self.base_func) - return self - decorator = dec_results.pop() - if dec_results: - debug.warning('multiple decorators found %s %s', - self.base_func, dec_results) - - # Create param array. - if isinstance(f, Function): - old_func = f # TODO this is just hacky. change. 
- else: - old_func = Function(self._evaluator, f, is_decorated=True) - - wrappers = self._evaluator.execute_evaluated(decorator, old_func) - if not len(wrappers): - debug.warning('no wrappers found %s', self.base_func) - return self - if len(wrappers) > 1: - # TODO resolve issue with multiple wrappers -> multiple types - debug.warning('multiple wrappers found %s %s', - self.base_func, wrappers) - f = wrappers[0] - if isinstance(f, (Instance, Function)): - f.decorates = self - - debug.dbg('decorator end %s', f) - return f - - def names_dicts(self, search_global): - if search_global: - yield self.names_dict - else: - for names_dict in compiled.magic_function_class.names_dicts(False): - yield names_dict - - @Python3Method - def py__call__(self, evaluator, params): - if self.base.is_generator(): - return [iterable.Generator(evaluator, self, params)] - else: - return FunctionExecution(evaluator, self, params).get_return_types() - - def __getattr__(self, name): - return getattr(self.base_func, name) - - def __repr__(self): - dec = '' - if self.decorates is not None: - dec = " decorates " + repr(self.decorates) - return "" % (type(self).__name__, self.base_func, dec) - - -class LambdaWrapper(Function): - def get_decorated_func(self): - return self - - -class FunctionExecution(Executed): - """ - This class is used to evaluate functions and their returns. - - This is the most complicated class, because it contains the logic to - transfer parameters. It is even more complicated, because there may be - multiple calls to functions and recursion has to be avoided. But this is - responsibility of the decorators. - """ - type = 'funcdef' - - def __init__(self, evaluator, base, *args, **kwargs): - super(FunctionExecution, self).__init__(evaluator, base, *args, **kwargs) - self._copy_dict = {} - new_func = helpers.deep_ast_copy(base.base_func, self, self._copy_dict) - self.children = new_func.children - self.names_dict = new_func.names_dict - - @memoize_default(default=()) - @recursion.execution_recursion_decorator - def get_return_types(self, check_yields=False): - func = self.base - - if func.isinstance(LambdaWrapper): - return self._evaluator.eval_element(self.children[-1]) - - if func.listeners: - # Feed the listeners, with the params. - for listener in func.listeners: - listener.execute(self._get_params()) - # If we do have listeners, that means that there's not a regular - # execution ongoing. In this case Jedi is interested in the - # inserted params, not in the actual execution of the function. - return [] - - if check_yields: - types = [] - returns = self.yields - else: - returns = self.returns - types = list(docstrings.find_return_types(self._evaluator, func)) - - for r in returns: - check = flow_analysis.break_check(self._evaluator, self, r) - if check is flow_analysis.UNREACHABLE: - debug.dbg('Return unreachable: %s', r) - else: - types += self._evaluator.eval_element(r.children[1]) - if check is flow_analysis.REACHABLE: - debug.dbg('Return reachable: %s', r) - break - return types - - def names_dicts(self, search_global): - yield self.names_dict - - @memoize_default(default=NO_DEFAULT) - def _get_params(self): - """ - This returns the params for an TODO and is injected as a - 'hack' into the tree.Function class. - This needs to be here, because Instance can have __init__ functions, - which act the same way as normal functions. 
- """ - return param.get_params(self._evaluator, self.base, self.var_args) - - def param_by_name(self, name): - return [n for n in self._get_params() if str(n) == name][0] - - def name_for_position(self, position): - return tree.Function.name_for_position(self, position) - - def _copy_list(self, lst): - """ - Copies a list attribute of a parser Function. Copying is very - expensive, because it is something like `copy.deepcopy`. However, these - copied objects can be used for the executions, as if they were in the - execution. - """ - objects = [] - for element in lst: - self._scope_copy(element.parent) - copied = helpers.deep_ast_copy(element, self._copy_dict) - objects.append(copied) - return objects - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'imports', 'name', 'type']: - raise AttributeError('Tried to access %s: %s. Why?' % (name, self)) - return getattr(self.base, name) - - def _scope_copy(self, scope): - raise NotImplementedError - """ Copies a scope (e.g. `if foo:`) in an execution """ - if scope != self.base.base_func: - # Just make sure the parents been copied. - self._scope_copy(scope.parent) - helpers.deep_ast_copy(scope, self._copy_dict) - - @common.safe_property - @memoize_default([]) - def returns(self): - return tree.Scope._search_in_scope(self, tree.ReturnStmt) - - @common.safe_property - @memoize_default([]) - def yields(self): - return tree.Scope._search_in_scope(self, tree.YieldExpr) - - @common.safe_property - @memoize_default([]) - def statements(self): - return tree.Scope._search_in_scope(self, tree.ExprStmt) - - @common.safe_property - @memoize_default([]) - def subscopes(self): - return tree.Scope._search_in_scope(self, tree.Scope) - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.base) - - -class GlobalName(helpers.FakeName): - def __init__(self, name): - """ - We need to mark global names somehow. Otherwise they are just normal - names that are not definitions. - """ - super(GlobalName, self).__init__(name.value, name.parent, - name.start_pos, is_definition=True) - - -class ModuleWrapper(use_metaclass(CachedMetaClass, tree.Module, Wrapper)): - def __init__(self, evaluator, module): - self._evaluator = evaluator - self.base = self._module = module - - def names_dicts(self, search_global): - yield self.base.names_dict - yield self._module_attributes_dict() - - for star_module in self.star_imports(): - yield star_module.names_dict - - yield dict((str(n), [GlobalName(n)]) for n in self.base.global_names) - yield self._sub_modules_dict() - - # I'm not sure if the star import cache is really that effective anymore - # with all the other really fast import caches. Recheck. Also we would need - # to push the star imports into Evaluator.modules, if we reenable this. - #@cache_star_import - @memoize_default([]) - def star_imports(self): - modules = [] - for i in self.base.imports: - if i.is_star_import(): - name = i.star_import_name() - new = imports.ImportWrapper(self._evaluator, name).follow() - for module in new: - if isinstance(module, tree.Module): - modules += module.star_imports() - modules += new - return modules - - @memoize_default() - def _module_attributes_dict(self): - def parent_callback(): - return self._evaluator.execute(compiled.create(self._evaluator, str))[0] - - names = ['__file__', '__package__', '__doc__', '__name__'] - # All the additional module attributes are strings. 
- return dict((n, [helpers.LazyName(n, parent_callback, is_definition=True)]) - for n in names) - - @property - @memoize_default() - def name(self): - return helpers.FakeName(unicode(self.base.name), self, (1, 0)) - - def _get_init_directory(self): - for suffix, _, _ in imp.get_suffixes(): - ending = '__init__' + suffix - if self.py__file__().endswith(ending): - # Remove the ending, including the separator. - return self.py__file__()[:-len(ending) - 1] - return None - - def py__name__(self): - for name, module in self._evaluator.modules.items(): - if module == self: - return name - - return '__main__' - - def py__file__(self): - """ - In contrast to Python's __file__ can be None. - """ - if self._module.path is None: - return None - - return os.path.abspath(self._module.path) - - def py__package__(self): - if self._get_init_directory() is None: - return re.sub(r'\.?[^\.]+$', '', self.py__name__()) - else: - return self.py__name__() - - @property - def py__path__(self): - """ - Not seen here, since it's a property. The callback actually uses a - variable, so use it like:: - - foo.py__path__(sys_path) - - In case of a package, this returns Python's __path__ attribute, which - is a list of paths (strings). - Raises an AttributeError if the module is not a package. - """ - def return_value(search_path): - init_path = self.py__file__() - if os.path.basename(init_path) == '__init__.py': - - with open(init_path, 'rb') as f: - content = common.source_to_unicode(f.read()) - # these are strings that need to be used for namespace packages, - # the first one is ``pkgutil``, the second ``pkg_resources``. - options = ('declare_namespace(__name__)', 'extend_path(__path__') - if options[0] in content or options[1] in content: - # It is a namespace, now try to find the rest of the - # modules on sys_path or whatever the search_path is. - paths = set() - for s in search_path: - other = os.path.join(s, unicode(self.name)) - if os.path.isdir(other): - paths.add(other) - return list(paths) - # Default to this. - return [path] - - path = self._get_init_directory() - - if path is None: - raise AttributeError('Only packages have __path__ attributes.') - else: - return return_value - - @memoize_default() - def _sub_modules_dict(self): - """ - Lists modules in the directory of this module (if this module is a - package). - """ - path = self._module.path - names = {} - if path is not None and path.endswith(os.path.sep + '__init__.py'): - mods = pkgutil.iter_modules([os.path.dirname(path)]) - for module_loader, name, is_pkg in mods: - fake_n = helpers.FakeName(name) - # It's obviously a relative import to the current module. - imp = helpers.FakeImport(fake_n, self, level=1) - fake_n.parent = imp - names[name] = [fake_n] - - # TODO add something like this in the future, its cleaner than the - # import hacks. - # ``os.path`` is a hardcoded exception, because it's a - # ``sys.modules`` modification. - #if str(self.name) == 'os': - # names.append(helpers.FakeName('path', parent=self)) - - return names - - def __getattr__(self, name): - return getattr(self._module, name) - - def __repr__(self): - return "<%s: %s>" % (type(self).__name__, self._module) diff --git a/pythonFiles/release/jedi/evaluate/site.py b/pythonFiles/release/jedi/evaluate/site.py new file mode 100644 index 000000000000..bf884faefaaf --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/site.py @@ -0,0 +1,110 @@ +"""An adapted copy of relevant site-packages functionality from Python stdlib. 
+ +This file contains some functions related to handling site-packages in Python +with jedi-specific modifications: + +- the functions operate on sys_path argument rather than global sys.path + +- in .pth files "import ..." lines that allow execution of arbitrary code are + skipped to prevent code injection into jedi interpreter + +""" + +# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved + +from __future__ import print_function + +import sys +import os + + +def makepath(*paths): + dir = os.path.join(*paths) + try: + dir = os.path.abspath(dir) + except OSError: + pass + return dir, os.path.normcase(dir) + + +def _init_pathinfo(sys_path): + """Return a set containing all existing directory entries from sys_path""" + d = set() + for dir in sys_path: + try: + if os.path.isdir(dir): + dir, dircase = makepath(dir) + d.add(dircase) + except TypeError: + continue + return d + + +def addpackage(sys_path, sitedir, name, known_paths): + """Process a .pth file within the site-packages directory: + For each line in the file, either combine it with sitedir to a path + and add that to known_paths, or execute it if it starts with 'import '. + """ + if known_paths is None: + known_paths = _init_pathinfo(sys_path) + reset = 1 + else: + reset = 0 + fullname = os.path.join(sitedir, name) + try: + f = open(fullname, "r") + except OSError: + return + with f: + for n, line in enumerate(f): + if line.startswith("#"): + continue + try: + if line.startswith(("import ", "import\t")): + # Change by immerrr: don't evaluate import lines to prevent + # code injection into jedi through pth files. + # + # exec(line) + continue + line = line.rstrip() + dir, dircase = makepath(sitedir, line) + if not dircase in known_paths and os.path.exists(dir): + sys_path.append(dir) + known_paths.add(dircase) + except Exception: + print("Error processing line {:d} of {}:\n".format(n+1, fullname), + file=sys.stderr) + import traceback + for record in traceback.format_exception(*sys.exc_info()): + for line in record.splitlines(): + print(' '+line, file=sys.stderr) + print("\nRemainder of file ignored", file=sys.stderr) + break + if reset: + known_paths = None + return known_paths + + +def addsitedir(sys_path, sitedir, known_paths=None): + """Add 'sitedir' argument to sys_path if missing and handle .pth files in + 'sitedir'""" + if known_paths is None: + known_paths = _init_pathinfo(sys_path) + reset = 1 + else: + reset = 0 + sitedir, sitedircase = makepath(sitedir) + if not sitedircase in known_paths: + sys_path.append(sitedir) # Add path component + known_paths.add(sitedircase) + try: + names = os.listdir(sitedir) + except OSError: + return + names = [name for name in names if name.endswith(".pth")] + for name in sorted(names): + addpackage(sys_path, sitedir, name, known_paths) + if reset: + known_paths = None + return known_paths diff --git a/pythonFiles/release/jedi/evaluate/stdlib.py b/pythonFiles/release/jedi/evaluate/stdlib.py index 20ff5d51aa86..259d692733db 100755 --- a/pythonFiles/release/jedi/evaluate/stdlib.py +++ b/pythonFiles/release/jedi/evaluate/stdlib.py @@ -5,56 +5,85 @@ To add a new implementation, create a function and add it to the ``_implemented`` dict at the bottom of this module. +Note that this module exists only to implement very specific functionality in +the standard library. The usual way to understand the standard library is the +compiled module that returns the types for C-builtins. 
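+
+For example, ``getattr``, ``type``, ``super``, ``reversed`` and
+``isinstance`` are handled below, because simply executing their compiled
+counterparts would not give useful static-analysis results.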
""" import collections import re -from jedi._compatibility import unicode -from jedi.evaluate import compiled -from jedi.evaluate import representation as er -from jedi.evaluate import iterable -from jedi.parser import Parser -from jedi.parser import tree from jedi import debug -from jedi.evaluate import precedence -from jedi.evaluate import param +from jedi.evaluate.arguments import ValuesArguments +from jedi.evaluate import analysis +from jedi.evaluate import compiled +from jedi.evaluate.context.instance import InstanceFunctionExecution, \ + AbstractInstanceContext, CompiledInstance, BoundMethod, \ + AnonymousInstanceFunctionExecution +from jedi.evaluate.base_context import ContextualizedNode, \ + NO_CONTEXTS, ContextSet +from jedi.evaluate.context import ClassContext, ModuleContext +from jedi.evaluate.context import iterable +from jedi.evaluate.lazy_context import LazyTreeContext +from jedi.evaluate.syntax_tree import is_string + +# Now this is all part of fake tuples in Jedi. However super doesn't work on +# __init__ and __new__ doesn't work at all. So adding this to nametuples is +# just the easiest way. +_NAMEDTUPLE_INIT = """ + def __init__(_cls, {arg_list}): + 'A helper function for namedtuple.' + self.__iterable = ({arg_list}) + + def __iter__(self): + for i in self.__iterable: + yield i + + def __getitem__(self, y): + return self.__iterable[y] + +""" class NotInStdLib(LookupError): pass -def execute(evaluator, obj, params): +def execute(evaluator, obj, arguments): + if isinstance(obj, BoundMethod): + raise NotInStdLib() + try: - obj_name = str(obj.name) + obj_name = obj.name.string_name except AttributeError: pass else: - if obj.parent == compiled.builtin: + if obj.parent_context == evaluator.BUILTINS: module_name = 'builtins' - elif isinstance(obj.parent, tree.Module): - module_name = str(obj.parent.name) + elif isinstance(obj.parent_context, ModuleContext): + module_name = obj.parent_context.name.string_name else: module_name = '' # for now we just support builtin functions. try: - return _implemented[module_name][obj_name](evaluator, obj, params) + func = _implemented[module_name][obj_name] except KeyError: pass + else: + return func(evaluator, obj, arguments) raise NotInStdLib() -def _follow_param(evaluator, params, index): +def _follow_param(evaluator, arguments, index): try: - key, values = list(params.unpack())[index] + key, lazy_context = list(arguments.unpack())[index] except IndexError: - return [] + return NO_CONTEXTS else: - return iterable.unite(evaluator.eval_element(v) for v in values) + return lazy_context.infer() -def argument_clinic(string, want_obj=False, want_scope=False): +def argument_clinic(string, want_obj=False, want_context=False, want_arguments=False): """ Works like Argument Clinic (PEP 436), to validate function params. 
""" @@ -77,116 +106,150 @@ def argument_clinic(string, want_obj=False, want_scope=False): def f(func): def wrapper(evaluator, obj, arguments): + debug.dbg('builtin start %s' % obj, color='MAGENTA') try: lst = list(arguments.eval_argument_clinic(clinic_args)) except ValueError: - return [] + return NO_CONTEXTS else: kwargs = {} - if want_scope: - kwargs['scope'] = arguments.scope() + if want_context: + kwargs['context'] = arguments.context if want_obj: kwargs['obj'] = obj + if want_arguments: + kwargs['arguments'] = arguments return func(evaluator, *lst, **kwargs) + finally: + debug.dbg('builtin end', color='MAGENTA') return wrapper return f +@argument_clinic('iterator[, default], /') +def builtins_next(evaluator, iterators, defaults): + """ + TODO this function is currently not used. It's a stab at implementing next + in a different way than fake objects. This would be a bit more flexible. + """ + if evaluator.python_version[0] == 2: + name = 'next' + else: + name = '__next__' + + context_set = NO_CONTEXTS + for iterator in iterators: + if isinstance(iterator, AbstractInstanceContext): + context_set = ContextSet.from_sets( + n.infer() + for filter in iterator.get_filters(include_self_names=True) + for n in filter.get(name) + ).execute_evaluated() + if context_set: + return context_set + return defaults + + @argument_clinic('object, name[, default], /') def builtins_getattr(evaluator, objects, names, defaults=None): - types = [] # follow the first param for obj in objects: - if not isinstance(obj, (er.Instance, er.Class, tree.Module, compiled.CompiledObject)): - debug.warning('getattr called without instance') - continue - for name in names: - if precedence.is_string(name): - return evaluator.find_types(obj, name.obj) + if is_string(name): + return obj.py__getattribute__(name.obj) else: debug.warning('getattr called without str') continue - return types + return NO_CONTEXTS @argument_clinic('object[, bases, dict], /') def builtins_type(evaluator, objects, bases, dicts): if bases or dicts: - # metaclass... maybe someday... - return [] + # It's a type creation... maybe someday... 
+        return NO_CONTEXTS
     else:
-        return [o.base for o in objects if isinstance(o, er.Instance)]
+        return objects.py__class__()
 
 
-class SuperInstance(er.Instance):
+class SuperInstance(AbstractInstanceContext):
     """To be used like the object ``super`` returns."""
     def __init__(self, evaluator, cls):
         su = cls.py_mro()[1]
         super().__init__(evaluator, su and su[0] or self)
 
 
-@argument_clinic('[type[, obj]], /', want_scope=True)
-def builtins_super(evaluator, types, objects, scope):
+@argument_clinic('[type[, obj]], /', want_context=True)
+def builtins_super(evaluator, types, objects, context):
     # TODO make this able to detect multiple inheritance super
-    accept = (tree.Function, er.FunctionExecution)
-    if scope.isinstance(*accept):
-        wanted = (tree.Class, er.Instance)
-        cls = scope.get_parent_until(accept + wanted,
-                                     include_current=False)
-        if isinstance(cls, wanted):
-            if isinstance(cls, tree.Class):
-                cls = er.Class(evaluator, cls)
-            elif isinstance(cls, er.Instance):
-                cls = cls.base
-            su = cls.py__bases__(evaluator)
-            if su:
-                return evaluator.execute(su[0])
-    return []
-
-
-@argument_clinic('sequence, /', want_obj=True)
-def builtins_reversed(evaluator, sequences, obj):
-    # Unpack the iterator values
-    objects = tuple(iterable.get_iterator_types(sequences))
-    rev = [iterable.AlreadyEvaluated([o]) for o in reversed(objects)]
+    if isinstance(context, (InstanceFunctionExecution,
+                            AnonymousInstanceFunctionExecution)):
+        su = context.instance.py__class__().py__bases__()
+        return su[0].infer().execute_evaluated()
+    return NO_CONTEXTS
+
+
+@argument_clinic('sequence, /', want_obj=True, want_arguments=True)
+def builtins_reversed(evaluator, sequences, obj, arguments):
+    # While we could do without this variable (just by using sequences), we
+    # want static analysis to work well. Therefore we need to generate the
+    # values again.
+    key, lazy_context = next(arguments.unpack())
+    cn = None
+    if isinstance(lazy_context, LazyTreeContext):
+        # TODO access private
+        cn = ContextualizedNode(lazy_context._context, lazy_context.data)
+    ordered = list(sequences.iterate(cn))
+
+    rev = list(reversed(ordered))
     # Repack iterator values and then run it the normal way. This is
     # necessary, because `reversed` is a function and autocompletion
     # would fail in certain cases like `reversed(x).__iter__` if we
     # just returned the result directly.
-    rev = iterable.AlreadyEvaluated(
-        [iterable.FakeSequence(evaluator, rev, 'list')]
-    )
-    return [er.Instance(evaluator, obj, param.Arguments(evaluator, [rev]))]
+    seq = iterable.FakeSequence(evaluator, 'list', rev)
+    arguments = ValuesArguments([ContextSet(seq)])
+    return ContextSet(CompiledInstance(evaluator, evaluator.BUILTINS, obj, arguments))
 
 
-@argument_clinic('obj, type, /')
-def builtins_isinstance(evaluator, objects, types):
-    bool_results = set([])
+@argument_clinic('obj, type, /', want_arguments=True)
+def builtins_isinstance(evaluator, objects, types, arguments):
+    bool_results = set()
     for o in objects:
         try:
-            mro_func = o.py__class__(evaluator).py__mro__
+            mro_func = o.py__class__().py__mro__
         except AttributeError:
             # This is temporary. Everything should have a class attribute in
             # Python?! Maybe we'll leave it here, because some numpy objects or
             # whatever might not.
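+            # Returning both truth objects below models "could be either".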
-            return [compiled.true_obj, compiled.false_obj]
+            return ContextSet(compiled.create(evaluator, True),
+                              compiled.create(evaluator, False))
 
-        mro = mro_func(evaluator)
+        mro = mro_func()
 
         for cls_or_tup in types:
             if cls_or_tup.is_class():
                 bool_results.add(cls_or_tup in mro)
-            else:
+            elif cls_or_tup.name.string_name == 'tuple' \
+                    and cls_or_tup.get_root_context() == evaluator.BUILTINS:
                 # Check for tuples.
-                classes = iterable.get_iterator_types([cls_or_tup])
+                classes = ContextSet.from_sets(
+                    lazy_context.infer()
+                    for lazy_context in cls_or_tup.iterate()
+                )
                 bool_results.add(any(cls in mro for cls in classes))
+            else:
+                _, lazy_context = list(arguments.unpack())[1]
+                if isinstance(lazy_context, LazyTreeContext):
+                    node = lazy_context.data
+                    message = 'TypeError: isinstance() arg 2 must be a ' \
+                              'class, type, or tuple of classes and types, ' \
+                              'not %s.' % cls_or_tup
+                    analysis.add(lazy_context._context, 'type-error-isinstance', node, message)
 
-    return [compiled.keyword_from_value(x) for x in bool_results]
+    return ContextSet.from_iterable(compiled.create(evaluator, x) for x in bool_results)
 
 
-def collections_namedtuple(evaluator, obj, params):
+def collections_namedtuple(evaluator, obj, arguments):
     """
     Implementation of the namedtuple function.
 
@@ -198,35 +261,41 @@ def collections_namedtuple(evaluator, obj, params):
     """
     # Namedtuples are not supported on Python 2.6
     if not hasattr(collections, '_class_template'):
-        return []
+        return NO_CONTEXTS
 
     # Process arguments
+    # TODO here we only use one of the types, we should use all.
-    name = _follow_param(evaluator, params, 0)[0].obj
-    _fields = _follow_param(evaluator, params, 1)[0]
+    name = list(_follow_param(evaluator, arguments, 0))[0].obj
+    _fields = list(_follow_param(evaluator, arguments, 1))[0]
     if isinstance(_fields, compiled.CompiledObject):
         fields = _fields.obj.replace(',', ' ').split()
-    elif isinstance(_fields, iterable.Array):
-        try:
-            fields = [v.obj for v in _fields.values()]
-        except AttributeError:
-            return []
+    elif isinstance(_fields, iterable.AbstractIterable):
+        fields = [
+            v.obj
+            for lazy_context in _fields.py__iter__()
+            for v in lazy_context.infer() if hasattr(v, 'obj')
+        ]
     else:
-        return []
+        return NO_CONTEXTS
 
+    base = collections._class_template
+    base += _NAMEDTUPLE_INIT
     # Build source
-    source = collections._class_template.format(
+    source = base.format(
         typename=name,
-        field_names=fields,
+        field_names=tuple(fields),
         num_fields=len(fields),
-        arg_list=', '.join(fields),
+        arg_list=repr(tuple(fields)).replace("'", "")[1:-1],
         repr_fmt=', '.join(collections._repr_template.format(name=name)
                            for name in fields),
         field_defs='\n'.join(collections._field_template.format(index=index, name=name)
                              for index, name in enumerate(fields))
     )
 
     # Parse source
-    generated_class = Parser(evaluator.grammar, unicode(source)).module.subscopes[0]
-    return [er.Class(evaluator, generated_class)]
+    module = evaluator.grammar.parse(source)
+    generated_class = next(module.iter_classdefs())
+    parent_context = ModuleContext(evaluator, module, '')
+    return ContextSet(ClassContext(evaluator, parent_context, generated_class))
 
 
 @argument_clinic('first, /')
@@ -247,8 +316,8 @@ def _return_first_param(evaluator, firsts):
         'deepcopy': _return_first_param,
     },
     'json': {
-        'load': lambda *args: [],
-        'loads': lambda *args: [],
+        'load': lambda *args: NO_CONTEXTS,
+        'loads': lambda *args: NO_CONTEXTS,
     },
     'collections': {
         'namedtuple': collections_namedtuple,
diff --git a/pythonFiles/release/jedi/evaluate/syntax_tree.py b/pythonFiles/release/jedi/evaluate/syntax_tree.py
new 
file mode 100644
index 000000000000..1d847a4960de
--- /dev/null
+++ b/pythonFiles/release/jedi/evaluate/syntax_tree.py
@@ -0,0 +1,588 @@
+"""
+Functions evaluating the syntax tree.
+"""
+import copy
+import operator as op
+
+from parso.python import tree
+
+from jedi import debug
+from jedi import parser_utils
+from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, ContextualizedNode, \
+    ContextualizedName, iterator_to_context_set, iterate_contexts
+from jedi.evaluate import compiled
+from jedi.evaluate import pep0484
+from jedi.evaluate import recursion
+from jedi.evaluate import helpers
+from jedi.evaluate import analysis
+from jedi.evaluate import imports
+from jedi.evaluate import arguments
+from jedi.evaluate.context import ClassContext, FunctionContext
+from jedi.evaluate.context import iterable
+from jedi.evaluate.context import TreeInstance, CompiledInstance
+from jedi.evaluate.finder import NameFinder
+from jedi.evaluate.helpers import is_string, is_literal, is_number, is_compiled
+
+
+def _limit_context_infers(func):
+    """
+    This is, for now, the way we limit type inference from going wild. There
+    are other ways to ensure recursion limits as well. This is mostly
+    necessary because of instance (self) access, which can be quite tricky to
+    limit.
+
+    I'm still not sure this is the way to go, but it looks okay for now and we
+    can still go another way in the future. Tests are there. ~ dave
+    """
+    def wrapper(context, *args, **kwargs):
+        n = context.tree_node
+        evaluator = context.evaluator
+        try:
+            evaluator.inferred_element_counts[n] += 1
+            if evaluator.inferred_element_counts[n] > 300:
+                debug.warning('In context %s there were too many inferences.', n)
+                return NO_CONTEXTS
+        except KeyError:
+            evaluator.inferred_element_counts[n] = 1
+        return func(context, *args, **kwargs)
+
+    return wrapper
+
+
+@debug.increase_indent
+@_limit_context_infers
+def eval_node(context, element):
+    debug.dbg('eval_element %s@%s', element, element.start_pos)
+    evaluator = context.evaluator
+    typ = element.type
+    if typ in ('name', 'number', 'string', 'atom'):
+        return eval_atom(context, element)
+    elif typ == 'keyword':
+        # For False/True/None
+        if element.value in ('False', 'True', 'None'):
+            return ContextSet(compiled.builtin_from_name(evaluator, element.value))
+        # else: print e.g. could be evaluated like this in Python 2.7
+        return NO_CONTEXTS
+    elif typ == 'lambdef':
+        return ContextSet(FunctionContext(evaluator, context, element))
+    elif typ == 'expr_stmt':
+        return eval_expr_stmt(context, element)
+    elif typ in ('power', 'atom_expr'):
+        first_child = element.children[0]
+        if not (first_child.type == 'keyword' and first_child.value == 'await'):
+            context_set = eval_atom(context, first_child)
+            for trailer in element.children[1:]:
+                if trailer == '**':  # has a power operation.
+                    right = evaluator.eval_element(context, element.children[2])
+                    context_set = _eval_comparison(
+                        evaluator,
+                        context,
+                        context_set,
+                        trailer,
+                        right
+                    )
+                    break
+                context_set = eval_trailer(context, context_set, trailer)
+            return context_set
+        return NO_CONTEXTS
+    elif typ in ('testlist_star_expr', 'testlist',):
+        # The implicit tuple in statements.
+        return ContextSet(iterable.SequenceLiteralContext(evaluator, context, element))
+    elif typ in ('not_test', 'factor'):
+        context_set = context.eval_node(element.children[-1])
+        for operator in element.children[:-1]:
+            context_set = eval_factor(context_set, operator)
+        return context_set
+    elif typ == 'test':
+        # `x if foo else y` case.
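+        # e.g. for `1 if cond else 'a'` the union {int, str} is returned,
+        # since the condition is generally not statically decidable.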
+        return (context.eval_node(element.children[0]) |
+                context.eval_node(element.children[-1]))
+    elif typ == 'operator':
+        # Must be an ellipsis; other operators are not evaluated.
+        # In Python 2 the ellipsis is coded as three single dot tokens, not
+        # as a single three-dot token.
+        assert element.value in ('.', '...')
+        return ContextSet(compiled.create(evaluator, Ellipsis))
+    elif typ == 'dotted_name':
+        context_set = eval_atom(context, element.children[0])
+        for next_name in element.children[2::2]:
+            # TODO add search_global=True?
+            context_set = context_set.py__getattribute__(next_name, name_context=context)
+        return context_set
+    elif typ == 'eval_input':
+        return eval_node(context, element.children[0])
+    elif typ == 'annassign':
+        return pep0484._evaluate_for_annotation(context, element.children[1])
+    else:
+        return eval_or_test(context, element)
+
+
+def eval_trailer(context, base_contexts, trailer):
+    trailer_op, node = trailer.children[:2]
+    if node == ')':  # `arglist` is optional.
+        node = ()
+
+    if trailer_op == '[':
+        trailer_op, node, _ = trailer.children
+
+        # TODO It's kind of stupid to cast this from a context set to a set.
+        foo = set(base_contexts)
+        # special case: PEP0484 typing module, see
+        # https://github.com/davidhalter/jedi/issues/663
+        result = ContextSet()
+        for typ in list(foo):
+            if isinstance(typ, (ClassContext, TreeInstance)):
+                typing_module_types = pep0484.py__getitem__(context, typ, node)
+                if typing_module_types is not None:
+                    foo.remove(typ)
+                    result |= typing_module_types
+
+        return result | base_contexts.get_item(
+            eval_subscript_list(context.evaluator, context, node),
+            ContextualizedNode(context, trailer)
+        )
+    else:
+        debug.dbg('eval_trailer: %s in %s', trailer, base_contexts)
+        if trailer_op == '.':
+            return base_contexts.py__getattribute__(
+                name_context=context,
+                name_or_str=node
+            )
+        else:
+            assert trailer_op == '('
+            args = arguments.TreeArguments(context.evaluator, context, node, trailer)
+            return base_contexts.execute(args)
+
+
+def eval_atom(context, atom):
+    """
+    Basically to process ``atom`` nodes. The parser sometimes doesn't
+    generate the node (because it has just one child). In that case an atom
+    might be a name or a literal as well.
+    """
+    if atom.type == 'name':
+        # This is the first global lookup.
+        stmt = tree.search_ancestor(
+            atom, 'expr_stmt', 'lambdef'
+        ) or atom
+        if stmt.type == 'lambdef':
+            stmt = atom
+        return context.py__getattribute__(
+            name_or_str=atom,
+            position=stmt.start_pos,
+            search_global=True
+        )
+
+    elif isinstance(atom, tree.Literal):
+        string = parser_utils.safe_literal_eval(atom.value)
+        return ContextSet(compiled.create(context.evaluator, string))
+    else:
+        c = atom.children
+        if c[0].type == 'string':
+            # Will be one string.
+            context_set = eval_atom(context, c[0])
+            for string in c[1:]:
+                right = eval_atom(context, string)
+                context_set = _eval_comparison(context.evaluator, context, context_set, '+', right)
+            return context_set
+        # Parentheses without commas are not tuples.
+        elif c[0] == '(' and not len(c) == 2 \
+                and not (c[1].type == 'testlist_comp' and
+                         len(c[1].children) > 1):
+            return context.eval_node(c[1])
+
+        try:
+            comp_for = c[1].children[1]
+        except (IndexError, AttributeError):
+            pass
+        else:
+            if comp_for == ':':
+                # Dict comprehensions have a colon at the 3rd index.
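+                # e.g. in `{k: v for k, v in x}` the children of the
+                # dictorsetmaker node are [k, ':', v, comp_for], so the
+                # comp_for sits at index 3.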
+ try: + comp_for = c[1].children[3] + except IndexError: + pass + + if comp_for.type == 'comp_for': + return ContextSet(iterable.Comprehension.from_atom(context.evaluator, context, atom)) + + # It's a dict/list/tuple literal. + array_node = c[1] + try: + array_node_c = array_node.children + except AttributeError: + array_node_c = [] + if c[0] == '{' and (array_node == '}' or ':' in array_node_c): + context = iterable.DictLiteralContext(context.evaluator, context, atom) + else: + context = iterable.SequenceLiteralContext(context.evaluator, context, atom) + return ContextSet(context) + + +@_limit_context_infers +def eval_expr_stmt(context, stmt, seek_name=None): + with recursion.execution_allowed(context.evaluator, stmt) as allowed: + if allowed or context.get_root_context() == context.evaluator.BUILTINS: + return _eval_expr_stmt(context, stmt, seek_name) + return NO_CONTEXTS + + +@debug.increase_indent +def _eval_expr_stmt(context, stmt, seek_name=None): + """ + The starting point of the completion. A statement always owns a call + list, which are the calls, that a statement does. In case multiple + names are defined in the statement, `seek_name` returns the result for + this name. + + :param stmt: A `tree.ExprStmt`. + """ + debug.dbg('eval_expr_stmt %s (%s)', stmt, seek_name) + rhs = stmt.get_rhs() + context_set = context.eval_node(rhs) + + if seek_name: + c_node = ContextualizedName(context, seek_name) + context_set = check_tuple_assignments(context.evaluator, c_node, context_set) + + first_operator = next(stmt.yield_operators(), None) + if first_operator not in ('=', None) and first_operator.type == 'operator': + # `=` is always the last character in aug assignments -> -1 + operator = copy.copy(first_operator) + operator.value = operator.value[:-1] + name = stmt.get_defined_names()[0].value + left = context.py__getattribute__( + name, position=stmt.start_pos, search_global=True) + + for_stmt = tree.search_ancestor(stmt, 'for_stmt') + if for_stmt is not None and for_stmt.type == 'for_stmt' and context_set \ + and parser_utils.for_stmt_defines_one_name(for_stmt): + # Iterate through result and add the values, that's possible + # only in for loops without clutter, because they are + # predictable. Also only do it, if the variable is not a tuple. + node = for_stmt.get_testlist() + cn = ContextualizedNode(context, node) + ordered = list(cn.infer().iterate(cn)) + + for lazy_context in ordered: + dct = {for_stmt.children[1].value: lazy_context.infer()} + with helpers.predefine_names(context, for_stmt, dct): + t = context.eval_node(rhs) + left = _eval_comparison(context.evaluator, context, left, operator, t) + context_set = left + else: + context_set = _eval_comparison(context.evaluator, context, left, operator, context_set) + debug.dbg('eval_expr_stmt result %s', context_set) + return context_set + + +def eval_or_test(context, or_test): + iterator = iter(or_test.children) + types = context.eval_node(next(iterator)) + for operator in iterator: + right = next(iterator) + if operator.type == 'comp_op': # not in / is not + operator = ' '.join(c.value for c in operator.children) + + # handle lazy evaluation of and/or here. + if operator in ('and', 'or'): + left_bools = set(left.py__bool__() for left in types) + if left_bools == set([True]): + if operator == 'and': + types = context.eval_node(right) + elif left_bools == set([False]): + if operator != 'and': + types = context.eval_node(right) + # Otherwise continue, because of uncertainty. 
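+            # e.g. for `a or b` with an uncertain `a`, the left-hand types
+            # are kept rather than guessing a branch.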
+ else: + types = _eval_comparison(context.evaluator, context, types, operator, + context.eval_node(right)) + debug.dbg('eval_or_test types %s', types) + return types + + +@iterator_to_context_set +def eval_factor(context_set, operator): + """ + Calculates `+`, `-`, `~` and `not` prefixes. + """ + for context in context_set: + if operator == '-': + if is_number(context): + yield compiled.create(context.evaluator, -context.obj) + elif operator == 'not': + value = context.py__bool__() + if value is None: # Uncertainty. + return + yield compiled.create(context.evaluator, not value) + else: + yield context + + +# Maps Python syntax to the operator module. +COMPARISON_OPERATORS = { + '==': op.eq, + '!=': op.ne, + 'is': op.is_, + 'is not': op.is_not, + '<': op.lt, + '<=': op.le, + '>': op.gt, + '>=': op.ge, +} + + +def _literals_to_types(evaluator, result): + # Changes literals ('a', 1, 1.0, etc) to its type instances (str(), + # int(), float(), etc). + new_result = NO_CONTEXTS + for typ in result: + if is_literal(typ): + # Literals are only valid as long as the operations are + # correct. Otherwise add a value-free instance. + cls = compiled.builtin_from_name(evaluator, typ.name.string_name) + new_result |= cls.execute_evaluated() + else: + new_result |= ContextSet(typ) + return new_result + + +def _eval_comparison(evaluator, context, left_contexts, operator, right_contexts): + if not left_contexts or not right_contexts: + # illegal slices e.g. cause left/right_result to be None + result = (left_contexts or NO_CONTEXTS) | (right_contexts or NO_CONTEXTS) + return _literals_to_types(evaluator, result) + else: + # I don't think there's a reasonable chance that a string + # operation is still correct, once we pass something like six + # objects. + if len(left_contexts) * len(right_contexts) > 6: + return _literals_to_types(evaluator, left_contexts | right_contexts) + else: + return ContextSet.from_sets( + _eval_comparison_part(evaluator, context, left, operator, right) + for left in left_contexts + for right in right_contexts + ) + + +def _is_tuple(context): + return isinstance(context, iterable.AbstractIterable) and context.array_type == 'tuple' + + +def _is_list(context): + return isinstance(context, iterable.AbstractIterable) and context.array_type == 'list' + + +def _eval_comparison_part(evaluator, context, left, operator, right): + l_is_num = is_number(left) + r_is_num = is_number(right) + if operator == '*': + # for iterables, ignore * operations + if isinstance(left, iterable.AbstractIterable) or is_string(left): + return ContextSet(left) + elif isinstance(right, iterable.AbstractIterable) or is_string(right): + return ContextSet(right) + elif operator == '+': + if l_is_num and r_is_num or is_string(left) and is_string(right): + return ContextSet(compiled.create(evaluator, left.obj + right.obj)) + elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right): + return ContextSet(iterable.MergedArray(evaluator, (left, right))) + elif operator == '-': + if l_is_num and r_is_num: + return ContextSet(compiled.create(evaluator, left.obj - right.obj)) + elif operator == '%': + # With strings and numbers the left type typically remains. Except for + # `int() % float()`. + return ContextSet(left) + elif operator in COMPARISON_OPERATORS: + operation = COMPARISON_OPERATORS[operator] + if is_compiled(left) and is_compiled(right): + # Possible, because the return is not an option. Just compare. 
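+            # e.g. `3 < 5` unwraps both compiled literals and compares the
+            # plain Python objects directly.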
+ left = left.obj + right = right.obj + + try: + result = operation(left, right) + except TypeError: + # Could be True or False. + return ContextSet(compiled.create(evaluator, True), compiled.create(evaluator, False)) + else: + return ContextSet(compiled.create(evaluator, result)) + elif operator == 'in': + return NO_CONTEXTS + + def check(obj): + """Checks if a Jedi object is either a float or an int.""" + return isinstance(obj, CompiledInstance) and \ + obj.name.string_name in ('int', 'float') + + # Static analysis, one is a number, the other one is not. + if operator in ('+', '-') and l_is_num != r_is_num \ + and not (check(left) or check(right)): + message = "TypeError: unsupported operand type(s) for +: %s and %s" + analysis.add(context, 'type-error-operation', operator, + message % (left, right)) + + return ContextSet(left, right) + + +def _remove_statements(evaluator, context, stmt, name): + """ + This is the part where statements are being stripped. + + Due to lazy evaluation, statements like a = func; b = a; b() have to be + evaluated. + """ + pep0484_contexts = \ + pep0484.find_type_from_comment_hint_assign(context, stmt, name) + if pep0484_contexts: + return pep0484_contexts + + return eval_expr_stmt(context, stmt, seek_name=name) + + +def tree_name_to_contexts(evaluator, context, tree_name): + types = [] + node = tree_name.get_definition(import_name_always=True) + if node is None: + node = tree_name.parent + if node.type == 'global_stmt': + context = evaluator.create_context(context, tree_name) + finder = NameFinder(evaluator, context, context, tree_name.value) + filters = finder.get_filters(search_global=True) + # For global_stmt lookups, we only need the first possible scope, + # which means the function itself. + filters = [next(filters)] + return finder.find(filters, attribute_lookup=False) + elif node.type not in ('import_from', 'import_name'): + raise ValueError("Should not happen.") + + typ = node.type + if typ == 'for_stmt': + types = pep0484.find_type_from_comment_hint_for(context, node, tree_name) + if types: + return types + if typ == 'with_stmt': + types = pep0484.find_type_from_comment_hint_with(context, node, tree_name) + if types: + return types + + if typ in ('for_stmt', 'comp_for'): + try: + types = context.predefined_names[node][tree_name.value] + except KeyError: + cn = ContextualizedNode(context, node.children[3]) + for_types = iterate_contexts(cn.infer(), cn) + c_node = ContextualizedName(context, tree_name) + types = check_tuple_assignments(evaluator, c_node, for_types) + elif typ == 'expr_stmt': + types = _remove_statements(evaluator, context, node, tree_name) + elif typ == 'with_stmt': + context_managers = context.eval_node(node.get_test_node_from_name(tree_name)) + enter_methods = context_managers.py__getattribute__('__enter__') + return enter_methods.execute_evaluated() + elif typ in ('import_from', 'import_name'): + types = imports.infer_import(context, tree_name) + elif typ in ('funcdef', 'classdef'): + types = _apply_decorators(context, node) + elif typ == 'try_stmt': + # TODO an exception can also be a tuple. Check for those. + # TODO check for types that are not classes and add it to + # the static analysis report. + exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling()) + types = exceptions.execute_evaluated() + else: + raise ValueError("Should not happen.") + return types + + +def _apply_decorators(context, node): + """ + Returns the function, that should to be executed in the end. 
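+    Each decorator is evaluated and then called with the result of the
+    previous step, mirroring ``f = dec(f)``.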
+    This is also the place where the decorators are processed.
+    """
+    if node.type == 'classdef':
+        decoratee_context = ClassContext(
+            context.evaluator,
+            parent_context=context,
+            classdef=node
+        )
+    else:
+        decoratee_context = FunctionContext(
+            context.evaluator,
+            parent_context=context,
+            funcdef=node
+        )
+    initial = values = ContextSet(decoratee_context)
+    for dec in reversed(node.get_decorators()):
+        debug.dbg('decorator: %s %s', dec, values)
+        dec_values = context.eval_node(dec.children[1])
+        trailer_nodes = dec.children[2:-1]
+        if trailer_nodes:
+            # Create a trailer and evaluate it.
+            trailer = tree.PythonNode('trailer', trailer_nodes)
+            trailer.parent = dec
+            dec_values = eval_trailer(context, dec_values, trailer)
+
+        if not len(dec_values):
+            debug.warning('decorator not found: %s on %s', dec, node)
+            return initial
+
+        values = dec_values.execute(arguments.ValuesArguments([values]))
+        if not len(values):
+            debug.warning('not possible to resolve wrappers found %s', node)
+            return initial
+
+    debug.dbg('decorator end %s', values)
+    return values
+
+
+def check_tuple_assignments(evaluator, contextualized_name, context_set):
+    """
+    Checks if tuples are assigned.
+    """
+    lazy_context = None
+    for index, node in contextualized_name.assignment_indexes():
+        cn = ContextualizedNode(contextualized_name.context, node)
+        iterated = context_set.iterate(cn)
+        for _ in range(index + 1):
+            try:
+                lazy_context = next(iterated)
+            except StopIteration:
+                # We could do this with the default param in next. But that
+                # would allow the loop to run for a very long time if the
+                # index number is high. Therefore bail out if the iterator is
+                # exhausted.
+                return ContextSet()
+        context_set = lazy_context.infer()
+    return context_set
+
+
+def eval_subscript_list(evaluator, context, index):
+    """
+    Handles slices in subscript nodes.
+    """
+    if index == ':':
+        # Like array[:]
+        return ContextSet(iterable.Slice(context, None, None, None))
+
+    elif index.type == 'subscript' and not index.children[0] == '.':
+        # subscript basically implies a slice operation, except for Python 2's
+        # Ellipsis.
+        # e.g. 
array[:3]
+        result = []
+        for el in index.children:
+            if el == ':':
+                if not result:
+                    result.append(None)
+            elif el.type == 'sliceop':
+                if len(el.children) == 2:
+                    result.append(el.children[1])
+            else:
+                result.append(el)
+        result += [None] * (3 - len(result))
+
+        return ContextSet(iterable.Slice(context, *result))
+
+    # No slices
+    return context.eval_node(index)
diff --git a/pythonFiles/release/jedi/evaluate/sys_path.py b/pythonFiles/release/jedi/evaluate/sys_path.py
index 7cfbd57b06ef..82e5e9df9ceb 100755
--- a/pythonFiles/release/jedi/evaluate/sys_path.py
+++ b/pythonFiles/release/jedi/evaluate/sys_path.py
@@ -1,34 +1,63 @@
 import glob
 import os
 import sys
-
-from jedi._compatibility import exec_function, unicode
-from jedi.parser import tree
-from jedi.parser import Parser
-from jedi.evaluate.cache import memoize_default
+import imp
+from jedi.evaluate.site import addsitedir
+
+from jedi._compatibility import unicode
+from jedi.evaluate.cache import evaluator_method_cache
+from jedi.evaluate.base_context import ContextualizedNode
+from jedi.evaluate.helpers import is_string
+from jedi import settings
 from jedi import debug
-from jedi import common
-from jedi import cache
+from jedi.evaluate.utils import ignored
 
 
-def get_sys_path():
-    def check_virtual_env(sys_path):
-        """ Add virtualenv's site-packages to the `sys.path`."""
-        venv = os.getenv('VIRTUAL_ENV')
-        if not venv:
-            return
-        venv = os.path.abspath(venv)
-        p = _get_venv_sitepackages(venv)
-        if p not in sys_path:
-            sys_path.insert(0, p)
+def get_venv_path(venv):
+    """Get sys.path for the specified virtual environment."""
+    sys_path = _get_venv_path_dirs(venv)
+    with ignored(ValueError):
+        sys_path.remove('')
+    sys_path = _get_sys_path_with_egglinks(sys_path)
+    # As of now, get_venv_path_dirs does not scan the built-in pythonpath and
+    # user-local site-packages; let's approximate them using the path from the
+    # Jedi interpreter.
+    return sys_path + sys.path
+
 
-    # Add all egg-links from the virtualenv.
-    for egg_link in glob.glob(os.path.join(p, '*.egg-link')):
+def _get_sys_path_with_egglinks(sys_path):
+    """Find all paths, including those referenced by egg-links.
+
+    Egg-link-referenced directories are inserted into the path immediately
+    before the directory in which their links were found. Such directories are
+    not taken into consideration by the normal import mechanism, but they are
+    traversed when doing pkg_resources.require.
+    """
+    result = []
+    for p in sys_path:
+        # pkg_resources does not define a specific order for egg-link files;
+        # since os.listdir is used to enumerate them, we sort them to keep
+        # tests reproducible.
+        for egg_link in sorted(glob.glob(os.path.join(p, '*.egg-link'))):
             with open(egg_link) as fd:
-                sys_path.insert(0, fd.readline().rstrip())
+                for line in fd:
+                    line = line.strip()
+                    if line:
+                        result.append(os.path.join(p, line))
+                        # pkg_resources only interprets the first
+                        # non-empty line in egg-link files.
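+                        # A hypothetical egg-link line such as `../src/mypkg`
+                        # thus maps to <p>/../src/mypkg, inserted before p
+                        # itself.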
+ break + result.append(p) + return result - check_virtual_env(sys.path) - return [p for p in sys.path if p != ""] + +def _get_venv_path_dirs(venv): + """Get sys.path for venv without starting up the interpreter.""" + venv = os.path.abspath(venv) + sitedir = _get_venv_sitepackages(venv) + sys_path = [] + addsitedir(sys_path, sitedir) + return sys_path def _get_venv_sitepackages(venv): @@ -40,24 +69,21 @@ def _get_venv_sitepackages(venv): return p -def _execute_code(module_path, code): - c = "import os; from os.path import *; result=%s" - variables = {'__file__': module_path} - try: - exec_function(c % code, variables) - except Exception: - debug.warning('sys.path manipulation detected, but failed to evaluate.') - else: - try: - res = variables['result'] - if isinstance(res, str): - return [os.path.abspath(res)] - except KeyError: - pass - return [] +def _abs_path(module_context, path): + module_path = module_context.py__file__() + if os.path.isabs(path): + return path + + if module_path is None: + # In this case we have no idea where we actually are in the file + # system. + return None + + base_dir = os.path.dirname(module_path) + return os.path.abspath(os.path.join(base_dir, path)) -def _paths_from_assignment(evaluator, expr_stmt): +def _paths_from_assignment(module_context, expr_stmt): """ Extracts the assigned strings from an assignment that looks as follows:: @@ -71,15 +97,16 @@ def _paths_from_assignment(evaluator, expr_stmt): for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]): try: assert operator in ['=', '+='] - assert tree.is_node(assignee, 'power') and len(assignee.children) > 1 + assert assignee.type in ('power', 'atom_expr') and \ + len(assignee.children) > 1 c = assignee.children assert c[0].type == 'name' and c[0].value == 'sys' trailer = c[1] assert trailer.children[0] == '.' and trailer.children[1].value == 'path' # TODO Essentially we're not checking details on sys.path # manipulation. Both assigment of the sys.path and changing/adding - # parts of the sys.path are the same: They get added to the current - # sys.path. + # parts of the sys.path are the same: They get added to the end of + # the current sys.path. """ execution = c[2] assert execution.children[0] == '[' @@ -90,101 +117,105 @@ def _paths_from_assignment(evaluator, expr_stmt): except AssertionError: continue - from jedi.evaluate.iterable import get_iterator_types - from jedi.evaluate.precedence import is_string - for val in get_iterator_types(evaluator.eval_statement(expr_stmt)): - if is_string(val): - yield val.obj + cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt) + for lazy_context in cn.infer().iterate(cn): + for context in lazy_context.infer(): + if is_string(context): + abs_path = _abs_path(module_context, context.obj) + if abs_path is not None: + yield abs_path -def _paths_from_list_modifications(module_path, trailer1, trailer2): +def _paths_from_list_modifications(module_context, trailer1, trailer2): """ extract the path from either "sys.path.append" or "sys.path.insert" """ # Guarantee that both are trailers, the first one a name and the second one # a function execution with at least one param. - if not (tree.is_node(trailer1, 'trailer') and trailer1.children[0] == '.' - and tree.is_node(trailer2, 'trailer') and trailer2.children[0] == '(' + if not (trailer1.type == 'trailer' and trailer1.children[0] == '.' 
+ and trailer2.type == 'trailer' and trailer2.children[0] == '(' and len(trailer2.children) == 3): - return [] + return name = trailer1.children[1].value if name not in ['insert', 'append']: - return [] - + return arg = trailer2.children[1] if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma. arg = arg.children[2] - return _execute_code(module_path, arg.get_code()) + + for context in module_context.create_context(arg).eval_node(arg): + if is_string(context): + abs_path = _abs_path(module_context, context.obj) + if abs_path is not None: + yield abs_path -def _check_module(evaluator, module): +@evaluator_method_cache(default=[]) +def check_sys_path_modifications(module_context): + """ + Detect sys.path modifications within module. + """ def get_sys_path_powers(names): for name in names: power = name.parent.parent - if tree.is_node(power, 'power'): + if power.type in ('power', 'atom_expr'): c = power.children - if isinstance(c[0], tree.Name) and c[0].value == 'sys' \ - and tree.is_node(c[1], 'trailer'): + if c[0].type == 'name' and c[0].value == 'sys' \ + and c[1].type == 'trailer': n = c[1].children[1] - if isinstance(n, tree.Name) and n.value == 'path': + if n.type == 'name' and n.value == 'path': yield name, power - sys_path = list(get_sys_path()) # copy + if module_context.tree_node is None: + return [] + + added = [] try: - possible_names = module.used_names['path'] + possible_names = module_context.tree_node.get_used_names()['path'] except KeyError: pass else: for name, power in get_sys_path_powers(possible_names): - stmt = name.get_definition() + expr_stmt = power.parent if len(power.children) >= 4: - sys_path.extend(_paths_from_list_modifications(module.path, *power.children[2:4])) - elif name.get_definition().type == 'expr_stmt': - sys_path.extend(_paths_from_assignment(evaluator, stmt)) - return sys_path + added.extend( + _paths_from_list_modifications( + module_context, *power.children[2:4] + ) + ) + elif expr_stmt is not None and expr_stmt.type == 'expr_stmt': + added.extend(_paths_from_assignment(module_context, expr_stmt)) + return added -@memoize_default(evaluator_is_first_arg=True, default=[]) -def sys_path_with_modifications(evaluator, module): - if module.path is None: - # Support for modules without a path is bad, therefore return the - # normal path. 
- return list(get_sys_path()) +def sys_path_with_modifications(evaluator, module_context): + return evaluator.project.sys_path + check_sys_path_modifications(module_context) - curdir = os.path.abspath(os.curdir) - with common.ignored(OSError): - os.chdir(os.path.dirname(module.path)) +def detect_additional_paths(evaluator, script_path): + django_paths = _detect_django_path(script_path) buildout_script_paths = set() - result = _check_module(evaluator, module) - result += _detect_django_path(module.path) - for buildout_script in _get_buildout_scripts(module.path): - for path in _get_paths_from_buildout_script(evaluator, buildout_script): + for buildout_script_path in _get_buildout_script_paths(script_path): + for path in _get_paths_from_buildout_script(evaluator, buildout_script_path): buildout_script_paths.add(path) - # cleanup, back to old directory - os.chdir(curdir) - return list(result) + list(buildout_script_paths) + return django_paths + list(buildout_script_paths) -def _get_paths_from_buildout_script(evaluator, buildout_script): - def load(buildout_script): - try: - with open(buildout_script, 'rb') as f: - source = common.source_to_unicode(f.read()) - except IOError: - debug.dbg('Error trying to read buildout_script: %s', buildout_script) - return - - p = Parser(evaluator.grammar, source, buildout_script) - cache.save_parser(buildout_script, p) - return p.module - cached = cache.load_parser(buildout_script) - module = cached and cached.module or load(buildout_script) - if not module: +def _get_paths_from_buildout_script(evaluator, buildout_script_path): + try: + module_node = evaluator.grammar.parse( + path=buildout_script_path, + cache=True, + cache_path=settings.cache_directory + ) + except IOError: + debug.warning('Error trying to read buildout_script: %s', buildout_script_path) return - for path in _check_module(evaluator, module): + from jedi.evaluate.context import ModuleContext + module = ModuleContext(evaluator, module_node, buildout_script_path) + for path in check_sys_path_modifications(module): yield path @@ -209,14 +240,14 @@ def _detect_django_path(module_path): result = [] for parent in traverse_parents(module_path): - with common.ignored(IOError): + with ignored(IOError): with open(parent + os.path.sep + 'manage.py'): debug.dbg('Found django path: %s', module_path) result.append(parent) return result -def _get_buildout_scripts(module_path): +def _get_buildout_script_paths(module_path): """ if there is a 'buildout.cfg' file in one of the parent directories of the given module it will return a list of all files in the buildout bin @@ -239,9 +270,39 @@ def _get_buildout_scripts(module_path): firstline = f.readline() if firstline.startswith('#!') and 'python' in firstline: extra_module_paths.append(filepath) - except IOError as e: - # either permission error or race cond. because file got deleted + except (UnicodeDecodeError, IOError) as e: + # Probably a binary file; permission error or race cond. because file got deleted # ignore debug.warning(unicode(e)) continue return extra_module_paths + + +def dotted_path_in_sys_path(sys_path, module_path): + """ + Returns the dotted path inside a sys.path. + """ + # First remove the suffix. + for suffix, _, _ in imp.get_suffixes(): + if module_path.endswith(suffix): + module_path = module_path[:-len(suffix)] + break + else: + # There should always be a suffix in a valid Python file on the path. + return None + + if module_path.startswith(os.path.sep): + # The paths in sys.path most of the times don't end with a slash. 
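+        # NOTE: after this, sys_path entries that keep their leading
+        # separator can no longer match in the startswith() check below.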
+ module_path = module_path[1:] + + for p in sys_path: + if module_path.startswith(p): + rest = module_path[len(p):] + if rest: + split = rest.split(os.path.sep) + for string in split: + if not string or '.' in string: + return None + return '.'.join(split) + + return None diff --git a/pythonFiles/release/jedi/evaluate/usages.py b/pythonFiles/release/jedi/evaluate/usages.py new file mode 100644 index 000000000000..290c4695b169 --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/usages.py @@ -0,0 +1,62 @@ +from jedi.evaluate import imports +from jedi.evaluate.filters import TreeNameDefinition +from jedi.evaluate.context import ModuleContext + + +def _resolve_names(definition_names, avoid_names=()): + for name in definition_names: + if name in avoid_names: + # Avoiding recursions here, because goto on a module name lands + # on the same module. + continue + + if not isinstance(name, imports.SubModuleName): + # SubModuleNames are not actually existing names but created + # names when importing something like `import foo.bar.baz`. + yield name + + if name.api_type == 'module': + for name in _resolve_names(name.goto(), definition_names): + yield name + + +def _dictionarize(names): + return dict( + (n if n.tree_name is None else n.tree_name, n) + for n in names + ) + + +def _find_names(module_context, tree_name): + context = module_context.create_context(tree_name) + name = TreeNameDefinition(context, tree_name) + found_names = set(name.goto()) + found_names.add(name) + return _dictionarize(_resolve_names(found_names)) + + +def usages(module_context, tree_name): + search_name = tree_name.value + found_names = _find_names(module_context, tree_name) + modules = set(d.get_root_context() for d in found_names.values()) + modules = set(m for m in modules if isinstance(m, ModuleContext)) + + non_matching_usage_maps = {} + for m in imports.get_modules_containing_name(module_context.evaluator, modules, search_name): + for name_leaf in m.tree_node.get_used_names().get(search_name, []): + new = _find_names(m, name_leaf) + if any(tree_name in found_names for tree_name in new): + found_names.update(new) + for tree_name in new: + for dct in non_matching_usage_maps.get(tree_name, []): + # A usage that was previously searched for matches with + # a now found name. Merge. 
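+                        # e.g. a usage first collected from a module that
+                        # looked unrelated turns out to reference this
+                        # definition after all.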
+ found_names.update(dct) + try: + del non_matching_usage_maps[tree_name] + except KeyError: + pass + else: + for name in new: + non_matching_usage_maps.setdefault(name, []).append(new) + return found_names.values() diff --git a/pythonFiles/release/jedi/common.py b/pythonFiles/release/jedi/evaluate/utils.py old mode 100755 new mode 100644 similarity index 62% rename from pythonFiles/release/jedi/common.py rename to pythonFiles/release/jedi/evaluate/utils.py index 0e9afd11fb5a..7fc1c246de0d --- a/pythonFiles/release/jedi/common.py +++ b/pythonFiles/release/jedi/evaluate/utils.py @@ -2,11 +2,19 @@ import sys import contextlib import functools -import re -from ast import literal_eval -from jedi._compatibility import unicode, reraise -from jedi import settings +from jedi._compatibility import reraise + + +def to_list(func): + def wrapper(*args, **kwargs): + return list(func(*args, **kwargs)) + return wrapper + + +def unite(iterable): + """Turns a two dimensional array into a one dimensional.""" + return set(typ for types in iterable for typ in types) class UncaughtAttributeError(Exception): @@ -81,16 +89,15 @@ def __next__(self): @contextlib.contextmanager -def scale_speed_settings(factor): - a = settings.max_executions - b = settings.max_until_execution_unique - settings.max_executions *= factor - settings.max_until_execution_unique *= factor +def ignored(*exceptions): + """ + Context manager that ignores all of the specified exceptions. This will + be in the standard library starting with Python 3.4. + """ try: yield - finally: - settings.max_executions = a - settings.max_until_execution_unique = b + except exceptions: + pass def indent_block(text, indention=' '): @@ -101,54 +108,3 @@ def indent_block(text, indention=' '): text = text[:-1] lines = text.split('\n') return '\n'.join(map(lambda s: indention + s, lines)) + temp - - -@contextlib.contextmanager -def ignored(*exceptions): - """ - Context manager that ignores all of the specified exceptions. This will - be in the standard library starting with Python 3.4. - """ - try: - yield - except exceptions: - pass - - -def source_to_unicode(source, encoding=None): - def detect_encoding(): - """ - For the implementation of encoding definitions in Python, look at: - - http://www.python.org/dev/peps/pep-0263/ - - http://docs.python.org/2/reference/lexical_analysis.html#encoding-declarations - """ - byte_mark = literal_eval(r"b'\xef\xbb\xbf'") - if source.startswith(byte_mark): - # UTF-8 byte-order mark - return 'utf-8' - - first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', str(source)).group(0) - possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)", - first_two_lines) - if possible_encoding: - return possible_encoding.group(1) - else: - # the default if nothing else has been set -> PEP 263 - return encoding if encoding is not None else 'iso-8859-1' - - if isinstance(source, unicode): - # only cast str/bytes - return source - - # cast to unicode by default - return unicode(source, detect_encoding(), 'replace') - - -def splitlines(string): - """ - A splitlines for Python code. In contrast to Python's ``str.splitlines``, - looks at form feeds and other special characters as normal text. Just - splits ``\n`` and ``\r\n``. - Also different: Returns ``['']`` for an empty string input. 
- """ - return re.split('\n|\r\n', string) diff --git a/pythonFiles/release/jedi/parser_utils.py b/pythonFiles/release/jedi/parser_utils.py new file mode 100644 index 000000000000..59c6408ea1c6 --- /dev/null +++ b/pythonFiles/release/jedi/parser_utils.py @@ -0,0 +1,241 @@ +import textwrap +from inspect import cleandoc + +from jedi._compatibility import literal_eval, is_py3 +from parso.python import tree + +_EXECUTE_NODES = set([ + 'funcdef', 'classdef', 'import_from', 'import_name', 'test', 'or_test', + 'and_test', 'not_test', 'comparison', 'expr', 'xor_expr', 'and_expr', + 'shift_expr', 'arith_expr', 'atom_expr', 'term', 'factor', 'power', 'atom' +]) + +_FLOW_KEYWORDS = ( + 'try', 'except', 'finally', 'else', 'if', 'elif', 'with', 'for', 'while' +) + + +def get_executable_nodes(node, last_added=False): + """ + For static analysis. + """ + result = [] + typ = node.type + if typ == 'name': + next_leaf = node.get_next_leaf() + if last_added is False and node.parent.type != 'param' and next_leaf != '=': + result.append(node) + elif typ == 'expr_stmt': + # I think evaluating the statement (and possibly returned arrays), + # should be enough for static analysis. + result.append(node) + for child in node.children: + result += get_executable_nodes(child, last_added=True) + elif typ == 'decorator': + # decorator + if node.children[-2] == ')': + node = node.children[-3] + if node != '(': + result += get_executable_nodes(node) + else: + try: + children = node.children + except AttributeError: + pass + else: + if node.type in _EXECUTE_NODES and not last_added: + result.append(node) + + for child in children: + result += get_executable_nodes(child, last_added) + + return result + + +def get_comp_fors(comp_for): + yield comp_for + last = comp_for.children[-1] + while True: + if last.type == 'comp_for': + yield last + elif not last.type == 'comp_if': + break + last = last.children[-1] + + +def for_stmt_defines_one_name(for_stmt): + """ + Returns True if only one name is returned: ``for x in y``. + Returns False if the for loop is more complicated: ``for x, z in y``. + + :returns: bool + """ + return for_stmt.children[1].type == 'name' + + +def get_flow_branch_keyword(flow_node, node): + start_pos = node.start_pos + if not (flow_node.start_pos < start_pos <= flow_node.end_pos): + raise ValueError('The node is not part of the flow.') + + keyword = None + for i, child in enumerate(flow_node.children): + if start_pos < child.start_pos: + return keyword + first_leaf = child.get_first_leaf() + if first_leaf in _FLOW_KEYWORDS: + keyword = first_leaf + return 0 + +def get_statement_of_position(node, pos): + for c in node.children: + if c.start_pos <= pos <= c.end_pos: + if c.type not in ('decorated', 'simple_stmt', 'suite') \ + and not isinstance(c, (tree.Flow, tree.ClassOrFunc)): + return c + else: + try: + return get_statement_of_position(c, pos) + except AttributeError: + pass # Must be a non-scope + return None + + +def clean_scope_docstring(scope_node): + """ Returns a cleaned version of the docstring token. """ + node = scope_node.get_doc_node() + if node is not None: + # TODO We have to check next leaves until there are no new + # leaves anymore that might be part of the docstring. A + # docstring can also look like this: ``'foo' 'bar' + # Returns a literal cleaned version of the ``Token``. + cleaned = cleandoc(safe_literal_eval(node.value)) + # Since we want the docstr output to be always unicode, just + # force it. 
+        if is_py3 or isinstance(cleaned, unicode):
+            return cleaned
+        else:
+            return unicode(cleaned, 'UTF-8', 'replace')
+    return ''
+
+
+def safe_literal_eval(value):
+    first_two = value[:2].lower()
+    if first_two[0] == 'f' or first_two in ('fr', 'rf'):
+        # literal_eval is not able to resolve f literals. We would have to do
+        # that manually, but it's not implemented right now.
+        return ''
+
+    try:
+        return literal_eval(value)
+    except SyntaxError:
+        # It's possible to create syntax errors with literals like rb'' in
+        # Python 2. In that case we just return an empty string.
+        # Before Python 3.3 there was a stricter definition of the order in
+        # which string literal prefixes could appear.
+        return ''
+
+
+def get_call_signature(funcdef, width=72, call_string=None):
+    """
+    Generate the call signature of this function.
+
+    :param width: Fold lines if a line is longer than this value.
+    :type width: int
+    :param call_string: Override the call string (e.g. the function name)
+        when given.
+    :type call_string: str
+
+    :rtype: str
+    """
+    # Lambdas have no name.
+    if call_string is None:
+        if funcdef.type == 'lambdef':
+            call_string = ''
+        else:
+            call_string = funcdef.name.value
+    if funcdef.type == 'lambdef':
+        p = '(' + ''.join(param.get_code() for param in funcdef.get_params()).strip() + ')'
+    else:
+        p = funcdef.children[2].get_code()
+    code = call_string + p
+
+    return '\n'.join(textwrap.wrap(code, width))
+
+
+def get_doc_with_call_signature(scope_node):
+    """
+    Return a document string including call signature.
+    """
+    call_signature = None
+    if scope_node.type == 'classdef':
+        for funcdef in scope_node.iter_funcdefs():
+            if funcdef.name.value == '__init__':
+                call_signature = \
+                    get_call_signature(funcdef, call_string=scope_node.name.value)
+    elif scope_node.type in ('funcdef', 'lambdef'):
+        call_signature = get_call_signature(scope_node)
+
+    doc = clean_scope_docstring(scope_node)
+    if call_signature is None:
+        return doc
+    return '%s\n\n%s' % (call_signature, doc)
+
+
+def move(node, line_offset):
+    """
+    Move the `Node` start_pos.
+    """
+    try:
+        children = node.children
+    except AttributeError:
+        node.line += line_offset
+    else:
+        for c in children:
+            move(c, line_offset)
+
+
+def get_following_comment_same_line(node):
+    """
+    Returns (as a string) any comment that appears on the same line
+    after the node, including the ``#``.
+    """
+    try:
+        if node.type == 'for_stmt':
+            whitespace = node.children[5].get_first_leaf().prefix
+        elif node.type == 'with_stmt':
+            whitespace = node.children[3].get_first_leaf().prefix
+        else:
+            whitespace = node.get_last_leaf().get_next_leaf().prefix
+    except AttributeError:
+        return None
+    except ValueError:
+        # TODO in some particular cases, the tree doesn't seem to be linked
+        # correctly
+        return None
+    if "#" not in whitespace:
+        return None
+    comment = whitespace[whitespace.index("#"):]
+    if "\r" in comment:
+        comment = comment[:comment.index("\r")]
+    if "\n" in comment:
+        comment = comment[:comment.index("\n")]
+    return comment
+
+
+def is_scope(node):
+    return node.type in ('file_input', 'classdef', 'funcdef', 'lambdef', 'comp_for')
+
+
+def get_parent_scope(node, include_flows=False):
+    """
+    Returns the underlying scope.
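+
+    For a name inside a method, for instance, this climbs up to the
+    enclosing funcdef; with include_flows=True an enclosing flow node
+    (e.g. an if or for block) may be returned first.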
+ """ + scope = node.parent + while scope is not None: + if include_flows and isinstance(scope, tree.Flow): + return scope + if is_scope(scope): + break + scope = scope.parent + return scope diff --git a/pythonFiles/release/jedi/refactoring.py b/pythonFiles/release/jedi/refactoring.py index a342f08e2b8f..ee938427fcee 100755 --- a/pythonFiles/release/jedi/refactoring.py +++ b/pythonFiles/release/jedi/refactoring.py @@ -14,9 +14,8 @@ """ import difflib -from jedi import common +from parso import python_bytes_to_unicode, split_lines from jedi.evaluate import helpers -from jedi.parser import tree as pt class Refactoring(object): @@ -29,7 +28,7 @@ def __init__(self, change_dct): def old_files(self): dct = {} for old_path, (new_path, old_l, new_l) in self.change_dct.items(): - dct[new_path] = '\n'.join(new_l) + dct[old_path] = '\n'.join(old_l) return dct def new_files(self): @@ -83,7 +82,7 @@ def process(path, old_lines, new_lines): with open(current_path) as f: source = f.read() - new_lines = common.splitlines(common.source_to_unicode(source)) + new_lines = split_lines(python_bytes_to_unicode(source)) old_lines = new_lines[:] nr, indent = name.line, name.column @@ -101,7 +100,7 @@ def extract(script, new_name): :type source: str :return: list of changed lines/changed files """ - new_lines = common.splitlines(common.source_to_unicode(script.source)) + new_lines = split_lines(python_bytes_to_unicode(script.source)) old_lines = new_lines[:] user_stmt = script._parser.user_stmt() @@ -160,43 +159,42 @@ def inline(script): """ :type script: api.Script """ - new_lines = common.splitlines(common.source_to_unicode(script.source)) + new_lines = split_lines(python_bytes_to_unicode(script.source)) dct = {} definitions = script.goto_assignments() - with common.ignored(AssertionError): - assert len(definitions) == 1 - stmt = definitions[0]._definition - usages = script.usages() - inlines = [r for r in usages - if not stmt.start_pos <= (r.line, r.column) <= stmt.end_pos] - inlines = sorted(inlines, key=lambda x: (x.module_path, x.line, x.column), - reverse=True) - expression_list = stmt.expression_list() - # don't allow multiline refactorings for now. - assert stmt.start_pos[0] == stmt.end_pos[0] - index = stmt.start_pos[0] - 1 - - line = new_lines[index] - replace_str = line[expression_list[0].start_pos[1]:stmt.end_pos[1] + 1] - replace_str = replace_str.strip() - # tuples need parentheses - if expression_list and isinstance(expression_list[0], pr.Array): - arr = expression_list[0] - if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1: - replace_str = '(%s)' % replace_str - - # if it's the only assignment, remove the statement - if len(stmt.get_defined_names()) == 1: - line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:] - - dct = _rename(inlines, replace_str) - # remove the empty line - new_lines = dct[script.path][2] - if line.strip(): - new_lines[index] = line - else: - new_lines.pop(index) + assert len(definitions) == 1 + stmt = definitions[0]._definition + usages = script.usages() + inlines = [r for r in usages + if not stmt.start_pos <= (r.line, r.column) <= stmt.end_pos] + inlines = sorted(inlines, key=lambda x: (x.module_path, x.line, x.column), + reverse=True) + expression_list = stmt.expression_list() + # don't allow multiline refactorings for now. 
+ assert stmt.start_pos[0] == stmt.end_pos[0] + index = stmt.start_pos[0] - 1 + + line = new_lines[index] + replace_str = line[expression_list[0].start_pos[1]:stmt.end_pos[1] + 1] + replace_str = replace_str.strip() + # tuples need parentheses + if expression_list and isinstance(expression_list[0], pr.Array): + arr = expression_list[0] + if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1: + replace_str = '(%s)' % replace_str + + # if it's the only assignment, remove the statement + if len(stmt.get_defined_names()) == 1: + line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:] + + dct = _rename(inlines, replace_str) + # remove the empty line + new_lines = dct[script.path][2] + if line.strip(): + new_lines[index] = line + else: + new_lines.pop(index) return Refactoring(dct) diff --git a/pythonFiles/release/jedi/settings.py b/pythonFiles/release/jedi/settings.py index fb0b38f55f81..f1ae6dbb77e7 100755 --- a/pythonFiles/release/jedi/settings.py +++ b/pythonFiles/release/jedi/settings.py @@ -16,7 +16,6 @@ ~~~~~~~~~~~~~~~~~ .. autodata:: case_insensitive_completion -.. autodata:: add_dot_after_module .. autodata:: add_bracket_after_function .. autodata:: no_completion_duplicates @@ -44,32 +43,9 @@ .. autodata:: auto_import_modules -.. _settings-recursion: - -Recursions -~~~~~~~~~~ - -Recursion settings are important if you don't want extremly -recursive python code to go absolutely crazy. First of there is a -global limit :data:`max_executions`. This limit is important, to set -a maximum amount of time, the completion may use. - -The default values are based on experiments while completing the |jedi| library -itself (inception!). But I don't think there's any other Python library that -uses recursion in a similarly extreme way. These settings make the completion -definitely worse in some cases. But a completion should also be fast. - -.. autodata:: max_until_execution_unique -.. autodata:: max_function_recursion_level -.. autodata:: max_executions_without_builtins -.. autodata:: max_executions -.. autodata:: scale_call_signatures - - Caching ~~~~~~~ -.. autodata:: star_import_cache_validity .. autodata:: call_signatures_validity @@ -86,13 +62,6 @@ The completion is by default case insensitive. """ -add_dot_after_module = False -""" -Adds a dot after a module, because a module that is not accessed this way is -definitely not the normal case. However, in VIM this doesn't work, that's why -it isn't used at the moment. -""" - add_bracket_after_function = False """ Adds an opening bracket after a function, because that's normal behaviour. @@ -124,7 +93,7 @@ 'jedi') cache_directory = os.path.expanduser(_cache_directory) """ -The path where all the caches can be found. +The path where the cache is stored. On Linux, this defaults to ``~/.cache/jedi/``, on OS X to ``~/Library/Caches/Jedi/`` and on Windows to ``%APPDATA%\\Jedi\\Jedi\\``. @@ -183,55 +152,10 @@ ``globals()`` modifications a lot. """ -# ---------------- -# recursions -# ---------------- - -max_until_execution_unique = 50 -""" -This limit is probably the most important one, because if this limit is -exceeded, functions can only be one time executed. So new functions will be -executed, complex recursions with the same functions again and again, are -ignored. -""" - -max_function_recursion_level = 5 -""" -`max_function_recursion_level` is more about whether the recursions are -stopped in deepth or in width. The ratio beetween this and -`max_until_execution_unique` is important here. 
It stops a recursion (after -the number of function calls in the recursion), if it was already used -earlier. -""" - -max_executions_without_builtins = 200 -""" -.. todo:: Document this. -""" - -max_executions = 250 -""" -A maximum amount of time, the completion may use. -""" - -scale_call_signatures = 0.1 -""" -Because call_signatures is normally used on every single key hit, it has -to be faster than a normal completion. This is the factor that is used to -scale `max_executions` and `max_until_execution_unique`: -""" - # ---------------- # caching validity (time) # ---------------- -star_import_cache_validity = 60.0 -""" -In huge packages like numpy, checking all star imports on every completion -might be slow, therefore we do a star import caching, that lasts a certain -time span (in seconds). -""" - call_signatures_validity = 3.0 """ Finding function calls might be slow (0.1-0.5s). This is not acceptible for diff --git a/pythonFiles/release/jedi/utils.py b/pythonFiles/release/jedi/utils.py index 6b48ef496a5e..177524c50168 100755 --- a/pythonFiles/release/jedi/utils.py +++ b/pythonFiles/release/jedi/utils.py @@ -5,13 +5,19 @@ from __future__ import absolute_import import __main__ from collections import namedtuple +import logging +import traceback import re import os import sys +from parso import split_lines + from jedi import Interpreter -from jedi.api.helpers import completion_parts -from jedi.parser.user_context import UserContext +from jedi.api.helpers import get_on_completion_name + + +READLINE_DEBUG = False def setup_readline(namespace_module=__main__): @@ -55,6 +61,13 @@ def setup_readline(namespace_module=__main__): bash). """ + if READLINE_DEBUG: + logging.basicConfig( + filename='/tmp/jedi.log', + filemode='a', + level=logging.DEBUG + ) + class JediRL(object): def complete(self, text, state): """ @@ -70,12 +83,21 @@ def complete(self, text, state): sys.path.insert(0, os.getcwd()) # Calling python doesn't have a path, so add to sys.path. 
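# Not part of the patch: a worked example of the position math the new
# code in the try block below relies on. The cursor is assumed to sit at
# the very end of the REPL input, so the 1-based row is simply the line
# count and the column is the length of the last line; the input string
# here is invented.
from parso import split_lines

lines = split_lines('if x:\n    x.fo')   # ['if x:', '    x.fo']
position = (len(lines), len(lines[-1]))  # (2, 8): row 2, column at the end of '    x.fo'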
try: + logging.debug("Start REPL completion: " + repr(text)) interpreter = Interpreter(text, [namespace_module.__dict__]) - path = UserContext(text, (1, len(text))).get_path_until_cursor() - path, dot, like = completion_parts(path) - before = text[:len(text) - len(like)] + lines = split_lines(text) + position = (len(lines), len(lines[-1])) + name = get_on_completion_name( + interpreter._get_module_node(), + lines, + position + ) + before = text[:len(text) - len(name)] completions = interpreter.completions() + except: + logging.error("REPL Completion error:\n" + traceback.format_exc()) + raise finally: sys.path.pop(0) @@ -88,7 +110,7 @@ def complete(self, text, state): try: import readline except ImportError: - print("Module readline not available.") + print("Jedi: Module readline not available.") else: readline.set_completer(JediRL().complete) readline.parse_and_bind("tab: complete") diff --git a/src/test/.vscode/settings.json b/src/test/.vscode/settings.json index e550760b5f05..a6a0df6d1f21 100644 --- a/src/test/.vscode/settings.json +++ b/src/test/.vscode/settings.json @@ -1,5 +1,5 @@ { - "python.linting.pylintEnabled": true, + "python.linting.pylintEnabled": false, "python.linting.flake8Enabled": false, "python.workspaceSymbols.enabled": false, "python.unitTest.nosetestArgs": [], @@ -21,5 +21,7 @@ "python.linting.pydocstyleEnabled": false, "python.linting.pylamaEnabled": false, "python.linting.mypyEnabled": false, - "python.formatting.provider": "yapf" + "python.formatting.provider": "yapf", + "python.pythonPath": "python", + "python.linting.pylintUseMinimalCheckers": false } \ No newline at end of file diff --git a/src/test/index.ts b/src/test/index.ts index 234d1046c161..4f9bc4bce1ef 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -18,7 +18,8 @@ const options: MochaSetupOptions & { retries: number } = { ui: 'tdd', useColors: true, timeout: 25000, - retries: 3 + retries: 3, + grep: 'Extract' }; testRunner.configure(options, { coverageConfig: '../coverconfig.json' }); module.exports = testRunner; diff --git a/src/test/refactor/extension.refactor.extract.method.test.ts b/src/test/refactor/extension.refactor.extract.method.test.ts index ee3c2b34a9fc..3e3f67c1d896 100644 --- a/src/test/refactor/extension.refactor.extract.method.test.ts +++ b/src/test/refactor/extension.refactor.extract.method.test.ts @@ -87,15 +87,10 @@ suite('Method Extraction', () => { assert.equal(foundEdit.length, 1, 'Edit not found'); }); }).catch((error: any) => { - if (ignoreErrorHandling) { - return Promise.reject(error!); - } - if (shouldError) { + if (!ignoreErrorHandling && shouldError) { // Wait a minute this shouldn't work, what's going on assert.equal(true, true, 'Error raised as expected'); - return; } - return Promise.reject(error!); }); } @@ -112,58 +107,39 @@ suite('Method Extraction', () => { await testingMethodExtraction(true, startPos, endPos); }); - function testingMethodExtractionEndToEnd(shouldError: boolean, startPos: Position, endPos: Position) { + async function testingMethodExtractionEndToEnd(shouldError: boolean, startPos: Position, endPos: Position): Promise { const ch = new MockOutputChannel('Python'); - let textDocument: vscode.TextDocument; - let textEditor: vscode.TextEditor; const rangeOfTextToExtract = new vscode.Range(startPos, endPos); let ignoreErrorHandling = false; - return vscode.workspace.openTextDocument(refactorTargetFile).then(document => { - textDocument = document; - return vscode.window.showTextDocument(textDocument); - }).then(editor => { - 
assert(vscode.window.activeTextEditor, 'No active editor'); - editor.selections = [new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end)]; - editor.selection = new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end); - textEditor = editor; - return; - }).then(() => { - return extractMethod(EXTENSION_DIR, textEditor, rangeOfTextToExtract, ch, ioc.serviceContainer).then(() => { - if (shouldError) { - ignoreErrorHandling = true; - assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); - } - return textEditor.document.save(); - }).then(() => { - assert.equal(ch.output.length, 0, 'Output channel is not empty'); - assert.equal(textDocument.lineAt(241).text.trim().indexOf('def newmethod'), 0, 'New Method not created'); - assert.equal(textDocument.lineAt(239).text.trim().startsWith('self.newmethod'), true, 'New Method not being used'); - }).catch((error: any) => { - if (ignoreErrorHandling) { - return Promise.reject(error!); - } - if (shouldError) { - // Wait a minute this shouldn't work, what's going on - assert.equal(true, true, 'Error raised as expected'); - return; - } + const textDocument = await vscode.workspace.openTextDocument(refactorTargetFile); + const editor = await vscode.window.showTextDocument(textDocument); - return Promise.reject(error!); - }); - }, error => { + editor.selections = [new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end)]; + editor.selection = new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end); + + try { + await extractMethod(EXTENSION_DIR, editor, rangeOfTextToExtract, ch, ioc.serviceContainer); + if (shouldError) { + ignoreErrorHandling = true; + assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); + } + await editor.document.save(); + + assert.equal(ch.output.length, 0, 'Output channel is not empty'); + assert.equal(textDocument.lineAt(241).text.trim().indexOf('def newmethod'), 0, 'New Method not created'); + assert.equal(textDocument.lineAt(239).text.trim().startsWith('self.newmethod'), true, 'New Method not being used'); + } catch (error) { if (ignoreErrorHandling) { - return Promise.reject(error); + return Promise.reject(error!); } if (shouldError) { // Wait a minute this shouldn't work, what's going on assert.equal(true, true, 'Error raised as expected'); - } else { - // tslint:disable-next-line:prefer-template restrict-plus-operands - assert.fail(error, null, 'Method extraction failed\n' + ch.output, ''); - return Promise.reject(error); + return; } - }); + return Promise.reject(error!); + } } // This test fails on linux (text document not getting updated in time) From d43f097021df181924ab12e37414499d48b56b4c Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 12 Feb 2018 13:36:05 -0800 Subject: [PATCH 051/103] Enable Travis --- .../extension.refactor.extract.method.test.ts | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/test/refactor/extension.refactor.extract.method.test.ts b/src/test/refactor/extension.refactor.extract.method.test.ts index 3e3f67c1d896..040e63177442 100644 --- a/src/test/refactor/extension.refactor.extract.method.test.ts +++ b/src/test/refactor/extension.refactor.extract.method.test.ts @@ -10,7 +10,7 @@ import { getTextEditsFromPatch } from '../../client/common/editor'; import { extractMethod } from '../../client/providers/simpleRefactorProvider'; import { RefactorProxy } from '../../client/refactor/proxy'; import { UnitTestIocContainer } from 
'../unittests/serviceRegistry'; -import { closeActiveWindows, initialize, initializeTest, IS_TRAVIS, wait } from './../initialize'; +import { closeActiveWindows, initialize, initializeTest, wait } from './../initialize'; import { MockOutputChannel } from './../mockClasses'; const EXTENSION_DIR = path.join(__dirname, '..', '..', '..'); @@ -143,13 +143,11 @@ suite('Method Extraction', () => { } // This test fails on linux (text document not getting updated in time) - if (!IS_TRAVIS) { - test('Extract Method (end to end)', async () => { - const startPos = new vscode.Position(239, 0); - const endPos = new vscode.Position(241, 35); - await testingMethodExtractionEndToEnd(false, startPos, endPos); - }); - } + test('Extract Method (end to end)', async () => { + const startPos = new vscode.Position(239, 0); + const endPos = new vscode.Position(241, 35); + await testingMethodExtractionEndToEnd(false, startPos, endPos); + }); test('Extract Method will fail if complete statements are not selected', async () => { const startPos = new vscode.Position(239, 30); From f8db9355613b8d4fe126f24fe6a56f68a36f274f Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 12 Feb 2018 16:30:06 -0800 Subject: [PATCH 052/103] Test fixes --- .../extension.refactor.extract.method.test.ts | 69 ++++------ .../extension.refactor.extract.var.test.ts | 126 ++++++------------ 2 files changed, 69 insertions(+), 126 deletions(-) diff --git a/src/test/refactor/extension.refactor.extract.method.test.ts b/src/test/refactor/extension.refactor.extract.method.test.ts index 040e63177442..69c7262d1206 100644 --- a/src/test/refactor/extension.refactor.extract.method.test.ts +++ b/src/test/refactor/extension.refactor.extract.method.test.ts @@ -57,42 +57,33 @@ suite('Method Extraction', () => { ioc.registerVariableTypes(); } - function testingMethodExtraction(shouldError: boolean, startPos: Position, endPos: Position) { + async function testingMethodExtraction(shouldError: boolean, startPos: Position, endPos: Position): Promise { const pythonSettings = PythonSettings.getInstance(vscode.Uri.file(refactorTargetFile)); const rangeOfTextToExtract = new vscode.Range(startPos, endPos); const proxy = new RefactorProxy(EXTENSION_DIR, pythonSettings, path.dirname(refactorTargetFile), ioc.serviceContainer); - let expectedTextEdits: vscode.TextEdit[]; - let ignoreErrorHandling = false; - let mockTextDoc: vscode.TextDocument; + // tslint:disable-next-line:no-multiline-string const DIFF = `--- a/refactor.py\n+++ b/refactor.py\n@@ -237,9 +237,12 @@\n try:\n self._process_request(self._input.readline())\n except Exception as ex:\n- message = ex.message + ' \\n' + traceback.format_exc()\n- sys.stderr.write(str(len(message)) + ':' + message)\n- sys.stderr.flush()\n+ self.myNewMethod(ex)\n+\n+ def myNewMethod(self, ex):\n+ message = ex.message + ' \\n' + traceback.format_exc()\n+ sys.stderr.write(str(len(message)) + ':' + message)\n+ sys.stderr.flush()\n \n if __name__ == '__main__':\n RopeRefactoring().watch()\n`; - return new Promise((resolve, reject) => { - vscode.workspace.openTextDocument(refactorTargetFile).then(textDocument => { - mockTextDoc = textDocument; - expectedTextEdits = getTextEditsFromPatch(textDocument.getText(), DIFF); - resolve(); - }, reject); - }) - .then(() => proxy.extractMethod(mockTextDoc, 'myNewMethod', refactorTargetFile, rangeOfTextToExtract, options)) - .then(response => { - if (shouldError) { - ignoreErrorHandling = true; - assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); - } - const textEdits 
= getTextEditsFromPatch(mockTextDoc.getText(), DIFF); - assert.equal(response.results.length, 1, 'Invalid number of items in response'); - assert.equal(textEdits.length, expectedTextEdits.length, 'Invalid number of Text Edits'); - textEdits.forEach(edit => { - const foundEdit = expectedTextEdits.filter(item => item.newText === edit.newText && item.range.isEqual(edit.range)); - assert.equal(foundEdit.length, 1, 'Edit not found'); - }); - }).catch((error: any) => { - if (!ignoreErrorHandling && shouldError) { - // Wait a minute this shouldn't work, what's going on - assert.equal(true, true, 'Error raised as expected'); - } - return Promise.reject(error!); + const mockTextDoc = await vscode.workspace.openTextDocument(refactorTargetFile); + const expectedTextEdits = getTextEditsFromPatch(mockTextDoc.getText(), DIFF); + try { + const response = await proxy.extractMethod(mockTextDoc, 'myNewMethod', refactorTargetFile, rangeOfTextToExtract, options); + if (shouldError) { + assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); + } + const textEdits = getTextEditsFromPatch(mockTextDoc.getText(), DIFF); + assert.equal(response.results.length, 1, 'Invalid number of items in response'); + assert.equal(textEdits.length, expectedTextEdits.length, 'Invalid number of Text Edits'); + textEdits.forEach(edit => { + const foundEdit = expectedTextEdits.filter(item => item.newText === edit.newText && item.range.isEqual(edit.range)); + assert.equal(foundEdit.length, 1, 'Edit not found'); }); + } catch (error) { + if (!shouldError) { + // Wait a minute this shouldn't work, what's going on + assert.equal('Error', 'No error', `${error}`); + } + } } test('Extract Method', async () => { @@ -110,7 +101,6 @@ suite('Method Extraction', () => { async function testingMethodExtractionEndToEnd(shouldError: boolean, startPos: Position, endPos: Position): Promise { const ch = new MockOutputChannel('Python'); const rangeOfTextToExtract = new vscode.Range(startPos, endPos); - let ignoreErrorHandling = false; const textDocument = await vscode.workspace.openTextDocument(refactorTargetFile); const editor = await vscode.window.showTextDocument(textDocument); @@ -121,24 +111,17 @@ suite('Method Extraction', () => { try { await extractMethod(EXTENSION_DIR, editor, rangeOfTextToExtract, ch, ioc.serviceContainer); if (shouldError) { - ignoreErrorHandling = true; assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); } - await editor.document.save(); + const newMethodRefLine = textDocument.lineAt(editor.selection.start); assert.equal(ch.output.length, 0, 'Output channel is not empty'); - assert.equal(textDocument.lineAt(241).text.trim().indexOf('def newmethod'), 0, 'New Method not created'); - assert.equal(textDocument.lineAt(239).text.trim().startsWith('self.newmethod'), true, 'New Method not being used'); + assert.equal(textDocument.lineAt(newMethodRefLine.lineNumber + 2).text.trim().indexOf('def newmethod'), 0, 'New Method not created'); + assert.equal(newMethodRefLine.text.trim().startsWith('self.newmethod'), true, 'New Method not being used'); } catch (error) { - if (ignoreErrorHandling) { - return Promise.reject(error!); - } - if (shouldError) { - // Wait a minute this shouldn't work, what's going on - assert.equal(true, true, 'Error raised as expected'); - return; + if (!shouldError) { + assert.equal('Error', 'No error', `${error}`); } - return Promise.reject(error!); } } diff --git a/src/test/refactor/extension.refactor.extract.var.test.ts 
b/src/test/refactor/extension.refactor.extract.var.test.ts index d12283a74198..2fd0b3161930 100644 --- a/src/test/refactor/extension.refactor.extract.var.test.ts +++ b/src/test/refactor/extension.refactor.extract.var.test.ts @@ -56,46 +56,31 @@ suite('Variable Extraction', () => { ioc.registerVariableTypes(); } - function testingVariableExtraction(shouldError: boolean, startPos: Position, endPos: Position) { + async function testingVariableExtraction(shouldError: boolean, startPos: Position, endPos: Position): Promise { const pythonSettings = PythonSettings.getInstance(vscode.Uri.file(refactorTargetFile)); const rangeOfTextToExtract = new vscode.Range(startPos, endPos); const proxy = new RefactorProxy(EXTENSION_DIR, pythonSettings, path.dirname(refactorTargetFile), ioc.serviceContainer); - let expectedTextEdits: vscode.TextEdit[]; - let ignoreErrorHandling = false; - let mockTextDoc: vscode.TextDocument; + const DIFF = '--- a/refactor.py\n+++ b/refactor.py\n@@ -232,7 +232,8 @@\n sys.stdout.flush()\n \n def watch(self):\n- self._write_response("STARTED")\n+ myNewVariable = "STARTED"\n+ self._write_response(myNewVariable)\n while True:\n try:\n self._process_request(self._input.readline())\n'; - return new Promise((resolve, reject) => { - vscode.workspace.openTextDocument(refactorTargetFile).then(textDocument => { - mockTextDoc = textDocument; - expectedTextEdits = getTextEditsFromPatch(textDocument.getText(), DIFF); - resolve(); - }, reject); - }) - .then(() => proxy.extractVariable(mockTextDoc, 'myNewVariable', refactorTargetFile, rangeOfTextToExtract, options)) - .then(response => { - if (shouldError) { - ignoreErrorHandling = true; - assert.fail(null, null, 'Extraction should fail with an error', ''); - } - const textEdits = getTextEditsFromPatch(mockTextDoc.getText(), DIFF); - assert.equal(response.results.length, 1, 'Invalid number of items in response'); - assert.equal(textEdits.length, expectedTextEdits.length, 'Invalid number of Text Edits'); - textEdits.forEach(edit => { - const foundEdit = expectedTextEdits.filter(item => item.newText === edit.newText && item.range.isEqual(edit.range)); - assert.equal(foundEdit.length, 1, 'Edit not found'); - }); - }).catch((error: any) => { - if (ignoreErrorHandling) { - return Promise.reject(error!); - } - if (shouldError) { - // Wait a minute this shouldn't work, what's going on - assert.equal(true, true, 'Error raised as expected'); - return; - } - - return Promise.reject(error!); + const mockTextDoc = await vscode.workspace.openTextDocument(refactorTargetFile); + const expectedTextEdits = getTextEditsFromPatch(mockTextDoc.getText(), DIFF); + try { + const response = await proxy.extractVariable(mockTextDoc, 'myNewVariable', refactorTargetFile, rangeOfTextToExtract, options); + if (shouldError) { + assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); + } + const textEdits = getTextEditsFromPatch(mockTextDoc.getText(), DIFF); + assert.equal(response.results.length, 1, 'Invalid number of items in response'); + assert.equal(textEdits.length, expectedTextEdits.length, 'Invalid number of Text Edits'); + textEdits.forEach(edit => { + const foundEdit = expectedTextEdits.filter(item => item.newText === edit.newText && item.range.isEqual(edit.range)); + assert.equal(foundEdit.length, 1, 'Edit not found'); }); + } catch (error) { + if (!shouldError) { + assert.equal('Error', 'No error', `${error}`); + } + } } test('Extract Variable', async () => { @@ -110,58 +95,33 @@ suite('Variable Extraction', () => { await 
testingVariableExtraction(true, startPos, endPos); }); - function testingVariableExtractionEndToEnd(shouldError: boolean, startPos: Position, endPos: Position) { + async function testingVariableExtractionEndToEnd(shouldError: boolean, startPos: Position, endPos: Position): Promise { const ch = new MockOutputChannel('Python'); - let textDocument: vscode.TextDocument; - let textEditor: vscode.TextEditor; const rangeOfTextToExtract = new vscode.Range(startPos, endPos); - let ignoreErrorHandling = false; - return vscode.workspace.openTextDocument(refactorTargetFile).then(document => { - textDocument = document; - return vscode.window.showTextDocument(textDocument); - }).then(editor => { - assert(vscode.window.activeTextEditor, 'No active editor'); - editor.selections = [new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end)]; - editor.selection = new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end); - textEditor = editor; - return; - }).then(() => { - return extractVariable(EXTENSION_DIR, textEditor, rangeOfTextToExtract, ch, ioc.serviceContainer).then(() => { - if (shouldError) { - ignoreErrorHandling = true; - assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); - } - return textEditor.document.save(); - }).then(() => { - assert.equal(ch.output.length, 0, 'Output channel is not empty'); - assert.equal(textDocument.lineAt(234).text.trim().indexOf('newvariable'), 0, 'New Variable not created'); - assert.equal(textDocument.lineAt(234).text.trim().endsWith('= "STARTED"'), true, 'Started Text Assigned to variable'); - assert.equal(textDocument.lineAt(235).text.indexOf('(newvariable') >= 0, true, 'New Variable not being used'); - }).catch((error: any) => { - if (ignoreErrorHandling) { - return Promise.reject(error!); - } - if (shouldError) { - // Wait a minute this shouldn't work, what's going on - assert.equal(true, true, 'Error raised as expected'); - return; - } - - return Promise.reject(error)!; - }); - }, error => { - if (ignoreErrorHandling) { - return Promise.reject(error); - } + + const textDocument = await vscode.workspace.openTextDocument(refactorTargetFile); + const editor = await vscode.window.showTextDocument(textDocument); + + editor.selections = [new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end)]; + editor.selection = new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end); + try { + await extractVariable(EXTENSION_DIR, editor, rangeOfTextToExtract, ch, ioc.serviceContainer); if (shouldError) { - // Wait a minute this shouldn't work, what's going on - assert.equal(true, true, 'Error raised as expected'); - } else { - // tslint:disable-next-line:prefer-template restrict-plus-operands - assert.fail(error + '', null, 'Variable extraction failed\n' + ch.output, ''); - return Promise.reject(error); + assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); } - }); + assert.equal(ch.output.length, 0, 'Output channel is not empty'); + + const newVarDefLine = textDocument.lineAt(editor.selection.start); + const newVarRefLine = textDocument.lineAt(newVarDefLine.lineNumber + 1); + + assert.equal(newVarDefLine.text.trim().indexOf('newvariable'), 0, 'New Variable not created'); + assert.equal(newVarDefLine.text.trim().endsWith('= "STARTED"'), true, 'Started Text Assigned to variable'); + assert.equal(newVarRefLine.text.indexOf('(newvariable') >= 0, true, 'New Variable not being used'); + } catch (error) { + if (!shouldError) { + assert.fail('Error', 'No error', 
`${error}`); + } + } } // This test fails on linux (text document not getting updated in time) From a8dc597b21c992eadbbbf15e9a05223a990e81fc Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 12 Feb 2018 16:31:33 -0800 Subject: [PATCH 053/103] Undo change --- src/test/index.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/test/index.ts b/src/test/index.ts index 4f9bc4bce1ef..234d1046c161 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -18,8 +18,7 @@ const options: MochaSetupOptions & { retries: number } = { ui: 'tdd', useColors: true, timeout: 25000, - retries: 3, - grep: 'Extract' + retries: 3 }; testRunner.configure(options, { coverageConfig: '../coverconfig.json' }); module.exports = testRunner; From 3f5d4922dadc6c326ad45e561b80fa4c36ab0656 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 13 Feb 2018 14:32:29 -0800 Subject: [PATCH 054/103] Jedi 0.11 with parser --- .vscode/settings.json | 8 +- pythonFiles/completion.py | 104 +- pythonFiles/{release => }/jedi/__init__.py | 0 pythonFiles/{preview => }/jedi/__main__.py | 0 .../{release => }/jedi/_compatibility.py | 0 .../{release => }/jedi/api/__init__.py | 0 pythonFiles/{release => }/jedi/api/classes.py | 0 .../{release => }/jedi/api/completion.py | 0 pythonFiles/{release => }/jedi/api/helpers.py | 0 .../{release => }/jedi/api/interpreter.py | 0 .../{release => }/jedi/api/keywords.py | 0 .../{preview => }/jedi/api/replstartup.py | 0 pythonFiles/{release => }/jedi/cache.py | 0 .../{release => }/jedi/common/__init__.py | 0 .../{release => }/jedi/common/context.py | 0 pythonFiles/{release => }/jedi/debug.py | 0 .../{release => }/jedi/evaluate/__init__.py | 0 .../{release => }/jedi/evaluate/analysis.py | 0 .../{release => }/jedi/evaluate/arguments.py | 0 .../jedi/evaluate/base_context.py | 0 .../{release => }/jedi/evaluate/cache.py | 0 .../jedi/evaluate/compiled/__init__.py | 0 .../jedi/evaluate/compiled/fake.py | 0 .../evaluate/compiled/fake/_functools.pym | 0 .../jedi/evaluate/compiled/fake/_sqlite3.pym | 0 .../jedi/evaluate/compiled/fake/_sre.pym | 0 .../jedi/evaluate/compiled/fake/_weakref.pym | 0 .../jedi/evaluate/compiled/fake/builtins.pym | 0 .../jedi/evaluate/compiled/fake/datetime.pym | 0 .../jedi/evaluate/compiled/fake/io.pym | 0 .../jedi/evaluate/compiled/fake/operator.pym | 0 .../jedi/evaluate/compiled/fake/posix.pym | 0 .../jedi/evaluate/compiled/getattr_static.py | 0 .../jedi/evaluate/compiled/mixed.py | 0 .../jedi/evaluate/context/__init__.py | 0 .../jedi/evaluate/context/function.py | 0 .../jedi/evaluate/context/instance.py | 0 .../jedi/evaluate/context/iterable.py | 0 .../jedi/evaluate/context/klass.py | 0 .../jedi/evaluate/context/module.py | 0 .../jedi/evaluate/context/namespace.py | 0 .../{release => }/jedi/evaluate/docstrings.py | 0 .../{release => }/jedi/evaluate/dynamic.py | 0 .../{release => }/jedi/evaluate/filters.py | 0 .../{release => }/jedi/evaluate/finder.py | 0 .../jedi/evaluate/flow_analysis.py | 0 .../{release => }/jedi/evaluate/helpers.py | 0 .../{release => }/jedi/evaluate/imports.py | 0 .../jedi/evaluate/jedi_typing.py | 0 .../jedi/evaluate/lazy_context.py | 0 .../{release => }/jedi/evaluate/param.py | 0 .../jedi/evaluate/parser_cache.py | 0 .../{release => }/jedi/evaluate/pep0484.py | 0 .../{release => }/jedi/evaluate/project.py | 0 .../{release => }/jedi/evaluate/recursion.py | 0 .../{preview => }/jedi/evaluate/site.py | 0 .../{release => }/jedi/evaluate/stdlib.py | 0 .../jedi/evaluate/syntax_tree.py | 0 .../{release => }/jedi/evaluate/sys_path.py | 0 
.../{release => }/jedi/evaluate/usages.py | 0 .../{release => }/jedi/evaluate/utils.py | 0 .../{release => }/jedi/parser_utils.py | 0 pythonFiles/{release => }/jedi/refactoring.py | 0 pythonFiles/{release => }/jedi/settings.py | 0 pythonFiles/{release => }/jedi/utils.py | 0 pythonFiles/parso/__init__.py | 58 + pythonFiles/parso/_compatibility.py | 103 + pythonFiles/parso/cache.py | 162 ++ pythonFiles/parso/grammar.py | 283 +++ pythonFiles/parso/normalizer.py | 184 ++ pythonFiles/parso/parser.py | 78 + .../jedi/parser => parso}/pgen2/__init__.py | 0 .../jedi/parser => parso}/pgen2/grammar.py | 9 +- .../jedi/parser => parso}/pgen2/parse.py | 48 +- .../jedi/parser => parso}/pgen2/pgen.py | 157 +- pythonFiles/parso/python/__init__.py | 0 pythonFiles/parso/python/diff.py | 587 ++++++ pythonFiles/parso/python/errors.py | 1053 +++++++++++ pythonFiles/parso/python/fstring.py | 211 +++ .../python/grammar26.txt} | 108 +- .../python/grammar27.txt} | 82 +- .../python/grammar33.txt} | 6 +- .../python/grammar34.txt} | 10 +- .../python/grammar35.txt} | 8 +- .../python/grammar36.txt} | 17 +- pythonFiles/parso/python/grammar37.txt | 150 ++ pythonFiles/parso/python/issue_list.txt | 176 ++ pythonFiles/parso/python/parser.py | 261 +++ pythonFiles/parso/python/pep8.py | 727 +++++++ pythonFiles/parso/python/prefix.py | 97 + .../jedi/parser => parso/python}/token.py | 46 +- pythonFiles/parso/python/tokenize.py | 420 +++++ pythonFiles/parso/python/tree.py | 1165 ++++++++++++ pythonFiles/parso/tree.py | 364 ++++ pythonFiles/parso/utils.py | 156 ++ pythonFiles/preview/jedi/__init__.py | 43 - pythonFiles/preview/jedi/_compatibility.py | 267 --- pythonFiles/preview/jedi/api/__init__.py | 467 ----- pythonFiles/preview/jedi/api/classes.py | 764 -------- pythonFiles/preview/jedi/api/completion.py | 256 --- pythonFiles/preview/jedi/api/helpers.py | 315 ---- pythonFiles/preview/jedi/api/interpreter.py | 67 - pythonFiles/preview/jedi/api/keywords.py | 132 -- pythonFiles/preview/jedi/api/usages.py | 49 - pythonFiles/preview/jedi/cache.py | 127 -- pythonFiles/preview/jedi/common.py | 169 -- pythonFiles/preview/jedi/debug.py | 133 -- pythonFiles/preview/jedi/evaluate/__init__.py | 536 ------ pythonFiles/preview/jedi/evaluate/analysis.py | 216 --- pythonFiles/preview/jedi/evaluate/cache.py | 58 - .../jedi/evaluate/compiled/__init__.py | 544 ------ .../preview/jedi/evaluate/compiled/fake.py | 203 -- .../jedi/evaluate/compiled/fake/_weakref.pym | 8 - .../jedi/evaluate/compiled/fake/builtins.pym | 262 --- .../jedi/evaluate/compiled/fake/io.pym | 6 - .../preview/jedi/evaluate/compiled/mixed.py | 158 -- .../preview/jedi/evaluate/docstrings.py | 204 -- pythonFiles/preview/jedi/evaluate/dynamic.py | 149 -- pythonFiles/preview/jedi/evaluate/finder.py | 632 ------- .../preview/jedi/evaluate/flow_analysis.py | 91 - pythonFiles/preview/jedi/evaluate/helpers.py | 200 -- pythonFiles/preview/jedi/evaluate/imports.py | 517 ----- pythonFiles/preview/jedi/evaluate/iterable.py | 863 --------- pythonFiles/preview/jedi/evaluate/param.py | 438 ----- pythonFiles/preview/jedi/evaluate/pep0484.py | 195 -- .../preview/jedi/evaluate/precedence.py | 178 -- .../preview/jedi/evaluate/recursion.py | 157 -- .../preview/jedi/evaluate/representation.py | 974 ---------- pythonFiles/preview/jedi/evaluate/stdlib.py | 280 --- pythonFiles/preview/jedi/evaluate/sys_path.py | 283 --- pythonFiles/preview/jedi/parser/__init__.py | 409 ---- pythonFiles/preview/jedi/parser/fast.py | 621 ------ pythonFiles/preview/jedi/parser/pgen2/pgen.py | 394 ---- 
pythonFiles/preview/jedi/parser/tokenize.py | 329 ---- pythonFiles/preview/jedi/parser/tree.py | 1663 ----------------- pythonFiles/preview/jedi/parser/utils.py | 198 -- pythonFiles/preview/jedi/refactoring.py | 202 -- pythonFiles/preview/jedi/settings.py | 233 --- pythonFiles/preview/jedi/utils.py | 131 -- pythonFiles/release/jedi/__main__.py | 48 - pythonFiles/release/jedi/api/replstartup.py | 27 - .../evaluate/compiled/fake/_functools.pym | 9 - .../jedi/evaluate/compiled/fake/_sqlite3.pym | 26 - .../jedi/evaluate/compiled/fake/_sre.pym | 99 - .../jedi/evaluate/compiled/fake/datetime.pym | 4 - .../jedi/evaluate/compiled/fake/posix.pym | 5 - .../release/jedi/evaluate/jedi_typing.py | 100 - pythonFiles/release/jedi/evaluate/site.py | 110 -- pythonFiles/release/jedi/parser/__init__.py | 395 ---- pythonFiles/release/jedi/parser/fast.py | 580 ------ .../release/jedi/parser/pgen2/__init__.py | 8 - .../release/jedi/parser/pgen2/grammar.py | 125 -- .../release/jedi/parser/pgen2/parse.py | 205 -- pythonFiles/release/jedi/parser/token.py | 84 - pythonFiles/release/jedi/parser/tokenize.py | 290 --- pythonFiles/release/jedi/parser/tree.py | 1222 ------------ .../release/jedi/parser/user_context.py | 339 ---- src/client/language/tokenizer.ts | 6 +- src/client/providers/completionProvider.ts | 3 +- src/client/providers/itemInfoSource.ts | 56 +- src/client/providers/jediProxy.ts | 10 +- src/test/autocomplete/base.test.ts | 3 +- src/test/definitions/hover.test.ts | 10 +- src/test/format/extension.format.test.ts | 4 +- src/test/language/tokenizer.test.ts | 6 +- src/test/markdown/restTextConverter.test.ts | 19 +- src/test/pythonFiles/autocomp/hoverTest.py | 6 +- src/test/textUtils.ts | 21 + 168 files changed, 6575 insertions(+), 18204 deletions(-) rename pythonFiles/{release => }/jedi/__init__.py (100%) mode change 100755 => 100644 rename pythonFiles/{preview => }/jedi/__main__.py (100%) rename pythonFiles/{release => }/jedi/_compatibility.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/api/__init__.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/api/classes.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/api/completion.py (100%) rename pythonFiles/{release => }/jedi/api/helpers.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/api/interpreter.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/api/keywords.py (100%) mode change 100755 => 100644 rename pythonFiles/{preview => }/jedi/api/replstartup.py (100%) rename pythonFiles/{release => }/jedi/cache.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/common/__init__.py (100%) rename pythonFiles/{release => }/jedi/common/context.py (100%) rename pythonFiles/{release => }/jedi/debug.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/__init__.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/analysis.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/arguments.py (100%) rename pythonFiles/{release => }/jedi/evaluate/base_context.py (100%) rename pythonFiles/{release => }/jedi/evaluate/cache.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/compiled/__init__.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/compiled/fake.py (100%) mode change 100755 => 100644 rename pythonFiles/{preview => 
}/jedi/evaluate/compiled/fake/_functools.pym (100%) rename pythonFiles/{preview => }/jedi/evaluate/compiled/fake/_sqlite3.pym (100%) rename pythonFiles/{preview => }/jedi/evaluate/compiled/fake/_sre.pym (100%) rename pythonFiles/{release => }/jedi/evaluate/compiled/fake/_weakref.pym (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/compiled/fake/builtins.pym (100%) mode change 100755 => 100644 rename pythonFiles/{preview => }/jedi/evaluate/compiled/fake/datetime.pym (100%) rename pythonFiles/{release => }/jedi/evaluate/compiled/fake/io.pym (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/compiled/fake/operator.pym (100%) rename pythonFiles/{preview => }/jedi/evaluate/compiled/fake/posix.pym (100%) rename pythonFiles/{release => }/jedi/evaluate/compiled/getattr_static.py (100%) rename pythonFiles/{release => }/jedi/evaluate/compiled/mixed.py (100%) rename pythonFiles/{release => }/jedi/evaluate/context/__init__.py (100%) rename pythonFiles/{release => }/jedi/evaluate/context/function.py (100%) rename pythonFiles/{release => }/jedi/evaluate/context/instance.py (100%) rename pythonFiles/{release => }/jedi/evaluate/context/iterable.py (100%) rename pythonFiles/{release => }/jedi/evaluate/context/klass.py (100%) rename pythonFiles/{release => }/jedi/evaluate/context/module.py (100%) rename pythonFiles/{release => }/jedi/evaluate/context/namespace.py (100%) rename pythonFiles/{release => }/jedi/evaluate/docstrings.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/dynamic.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/filters.py (100%) rename pythonFiles/{release => }/jedi/evaluate/finder.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/flow_analysis.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/helpers.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/imports.py (100%) mode change 100755 => 100644 rename pythonFiles/{preview => }/jedi/evaluate/jedi_typing.py (100%) rename pythonFiles/{release => }/jedi/evaluate/lazy_context.py (100%) rename pythonFiles/{release => }/jedi/evaluate/param.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/parser_cache.py (100%) rename pythonFiles/{release => }/jedi/evaluate/pep0484.py (100%) rename pythonFiles/{release => }/jedi/evaluate/project.py (100%) rename pythonFiles/{release => }/jedi/evaluate/recursion.py (100%) mode change 100755 => 100644 rename pythonFiles/{preview => }/jedi/evaluate/site.py (100%) rename pythonFiles/{release => }/jedi/evaluate/stdlib.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/syntax_tree.py (100%) rename pythonFiles/{release => }/jedi/evaluate/sys_path.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/evaluate/usages.py (100%) rename pythonFiles/{release => }/jedi/evaluate/utils.py (100%) rename pythonFiles/{release => }/jedi/parser_utils.py (100%) rename pythonFiles/{release => }/jedi/refactoring.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/settings.py (100%) mode change 100755 => 100644 rename pythonFiles/{release => }/jedi/utils.py (100%) mode change 100755 => 100644 create mode 100644 pythonFiles/parso/__init__.py create mode 100644 pythonFiles/parso/_compatibility.py create mode 100644 pythonFiles/parso/cache.py create mode 100644 
pythonFiles/parso/grammar.py create mode 100644 pythonFiles/parso/normalizer.py create mode 100644 pythonFiles/parso/parser.py rename pythonFiles/{preview/jedi/parser => parso}/pgen2/__init__.py (100%) rename pythonFiles/{preview/jedi/parser => parso}/pgen2/grammar.py (97%) rename pythonFiles/{preview/jedi/parser => parso}/pgen2/parse.py (87%) rename pythonFiles/{release/jedi/parser => parso}/pgen2/pgen.py (76%) mode change 100755 => 100644 create mode 100644 pythonFiles/parso/python/__init__.py create mode 100644 pythonFiles/parso/python/diff.py create mode 100644 pythonFiles/parso/python/errors.py create mode 100644 pythonFiles/parso/python/fstring.py rename pythonFiles/{release/jedi/parser/grammar2.7.txt => parso/python/grammar26.txt} (60%) mode change 100755 => 100644 rename pythonFiles/{preview/jedi/parser/grammar2.7.txt => parso/python/grammar27.txt} (65%) rename pythonFiles/{release/jedi/parser/grammar3.4.txt => parso/python/grammar33.txt} (96%) mode change 100755 => 100644 rename pythonFiles/{preview/jedi/parser/grammar3.4.txt => parso/python/grammar34.txt} (95%) rename pythonFiles/{preview/jedi/parser/grammar3.5.txt => parso/python/grammar35.txt} (96%) rename pythonFiles/{preview/jedi/parser/grammar3.6.txt => parso/python/grammar36.txt} (91%) create mode 100644 pythonFiles/parso/python/grammar37.txt create mode 100644 pythonFiles/parso/python/issue_list.txt create mode 100644 pythonFiles/parso/python/parser.py create mode 100644 pythonFiles/parso/python/pep8.py create mode 100644 pythonFiles/parso/python/prefix.py rename pythonFiles/{preview/jedi/parser => parso/python}/token.py (56%) create mode 100644 pythonFiles/parso/python/tokenize.py create mode 100644 pythonFiles/parso/python/tree.py create mode 100644 pythonFiles/parso/tree.py create mode 100644 pythonFiles/parso/utils.py delete mode 100644 pythonFiles/preview/jedi/__init__.py delete mode 100644 pythonFiles/preview/jedi/_compatibility.py delete mode 100644 pythonFiles/preview/jedi/api/__init__.py delete mode 100644 pythonFiles/preview/jedi/api/classes.py delete mode 100644 pythonFiles/preview/jedi/api/completion.py delete mode 100644 pythonFiles/preview/jedi/api/helpers.py delete mode 100644 pythonFiles/preview/jedi/api/interpreter.py delete mode 100644 pythonFiles/preview/jedi/api/keywords.py delete mode 100644 pythonFiles/preview/jedi/api/usages.py delete mode 100644 pythonFiles/preview/jedi/cache.py delete mode 100644 pythonFiles/preview/jedi/common.py delete mode 100644 pythonFiles/preview/jedi/debug.py delete mode 100644 pythonFiles/preview/jedi/evaluate/__init__.py delete mode 100644 pythonFiles/preview/jedi/evaluate/analysis.py delete mode 100644 pythonFiles/preview/jedi/evaluate/cache.py delete mode 100644 pythonFiles/preview/jedi/evaluate/compiled/__init__.py delete mode 100644 pythonFiles/preview/jedi/evaluate/compiled/fake.py delete mode 100644 pythonFiles/preview/jedi/evaluate/compiled/fake/_weakref.pym delete mode 100644 pythonFiles/preview/jedi/evaluate/compiled/fake/builtins.pym delete mode 100644 pythonFiles/preview/jedi/evaluate/compiled/fake/io.pym delete mode 100644 pythonFiles/preview/jedi/evaluate/compiled/mixed.py delete mode 100644 pythonFiles/preview/jedi/evaluate/docstrings.py delete mode 100644 pythonFiles/preview/jedi/evaluate/dynamic.py delete mode 100644 pythonFiles/preview/jedi/evaluate/finder.py delete mode 100644 pythonFiles/preview/jedi/evaluate/flow_analysis.py delete mode 100644 pythonFiles/preview/jedi/evaluate/helpers.py delete mode 100644 pythonFiles/preview/jedi/evaluate/imports.py 
delete mode 100644 pythonFiles/preview/jedi/evaluate/iterable.py delete mode 100644 pythonFiles/preview/jedi/evaluate/param.py delete mode 100644 pythonFiles/preview/jedi/evaluate/pep0484.py delete mode 100644 pythonFiles/preview/jedi/evaluate/precedence.py delete mode 100644 pythonFiles/preview/jedi/evaluate/recursion.py delete mode 100644 pythonFiles/preview/jedi/evaluate/representation.py delete mode 100644 pythonFiles/preview/jedi/evaluate/stdlib.py delete mode 100644 pythonFiles/preview/jedi/evaluate/sys_path.py delete mode 100644 pythonFiles/preview/jedi/parser/__init__.py delete mode 100644 pythonFiles/preview/jedi/parser/fast.py delete mode 100644 pythonFiles/preview/jedi/parser/pgen2/pgen.py delete mode 100644 pythonFiles/preview/jedi/parser/tokenize.py delete mode 100644 pythonFiles/preview/jedi/parser/tree.py delete mode 100644 pythonFiles/preview/jedi/parser/utils.py delete mode 100644 pythonFiles/preview/jedi/refactoring.py delete mode 100644 pythonFiles/preview/jedi/settings.py delete mode 100644 pythonFiles/preview/jedi/utils.py delete mode 100755 pythonFiles/release/jedi/__main__.py delete mode 100755 pythonFiles/release/jedi/api/replstartup.py delete mode 100755 pythonFiles/release/jedi/evaluate/compiled/fake/_functools.pym delete mode 100755 pythonFiles/release/jedi/evaluate/compiled/fake/_sqlite3.pym delete mode 100755 pythonFiles/release/jedi/evaluate/compiled/fake/_sre.pym delete mode 100755 pythonFiles/release/jedi/evaluate/compiled/fake/datetime.pym delete mode 100755 pythonFiles/release/jedi/evaluate/compiled/fake/posix.pym delete mode 100644 pythonFiles/release/jedi/evaluate/jedi_typing.py delete mode 100644 pythonFiles/release/jedi/evaluate/site.py delete mode 100755 pythonFiles/release/jedi/parser/__init__.py delete mode 100755 pythonFiles/release/jedi/parser/fast.py delete mode 100755 pythonFiles/release/jedi/parser/pgen2/__init__.py delete mode 100755 pythonFiles/release/jedi/parser/pgen2/grammar.py delete mode 100755 pythonFiles/release/jedi/parser/pgen2/parse.py delete mode 100755 pythonFiles/release/jedi/parser/token.py delete mode 100755 pythonFiles/release/jedi/parser/tokenize.py delete mode 100755 pythonFiles/release/jedi/parser/tree.py delete mode 100755 pythonFiles/release/jedi/parser/user_context.py diff --git a/.vscode/settings.json b/.vscode/settings.json index 17a82485a5ad..33a5c99438be 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -15,9 +15,13 @@ }, "typescript.tsdk": "./node_modules/typescript/lib", // we want to use the TS server from our node_modules folder to control its version "tslint.enable": true, - "python.linting.enabled": false, + "python.linting.enabled": true, "python.unitTest.promptToConfigure": false, "python.workspaceSymbols.enabled": false, "python.formatting.provider": "none", - "files.insertFinalNewline": true + "files.insertFinalNewline": true, + "python.linting.pep8Enabled": false, + "python.linting.prospectorEnabled": false, + "python.linting.pydocstyleEnabled": false, + "python.linting.pylintEnabled": true } diff --git a/pythonFiles/completion.py b/pythonFiles/completion.py index 4a0f915fd145..7a740f70a635 100644 --- a/pythonFiles/completion.py +++ b/pythonFiles/completion.py @@ -15,7 +15,7 @@ def __init__(self, new_stdout=None): def __enter__(self): sys.stdout.flush() - oldstdout_fno = self.oldstdout_fno = os.dup(sys.stdout.fileno()) + self.oldstdout_fno = os.dup(sys.stdout.fileno()) os.dup2(self._new_stdout.fileno(), 1) def __exit__(self, exc_type, exc_value, traceback): @@ -47,7 +47,6 @@ def 
__init__(self): self.drive_mount = '' def _get_definition_type(self, definition): - is_built_in = definition.in_builtin_module # if definition.type not in ['import', 'keyword'] and is_built_in(): # return 'builtin' try: @@ -89,7 +88,7 @@ def _generate_signature(self, completion): return '' return '%s(%s)' % ( completion.name, - ', '.join(p.description for p in completion.params if p)) + ', '.join(p.description[6:] for p in completion.params if p)) def _get_call_signatures(self, script): """Extract call signatures from jedi.api.Script object in failsafe way. @@ -108,18 +107,28 @@ def _get_call_signatures(self, script): for pos, param in enumerate(signature.params): if not param.name: continue + + name = self._get_param_name(param) if param.name == 'self' and pos == 0: continue - try: - name, value = param.description.split('=') - except ValueError: - name = param.description - value = None if name.startswith('*'): continue + + value = self._get_param_value(param) _signatures.append((signature, name, value)) return _signatures + def _get_param_name(self, p): + if(p.name.startswith('param ')): + return p.name[6:] # drop leading 'param ' + return p.name + + def _get_param_value(self, p): + pair = p.description.split('=') + if(len(pair) > 1): + return pair[1] + return None + def _get_call_signatures_with_args(self, script): """Extract call signatures from jedi.api.Script object in failsafe way. @@ -150,16 +159,12 @@ def _get_call_signatures_with_args(self, script): for pos, param in enumerate(signature.params): if not param.name: continue + + name = self._get_param_name(param) if param.name == 'self' and pos == 0: continue - try: - name, value = param.description.split('=') - except ValueError: - name = param.description - value = None - # if name.startswith('*'): - # continue - #_signatures.append((signature, name, value)) + + value = self._get_param_value(param) paramDocstring = '' try: paramDocstring = param.docstring() @@ -251,8 +256,7 @@ def _serialize_methods(self, script, identifier=None, prefix=''): for completion in completions: params = [] if hasattr(completion, 'params'): - params = [p.description for p in completion.params - if ARGUMENT_RE.match(p.description)] + params = [p.description for p in completion.params if p] if completion.parent().type == 'class': _methods.append({ 'parent': completion.parent().name, @@ -288,50 +292,8 @@ def _top_definition(self, definition): return d return definition - def _extract_range_jedi_0_9_0(self, definition): - from jedi import common - from jedi.parser.utils import load_parser - # get the scope range - try: - if definition.type in ['class', 'function'] and hasattr(definition, '_definition'): - scope = definition._definition - start_line = scope.start_pos[0] - 1 - start_column = scope.start_pos[1] - end_line = scope.end_pos[0] - 1 - end_column = scope.end_pos[1] - # get the lines - path = definition._definition.get_parent_until().path - parser = load_parser(path) - lines = common.splitlines(parser.source) - lines[end_line] = lines[end_line][:end_column] - # trim the lines - lines = lines[start_line:end_line + 1] - lines = '\n'.join(lines).rstrip().split('\n') - end_line = start_line + len(lines) - 1 - end_column = len(lines[-1]) - 1 - else: - symbol = definition._name - start_line = symbol.start_pos[0] - 1 - start_column = symbol.start_pos[1] - end_line = symbol.end_pos[0] - 1 - end_column = symbol.end_pos[1] - return { - 'start_line': start_line, - 'start_column': start_column, - 'end_line': end_line, - 'end_column': end_column - } - except 
Exception as e: - return { - 'start_line': definition.line - 1, - 'start_column': definition.column, - 'end_line': definition.line - 1, - 'end_column': definition.column - } - - def _extract_range_jedi_0_10_1(self, definition): - from jedi import common - from jedi.parser.python import parse + def _extract_range_jedi_0_11_1(self, definition): + from parso.utils import split_lines # get the scope range try: if definition.type in ['class', 'function']: @@ -341,7 +303,7 @@ def _extract_range_jedi_0_10_1(self, definition): start_column = scope.start_pos[1] # get the lines code = scope.get_code(include_prefix=False) - lines = common.splitlines(code) + lines = split_lines(code) # trim the lines lines = '\n'.join(lines).rstrip().split('\n') end_line = start_line + len(lines) - 1 @@ -380,10 +342,7 @@ def _extract_range(self, definition): last character of actual code. That's why we extract the lines that make up our scope and trim the trailing whitespace. """ - if jedi.__version__ in ('0.9.0', '0.10.0'): - return self._extract_range_jedi_0_9_0(definition) - else: - return self._extract_range_jedi_0_10_1(definition) + return self._extract_range_jedi_0_11_1(definition) def _get_definitionsx(self, definitions, identifier=None, ignoreNoModulePath=False): """Serialize response to be read from VSCode. @@ -680,20 +639,15 @@ def watch(self): if __name__ == '__main__': cachePrefix = 'v' modulesToLoad = '' - if len(sys.argv) > 0 and sys.argv[1] == 'preview': - jediPath = os.path.join(os.path.dirname(__file__), 'preview') - jediPreview = True - if len(sys.argv) > 2: - modulesToLoad = sys.argv[2] - elif len(sys.argv) > 0 and sys.argv[1] == 'custom': + if len(sys.argv) > 0 and sys.argv[1] == 'custom': jediPath = sys.argv[2] jediPreview = True cachePrefix = 'custom_v' if len(sys.argv) > 3: modulesToLoad = sys.argv[3] else: - #std - jediPath = os.path.join(os.path.dirname(__file__), 'release') + #release + jediPath = os.path.dirname(__file__) if len(sys.argv) > 2: modulesToLoad = sys.argv[2] diff --git a/pythonFiles/release/jedi/__init__.py b/pythonFiles/jedi/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/__init__.py rename to pythonFiles/jedi/__init__.py diff --git a/pythonFiles/preview/jedi/__main__.py b/pythonFiles/jedi/__main__.py similarity index 100% rename from pythonFiles/preview/jedi/__main__.py rename to pythonFiles/jedi/__main__.py diff --git a/pythonFiles/release/jedi/_compatibility.py b/pythonFiles/jedi/_compatibility.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/_compatibility.py rename to pythonFiles/jedi/_compatibility.py diff --git a/pythonFiles/release/jedi/api/__init__.py b/pythonFiles/jedi/api/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/api/__init__.py rename to pythonFiles/jedi/api/__init__.py diff --git a/pythonFiles/release/jedi/api/classes.py b/pythonFiles/jedi/api/classes.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/api/classes.py rename to pythonFiles/jedi/api/classes.py diff --git a/pythonFiles/release/jedi/api/completion.py b/pythonFiles/jedi/api/completion.py similarity index 100% rename from pythonFiles/release/jedi/api/completion.py rename to pythonFiles/jedi/api/completion.py diff --git a/pythonFiles/release/jedi/api/helpers.py b/pythonFiles/jedi/api/helpers.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/api/helpers.py rename 
to pythonFiles/jedi/api/helpers.py diff --git a/pythonFiles/release/jedi/api/interpreter.py b/pythonFiles/jedi/api/interpreter.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/api/interpreter.py rename to pythonFiles/jedi/api/interpreter.py diff --git a/pythonFiles/release/jedi/api/keywords.py b/pythonFiles/jedi/api/keywords.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/api/keywords.py rename to pythonFiles/jedi/api/keywords.py diff --git a/pythonFiles/preview/jedi/api/replstartup.py b/pythonFiles/jedi/api/replstartup.py similarity index 100% rename from pythonFiles/preview/jedi/api/replstartup.py rename to pythonFiles/jedi/api/replstartup.py diff --git a/pythonFiles/release/jedi/cache.py b/pythonFiles/jedi/cache.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/cache.py rename to pythonFiles/jedi/cache.py diff --git a/pythonFiles/release/jedi/common/__init__.py b/pythonFiles/jedi/common/__init__.py similarity index 100% rename from pythonFiles/release/jedi/common/__init__.py rename to pythonFiles/jedi/common/__init__.py diff --git a/pythonFiles/release/jedi/common/context.py b/pythonFiles/jedi/common/context.py similarity index 100% rename from pythonFiles/release/jedi/common/context.py rename to pythonFiles/jedi/common/context.py diff --git a/pythonFiles/release/jedi/debug.py b/pythonFiles/jedi/debug.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/debug.py rename to pythonFiles/jedi/debug.py diff --git a/pythonFiles/release/jedi/evaluate/__init__.py b/pythonFiles/jedi/evaluate/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/__init__.py rename to pythonFiles/jedi/evaluate/__init__.py diff --git a/pythonFiles/release/jedi/evaluate/analysis.py b/pythonFiles/jedi/evaluate/analysis.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/analysis.py rename to pythonFiles/jedi/evaluate/analysis.py diff --git a/pythonFiles/release/jedi/evaluate/arguments.py b/pythonFiles/jedi/evaluate/arguments.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/arguments.py rename to pythonFiles/jedi/evaluate/arguments.py diff --git a/pythonFiles/release/jedi/evaluate/base_context.py b/pythonFiles/jedi/evaluate/base_context.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/base_context.py rename to pythonFiles/jedi/evaluate/base_context.py diff --git a/pythonFiles/release/jedi/evaluate/cache.py b/pythonFiles/jedi/evaluate/cache.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/cache.py rename to pythonFiles/jedi/evaluate/cache.py diff --git a/pythonFiles/release/jedi/evaluate/compiled/__init__.py b/pythonFiles/jedi/evaluate/compiled/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/compiled/__init__.py rename to pythonFiles/jedi/evaluate/compiled/__init__.py diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake.py b/pythonFiles/jedi/evaluate/compiled/fake.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/compiled/fake.py rename to pythonFiles/jedi/evaluate/compiled/fake.py diff --git a/pythonFiles/preview/jedi/evaluate/compiled/fake/_functools.pym b/pythonFiles/jedi/evaluate/compiled/fake/_functools.pym 
similarity index 100% rename from pythonFiles/preview/jedi/evaluate/compiled/fake/_functools.pym rename to pythonFiles/jedi/evaluate/compiled/fake/_functools.pym diff --git a/pythonFiles/preview/jedi/evaluate/compiled/fake/_sqlite3.pym b/pythonFiles/jedi/evaluate/compiled/fake/_sqlite3.pym similarity index 100% rename from pythonFiles/preview/jedi/evaluate/compiled/fake/_sqlite3.pym rename to pythonFiles/jedi/evaluate/compiled/fake/_sqlite3.pym diff --git a/pythonFiles/preview/jedi/evaluate/compiled/fake/_sre.pym b/pythonFiles/jedi/evaluate/compiled/fake/_sre.pym similarity index 100% rename from pythonFiles/preview/jedi/evaluate/compiled/fake/_sre.pym rename to pythonFiles/jedi/evaluate/compiled/fake/_sre.pym diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/_weakref.pym b/pythonFiles/jedi/evaluate/compiled/fake/_weakref.pym old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/compiled/fake/_weakref.pym rename to pythonFiles/jedi/evaluate/compiled/fake/_weakref.pym diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/builtins.pym b/pythonFiles/jedi/evaluate/compiled/fake/builtins.pym old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/compiled/fake/builtins.pym rename to pythonFiles/jedi/evaluate/compiled/fake/builtins.pym diff --git a/pythonFiles/preview/jedi/evaluate/compiled/fake/datetime.pym b/pythonFiles/jedi/evaluate/compiled/fake/datetime.pym similarity index 100% rename from pythonFiles/preview/jedi/evaluate/compiled/fake/datetime.pym rename to pythonFiles/jedi/evaluate/compiled/fake/datetime.pym diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/io.pym b/pythonFiles/jedi/evaluate/compiled/fake/io.pym old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/compiled/fake/io.pym rename to pythonFiles/jedi/evaluate/compiled/fake/io.pym diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/operator.pym b/pythonFiles/jedi/evaluate/compiled/fake/operator.pym similarity index 100% rename from pythonFiles/release/jedi/evaluate/compiled/fake/operator.pym rename to pythonFiles/jedi/evaluate/compiled/fake/operator.pym diff --git a/pythonFiles/preview/jedi/evaluate/compiled/fake/posix.pym b/pythonFiles/jedi/evaluate/compiled/fake/posix.pym similarity index 100% rename from pythonFiles/preview/jedi/evaluate/compiled/fake/posix.pym rename to pythonFiles/jedi/evaluate/compiled/fake/posix.pym diff --git a/pythonFiles/release/jedi/evaluate/compiled/getattr_static.py b/pythonFiles/jedi/evaluate/compiled/getattr_static.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/compiled/getattr_static.py rename to pythonFiles/jedi/evaluate/compiled/getattr_static.py diff --git a/pythonFiles/release/jedi/evaluate/compiled/mixed.py b/pythonFiles/jedi/evaluate/compiled/mixed.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/compiled/mixed.py rename to pythonFiles/jedi/evaluate/compiled/mixed.py diff --git a/pythonFiles/release/jedi/evaluate/context/__init__.py b/pythonFiles/jedi/evaluate/context/__init__.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/context/__init__.py rename to pythonFiles/jedi/evaluate/context/__init__.py diff --git a/pythonFiles/release/jedi/evaluate/context/function.py b/pythonFiles/jedi/evaluate/context/function.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/context/function.py rename to 
pythonFiles/jedi/evaluate/context/function.py diff --git a/pythonFiles/release/jedi/evaluate/context/instance.py b/pythonFiles/jedi/evaluate/context/instance.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/context/instance.py rename to pythonFiles/jedi/evaluate/context/instance.py diff --git a/pythonFiles/release/jedi/evaluate/context/iterable.py b/pythonFiles/jedi/evaluate/context/iterable.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/context/iterable.py rename to pythonFiles/jedi/evaluate/context/iterable.py diff --git a/pythonFiles/release/jedi/evaluate/context/klass.py b/pythonFiles/jedi/evaluate/context/klass.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/context/klass.py rename to pythonFiles/jedi/evaluate/context/klass.py diff --git a/pythonFiles/release/jedi/evaluate/context/module.py b/pythonFiles/jedi/evaluate/context/module.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/context/module.py rename to pythonFiles/jedi/evaluate/context/module.py diff --git a/pythonFiles/release/jedi/evaluate/context/namespace.py b/pythonFiles/jedi/evaluate/context/namespace.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/context/namespace.py rename to pythonFiles/jedi/evaluate/context/namespace.py diff --git a/pythonFiles/release/jedi/evaluate/docstrings.py b/pythonFiles/jedi/evaluate/docstrings.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/docstrings.py rename to pythonFiles/jedi/evaluate/docstrings.py diff --git a/pythonFiles/release/jedi/evaluate/dynamic.py b/pythonFiles/jedi/evaluate/dynamic.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/dynamic.py rename to pythonFiles/jedi/evaluate/dynamic.py diff --git a/pythonFiles/release/jedi/evaluate/filters.py b/pythonFiles/jedi/evaluate/filters.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/filters.py rename to pythonFiles/jedi/evaluate/filters.py diff --git a/pythonFiles/release/jedi/evaluate/finder.py b/pythonFiles/jedi/evaluate/finder.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/finder.py rename to pythonFiles/jedi/evaluate/finder.py diff --git a/pythonFiles/release/jedi/evaluate/flow_analysis.py b/pythonFiles/jedi/evaluate/flow_analysis.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/flow_analysis.py rename to pythonFiles/jedi/evaluate/flow_analysis.py diff --git a/pythonFiles/release/jedi/evaluate/helpers.py b/pythonFiles/jedi/evaluate/helpers.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/helpers.py rename to pythonFiles/jedi/evaluate/helpers.py diff --git a/pythonFiles/release/jedi/evaluate/imports.py b/pythonFiles/jedi/evaluate/imports.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/imports.py rename to pythonFiles/jedi/evaluate/imports.py diff --git a/pythonFiles/preview/jedi/evaluate/jedi_typing.py b/pythonFiles/jedi/evaluate/jedi_typing.py similarity index 100% rename from pythonFiles/preview/jedi/evaluate/jedi_typing.py rename to pythonFiles/jedi/evaluate/jedi_typing.py diff --git a/pythonFiles/release/jedi/evaluate/lazy_context.py b/pythonFiles/jedi/evaluate/lazy_context.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/lazy_context.py rename to 
pythonFiles/jedi/evaluate/lazy_context.py diff --git a/pythonFiles/release/jedi/evaluate/param.py b/pythonFiles/jedi/evaluate/param.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/param.py rename to pythonFiles/jedi/evaluate/param.py diff --git a/pythonFiles/release/jedi/evaluate/parser_cache.py b/pythonFiles/jedi/evaluate/parser_cache.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/parser_cache.py rename to pythonFiles/jedi/evaluate/parser_cache.py diff --git a/pythonFiles/release/jedi/evaluate/pep0484.py b/pythonFiles/jedi/evaluate/pep0484.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/pep0484.py rename to pythonFiles/jedi/evaluate/pep0484.py diff --git a/pythonFiles/release/jedi/evaluate/project.py b/pythonFiles/jedi/evaluate/project.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/project.py rename to pythonFiles/jedi/evaluate/project.py diff --git a/pythonFiles/release/jedi/evaluate/recursion.py b/pythonFiles/jedi/evaluate/recursion.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/recursion.py rename to pythonFiles/jedi/evaluate/recursion.py diff --git a/pythonFiles/preview/jedi/evaluate/site.py b/pythonFiles/jedi/evaluate/site.py similarity index 100% rename from pythonFiles/preview/jedi/evaluate/site.py rename to pythonFiles/jedi/evaluate/site.py diff --git a/pythonFiles/release/jedi/evaluate/stdlib.py b/pythonFiles/jedi/evaluate/stdlib.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/stdlib.py rename to pythonFiles/jedi/evaluate/stdlib.py diff --git a/pythonFiles/release/jedi/evaluate/syntax_tree.py b/pythonFiles/jedi/evaluate/syntax_tree.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/syntax_tree.py rename to pythonFiles/jedi/evaluate/syntax_tree.py diff --git a/pythonFiles/release/jedi/evaluate/sys_path.py b/pythonFiles/jedi/evaluate/sys_path.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/evaluate/sys_path.py rename to pythonFiles/jedi/evaluate/sys_path.py diff --git a/pythonFiles/release/jedi/evaluate/usages.py b/pythonFiles/jedi/evaluate/usages.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/usages.py rename to pythonFiles/jedi/evaluate/usages.py diff --git a/pythonFiles/release/jedi/evaluate/utils.py b/pythonFiles/jedi/evaluate/utils.py similarity index 100% rename from pythonFiles/release/jedi/evaluate/utils.py rename to pythonFiles/jedi/evaluate/utils.py diff --git a/pythonFiles/release/jedi/parser_utils.py b/pythonFiles/jedi/parser_utils.py similarity index 100% rename from pythonFiles/release/jedi/parser_utils.py rename to pythonFiles/jedi/parser_utils.py diff --git a/pythonFiles/release/jedi/refactoring.py b/pythonFiles/jedi/refactoring.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/refactoring.py rename to pythonFiles/jedi/refactoring.py diff --git a/pythonFiles/release/jedi/settings.py b/pythonFiles/jedi/settings.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/settings.py rename to pythonFiles/jedi/settings.py diff --git a/pythonFiles/release/jedi/utils.py b/pythonFiles/jedi/utils.py old mode 100755 new mode 100644 similarity index 100% rename from pythonFiles/release/jedi/utils.py rename to pythonFiles/jedi/utils.py diff --git a/pythonFiles/parso/__init__.py 
b/pythonFiles/parso/__init__.py
new file mode 100644
index 000000000000..f0a0fc4f5015
--- /dev/null
+++ b/pythonFiles/parso/__init__.py
@@ -0,0 +1,58 @@
+r"""
+Parso is a Python parser that supports error recovery and round-trip parsing
+for different Python versions (in multiple Python versions). Parso is also able
+to list multiple syntax errors in your python file.
+
+Parso has been battle-tested by jedi_. It was pulled out of jedi to be useful
+for other projects as well.
+
+Parso consists of a small API to parse Python and analyse the syntax tree.
+
+.. _jedi: https://github.com/davidhalter/jedi
+
+A simple example:
+
+>>> import parso
+>>> module = parso.parse('hello + 1', version="3.6")
+>>> expr = module.children[0]
+>>> expr
+PythonNode(arith_expr, [<Name: hello@1,0>, <Operator: +>, <Number: 1>])
+>>> print(expr.get_code())
+hello + 1
+>>> name = expr.children[0]
+>>> name
+<Name: hello@1,0>
+>>> name.end_pos
+(1, 5)
+>>> expr.end_pos
+(1, 9)
+
+To list multiple issues:
+
+>>> grammar = parso.load_grammar()
+>>> module = grammar.parse('foo +\nbar\ncontinue')
+>>> error1, error2 = grammar.iter_errors(module)
+>>> error1.message
+'SyntaxError: invalid syntax'
+>>> error2.message
+"SyntaxError: 'continue' not properly in loop"
+"""
+
+from parso.parser import ParserSyntaxError
+from parso.grammar import Grammar, load_grammar
+from parso.utils import split_lines, python_bytes_to_unicode
+
+
+__version__ = '0.1.1'
+
+
+def parse(code=None, **kwargs):
+    """
+    A utility function to avoid loading grammars.
+    Params are documented in :py:meth:`parso.Grammar.parse`.
+
+    :param str version: The version used by :py:func:`parso.load_grammar`.
+    """
+    version = kwargs.pop('version', None)
+    grammar = load_grammar(version=version)
+    return grammar.parse(code, **kwargs)
diff --git a/pythonFiles/parso/_compatibility.py b/pythonFiles/parso/_compatibility.py
new file mode 100644
index 000000000000..9ddf23dc6786
--- /dev/null
+++ b/pythonFiles/parso/_compatibility.py
@@ -0,0 +1,103 @@
+"""
+To ensure compatibility from Python ``2.6`` - ``3.3``, a module has been
+created. Clearly there is huge need to use conforming syntax.
+"""
+import sys
+import platform
+
+# Cannot use sys.version.major and minor names, because in Python 2.6 it's not
+# a namedtuple.
+py_version = int(str(sys.version_info[0]) + str(sys.version_info[1]))
+
+# unicode function
+try:
+    unicode = unicode
+except NameError:
+    unicode = str
+
+is_pypy = platform.python_implementation() == 'PyPy'
+
+
+def use_metaclass(meta, *bases):
+    """ Create a class with a metaclass. """
+    if not bases:
+        bases = (object,)
+    return meta("HackClass", bases, {})
+
+
+try:
+    encoding = sys.stdout.encoding
+    if encoding is None:
+        encoding = 'utf-8'
+except AttributeError:
+    encoding = 'ascii'
+
+
+def u(string):
+    """Cast to unicode DAMMIT!
+    Written because Python2 repr always implicitly casts to a string, so we
+    have to cast back to a unicode (and we know that we always deal with valid
+    unicode, because we check that in the beginning).
+    """
+    if py_version >= 30:
+        return str(string)
+
+    if not isinstance(string, unicode):
+        return unicode(str(string), 'UTF-8')
+    return string
+
+
+try:
+    FileNotFoundError = FileNotFoundError
+except NameError:
+    FileNotFoundError = IOError
+
+
+def utf8_repr(func):
+    """
+    ``__repr__`` methods in Python 2 don't allow unicode objects to be
+    returned. Therefore cast them to utf-8 bytes in this decorator.
+ """ + def wrapper(self): + result = func(self) + if isinstance(result, unicode): + return result.encode('utf-8') + else: + return result + + if py_version >= 30: + return func + else: + return wrapper + + +try: + from functools import total_ordering +except ImportError: + # Python 2.6 + def total_ordering(cls): + """Class decorator that fills in missing ordering methods""" + convert = { + '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)), + ('__le__', lambda self, other: self < other or self == other), + ('__ge__', lambda self, other: not self < other)], + '__le__': [('__ge__', lambda self, other: not self <= other or self == other), + ('__lt__', lambda self, other: self <= other and not self == other), + ('__gt__', lambda self, other: not self <= other)], + '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)), + ('__ge__', lambda self, other: self > other or self == other), + ('__le__', lambda self, other: not self > other)], + '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other), + ('__gt__', lambda self, other: self >= other and not self == other), + ('__lt__', lambda self, other: not self >= other)] + } + roots = set(dir(cls)) & set(convert) + if not roots: + raise ValueError('must define at least one ordering operation: < > <= >=') + root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ + for opname, opfunc in convert[root]: + if opname not in roots: + opfunc.__name__ = opname + opfunc.__doc__ = getattr(int, opname).__doc__ + setattr(cls, opname, opfunc) + return cls diff --git a/pythonFiles/parso/cache.py b/pythonFiles/parso/cache.py new file mode 100644 index 000000000000..d0465d023086 --- /dev/null +++ b/pythonFiles/parso/cache.py @@ -0,0 +1,162 @@ +import time +import os +import sys +import hashlib +import gc +import shutil +import platform +import errno +import logging + +try: + import cPickle as pickle +except: + import pickle + +from parso._compatibility import FileNotFoundError + +LOG = logging.getLogger(__name__) + + +_PICKLE_VERSION = 30 +""" +Version number (integer) for file system cache. + +Increment this number when there are any incompatible changes in +the parser tree classes. For example, the following changes +are regarded as incompatible. + +- A class name is changed. +- A class is moved to another module. +- A __slot__ of a class is changed. +""" + +_VERSION_TAG = '%s-%s%s-%s' % ( + platform.python_implementation(), + sys.version_info[0], + sys.version_info[1], + _PICKLE_VERSION +) +""" +Short name for distinguish Python implementations and versions. + +It's like `sys.implementation.cache_tag` but for Python < 3.3 +we generate something similar. See: +http://docs.python.org/3/library/sys.html#sys.implementation +""" + +def _get_default_cache_path(): + if platform.system().lower() == 'windows': + dir_ = os.path.join(os.getenv('LOCALAPPDATA') or '~', 'Parso', 'Parso') + elif platform.system().lower() == 'darwin': + dir_ = os.path.join('~', 'Library', 'Caches', 'Parso') + else: + dir_ = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache', 'parso') + return os.path.expanduser(dir_) + +_default_cache_path = _get_default_cache_path() +""" +The path where the cache is stored. + +On Linux, this defaults to ``~/.cache/parso/``, on OS X to +``~/Library/Caches/Parso/`` and on Windows to ``%LOCALAPPDATA%\\Parso\\Parso\\``. +On Linux, if environment variable ``$XDG_CACHE_HOME`` is set, +``$XDG_CACHE_HOME/parso`` is used instead of the default one. 
+""" + +parser_cache = {} + + +class _NodeCacheItem(object): + def __init__(self, node, lines, change_time=None): + self.node = node + self.lines = lines + if change_time is None: + change_time = time.time() + self.change_time = change_time + + +def load_module(hashed_grammar, path, cache_path=None): + """ + Returns a module or None, if it fails. + """ + try: + p_time = os.path.getmtime(path) + except FileNotFoundError: + return None + + try: + module_cache_item = parser_cache[hashed_grammar][path] + if p_time <= module_cache_item.change_time: + return module_cache_item.node + except KeyError: + return _load_from_file_system(hashed_grammar, path, p_time, cache_path=cache_path) + + +def _load_from_file_system(hashed_grammar, path, p_time, cache_path=None): + cache_path = _get_hashed_path(hashed_grammar, path, cache_path=cache_path) + try: + try: + if p_time > os.path.getmtime(cache_path): + # Cache is outdated + return None + except OSError as e: + if e.errno == errno.ENOENT: + # In Python 2 instead of an IOError here we get an OSError. + raise FileNotFoundError + else: + raise + + with open(cache_path, 'rb') as f: + gc.disable() + try: + module_cache_item = pickle.load(f) + finally: + gc.enable() + except FileNotFoundError: + return None + else: + parser_cache.setdefault(hashed_grammar, {})[path] = module_cache_item + LOG.debug('pickle loaded: %s', path) + return module_cache_item.node + + +def save_module(hashed_grammar, path, module, lines, pickling=True, cache_path=None): + try: + p_time = None if path is None else os.path.getmtime(path) + except OSError: + p_time = None + pickling = False + + item = _NodeCacheItem(module, lines, p_time) + parser_cache.setdefault(hashed_grammar, {})[path] = item + if pickling and path is not None: + _save_to_file_system(hashed_grammar, path, item, cache_path=cache_path) + + +def _save_to_file_system(hashed_grammar, path, item, cache_path=None): + with open(_get_hashed_path(hashed_grammar, path, cache_path=cache_path), 'wb') as f: + pickle.dump(item, f, pickle.HIGHEST_PROTOCOL) + + +def clear_cache(cache_path=None): + if cache_path is None: + cache_path = _default_cache_path + shutil.rmtree(cache_path) + parser_cache.clear() + + +def _get_hashed_path(hashed_grammar, path, cache_path=None): + directory = _get_cache_directory_path(cache_path=cache_path) + + file_hash = hashlib.sha256(path.encode("utf-8")).hexdigest() + return os.path.join(directory, '%s-%s.pkl' % (hashed_grammar, file_hash)) + + +def _get_cache_directory_path(cache_path=None): + if cache_path is None: + cache_path = _default_cache_path + directory = os.path.join(cache_path, _VERSION_TAG) + if not os.path.exists(directory): + os.makedirs(directory) + return directory diff --git a/pythonFiles/parso/grammar.py b/pythonFiles/parso/grammar.py new file mode 100644 index 000000000000..2cf26d77fb27 --- /dev/null +++ b/pythonFiles/parso/grammar.py @@ -0,0 +1,283 @@ +import hashlib +import os + +from parso._compatibility import FileNotFoundError, is_pypy +from parso.pgen2.pgen import generate_grammar +from parso.utils import split_lines, python_bytes_to_unicode, parse_version_string +from parso.python.diff import DiffParser +from parso.python.tokenize import tokenize_lines, tokenize +from parso.python import token +from parso.cache import parser_cache, load_module, save_module +from parso.parser import BaseParser +from parso.python.parser import Parser as PythonParser +from parso.python.errors import ErrorFinderConfig +from parso.python import pep8 +from parso.python import fstring + 
+_loaded_grammars = {}
+
+
+class Grammar(object):
+    """
+    :py:func:`parso.load_grammar` returns instances of this class.
+
+    Creating custom grammars by calling this is not supported, yet.
+    """
+    #:param text: A BNF representation of your grammar.
+    _error_normalizer_config = None
+    _token_namespace = None
+    _default_normalizer_config = pep8.PEP8NormalizerConfig()
+
+    def __init__(self, text, tokenizer, parser=BaseParser, diff_parser=None):
+        self._pgen_grammar = generate_grammar(
+            text,
+            token_namespace=self._get_token_namespace()
+        )
+        self._parser = parser
+        self._tokenizer = tokenizer
+        self._diff_parser = diff_parser
+        self._hashed = hashlib.sha256(text.encode("utf-8")).hexdigest()
+
+    def parse(self, code=None, **kwargs):
+        """
+        If you want to parse a Python file you want to start here, most likely.
+
+        If you need finer grained control over the parsed instance, there will be
+        other ways to access it.
+
+        :param str code: A unicode or bytes string. When it's not possible to
+            decode bytes to a string, raises a
+            :py:class:`UnicodeDecodeError`.
+        :param bool error_recovery: If enabled, any code will be returned. If
+            it is invalid, it will be returned as an error node. If disabled,
+            you will get a ParseError when encountering syntax errors in your
+            code.
+        :param str start_symbol: The grammar symbol that you want to parse. Only
+            allowed to be used when error_recovery is False.
+        :param str path: The path to the file you want to open. Only needed for caching.
+        :param bool cache: Keeps a copy of the parser tree in RAM and on disk
+            if a path is given. Returns the cached trees if the corresponding
+            files on disk have not changed.
+        :param bool diff_cache: Diffs the cached python module against the new
+            code and tries to parse only the parts that have changed. Returns
+            the same (changed) module that is found in cache. Using this option
+            requires you to not do anything anymore with the cached modules
+            under that path, because the contents of it might change. This
+            option is still somewhat experimental. If you want stability,
+            please don't use it.
+        :param str cache_path: If given saves the parso cache in this
+            directory. If not given, defaults to the default cache places on
+            each platform.
+
+        :return: A subclass of :py:class:`parso.tree.NodeOrLeaf`. Typically a
+            :py:class:`parso.python.tree.Module`.
+        """
+        if 'start_pos' in kwargs:
+            raise TypeError("parse() got an unexpected keyword argument.")
+        return self._parse(code=code, **kwargs)
+
+    def _parse(self, code=None, error_recovery=True, path=None,
+               start_symbol=None, cache=False, diff_cache=False,
+               cache_path=None, start_pos=(1, 0)):
+        """
+        Wanted python3.5 * operator and keyword only arguments. Therefore just
+        wrap it all.
+        start_pos here is just a parameter internally used. Might be public
+        sometime in the future.
+ """ + if code is None and path is None: + raise TypeError("Please provide either code or a path.") + + if start_symbol is None: + start_symbol = self._start_symbol + + if error_recovery and start_symbol != 'file_input': + raise NotImplementedError("This is currently not implemented.") + + if cache and path is not None: + module_node = load_module(self._hashed, path, cache_path=cache_path) + if module_node is not None: + return module_node + + if code is None: + with open(path, 'rb') as f: + code = f.read() + + code = python_bytes_to_unicode(code) + + lines = split_lines(code, keepends=True) + if diff_cache: + if self._diff_parser is None: + raise TypeError("You have to define a diff parser to be able " + "to use this option.") + try: + module_cache_item = parser_cache[self._hashed][path] + except KeyError: + pass + else: + module_node = module_cache_item.node + old_lines = module_cache_item.lines + if old_lines == lines: + return module_node + + new_node = self._diff_parser( + self._pgen_grammar, self._tokenizer, module_node + ).update( + old_lines=old_lines, + new_lines=lines + ) + save_module(self._hashed, path, new_node, lines, + # Never pickle in pypy, it's slow as hell. + pickling=cache and not is_pypy, + cache_path=cache_path) + return new_node + + tokens = self._tokenizer(lines, start_pos) + + p = self._parser( + self._pgen_grammar, + error_recovery=error_recovery, + start_symbol=start_symbol + ) + root_node = p.parse(tokens=tokens) + + if cache or diff_cache: + save_module(self._hashed, path, root_node, lines, + # Never pickle in pypy, it's slow as hell. + pickling=cache and not is_pypy, + cache_path=cache_path) + return root_node + + def _get_token_namespace(self): + ns = self._token_namespace + if ns is None: + raise ValueError("The token namespace should be set.") + return ns + + def iter_errors(self, node): + """ + Given a :py:class:`parso.tree.NodeOrLeaf` returns a generator of + :py:class:`parso.normalizer.Issue` objects. For Python this is + a list of syntax/indentation errors. + """ + if self._error_normalizer_config is None: + raise ValueError("No error normalizer specified for this grammar.") + + return self._get_normalizer_issues(node, self._error_normalizer_config) + + def _get_normalizer(self, normalizer_config): + if normalizer_config is None: + normalizer_config = self._default_normalizer_config + if normalizer_config is None: + raise ValueError("You need to specify a normalizer, because " + "there's no default normalizer for this tree.") + return normalizer_config.create_normalizer(self) + + def _normalize(self, node, normalizer_config=None): + """ + TODO this is not public, yet. + The returned code will be normalized, e.g. PEP8 for Python. + """ + normalizer = self._get_normalizer(normalizer_config) + return normalizer.walk(node) + + def _get_normalizer_issues(self, node, normalizer_config=None): + normalizer = self._get_normalizer(normalizer_config) + normalizer.walk(node) + return normalizer.issues + + + def __repr__(self): + labels = self._pgen_grammar.number2symbol.values() + txt = ' '.join(list(labels)[:3]) + ' ...' 
+ return '<%s:%s>' % (self.__class__.__name__, txt) + + +class PythonGrammar(Grammar): + _error_normalizer_config = ErrorFinderConfig() + _token_namespace = token + _start_symbol = 'file_input' + + def __init__(self, version_info, bnf_text): + super(PythonGrammar, self).__init__( + bnf_text, + tokenizer=self._tokenize_lines, + parser=PythonParser, + diff_parser=DiffParser + ) + self.version_info = version_info + + def _tokenize_lines(self, lines, start_pos): + return tokenize_lines(lines, self.version_info, start_pos=start_pos) + + def _tokenize(self, code): + # Used by Jedi. + return tokenize(code, self.version_info) + + +class PythonFStringGrammar(Grammar): + _token_namespace = fstring.TokenNamespace + _start_symbol = 'fstring' + + def __init__(self): + super(PythonFStringGrammar, self).__init__( + text=fstring.GRAMMAR, + tokenizer=fstring.tokenize, + parser=fstring.Parser + ) + + def parse(self, code, **kwargs): + return self._parse(code, **kwargs) + + def _parse(self, code, error_recovery=True, start_pos=(1, 0)): + tokens = self._tokenizer(code, start_pos=start_pos) + p = self._parser( + self._pgen_grammar, + error_recovery=error_recovery, + start_symbol=self._start_symbol, + ) + return p.parse(tokens=tokens) + + def parse_leaf(self, leaf, error_recovery=True): + code = leaf._get_payload() + return self.parse(code, error_recovery=True, start_pos=leaf.start_pos) + + +def load_grammar(**kwargs): + """ + Loads a :py:class:`parso.Grammar`. The default version is the current Python + version. + + :param str version: A python version string, e.g. ``version='3.3'``. + """ + def load_grammar(language='python', version=None): + if language == 'python': + version_info = parse_version_string(version) + + file = os.path.join( + 'python', + 'grammar%s%s.txt' % (version_info.major, version_info.minor) + ) + + global _loaded_grammars + path = os.path.join(os.path.dirname(__file__), file) + try: + return _loaded_grammars[path] + except KeyError: + try: + with open(path) as f: + bnf_text = f.read() + + grammar = PythonGrammar(version_info, bnf_text) + return _loaded_grammars.setdefault(path, grammar) + except FileNotFoundError: + message = "Python version %s is currently not supported." % version + raise NotImplementedError(message) + elif language == 'python-f-string': + if version is not None: + raise NotImplementedError("Currently different versions are not supported.") + return PythonFStringGrammar() + else: + raise NotImplementedError("No support for language %s." 
% language) + + return load_grammar(**kwargs) diff --git a/pythonFiles/parso/normalizer.py b/pythonFiles/parso/normalizer.py new file mode 100644 index 000000000000..9a3e82e24c87 --- /dev/null +++ b/pythonFiles/parso/normalizer.py @@ -0,0 +1,184 @@ +from contextlib import contextmanager + +from parso._compatibility import use_metaclass + + +class _NormalizerMeta(type): + def __new__(cls, name, bases, dct): + new_cls = type.__new__(cls, name, bases, dct) + new_cls.rule_value_classes = {} + new_cls.rule_type_classes = {} + return new_cls + + +class Normalizer(use_metaclass(_NormalizerMeta)): + def __init__(self, grammar, config): + self.grammar = grammar + self._config = config + self.issues = [] + + self._rule_type_instances = self._instantiate_rules('rule_type_classes') + self._rule_value_instances = self._instantiate_rules('rule_value_classes') + + def _instantiate_rules(self, attr): + dct = {} + for base in type(self).mro(): + rules_map = getattr(base, attr, {}) + for type_, rule_classes in rules_map.items(): + new = [rule_cls(self) for rule_cls in rule_classes] + dct.setdefault(type_, []).extend(new) + return dct + + def walk(self, node): + self.initialize(node) + value = self.visit(node) + self.finalize() + return value + + def visit(self, node): + try: + children = node.children + except AttributeError: + return self.visit_leaf(node) + else: + with self.visit_node(node): + return ''.join(self.visit(child) for child in children) + + @contextmanager + def visit_node(self, node): + self._check_type_rules(node) + yield + + def _check_type_rules(self, node): + for rule in self._rule_type_instances.get(node.type, []): + rule.feed_node(node) + + def visit_leaf(self, leaf): + self._check_type_rules(leaf) + + for rule in self._rule_value_instances.get(leaf.value, []): + rule.feed_node(leaf) + + return leaf.prefix + leaf.value + + def initialize(self, node): + pass + + def finalize(self): + pass + + def add_issue(self, node, code, message): + issue = Issue(node, code, message) + if issue not in self.issues: + self.issues.append(issue) + return True + + @classmethod + def register_rule(cls, **kwargs): + """ + Use it as a class decorator:: + + normalizer = Normalizer('grammar', 'config') + @normalizer.register_rule(value='foo') + class MyRule(Rule): + error_code = 42 + """ + return cls._register_rule(**kwargs) + + @classmethod + def _register_rule(cls, value=None, values=(), type=None, types=()): + values = list(values) + types = list(types) + if value is not None: + values.append(value) + if type is not None: + types.append(type) + + if not values and not types: + raise ValueError("You must register at least something.") + + def decorator(rule_cls): + for v in values: + cls.rule_value_classes.setdefault(v, []).append(rule_cls) + for t in types: + cls.rule_type_classes.setdefault(t, []).append(rule_cls) + return rule_cls + + return decorator + + +class NormalizerConfig(object): + normalizer_class = Normalizer + + def create_normalizer(self, grammar): + if self.normalizer_class is None: + return None + + return self.normalizer_class(grammar, self) + + +class Issue(object): + def __init__(self, node, code, message): + self._node = node + self.code = code + """ + An integer code that stands for the type of error. + """ + self.message = message + """ + A message (string) for the issue. + """ + self.start_pos = node.start_pos + """ + The start position position of the error as a tuple (line, column). As + always in |parso| the first line is 1 and the first column 0. 
+ """ + + def __eq__(self, other): + return self.start_pos == other.start_pos and self.code == other.code + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash((self.code, self.start_pos)) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.code) + + + +class Rule(object): + code = None + message = None + + def __init__(self, normalizer): + self._normalizer = normalizer + + def is_issue(self, node): + raise NotImplementedError() + + def get_node(self, node): + return node + + def _get_message(self, message): + if message is None: + message = self.message + if message is None: + raise ValueError("The message on the class is not set.") + return message + + def add_issue(self, node, code=None, message=None): + if code is None: + code = self.code + if code is None: + raise ValueError("The error code on the class is not set.") + + message = self._get_message(message) + + self._normalizer.add_issue(node, code, message) + + def feed_node(self, node): + if self.is_issue(node): + issue_node = self.get_node(node) + self.add_issue(issue_node) diff --git a/pythonFiles/parso/parser.py b/pythonFiles/parso/parser.py new file mode 100644 index 000000000000..555ebc712f73 --- /dev/null +++ b/pythonFiles/parso/parser.py @@ -0,0 +1,78 @@ +""" +The ``Parser`` tries to convert the available Python code in an easy to read +format, something like an abstract syntax tree. The classes who represent this +tree, are sitting in the :mod:`parso.tree` module. + +The Python module ``tokenize`` is a very important part in the ``Parser``, +because it splits the code into different words (tokens). Sometimes it looks a +bit messy. Sorry for that! You might ask now: "Why didn't you use the ``ast`` +module for this? Well, ``ast`` does a very good job understanding proper Python +code, but fails to work as soon as there's a single line of broken code. + +There's one important optimization that needs to be known: Statements are not +being parsed completely. ``Statement`` is just a representation of the tokens +within the statement. This lowers memory usage and cpu time and reduces the +complexity of the ``Parser`` (there's another parser sitting inside +``Statement``, which produces ``Array`` and ``Call``). +""" +from parso import tree +from parso.pgen2.parse import PgenParser + + +class ParserSyntaxError(Exception): + """ + Contains error information about the parser tree. + + May be raised as an exception. + """ + def __init__(self, message, error_leaf): + self.message = message + self.error_leaf = error_leaf + + +class BaseParser(object): + node_map = {} + default_node = tree.Node + + leaf_map = { + } + default_leaf = tree.Leaf + + def __init__(self, pgen_grammar, start_symbol='file_input', error_recovery=False): + self._pgen_grammar = pgen_grammar + self._start_symbol = start_symbol + self._error_recovery = error_recovery + + def parse(self, tokens): + start_number = self._pgen_grammar.symbol2number[self._start_symbol] + self.pgen_parser = PgenParser( + self._pgen_grammar, self.convert_node, self.convert_leaf, + self.error_recovery, start_number + ) + + node = self.pgen_parser.parse(tokens) + # The stack is empty now, we don't need it anymore. 
+ del self.pgen_parser + return node + + def error_recovery(self, pgen_grammar, stack, arcs, typ, value, start_pos, prefix, + add_token_callback): + if self._error_recovery: + raise NotImplementedError("Error Recovery is not implemented") + else: + error_leaf = tree.ErrorLeaf('TODO %s' % typ, value, start_pos, prefix) + raise ParserSyntaxError('SyntaxError: invalid syntax', error_leaf) + + def convert_node(self, pgen_grammar, type_, children): + # TODO REMOVE symbol, we don't want type here. + symbol = pgen_grammar.number2symbol[type_] + try: + return self.node_map[symbol](children) + except KeyError: + return self.default_node(symbol, children) + + def convert_leaf(self, pgen_grammar, type_, value, prefix, start_pos): + try: + return self.leaf_map[type_](value, start_pos, prefix) + except KeyError: + return self.default_leaf(value, start_pos, prefix) diff --git a/pythonFiles/preview/jedi/parser/pgen2/__init__.py b/pythonFiles/parso/pgen2/__init__.py similarity index 100% rename from pythonFiles/preview/jedi/parser/pgen2/__init__.py rename to pythonFiles/parso/pgen2/__init__.py diff --git a/pythonFiles/preview/jedi/parser/pgen2/grammar.py b/pythonFiles/parso/pgen2/grammar.py similarity index 97% rename from pythonFiles/preview/jedi/parser/pgen2/grammar.py rename to pythonFiles/parso/pgen2/grammar.py index 414c0dbe9f01..e5f211426fad 100644 --- a/pythonFiles/preview/jedi/parser/pgen2/grammar.py +++ b/pythonFiles/parso/pgen2/grammar.py @@ -16,8 +16,10 @@ """ -# Python imports -import pickle +try: + import cPickle as pickle +except: + import pickle class Grammar(object): @@ -74,7 +76,7 @@ class Grammar(object): """ - def __init__(self): + def __init__(self, bnf_text): self.symbol2number = {} self.number2symbol = {} self.states = [] @@ -83,6 +85,7 @@ def __init__(self): self.keywords = {} self.tokens = {} self.symbol2label = {} + self.label2symbol = {} self.start = 256 def dump(self, filename): diff --git a/pythonFiles/preview/jedi/parser/pgen2/parse.py b/pythonFiles/parso/pgen2/parse.py similarity index 87% rename from pythonFiles/preview/jedi/parser/pgen2/parse.py rename to pythonFiles/parso/pgen2/parse.py index 9e74838c7c99..aaacfcebe44e 100644 --- a/pythonFiles/preview/jedi/parser/pgen2/parse.py +++ b/pythonFiles/parso/pgen2/parse.py @@ -14,8 +14,7 @@ how this parsing engine works. """ -# Local imports -from jedi.parser import tokenize +from parso.python import tokenize class InternalParseError(Exception): @@ -34,6 +33,12 @@ def __init__(self, msg, type, value, start_pos): self.start_pos = start_pos +class Stack(list): + def get_tos_nodes(self): + tos = self[-1] + return tos[2][1] + + def token_to_ilabel(grammar, type_, value): # Map from token to label if type_ == tokenize.NAME: @@ -57,7 +62,7 @@ class PgenParser(object): p = Parser(grammar, [converter]) # create instance p.setup([start]) # prepare for parsing : - if p.addtoken(...): # parse a token + if p.add_token(...): # parse a token break root = p.rootnode # root of abstract syntax tree @@ -70,7 +75,7 @@ class PgenParser(object): See driver.py for how to get input tokens by tokenizing a file or string. - Parsing is complete when addtoken() returns True; the root of the + Parsing is complete when add_token() returns True; the root of the abstract syntax tree can then be retrieved from the rootnode instance variable. When a syntax error occurs, error_recovery() is called. 
There is no error recovery; the parser cannot be used @@ -114,13 +119,13 @@ def __init__(self, grammar, convert_node, convert_leaf, error_recovery, start): # where children is a list of nodes or None newnode = (start, []) stackentry = (self.grammar.dfas[start], 0, newnode) - self.stack = [stackentry] + self.stack = Stack([stackentry]) self.rootnode = None self.error_recovery = error_recovery - def parse(self, tokenizer): - for type_, value, start_pos, prefix in tokenizer: - if self.addtoken(type_, value, start_pos, prefix): + def parse(self, tokens): + for type_, value, start_pos, prefix in tokens: + if self.add_token(type_, value, start_pos, prefix): break else: # We never broke out -- EOF is too soon -- Unfinished statement. @@ -130,27 +135,32 @@ def parse(self, tokenizer): raise InternalParseError("incomplete input", type_, value, start_pos) return self.rootnode - def addtoken(self, type_, value, start_pos, prefix): + def add_token(self, type_, value, start_pos, prefix): """Add a token; return True if this is the end of the program.""" ilabel = token_to_ilabel(self.grammar, type_, value) # Loop until the token is shifted; may raise exceptions + _gram = self.grammar + _labels = _gram.labels + _push = self._push + _pop = self._pop + _shift = self._shift while True: dfa, state, node = self.stack[-1] states, first = dfa arcs = states[state] # Look for a state with this label for i, newstate in arcs: - t, v = self.grammar.labels[i] + t, v = _labels[i] if ilabel == i: # Look it up in the list of labels assert t < 256 # Shift a token; we're done with it - self.shift(type_, value, newstate, prefix, start_pos) + _shift(type_, value, newstate, prefix, start_pos) # Pop while we are in an accept-only state state = newstate while states[state] == [(0, state)]: - self.pop() + _pop() if not self.stack: # Done parsing! return True @@ -160,39 +170,39 @@ def addtoken(self, type_, value, start_pos, prefix): return False elif t >= 256: # See if it's a symbol and if we're in its first set - itsdfa = self.grammar.dfas[t] + itsdfa = _gram.dfas[t] itsstates, itsfirst = itsdfa if ilabel in itsfirst: # Push a symbol - self.push(t, itsdfa, newstate) + _push(t, itsdfa, newstate) break # To continue the outer while loop else: if (0, state) in arcs: # An accepting state, pop it and try something else - self.pop() + _pop() if not self.stack: # Done parsing, but another token is input raise InternalParseError("too much input", type_, value, start_pos) else: self.error_recovery(self.grammar, self.stack, arcs, type_, - value, start_pos, prefix, self.addtoken) + value, start_pos, prefix, self.add_token) break - def shift(self, type_, value, newstate, prefix, start_pos): + def _shift(self, type_, value, newstate, prefix, start_pos): """Shift a token. (Internal)""" dfa, state, node = self.stack[-1] newnode = self.convert_leaf(self.grammar, type_, value, prefix, start_pos) node[-1].append(newnode) self.stack[-1] = (dfa, newstate, node) - def push(self, type_, newdfa, newstate): + def _push(self, type_, newdfa, newstate): """Push a nonterminal. (Internal)""" dfa, state, node = self.stack[-1] newnode = (type_, []) self.stack[-1] = (dfa, newstate, node) self.stack.append((newdfa, 0, newnode)) - def pop(self): + def _pop(self): """Pop a nonterminal. 
(Internal)""" popdfa, popstate, (type_, children) = self.stack.pop() # If there's exactly one child, return that child instead of creating a diff --git a/pythonFiles/release/jedi/parser/pgen2/pgen.py b/pythonFiles/parso/pgen2/pgen.py old mode 100755 new mode 100644 similarity index 76% rename from pythonFiles/release/jedi/parser/pgen2/pgen.py rename to pythonFiles/parso/pgen2/pgen.py index fa2742dd5dc4..10ef6ffd1532 --- a/pythonFiles/release/jedi/parser/pgen2/pgen.py +++ b/pythonFiles/parso/pgen2/pgen.py @@ -5,30 +5,27 @@ # Copyright 2014 David Halter. Integration into Jedi. # Modifications are dual-licensed: MIT and PSF. -# Pgen imports -from . import grammar -from jedi.parser import token -from jedi.parser import tokenize +from parso.pgen2 import grammar +from parso.python import token +from parso.python import tokenize +from parso.utils import parse_version_string class ParserGenerator(object): - def __init__(self, filename, stream=None): - close_stream = None - if stream is None: - stream = open(filename) - close_stream = stream.close - self.filename = filename - self.stream = stream - self.generator = tokenize.generate_tokens(stream.readline) - self.gettoken() # Initialize lookahead - self.dfas, self.startsymbol = self.parse() - if close_stream is not None: - close_stream() + def __init__(self, bnf_text, token_namespace): + self._bnf_text = bnf_text + self.generator = tokenize.tokenize( + bnf_text, + version_info=parse_version_string('3.6') + ) + self._gettoken() # Initialize lookahead + self.dfas, self.startsymbol = self._parse() self.first = {} # map from symbol name to set of tokens - self.addfirstsets() + self._addfirstsets() + self._token_namespace = token_namespace def make_grammar(self): - c = grammar.Grammar() + c = grammar.Grammar(self._bnf_text) names = list(self.dfas.keys()) names.sort() names.remove(self.startsymbol) @@ -43,25 +40,25 @@ def make_grammar(self): for state in dfa: arcs = [] for label, next in state.arcs.items(): - arcs.append((self.make_label(c, label), dfa.index(next))) + arcs.append((self._make_label(c, label), dfa.index(next))) if state.isfinal: arcs.append((0, dfa.index(state))) states.append(arcs) c.states.append(states) - c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name)) + c.dfas[c.symbol2number[name]] = (states, self._make_first(c, name)) c.start = c.symbol2number[self.startsymbol] return c - def make_first(self, c, name): + def _make_first(self, c, name): rawfirst = self.first[name] first = {} for label in rawfirst: - ilabel = self.make_label(c, label) + ilabel = self._make_label(c, label) ##assert ilabel not in first # XXX failed on <> ... != first[ilabel] = 1 return first - def make_label(self, c, label): + def _make_label(self, c, label): # XXX Maybe this should be a method on a subclass of converter? ilabel = len(c.labels) if label[0].isalpha(): @@ -73,12 +70,12 @@ def make_label(self, c, label): else: c.labels.append((c.symbol2number[label], None)) c.symbol2label[label] = ilabel + c.label2symbol[ilabel] = label return ilabel else: # A named token (NAME, NUMBER, STRING) - itoken = getattr(token, label, None) + itoken = getattr(self._token_namespace, label, None) assert isinstance(itoken, int), label - assert itoken in token.tok_name, label if itoken in c.tokens: return c.tokens[itoken] else: @@ -94,12 +91,13 @@ def make_label(self, c, label): if value in c.keywords: return c.keywords[value] else: + # TODO this might be an issue?! Using token.NAME here? 
c.labels.append((token.NAME, value)) c.keywords[value] = ilabel return ilabel else: # An operator (any non-numeric token) - itoken = token.opmap[value] # Fails if unknown token + itoken = self._token_namespace.generate_token_id(value) if itoken in c.tokens: return c.tokens[itoken] else: @@ -107,15 +105,15 @@ def make_label(self, c, label): c.tokens[itoken] = ilabel return ilabel - def addfirstsets(self): + def _addfirstsets(self): names = list(self.dfas.keys()) names.sort() for name in names: if name not in self.first: - self.calcfirst(name) + self._calcfirst(name) #print name, self.first[name].keys() - def calcfirst(self, name): + def _calcfirst(self, name): dfa = self.dfas[name] self.first[name] = None # dummy to detect left recursion state = dfa[0] @@ -128,7 +126,7 @@ def calcfirst(self, name): if fset is None: raise ValueError("recursion for rule %r" % name) else: - self.calcfirst(label) + self._calcfirst(label) fset = self.first[label] totalset.update(fset) overlapcheck[label] = fset @@ -145,23 +143,23 @@ def calcfirst(self, name): inverse[symbol] = label self.first[name] = totalset - def parse(self): + def _parse(self): dfas = {} startsymbol = None # MSTART: (NEWLINE | RULE)* ENDMARKER while self.type != token.ENDMARKER: while self.type == token.NEWLINE: - self.gettoken() + self._gettoken() # RULE: NAME ':' RHS NEWLINE - name = self.expect(token.NAME) - self.expect(token.OP, ":") - a, z = self.parse_rhs() - self.expect(token.NEWLINE) - #self.dump_nfa(name, a, z) - dfa = self.make_dfa(a, z) - #self.dump_dfa(name, dfa) + name = self._expect(token.NAME) + self._expect(token.COLON) + a, z = self._parse_rhs() + self._expect(token.NEWLINE) + #self._dump_nfa(name, a, z) + dfa = self._make_dfa(a, z) + #self._dump_dfa(name, dfa) # oldlen = len(dfa) - self.simplify_dfa(dfa) + self._simplify_dfa(dfa) # newlen = len(dfa) dfas[name] = dfa #print name, oldlen, newlen @@ -169,7 +167,7 @@ def parse(self): startsymbol = name return dfas, startsymbol - def make_dfa(self, start, finish): + def _make_dfa(self, start, finish): # To turn an NFA into a DFA, we define the states of the DFA # to correspond to *sets* of states of the NFA. Then do some # state reduction. Let's represent sets as dicts with 1 for @@ -208,7 +206,7 @@ def addclosure(state, base): state.addarc(st, label) return states # List of DFAState instances; first one is start - def dump_nfa(self, name, start, finish): + def _dump_nfa(self, name, start, finish): print("Dump of NFA for", name) todo = [start] for i, state in enumerate(todo): @@ -224,14 +222,14 @@ def dump_nfa(self, name, start, finish): else: print(" %s -> %d" % (label, j)) - def dump_dfa(self, name, dfa): + def _dump_dfa(self, name, dfa): print("Dump of DFA for", name) for i, state in enumerate(dfa): print(" State", i, state.isfinal and "(final)" or "") for label, next in state.arcs.items(): print(" %s -> %d" % (label, dfa.index(next))) - def simplify_dfa(self, dfa): + def _simplify_dfa(self, dfa): # This is not theoretically optimal, but works well enough. 
# Algorithm: repeatedly look for two states that have the same # set of arcs (same labels pointing to the same nodes) and @@ -252,9 +250,9 @@ def simplify_dfa(self, dfa): changes = True break - def parse_rhs(self): + def _parse_rhs(self): # RHS: ALT ('|' ALT)* - a, z = self.parse_alt() + a, z = self._parse_alt() if self.value != "|": return a, z else: @@ -263,82 +261,81 @@ def parse_rhs(self): aa.addarc(a) z.addarc(zz) while self.value == "|": - self.gettoken() - a, z = self.parse_alt() + self._gettoken() + a, z = self._parse_alt() aa.addarc(a) z.addarc(zz) return aa, zz - def parse_alt(self): + def _parse_alt(self): # ALT: ITEM+ - a, b = self.parse_item() + a, b = self._parse_item() while (self.value in ("(", "[") or self.type in (token.NAME, token.STRING)): - c, d = self.parse_item() + c, d = self._parse_item() b.addarc(c) b = d return a, b - def parse_item(self): + def _parse_item(self): # ITEM: '[' RHS ']' | ATOM ['+' | '*'] if self.value == "[": - self.gettoken() - a, z = self.parse_rhs() - self.expect(token.OP, "]") + self._gettoken() + a, z = self._parse_rhs() + self._expect(token.RSQB) a.addarc(z) return a, z else: - a, z = self.parse_atom() + a, z = self._parse_atom() value = self.value if value not in ("+", "*"): return a, z - self.gettoken() + self._gettoken() z.addarc(a) if value == "+": return a, z else: return a, a - def parse_atom(self): + def _parse_atom(self): # ATOM: '(' RHS ')' | NAME | STRING if self.value == "(": - self.gettoken() - a, z = self.parse_rhs() - self.expect(token.OP, ")") + self._gettoken() + a, z = self._parse_rhs() + self._expect(token.RPAR) return a, z elif self.type in (token.NAME, token.STRING): a = NFAState() z = NFAState() a.addarc(z, self.value) - self.gettoken() + self._gettoken() return a, z else: - self.raise_error("expected (...) or NAME or STRING, got %s/%s", - self.type, self.value) + self._raise_error("expected (...) or NAME or STRING, got %s/%s", + self.type, self.value) - def expect(self, type, value=None): - if self.type != type or (value is not None and self.value != value): - self.raise_error("expected %s/%s, got %s/%s", - type, value, self.type, self.value) + def _expect(self, type): + if self.type != type: + self._raise_error("expected %s, got %s(%s)", + type, self.type, self.value) value = self.value - self.gettoken() + self._gettoken() return value - def gettoken(self): + def _gettoken(self): tup = next(self.generator) while tup[0] in (token.COMMENT, token.NL): tup = next(self.generator) self.type, self.value, self.begin, prefix = tup - #print tokenize.tok_name[self.type], repr(self.value) - def raise_error(self, msg, *args): + def _raise_error(self, msg, *args): if args: try: msg = msg % args except: msg = " ".join([msg] + list(map(str, args))) - line = open(self.filename).readlines()[self.begin[0]] - raise SyntaxError(msg, (self.filename, self.begin[0], + line = self._bnf_text.splitlines()[self.begin[0] - 1] + raise SyntaxError(msg, ('', self.begin[0], self.begin[1], line)) @@ -389,6 +386,14 @@ def __eq__(self, other): __hash__ = None # For Py3 compatibility. -def generate_grammar(filename="Grammar.txt"): - p = ParserGenerator(filename) +def generate_grammar(bnf_text, token_namespace): + """ + ``bnf_text`` is a grammar in extended BNF (using * for repetition, + for + at-least-once repetition, [] for optional parts, | for alternatives and () + for grouping). + + It's not EBNF according to ISO/IEC 14977. It's a dialect Python uses in its + own parser. 
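    For illustration (added here, not in the original docstring), two rules
    from Python's own grammar files show the dialect's alternation,
    repetition and optional parts:

        decorated: decorators (classdef | funcdef)
        arglist: argument (',' argument)* [',']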
+ """ + p = ParserGenerator(bnf_text, token_namespace) return p.make_grammar() diff --git a/pythonFiles/parso/python/__init__.py b/pythonFiles/parso/python/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/pythonFiles/parso/python/diff.py b/pythonFiles/parso/python/diff.py new file mode 100644 index 000000000000..c2e44fd3cb21 --- /dev/null +++ b/pythonFiles/parso/python/diff.py @@ -0,0 +1,587 @@ +""" +Basically a contains parser that is faster, because it tries to parse only +parts and if anything changes, it only reparses the changed parts. + +It works with a simple diff in the beginning and will try to reuse old parser +fragments. +""" +import re +import difflib +from collections import namedtuple +import logging + +from parso.utils import split_lines +from parso.python.parser import Parser +from parso.python.tree import EndMarker +from parso.python.tokenize import (NEWLINE, PythonToken, ERROR_DEDENT, + ENDMARKER, INDENT, DEDENT) + +LOG = logging.getLogger(__name__) + + +def _get_last_line(node_or_leaf): + last_leaf = node_or_leaf.get_last_leaf() + if _ends_with_newline(last_leaf): + return last_leaf.start_pos[0] + else: + return last_leaf.end_pos[0] + + +def _ends_with_newline(leaf, suffix=''): + if leaf.type == 'error_leaf': + typ = leaf.original_type + else: + typ = leaf.type + + return typ == 'newline' or suffix.endswith('\n') + + +def _flows_finished(pgen_grammar, stack): + """ + if, while, for and try might not be finished, because another part might + still be parsed. + """ + for dfa, newstate, (symbol_number, nodes) in stack: + if pgen_grammar.number2symbol[symbol_number] in ('if_stmt', 'while_stmt', + 'for_stmt', 'try_stmt'): + return False + return True + + +def suite_or_file_input_is_valid(pgen_grammar, stack): + if not _flows_finished(pgen_grammar, stack): + return False + + for dfa, newstate, (symbol_number, nodes) in reversed(stack): + if pgen_grammar.number2symbol[symbol_number] == 'suite': + # If only newline is in the suite, the suite is not valid, yet. + return len(nodes) > 1 + # Not reaching a suite means that we're dealing with file_input levels + # where there's no need for a valid statement in it. It can also be empty. + return True + + +def _is_flow_node(node): + try: + value = node.children[0].value + except AttributeError: + return False + return value in ('if', 'for', 'while', 'try') + + +class _PositionUpdatingFinished(Exception): + pass + + +def _update_positions(nodes, line_offset, last_leaf): + for node in nodes: + try: + children = node.children + except AttributeError: + # Is a leaf + node.line += line_offset + if node is last_leaf: + raise _PositionUpdatingFinished + else: + _update_positions(children, line_offset, last_leaf) + + +class DiffParser(object): + """ + An advanced form of parsing a file faster. Unfortunately comes with huge + side effects. It changes the given module. + """ + def __init__(self, pgen_grammar, tokenizer, module): + self._pgen_grammar = pgen_grammar + self._tokenizer = tokenizer + self._module = module + + def _reset(self): + self._copy_count = 0 + self._parser_count = 0 + + self._nodes_stack = _NodesStack(self._module) + + def update(self, old_lines, new_lines): + ''' + The algorithm works as follows: + + Equal: + - Assure that the start is a newline, otherwise parse until we get + one. + - Copy from parsed_until_line + 1 to max(i2 + 1) + - Make sure that the indentation is correct (e.g. 
add DEDENT) + - Add old and change positions + Insert: + - Parse from parsed_until_line + 1 to min(j2 + 1), hopefully not + much more. + + Returns the new module node. + ''' + LOG.debug('diff parser start') + # Reset the used names cache so they get regenerated. + self._module._used_names = None + + self._parser_lines_new = new_lines + + self._reset() + + line_length = len(new_lines) + sm = difflib.SequenceMatcher(None, old_lines, self._parser_lines_new) + opcodes = sm.get_opcodes() + LOG.debug('diff parser calculated') + LOG.debug('diff: line_lengths old: %s, new: %s' % (len(old_lines), line_length)) + + for operation, i1, i2, j1, j2 in opcodes: + LOG.debug('diff %s old[%s:%s] new[%s:%s]', + operation, i1 + 1, i2, j1 + 1, j2) + + if j2 == line_length and new_lines[-1] == '': + # The empty part after the last newline is not relevant. + j2 -= 1 + + if operation == 'equal': + line_offset = j1 - i1 + self._copy_from_old_parser(line_offset, i2, j2) + elif operation == 'replace': + self._parse(until_line=j2) + elif operation == 'insert': + self._parse(until_line=j2) + else: + assert operation == 'delete' + + # With this action all change will finally be applied and we have a + # changed module. + self._nodes_stack.close() + + last_pos = self._module.end_pos[0] + if last_pos != line_length: + current_lines = split_lines(self._module.get_code(), keepends=True) + diff = difflib.unified_diff(current_lines, new_lines) + raise Exception( + "There's an issue (%s != %s) with the diff parser. Please report:\n%s" + % (last_pos, line_length, ''.join(diff)) + ) + + LOG.debug('diff parser end') + return self._module + + def _enabled_debugging(self, old_lines, lines_new): + if self._module.get_code() != ''.join(lines_new): + LOG.warning('parser issue:\n%s\n%s', ''.join(old_lines), + ''.join(lines_new)) + + def _copy_from_old_parser(self, line_offset, until_line_old, until_line_new): + copied_nodes = [None] + + last_until_line = -1 + while until_line_new > self._nodes_stack.parsed_until_line: + parsed_until_line_old = self._nodes_stack.parsed_until_line - line_offset + line_stmt = self._get_old_line_stmt(parsed_until_line_old + 1) + if line_stmt is None: + # Parse 1 line at least. We don't need more, because we just + # want to get into a state where the old parser has statements + # again that can be copied (e.g. not lines within parentheses). + self._parse(self._nodes_stack.parsed_until_line + 1) + elif not copied_nodes: + # We have copied as much as possible (but definitely not too + # much). Therefore we just parse the rest. + # We might not reach the end, because there's a statement + # that is not finished. + self._parse(until_line_new) + else: + p_children = line_stmt.parent.children + index = p_children.index(line_stmt) + + copied_nodes = self._nodes_stack.copy_nodes( + p_children[index:], + until_line_old, + line_offset + ) + # Match all the nodes that are in the wanted range. + if copied_nodes: + self._copy_count += 1 + + from_ = copied_nodes[0].get_start_pos_of_prefix()[0] + line_offset + to = self._nodes_stack.parsed_until_line + + LOG.debug('diff actually copy %s to %s', from_, to) + # Since there are potential bugs that might loop here endlessly, we + # just stop here. 
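# Added commentary, not part of the patch: the assert below is a progress
# guarantee. Each pass through this loop must either advance
# parsed_until_line or copy nothing (in which case the next pass parses
# instead of copying); otherwise the loop could spin forever.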
+ assert last_until_line != self._nodes_stack.parsed_until_line \ + or not copied_nodes, last_until_line + last_until_line = self._nodes_stack.parsed_until_line + + def _get_old_line_stmt(self, old_line): + leaf = self._module.get_leaf_for_position((old_line, 0), include_prefixes=True) + + if _ends_with_newline(leaf): + leaf = leaf.get_next_leaf() + if leaf.get_start_pos_of_prefix()[0] == old_line: + node = leaf + while node.parent.type not in ('file_input', 'suite'): + node = node.parent + return node + # Must be on the same line. Otherwise we need to parse that bit. + return None + + def _get_before_insertion_node(self): + if self._nodes_stack.is_empty(): + return None + + line = self._nodes_stack.parsed_until_line + 1 + node = self._new_module.get_last_leaf() + while True: + parent = node.parent + if parent.type in ('suite', 'file_input'): + assert node.end_pos[0] <= line + assert node.end_pos[1] == 0 or '\n' in self._prefix + return node + node = parent + + def _parse(self, until_line): + """ + Parses at least until the given line, but might just parse more until a + valid state is reached. + """ + last_until_line = 0 + while until_line > self._nodes_stack.parsed_until_line: + node = self._try_parse_part(until_line) + nodes = node.children + + self._nodes_stack.add_parsed_nodes(nodes) + LOG.debug( + 'parse_part from %s to %s (to %s in part parser)', + nodes[0].get_start_pos_of_prefix()[0], + self._nodes_stack.parsed_until_line, + node.end_pos[0] - 1 + ) + # Since the tokenizer sometimes has bugs, we cannot be sure that + # this loop terminates. Therefore assert that there's always a + # change. + assert last_until_line != self._nodes_stack.parsed_until_line, last_until_line + last_until_line = self._nodes_stack.parsed_until_line + + def _try_parse_part(self, until_line): + """ + Sets up a normal parser that uses a spezialized tokenizer to only parse + until a certain position (or a bit longer if the statement hasn't + ended. + """ + self._parser_count += 1 + # TODO speed up, shouldn't copy the whole list all the time. + # memoryview? + parsed_until_line = self._nodes_stack.parsed_until_line + lines_after = self._parser_lines_new[parsed_until_line:] + #print('parse_content', parsed_until_line, lines_after, until_line) + tokens = self._diff_tokenize( + lines_after, + until_line, + line_offset=parsed_until_line + ) + self._active_parser = Parser( + self._pgen_grammar, + error_recovery=True + ) + return self._active_parser.parse(tokens=tokens) + + def _diff_tokenize(self, lines, until_line, line_offset=0): + is_first_token = True + omitted_first_indent = False + indents = [] + tokens = self._tokenizer(lines, (1, 0)) + stack = self._active_parser.pgen_parser.stack + for typ, string, start_pos, prefix in tokens: + start_pos = start_pos[0] + line_offset, start_pos[1] + if typ == INDENT: + indents.append(start_pos[1]) + if is_first_token: + omitted_first_indent = True + # We want to get rid of indents that are only here because + # we only parse part of the file. These indents would only + # get parsed as error leafs, which doesn't make any sense. + is_first_token = False + continue + is_first_token = False + + # In case of omitted_first_indent, it might not be dedented fully. + # However this is a sign for us that a dedent happened. + if typ == DEDENT \ + or typ == ERROR_DEDENT and omitted_first_indent and len(indents) == 1: + indents.pop() + if omitted_first_indent and not indents: + # We are done here, only thing that can come now is an + # endmarker or another dedented code block. 
+                    typ, string, start_pos, prefix = next(tokens)
+                    if '\n' in prefix:
+                        prefix = re.sub(r'(?<=\n)[^\n]+$', '', prefix)
+                    else:
+                        prefix = ''
+                    yield PythonToken(ENDMARKER, '', (start_pos[0] + line_offset, 0), prefix)
+                    break
+            elif typ == NEWLINE and start_pos[0] >= until_line:
+                yield PythonToken(typ, string, start_pos, prefix)
+                # Check if the parser is actually in a valid suite state.
+                if suite_or_file_input_is_valid(self._pgen_grammar, stack):
+                    start_pos = start_pos[0] + 1, 0
+                    while len(indents) > int(omitted_first_indent):
+                        indents.pop()
+                        yield PythonToken(DEDENT, '', start_pos, '')
+
+                    yield PythonToken(ENDMARKER, '', start_pos, '')
+                    break
+                else:
+                    continue
+
+            yield PythonToken(typ, string, start_pos, prefix)
+
+
+class _NodesStackNode(object):
+    ChildrenGroup = namedtuple('ChildrenGroup', 'children line_offset last_line_offset_leaf')
+
+    def __init__(self, tree_node, parent=None):
+        self.tree_node = tree_node
+        self.children_groups = []
+        self.parent = parent
+
+    def close(self):
+        children = []
+        for children_part, line_offset, last_line_offset_leaf in self.children_groups:
+            if line_offset != 0:
+                try:
+                    _update_positions(
+                        children_part, line_offset, last_line_offset_leaf)
+                except _PositionUpdatingFinished:
+                    pass
+            children += children_part
+        self.tree_node.children = children
+        # Reset the parents
+        for node in children:
+            node.parent = self.tree_node
+
+    def add(self, children, line_offset=0, last_line_offset_leaf=None):
+        group = self.ChildrenGroup(children, line_offset, last_line_offset_leaf)
+        self.children_groups.append(group)
+
+    def get_last_line(self, suffix):
+        line = 0
+        if self.children_groups:
+            children_group = self.children_groups[-1]
+            last_leaf = children_group.children[-1].get_last_leaf()
+            line = last_leaf.end_pos[0]
+
+            # Calculate the line offsets
+            offset = children_group.line_offset
+            if offset:
+                # In case the line_offset is not applied to this specific leaf,
+                # just ignore it.
+                if last_leaf.line <= children_group.last_line_offset_leaf.line:
+                    line += children_group.line_offset
+
+            # Newlines end on the next line, which means that they would cover
+            # the next line. That line is not fully parsed at this point.
+            if _ends_with_newline(last_leaf, suffix):
+                line -= 1
+        line += suffix.count('\n')
+        if suffix and not suffix.endswith('\n'):
+            # This is the end of a file (that doesn't end with a newline).
+            line += 1
+        return line
+
+
+class _NodesStack(object):
+    endmarker_type = 'endmarker'
+
+    def __init__(self, module):
+        # Top of stack
+        self._tos = self._base_node = _NodesStackNode(module)
+        self._module = module
+        self._last_prefix = ''
+        self.prefix = ''
+
+    def is_empty(self):
+        return not self._base_node.children
+
+    @property
+    def parsed_until_line(self):
+        return self._tos.get_last_line(self.prefix)
+
+    def _get_insertion_node(self, indentation_node):
+        indentation = indentation_node.start_pos[1]
+
+        # find insertion node
+        node = self._tos
+        while True:
+            tree_node = node.tree_node
+            if tree_node.type == 'suite':
+                # A suite starts with NEWLINE, ...
+                node_indentation = tree_node.children[1].start_pos[1]
+
+                if indentation >= node_indentation:  # Not a Dedent
+                    # We might be at the most outer layer: modules. We
+                    # don't want to depend on the first statement
+                    # having the right indentation.
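+                    # For example, a statement indented by 8 spaces fits an
+                    # 8-space suite, while a 0-indented one keeps closing
+                    # suites below until file_input is reached.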
+ return node + + elif tree_node.type == 'file_input': + return node + + node = self._close_tos() + + def _close_tos(self): + self._tos.close() + self._tos = self._tos.parent + return self._tos + + def add_parsed_nodes(self, tree_nodes): + tree_nodes = self._remove_endmarker(tree_nodes) + if not tree_nodes: + return + + assert tree_nodes[0].type != 'newline' + + node = self._get_insertion_node(tree_nodes[0]) + assert node.tree_node.type in ('suite', 'file_input') + node.add(tree_nodes) + self._update_tos(tree_nodes[-1]) + + def _remove_endmarker(self, tree_nodes): + """ + Helps cleaning up the tree nodes that get inserted. + """ + last_leaf = tree_nodes[-1].get_last_leaf() + is_endmarker = last_leaf.type == self.endmarker_type + self._last_prefix = '' + if is_endmarker: + try: + separation = last_leaf.prefix.rindex('\n') + except ValueError: + pass + else: + # Remove the whitespace part of the prefix after a newline. + # That is not relevant if parentheses were opened. Always parse + # until the end of a line. + last_leaf.prefix, self._last_prefix = \ + last_leaf.prefix[:separation + 1], last_leaf.prefix[separation + 1:] + + first_leaf = tree_nodes[0].get_first_leaf() + first_leaf.prefix = self.prefix + first_leaf.prefix + self.prefix = '' + + if is_endmarker: + self.prefix = last_leaf.prefix + + tree_nodes = tree_nodes[:-1] + + return tree_nodes + + def copy_nodes(self, tree_nodes, until_line, line_offset): + """ + Copies tree nodes from the old parser tree. + + Returns the number of tree nodes that were copied. + """ + tos = self._get_insertion_node(tree_nodes[0]) + + new_nodes, self._tos = self._copy_nodes(tos, tree_nodes, until_line, line_offset) + return new_nodes + + def _copy_nodes(self, tos, nodes, until_line, line_offset): + new_nodes = [] + + new_tos = tos + for node in nodes: + if node.type == 'endmarker': + # Endmarkers just distort all the checks below. Remove them. + break + + if node.start_pos[0] > until_line: + break + # TODO this check might take a bit of time for large files. We + # might want to change this to do more intelligent guessing or + # binary search. + if _get_last_line(node) > until_line: + # We can split up functions and classes later. + if node.type in ('classdef', 'funcdef') and node.children[-1].type == 'suite': + new_nodes.append(node) + break + + new_nodes.append(node) + + if not new_nodes: + return [], tos + + last_node = new_nodes[-1] + line_offset_index = -1 + if last_node.type in ('classdef', 'funcdef'): + suite = last_node.children[-1] + if suite.type == 'suite': + suite_tos = _NodesStackNode(suite) + # Don't need to pass line_offset here, it's already done by the + # parent. + suite_nodes, recursive_tos = self._copy_nodes( + suite_tos, suite.children, until_line, line_offset) + if len(suite_nodes) < 2: + # A suite only with newline is not valid. + new_nodes.pop() + else: + suite_tos.parent = tos + new_tos = recursive_tos + line_offset_index = -2 + + elif (new_nodes[-1].type in ('error_leaf', 'error_node') or + _is_flow_node(new_nodes[-1])): + # Error leafs/nodes don't have a defined start/end. Error + # nodes might not end with a newline (e.g. if there's an + # open `(`). Therefore ignore all of them unless they are + # succeeded with valid parser state. + # If we copy flows at the end, they might be continued + # after the copy limit (in the new parser). + # In this while loop we try to remove until we find a newline. 
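+            # For example, an error_node left behind by an unclosed `(` or a
+            # trailing `if x:` whose suite was cut off would leave the copied
+            # tree in a broken state, so nodes are popped until the last kept
+            # node ends in a newline.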
+ new_nodes.pop() + while new_nodes: + last_node = new_nodes[-1] + if last_node.get_last_leaf().type == 'newline': + break + new_nodes.pop() + + if new_nodes: + try: + last_line_offset_leaf = new_nodes[line_offset_index].get_last_leaf() + except IndexError: + line_offset = 0 + # In this case we don't have to calculate an offset, because + # there's no children to be managed. + last_line_offset_leaf = None + tos.add(new_nodes, line_offset, last_line_offset_leaf) + return new_nodes, new_tos + + def _update_tos(self, tree_node): + if tree_node.type in ('suite', 'file_input'): + self._tos = _NodesStackNode(tree_node, self._tos) + self._tos.add(list(tree_node.children)) + self._update_tos(tree_node.children[-1]) + elif tree_node.type in ('classdef', 'funcdef'): + self._update_tos(tree_node.children[-1]) + + def close(self): + while self._tos is not None: + self._close_tos() + + # Add an endmarker. + try: + last_leaf = self._module.get_last_leaf() + end_pos = list(last_leaf.end_pos) + except IndexError: + end_pos = [1, 0] + lines = split_lines(self.prefix) + assert len(lines) > 0 + if len(lines) == 1: + end_pos[1] += len(lines[0]) + else: + end_pos[0] += len(lines) - 1 + end_pos[1] = len(lines[-1]) + + endmarker = EndMarker('', tuple(end_pos), self.prefix + self._last_prefix) + endmarker.parent = self._module + self._module.children.append(endmarker) diff --git a/pythonFiles/parso/python/errors.py b/pythonFiles/parso/python/errors.py new file mode 100644 index 000000000000..65296568b54c --- /dev/null +++ b/pythonFiles/parso/python/errors.py @@ -0,0 +1,1053 @@ +# -*- coding: utf-8 -*- +import codecs +import warnings +import re +from contextlib import contextmanager + +from parso.normalizer import Normalizer, NormalizerConfig, Issue, Rule +from parso.python.tree import search_ancestor +from parso.parser import ParserSyntaxError + +_BLOCK_STMTS = ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt') +_STAR_EXPR_PARENTS = ('testlist_star_expr', 'testlist_comp', 'exprlist') +# This is the maximal block size given by python. +_MAX_BLOCK_SIZE = 20 +_MAX_INDENT_COUNT = 100 +ALLOWED_FUTURES = ( + 'all_feature_names', 'nested_scopes', 'generators', 'division', + 'absolute_import', 'with_statement', 'print_function', 'unicode_literals', +) + + +def _iter_stmts(scope): + """ + Iterates over all statements and splits up simple_stmt. + """ + for child in scope.children: + if child.type == 'simple_stmt': + for child2 in child.children: + if child2.type == 'newline' or child2 == ';': + continue + yield child2 + else: + yield child + + +def _get_comprehension_type(atom): + first, second = atom.children[:2] + if second.type == 'testlist_comp' and second.children[1].type == 'comp_for': + if first == '[': + return 'list comprehension' + else: + return 'generator expression' + elif second.type == 'dictorsetmaker' and second.children[-1].type == 'comp_for': + if second.children[1] == ':': + return 'dict comprehension' + else: + return 'set comprehension' + return None + + +def _is_future_import(import_from): + # It looks like a __future__ import that is relative is still a future + # import. That feels kind of odd, but whatever. + # if import_from.level != 0: + # return False + from_names = import_from.get_from_names() + return [n.value for n in from_names] == ['__future__'] + + +def _remove_parens(atom): + """ + Returns the inner part of an expression like `(foo)`. Also removes nested + parens. 
+ """ + try: + children = atom.children + except AttributeError: + pass + else: + if len(children) == 3 and children[0] == '(': + return _remove_parens(atom.children[1]) + return atom + + +def _iter_params(parent_node): + return (n for n in parent_node.children if n.type == 'param') + + +def _is_future_import_first(import_from): + """ + Checks if the import is the first statement of a file. + """ + found_docstring = False + for stmt in _iter_stmts(import_from.get_root_node()): + if stmt.type == 'string' and not found_docstring: + continue + found_docstring = True + + if stmt == import_from: + return True + if stmt.type == 'import_from' and _is_future_import(stmt): + continue + return False + + +def _iter_definition_exprs_from_lists(exprlist): + for child in exprlist.children[::2]: + if child.type == 'atom' and child.children[0] in ('(', '['): + testlist_comp = child.children[0] + if testlist_comp.type == 'testlist_comp': + for expr in _iter_definition_exprs_from_lists(testlist_comp): + yield expr + continue + elif child.children[0] == '[': + yield testlist_comp + continue + + yield child + +def _get_expr_stmt_definition_exprs(expr_stmt): + exprs = [] + for list_ in expr_stmt.children[:-2:2]: + if list_.type in ('testlist_star_expr', 'testlist'): + exprs += _iter_definition_exprs_from_lists(list_) + else: + exprs.append(list_) + return exprs + + +def _get_for_stmt_definition_exprs(for_stmt): + exprlist = for_stmt.children[1] + if exprlist.type != 'exprlist': + return [exprlist] + return list(_iter_definition_exprs_from_lists(exprlist)) + + +class _Context(object): + def __init__(self, node, add_syntax_error, parent_context=None): + self.node = node + self.blocks = [] + self.parent_context = parent_context + self._used_name_dict = {} + self._global_names = [] + self._nonlocal_names = [] + self._nonlocal_names_in_subscopes = [] + self._add_syntax_error = add_syntax_error + + def is_async_funcdef(self): + # Stupidly enough async funcdefs can have two different forms, + # depending if a decorator is used or not. + return self.is_function() \ + and self.node.parent.type in ('async_funcdef', 'async_stmt') + + def is_function(self): + return self.node.type == 'funcdef' + + def add_name(self, name): + parent_type = name.parent.type + if parent_type == 'trailer': + # We are only interested in first level names. + return + + if parent_type == 'global_stmt': + self._global_names.append(name) + elif parent_type == 'nonlocal_stmt': + self._nonlocal_names.append(name) + else: + self._used_name_dict.setdefault(name.value, []).append(name) + + def finalize(self): + """ + Returns a list of nonlocal names that need to be part of that scope. + """ + self._analyze_names(self._global_names, 'global') + self._analyze_names(self._nonlocal_names, 'nonlocal') + + # Python2.6 doesn't have dict comprehensions. 
+        global_name_strs = dict((n.value, n) for n in self._global_names)
+        for nonlocal_name in self._nonlocal_names:
+            try:
+                global_name = global_name_strs[nonlocal_name.value]
+            except KeyError:
+                continue
+
+            message = "name '%s' is nonlocal and global" % global_name.value
+            if global_name.start_pos < nonlocal_name.start_pos:
+                error_name = global_name
+            else:
+                error_name = nonlocal_name
+            self._add_syntax_error(error_name, message)
+
+        nonlocals_not_handled = []
+        for nonlocal_name in self._nonlocal_names_in_subscopes:
+            search = nonlocal_name.value
+            if search in global_name_strs or self.parent_context is None:
+                message = "no binding for nonlocal '%s' found" % nonlocal_name.value
+                self._add_syntax_error(nonlocal_name, message)
+            elif not self.is_function() or \
+                    nonlocal_name.value not in self._used_name_dict:
+                nonlocals_not_handled.append(nonlocal_name)
+        return self._nonlocal_names + nonlocals_not_handled
+
+    def _analyze_names(self, globals_or_nonlocals, type_):
+        def raise_(message):
+            self._add_syntax_error(base_name, message % (base_name.value, type_))
+
+        params = []
+        if self.node.type == 'funcdef':
+            params = self.node.get_params()
+
+        for base_name in globals_or_nonlocals:
+            found_global_or_nonlocal = False
+            # Somehow Python does it the reversed way.
+            for name in reversed(self._used_name_dict.get(base_name.value, [])):
+                if name.start_pos > base_name.start_pos:
+                    # All following names don't have to be checked.
+                    found_global_or_nonlocal = True
+
+                parent = name.parent
+                if parent.type == 'param' and parent.name == name:
+                    # Skip those here, these definitions belong to the next
+                    # scope.
+                    continue
+
+                if name.is_definition():
+                    if parent.type == 'expr_stmt' \
+                            and parent.children[1].type == 'annassign':
+                        if found_global_or_nonlocal:
+                            # If it's after the global the error seems to be
+                            # placed there.
+                            base_name = name
+                        raise_("annotated name '%s' can't be %s")
+                        break
+                    else:
+                        message = "name '%s' is assigned to before %s declaration"
+                else:
+                    message = "name '%s' is used prior to %s declaration"
+
+                if not found_global_or_nonlocal:
+                    raise_(message)
+                    # Only add an error for the first occurrence.
+                    break
+
+            for param in params:
+                if param.name.value == base_name.value:
+                    raise_("name '%s' is parameter and %s")
+
+    @contextmanager
+    def add_block(self, node):
+        self.blocks.append(node)
+        yield
+        self.blocks.pop()
+
+    def add_context(self, node):
+        return _Context(node, self._add_syntax_error, parent_context=self)
+
+    def close_child_context(self, child_context):
+        self._nonlocal_names_in_subscopes += child_context.finalize()
+
+
+class ErrorFinder(Normalizer):
+    """
+    Searches for errors in the syntax tree.
+    """
+    def __init__(self, *args, **kwargs):
+        super(ErrorFinder, self).__init__(*args, **kwargs)
+        self._error_dict = {}
+        self.version = self.grammar.version_info
+
+    def initialize(self, node):
+        def create_context(node):
+            if node is None:
+                return None
+
+            parent_context = create_context(node.parent)
+            if node.type in ('classdef', 'funcdef', 'file_input'):
+                return _Context(node, self._add_syntax_error, parent_context)
+            return parent_context
+
+        self.context = create_context(node) or _Context(node, self._add_syntax_error)
+        self._indentation_count = 0
+
+    def visit(self, node):
+        if node.type == 'error_node':
+            with self.visit_node(node):
+                # Don't need to investigate the inners of an error node. We
+                # might find errors in there that should be ignored, because
+                # the error node itself already shows that there's an issue.
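+                # For example, an unclosed `(` turns the rest of the input
+                # into one error_node; the single "invalid syntax" issue it
+                # produces already covers everything inside it.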
+ return '' + return super(ErrorFinder, self).visit(node) + + + @contextmanager + def visit_node(self, node): + self._check_type_rules(node) + + if node.type in _BLOCK_STMTS: + with self.context.add_block(node): + if len(self.context.blocks) == _MAX_BLOCK_SIZE: + self._add_syntax_error(node, "too many statically nested blocks") + yield + return + elif node.type == 'suite': + self._indentation_count += 1 + if self._indentation_count == _MAX_INDENT_COUNT: + self._add_indentation_error(node.children[1], "too many levels of indentation") + + yield + + if node.type == 'suite': + self._indentation_count -= 1 + elif node.type in ('classdef', 'funcdef'): + context = self.context + self.context = context.parent_context + self.context.close_child_context(context) + + def visit_leaf(self, leaf): + if leaf.type == 'error_leaf': + if leaf.original_type in ('indent', 'error_dedent'): + # Indents/Dedents itself never have a prefix. They are just + # "pseudo" tokens that get removed by the syntax tree later. + # Therefore in case of an error we also have to check for this. + spacing = list(leaf.get_next_leaf()._split_prefix())[-1] + if leaf.original_type == 'indent': + message = 'unexpected indent' + else: + message = 'unindent does not match any outer indentation level' + self._add_indentation_error(spacing, message) + else: + if leaf.value.startswith('\\'): + message = 'unexpected character after line continuation character' + else: + match = re.match('\\w{,2}("{1,3}|\'{1,3})', leaf.value) + if match is None: + message = 'invalid syntax' + else: + if len(match.group(1)) == 1: + message = 'EOL while scanning string literal' + else: + message = 'EOF while scanning triple-quoted string literal' + self._add_syntax_error(leaf, message) + return '' + elif leaf.value == ':': + parent = leaf.parent + if parent.type in ('classdef', 'funcdef'): + self.context = self.context.add_context(parent) + + # The rest is rule based. + return super(ErrorFinder, self).visit_leaf(leaf) + + def _add_indentation_error(self, spacing, message): + self.add_issue(spacing, 903, "IndentationError: " + message) + + def _add_syntax_error(self, node, message): + self.add_issue(node, 901, "SyntaxError: " + message) + + def add_issue(self, node, code, message): + # Overwrite the default behavior. + # Check if the issues are on the same line. + line = node.start_pos[0] + args = (code, message, node) + self._error_dict.setdefault(line, args) + + def finalize(self): + self.context.finalize() + + for code, message, node in self._error_dict.values(): + self.issues.append(Issue(node, code, message)) + + +class IndentationRule(Rule): + code = 903 + + def _get_message(self, message): + message = super(IndentationRule, self)._get_message(message) + return "IndentationError: " + message + + +@ErrorFinder.register_rule(type='error_node') +class _ExpectIndentedBlock(IndentationRule): + message = 'expected an indented block' + + def get_node(self, node): + leaf = node.get_next_leaf() + return list(leaf._split_prefix())[-1] + + def is_issue(self, node): + # This is the beginning of a suite that is not indented. 
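+        # For example:
+        #     if x:
+        #     pass
+        # The error_node for the `if` header then ends in a newline.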
+ return node.children[-1].type == 'newline' + + +class ErrorFinderConfig(NormalizerConfig): + normalizer_class = ErrorFinder + + +class SyntaxRule(Rule): + code = 901 + + def _get_message(self, message): + message = super(SyntaxRule, self)._get_message(message) + return "SyntaxError: " + message + + +@ErrorFinder.register_rule(type='error_node') +class _InvalidSyntaxRule(SyntaxRule): + message = "invalid syntax" + + def get_node(self, node): + return node.get_next_leaf() + + def is_issue(self, node): + # Error leafs will be added later as an error. + return node.get_next_leaf().type != 'error_leaf' + + +@ErrorFinder.register_rule(value='await') +class _AwaitOutsideAsync(SyntaxRule): + message = "'await' outside async function" + + def is_issue(self, leaf): + return not self._normalizer.context.is_async_funcdef() + + def get_error_node(self, node): + # Return the whole await statement. + return node.parent + + +@ErrorFinder.register_rule(value='break') +class _BreakOutsideLoop(SyntaxRule): + message = "'break' outside loop" + + def is_issue(self, leaf): + in_loop = False + for block in self._normalizer.context.blocks: + if block.type in ('for_stmt', 'while_stmt'): + in_loop = True + return not in_loop + + +@ErrorFinder.register_rule(value='continue') +class _ContinueChecks(SyntaxRule): + message = "'continue' not properly in loop" + message_in_finally = "'continue' not supported inside 'finally' clause" + + def is_issue(self, leaf): + in_loop = False + for block in self._normalizer.context.blocks: + if block.type in ('for_stmt', 'while_stmt'): + in_loop = True + if block.type == 'try_stmt': + last_block = block.children[-3] + if last_block == 'finally' and leaf.start_pos > last_block.start_pos: + self.add_issue(leaf, message=self.message_in_finally) + return False # Error already added + if not in_loop: + return True + + +@ErrorFinder.register_rule(value='from') +class _YieldFromCheck(SyntaxRule): + message = "'yield from' inside async function" + + def get_node(self, leaf): + return leaf.parent.parent # This is the actual yield statement. + + def is_issue(self, leaf): + return leaf.parent.type == 'yield_arg' \ + and self._normalizer.context.is_async_funcdef() + + +@ErrorFinder.register_rule(type='name') +class _NameChecks(SyntaxRule): + message = 'cannot assign to __debug__' + message_keyword = 'assignment to keyword' + message_none = 'cannot assign to None' + + def is_issue(self, leaf): + self._normalizer.context.add_name(leaf) + + if leaf.value == '__debug__' and leaf.is_definition(): + if self._normalizer.version < (3, 0): + return True + else: + self.add_issue(leaf, message=self.message_keyword) + if leaf.value == 'None' and self._normalizer.version < (3, 0) \ + and leaf.is_definition(): + self.add_issue(leaf, message=self.message_none) + + +@ErrorFinder.register_rule(type='string') +class _StringChecks(SyntaxRule): + message = "bytes can only contain ASCII literal characters." + + def is_issue(self, leaf): + string_prefix = leaf.string_prefix.lower() + if 'b' in string_prefix \ + and self._normalizer.version >= (3, 0) \ + and any(c for c in leaf.value if ord(c) > 127): + # b'ä' + return True + + if 'r' not in string_prefix: + # Raw strings don't need to be checked if they have proper + # escaping. 
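+            # For example, '\x1' fails to decode (truncated \xXX escape),
+            # while the raw string r'\x1' is perfectly valid.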
+ is_bytes = self._normalizer.version < (3, 0) + if 'b' in string_prefix: + is_bytes = True + if 'u' in string_prefix: + is_bytes = False + + payload = leaf._get_payload() + if is_bytes: + payload = payload.encode('utf-8') + func = codecs.escape_decode + else: + func = codecs.unicode_escape_decode + + try: + with warnings.catch_warnings(): + # The warnings from parsing strings are not relevant. + warnings.filterwarnings('ignore') + func(payload) + except UnicodeDecodeError as e: + self.add_issue(leaf, message='(unicode error) ' + str(e)) + except ValueError as e: + self.add_issue(leaf, message='(value error) ' + str(e)) + + +@ErrorFinder.register_rule(value='*') +class _StarCheck(SyntaxRule): + message = "named arguments must follow bare *" + + def is_issue(self, leaf): + params = leaf.parent + if params.type == 'parameters' and params: + after = params.children[params.children.index(leaf) + 1:] + after = [child for child in after + if child not in (',', ')') and not child.star_count] + return len(after) == 0 + + +@ErrorFinder.register_rule(value='**') +class _StarStarCheck(SyntaxRule): + # e.g. {**{} for a in [1]} + # TODO this should probably get a better end_pos including + # the next sibling of leaf. + message = "dict unpacking cannot be used in dict comprehension" + + def is_issue(self, leaf): + if leaf.parent.type == 'dictorsetmaker': + comp_for = leaf.get_next_sibling().get_next_sibling() + return comp_for is not None and comp_for.type == 'comp_for' + + +@ErrorFinder.register_rule(value='yield') +@ErrorFinder.register_rule(value='return') +class _ReturnAndYieldChecks(SyntaxRule): + message = "'return' with value in async generator" + message_async_yield = "'yield' inside async function" + + def get_node(self, leaf): + return leaf.parent + + def is_issue(self, leaf): + if self._normalizer.context.node.type != 'funcdef': + self.add_issue(self.get_node(leaf), message="'%s' outside function" % leaf.value) + elif self._normalizer.context.is_async_funcdef() \ + and any(self._normalizer.context.node.iter_yield_exprs()): + if leaf.value == 'return' and leaf.parent.type == 'return_stmt': + return True + elif leaf.value == 'yield' \ + and leaf.get_next_leaf() != 'from' \ + and self._normalizer.version == (3, 5): + self.add_issue(self.get_node(leaf), message=self.message_async_yield) + +@ErrorFinder.register_rule(type='atom') +class _BytesAndStringMix(SyntaxRule): + # e.g. 's' b'' + message = "cannot mix bytes and nonbytes literals" + + def _is_bytes_literal(self, string): + return 'b' in string.string_prefix.lower() + + def is_issue(self, node): + first = node.children[0] + if first.type == 'string' and self._normalizer.version >= (3, 0): + first_is_bytes = self._is_bytes_literal(first) + for string in node.children[1:]: + if first_is_bytes != self._is_bytes_literal(string): + return True + + +@ErrorFinder.register_rule(type='import_as_names') +class _TrailingImportComma(SyntaxRule): + # e.g. 
from foo import a,
+    message = "trailing comma not allowed without surrounding parentheses"
+
+    def is_issue(self, node):
+        if node.children[-1] == ',':
+            return True
+
+
+@ErrorFinder.register_rule(type='import_from')
+class _ImportStarInFunction(SyntaxRule):
+    message = "import * only allowed at module level"
+
+    def is_issue(self, node):
+        return node.is_star_import() and self._normalizer.context.parent_context is not None
+
+
+@ErrorFinder.register_rule(type='import_from')
+class _FutureImportRule(SyntaxRule):
+    message = "from __future__ imports must occur at the beginning of the file"
+
+    def is_issue(self, node):
+        if _is_future_import(node):
+            if not _is_future_import_first(node):
+                return True
+
+            for from_name, future_name in node.get_paths():
+                name = future_name.value
+                allowed_futures = list(ALLOWED_FUTURES)
+                if self._normalizer.version >= (3, 5):
+                    allowed_futures.append('generator_stop')
+
+                if name == 'braces':
+                    self.add_issue(node, message="not a chance")
+                elif name == 'barry_as_FLUFL':
+                    m = "Seriously I'm not implementing this :) ~ Dave"
+                    self.add_issue(node, message=m)
+                elif name not in allowed_futures:
+                    message = "future feature %s is not defined" % name
+                    self.add_issue(node, message=message)
+
+
+@ErrorFinder.register_rule(type='star_expr')
+class _StarExprRule(SyntaxRule):
+    message = "starred assignment target must be in a list or tuple"
+    message_iterable_unpacking = "iterable unpacking cannot be used in comprehension"
+    message_assignment = "can use starred expression only as assignment target"
+
+    def is_issue(self, node):
+        if node.parent.type not in _STAR_EXPR_PARENTS:
+            return True
+        if node.parent.type == 'testlist_comp':
+            # [*[] for a in [1]]
+            if node.parent.children[1].type == 'comp_for':
+                self.add_issue(node, message=self.message_iterable_unpacking)
+        if self._normalizer.version <= (3, 4):
+            n = search_ancestor(node, 'for_stmt', 'expr_stmt')
+            found_definition = False
+            if n is not None:
+                if n.type == 'expr_stmt':
+                    exprs = _get_expr_stmt_definition_exprs(n)
+                else:
+                    exprs = _get_for_stmt_definition_exprs(n)
+                if node in exprs:
+                    found_definition = True
+
+            if not found_definition:
+                self.add_issue(node, message=self.message_assignment)
+
+
+@ErrorFinder.register_rule(types=_STAR_EXPR_PARENTS)
+class _StarExprParentRule(SyntaxRule):
+    def is_issue(self, node):
+        if node.parent.type == 'del_stmt':
+            self.add_issue(node.parent, message="can't use starred expression here")
+        else:
+            def is_definition(node, ancestor):
+                if ancestor is None:
+                    return False
+
+                type_ = ancestor.type
+                if type_ == 'trailer':
+                    return False
+
+                if type_ == 'expr_stmt':
+                    return node.start_pos < ancestor.children[-1].start_pos
+
+                return is_definition(node, ancestor.parent)
+
+            if is_definition(node, node.parent):
+                args = [c for c in node.children if c != ',']
+                starred = [c for c in args if c.type == 'star_expr']
+                if len(starred) > 1:
+                    message = "two starred expressions in assignment"
+                    self.add_issue(starred[1], message=message)
+                elif starred:
+                    count = args.index(starred[0])
+                    if count >= 256:
+                        message = "too many expressions in star-unpacking assignment"
+                        self.add_issue(starred[0], message=message)
+
+
+@ErrorFinder.register_rule(type='annassign')
+class _AnnotatorRule(SyntaxRule):
+    # True: int
+    # {}: float
+    message = "illegal target for annotation"
+
+    def get_node(self, node):
+        return node.parent
+
+    def is_issue(self, node):
+        type_ = None
+        lhs = node.parent.children[0]
+        lhs = _remove_parens(lhs)
+        try:
+            children = lhs.children
+        except
AttributeError: + pass + else: + if ',' in children or lhs.type == 'atom' and children[0] == '(': + type_ = 'tuple' + elif lhs.type == 'atom' and children[0] == '[': + type_ = 'list' + trailer = children[-1] + + if type_ is None: + if not (lhs.type == 'name' + # subscript/attributes are allowed + or lhs.type in ('atom_expr', 'power') + and trailer.type == 'trailer' + and trailer.children[0] != '('): + return True + else: + # x, y: str + message = "only single target (not %s) can be annotated" + self.add_issue(lhs.parent, message=message % type_) + + +@ErrorFinder.register_rule(type='argument') +class _ArgumentRule(SyntaxRule): + def is_issue(self, node): + first = node.children[0] + if node.children[1] == '=' and first.type != 'name': + if first.type == 'lambdef': + # f(lambda: 1=1) + message = "lambda cannot contain assignment" + else: + # f(+x=1) + message = "keyword can't be an expression" + self.add_issue(first, message=message) + + +@ErrorFinder.register_rule(type='nonlocal_stmt') +class _NonlocalModuleLevelRule(SyntaxRule): + message = "nonlocal declaration not allowed at module level" + + def is_issue(self, node): + return self._normalizer.context.parent_context is None + + +@ErrorFinder.register_rule(type='arglist') +class _ArglistRule(SyntaxRule): + message = "Generator expression must be parenthesized if not sole argument" + + def is_issue(self, node): + first_arg = node.children[0] + if first_arg.type == 'argument' \ + and first_arg.children[1].type == 'comp_for': + # e.g. foo(x for x in [], b) + return len(node.children) >= 2 + else: + arg_set = set() + kw_only = False + kw_unpacking_only = False + is_old_starred = False + # In python 3 this would be a bit easier (stars are part of + # argument), but we have to understand both. + for argument in node.children: + if argument == ',': + continue + + if argument in ('*', '**'): + # Python < 3.5 has the order engraved in the grammar + # file. No need to do anything here. + is_old_starred = True + continue + if is_old_starred: + is_old_starred = False + continue + + if argument.type == 'argument': + first = argument.children[0] + if first in ('*', '**'): + if first == '*': + if kw_unpacking_only: + # foo(**kwargs, *args) + message = "iterable argument unpacking follows keyword argument unpacking" + self.add_issue(argument, message=message) + else: + kw_unpacking_only = True + else: # Is a keyword argument. 
+ kw_only = True + if first.type == 'name': + if first.value in arg_set: + # f(x=1, x=2) + self.add_issue(first, message="keyword argument repeated") + else: + arg_set.add(first.value) + else: + if kw_unpacking_only: + # f(**x, y) + message = "positional argument follows keyword argument unpacking" + self.add_issue(argument, message=message) + elif kw_only: + # f(x=2, y) + message = "positional argument follows keyword argument" + self.add_issue(argument, message=message) + +@ErrorFinder.register_rule(type='parameters') +@ErrorFinder.register_rule(type='lambdef') +class _ParameterRule(SyntaxRule): + # def f(x=3, y): pass + message = "non-default argument follows default argument" + + def is_issue(self, node): + param_names = set() + default_only = False + for p in _iter_params(node): + if p.name.value in param_names: + message = "duplicate argument '%s' in function definition" + self.add_issue(p.name, message=message % p.name.value) + param_names.add(p.name.value) + + if p.default is None and not p.star_count: + if default_only: + return True + else: + default_only = True + + +@ErrorFinder.register_rule(type='try_stmt') +class _TryStmtRule(SyntaxRule): + message = "default 'except:' must be last" + + def is_issue(self, try_stmt): + default_except = None + for except_clause in try_stmt.children[3::3]: + if except_clause in ('else', 'finally'): + break + if except_clause == 'except': + default_except = except_clause + elif default_except is not None: + self.add_issue(default_except, message=self.message) + + +@ErrorFinder.register_rule(type='string') +class _FStringRule(SyntaxRule): + _fstring_grammar = None + message_empty = "f-string: empty expression not allowed" # f'{}' + message_single_closing = "f-string: single '}' is not allowed" # f'}' + message_nested = "f-string: expressions nested too deeply" + message_backslash = "f-string expression part cannot include a backslash" # f'{"\"}' or f'{"\\"}' + message_comment = "f-string expression part cannot include '#'" # f'{#}' + message_unterminated_string = "f-string: unterminated string" # f'{"}' + message_conversion = "f-string: invalid conversion character: expected 's', 'r', or 'a'" + message_incomplete = "f-string: expecting '}'" # f'{' + message_syntax = "invalid syntax" + + @classmethod + def _load_grammar(cls): + import parso + + if cls._fstring_grammar is None: + cls._fstring_grammar = parso.load_grammar(language='python-f-string') + return cls._fstring_grammar + + def is_issue(self, fstring): + if 'f' not in fstring.string_prefix.lower(): + return + + parsed = self._load_grammar().parse_leaf(fstring) + for child in parsed.children: + if child.type == 'expression': + self._check_expression(child) + elif child.type == 'error_node': + next_ = child.get_next_leaf() + if next_.type == 'error_leaf' and next_.original_type == 'unterminated_string': + self.add_issue(next_, message=self.message_unterminated_string) + # At this point nothing more is comming except the error + # leaf that we've already checked here. 
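+                    # For example, f'{"}' never closes the inner string, so
+                    # only the unterminated_string error leaf follows.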
+ break + self.add_issue(child, message=self.message_incomplete) + elif child.type == 'error_leaf': + self.add_issue(child, message=self.message_single_closing) + + def _check_python_expr(self, python_expr): + value = python_expr.value + if '\\' in value: + self.add_issue(python_expr, message=self.message_backslash) + return + if '#' in value: + self.add_issue(python_expr, message=self.message_comment) + return + if re.match('\s*$', value) is not None: + self.add_issue(python_expr, message=self.message_empty) + return + + # This is now nested parsing. We parsed the fstring and now + # we're parsing Python again. + try: + # CPython has a bit of a special ways to parse Python code within + # f-strings. It wraps the code in brackets to make sure that + # whitespace doesn't make problems (indentation/newlines). + # Just use that algorithm as well here and adapt start positions. + start_pos = python_expr.start_pos + start_pos = start_pos[0], start_pos[1] - 1 + eval_input = self._normalizer.grammar._parse( + '(%s)' % value, + start_symbol='eval_input', + start_pos=start_pos, + error_recovery=False + ) + except ParserSyntaxError as e: + self.add_issue(e.error_leaf, message=self.message_syntax) + return + + issues = self._normalizer.grammar.iter_errors(eval_input) + self._normalizer.issues += issues + + def _check_format_spec(self, format_spec): + for expression in format_spec.children[1:]: + nested_format_spec = expression.children[-2] + if nested_format_spec.type == 'format_spec': + if len(nested_format_spec.children) > 1: + self.add_issue( + nested_format_spec.children[1], + message=self.message_nested + ) + + self._check_expression(expression) + + def _check_expression(self, expression): + for c in expression.children: + if c.type == 'python_expr': + self._check_python_expr(c) + elif c.type == 'conversion': + if c.value not in ('s', 'r', 'a'): + self.add_issue(c, message=self.message_conversion) + elif c.type == 'format_spec': + self._check_format_spec(c) + + +class _CheckAssignmentRule(SyntaxRule): + def _check_assignment(self, node, is_deletion=False): + error = None + type_ = node.type + if type_ == 'lambdef': + error = 'lambda' + elif type_ == 'atom': + first, second = node.children[:2] + error = _get_comprehension_type(node) + if error is None: + if second.type in ('dictorsetmaker', 'string'): + error = 'literal' + elif first in ('(', '['): + if second.type == 'yield_expr': + error = 'yield expression' + elif second.type == 'testlist_comp': + # This is not a comprehension, they were handled + # further above. + for child in second.children[::2]: + self._check_assignment(child, is_deletion) + else: # Everything handled, must be useless brackets. + self._check_assignment(second, is_deletion) + elif type_ == 'keyword': + error = 'keyword' + elif type_ == 'operator': + if node.value == '...': + error = 'Ellipsis' + elif type_ == 'comparison': + error = 'comparison' + elif type_ in ('string', 'number'): + error = 'literal' + elif type_ == 'yield_expr': + # This one seems to be a slightly different warning in Python. 
+ message = 'assignment to yield expression not possible' + self.add_issue(node, message=message) + elif type_ == 'test': + error = 'conditional expression' + elif type_ in ('atom_expr', 'power'): + if node.children[0] == 'await': + error = 'await expression' + elif node.children[-2] == '**': + error = 'operator' + else: + # Has a trailer + trailer = node.children[-1] + assert trailer.type == 'trailer' + if trailer.children[0] == '(': + error = 'function call' + elif type_ in ('testlist_star_expr', 'exprlist', 'testlist'): + for child in node.children[::2]: + self._check_assignment(child, is_deletion) + elif ('expr' in type_ and type_ != 'star_expr' # is a substring + or '_test' in type_ + or type_ in ('term', 'factor')): + error = 'operator' + + if error is not None: + message = "can't %s %s" % ("delete" if is_deletion else "assign to", error) + self.add_issue(node, message=message) + + +@ErrorFinder.register_rule(type='comp_for') +class _CompForRule(_CheckAssignmentRule): + message = "asynchronous comprehension outside of an asynchronous function" + + def is_issue(self, node): + # Some of the nodes here are already used, so no else if + expr_list = node.children[1 + int(node.children[0] == 'async')] + if expr_list.type != 'expr_list': # Already handled. + self._check_assignment(expr_list) + + return node.children[0] == 'async' \ + and not self._normalizer.context.is_async_funcdef() + + +@ErrorFinder.register_rule(type='expr_stmt') +class _ExprStmtRule(_CheckAssignmentRule): + message = "illegal expression for augmented assignment" + + def is_issue(self, node): + for before_equal in node.children[:-2:2]: + self._check_assignment(before_equal) + + augassign = node.children[1] + if augassign != '=' and augassign.type != 'annassign': # Is augassign. + return node.children[0].type in ('testlist_star_expr', 'atom', 'testlist') + + +@ErrorFinder.register_rule(type='with_item') +class _WithItemRule(_CheckAssignmentRule): + def is_issue(self, with_item): + self._check_assignment(with_item.children[2]) + + +@ErrorFinder.register_rule(type='del_stmt') +class _DelStmtRule(_CheckAssignmentRule): + def is_issue(self, del_stmt): + child = del_stmt.children[1] + + if child.type != 'expr_list': # Already handled. + self._check_assignment(child, is_deletion=True) + + +@ErrorFinder.register_rule(type='expr_list') +class _ExprListRule(_CheckAssignmentRule): + def is_issue(self, expr_list): + for expr in expr_list.children[::2]: + self._check_assignment(expr) + + +@ErrorFinder.register_rule(type='for_stmt') +class _ForStmtRule(_CheckAssignmentRule): + def is_issue(self, for_stmt): + # Some of the nodes here are already used, so no else if + expr_list = for_stmt.children[1] + if expr_list.type != 'expr_list': # Already handled. 
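+            # For example, `for 1 in []: pass` ends up here and is reported
+            # as "can't assign to literal".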
+ self._check_assignment(expr_list) diff --git a/pythonFiles/parso/python/fstring.py b/pythonFiles/parso/python/fstring.py new file mode 100644 index 000000000000..a8fe7b452df5 --- /dev/null +++ b/pythonFiles/parso/python/fstring.py @@ -0,0 +1,211 @@ +import re + +from itertools import count +from parso.utils import PythonVersionInfo +from parso.utils import split_lines +from parso.python.tokenize import Token +from parso import parser +from parso.tree import TypedLeaf, ErrorNode, ErrorLeaf + +version36 = PythonVersionInfo(3, 6) + + +class TokenNamespace: + _c = count() + LBRACE = next(_c) + RBRACE = next(_c) + ENDMARKER = next(_c) + COLON = next(_c) + CONVERSION = next(_c) + PYTHON_EXPR = next(_c) + EXCLAMATION_MARK = next(_c) + UNTERMINATED_STRING = next(_c) + + token_map = dict((v, k) for k, v in locals().items() if not k.startswith('_')) + + @classmethod + def generate_token_id(cls, string): + if string == '{': + return cls.LBRACE + elif string == '}': + return cls.RBRACE + elif string == '!': + return cls.EXCLAMATION_MARK + elif string == ':': + return cls.COLON + return getattr(cls, string) + + +GRAMMAR = """ +fstring: expression* ENDMARKER +format_spec: ':' expression* +expression: '{' PYTHON_EXPR [ '!' CONVERSION ] [ format_spec ] '}' +""" + +_prefix = r'((?:[^{}]+)*)' +_expr = _prefix + r'(\{|\}|$)' +_in_expr = r'([^{}\[\]:"\'!]*)(.?)' +# There's only one conversion character allowed. But the rules have to be +# checked later anyway, so allow more here. This makes error recovery nicer. +_conversion = r'([^={}:]*)(.?)' + +_compiled_expr = re.compile(_expr) +_compiled_in_expr = re.compile(_in_expr) +_compiled_conversion = re.compile(_conversion) + + +def tokenize(code, start_pos=(1, 0)): + def add_to_pos(string): + lines = split_lines(string) + l = len(lines[-1]) + if len(lines) > 1: + start_pos[0] += len(lines) - 1 + start_pos[1] = l + else: + start_pos[1] += l + + def tok(value, type=None, prefix=''): + if type is None: + type = TokenNamespace.generate_token_id(value) + + add_to_pos(prefix) + token = Token(type, value, tuple(start_pos), prefix) + add_to_pos(value) + return token + + start = 0 + recursion_level = 0 + added_prefix = '' + start_pos = list(start_pos) + while True: + match = _compiled_expr.match(code, start) + prefix = added_prefix + match.group(1) + found = match.group(2) + start = match.end() + if not found: + # We're at the end. + break + + if found == '}': + if recursion_level == 0 and len(code) > start and code[start] == '}': + # This is a }} escape. + added_prefix = prefix + '}}' + start += 1 + continue + + recursion_level = max(0, recursion_level - 1) + yield tok(found, prefix=prefix) + added_prefix = '' + else: + assert found == '{' + if recursion_level == 0 and len(code) > start and code[start] == '{': + # This is a {{ escape. + added_prefix = prefix + '{{' + start += 1 + continue + + recursion_level += 1 + yield tok(found, prefix=prefix) + added_prefix = '' + + expression = '' + squared_count = 0 + curly_count = 0 + while True: + expr_match = _compiled_in_expr.match(code, start) + expression += expr_match.group(1) + found = expr_match.group(2) + start = expr_match.end() + + if found == '{': + curly_count += 1 + expression += found + elif found == '}' and curly_count > 0: + curly_count -= 1 + expression += found + elif found == '[': + squared_count += 1 + expression += found + elif found == ']': + # Use a max function here, because the Python code might + # just have syntax errors. 
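+                # For example, the stray `]` in f'{a]}' must not push the
+                # count below zero.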
+ squared_count = max(0, squared_count - 1) + expression += found + elif found == ':' and (squared_count or curly_count): + expression += found + elif found in ('"', "'"): + search = found + if len(code) > start + 1 and \ + code[start] == found == code[start+1]: + search *= 3 + start += 2 + + index = code.find(search, start) + if index == -1: + yield tok(expression, type=TokenNamespace.PYTHON_EXPR) + yield tok( + found + code[start:], + type=TokenNamespace.UNTERMINATED_STRING, + ) + start = len(code) + break + expression += found + code[start:index+1] + start = index + 1 + elif found == '!' and len(code) > start and code[start] == '=': + # This is a python `!=` and not a conversion. + expression += found + else: + yield tok(expression, type=TokenNamespace.PYTHON_EXPR) + if found: + yield tok(found) + break + + if found == '!': + conversion_match = _compiled_conversion.match(code, start) + found = conversion_match.group(2) + start = conversion_match.end() + yield tok(conversion_match.group(1), type=TokenNamespace.CONVERSION) + if found: + yield tok(found) + if found == '}': + recursion_level -= 1 + + # We don't need to handle everything after ':', because that is + # basically new tokens. + + yield tok('', type=TokenNamespace.ENDMARKER, prefix=prefix) + + +class Parser(parser.BaseParser): + def parse(self, tokens): + node = super(Parser, self).parse(tokens) + if isinstance(node, self.default_leaf): # Is an endmarker. + # If there's no curly braces we get back a non-module. We always + # want an fstring. + node = self.default_node('fstring', [node]) + + return node + + def convert_leaf(self, pgen_grammar, type, value, prefix, start_pos): + # TODO this is so ugly. + leaf_type = TokenNamespace.token_map[type].lower() + return TypedLeaf(leaf_type, value, start_pos, prefix) + + def error_recovery(self, pgen_grammar, stack, arcs, typ, value, start_pos, prefix, + add_token_callback): + if not self._error_recovery: + return super(Parser, self).error_recovery( + pgen_grammar, stack, arcs, typ, value, start_pos, prefix, + add_token_callback + ) + + token_type = TokenNamespace.token_map[typ].lower() + if len(stack) == 1: + error_leaf = ErrorLeaf(token_type, value, start_pos, prefix) + stack[0][2][1].append(error_leaf) + else: + dfa, state, (type_, nodes) = stack[1] + stack[0][2][1].append(ErrorNode(nodes)) + stack[1:] = [] + + add_token_callback(typ, value, start_pos, prefix) diff --git a/pythonFiles/release/jedi/parser/grammar2.7.txt b/pythonFiles/parso/python/grammar26.txt old mode 100755 new mode 100644 similarity index 60% rename from pythonFiles/release/jedi/parser/grammar2.7.txt rename to pythonFiles/parso/python/grammar26.txt index b29501436b5c..b972a41d6a4a --- a/pythonFiles/release/jedi/parser/grammar2.7.txt +++ b/pythonFiles/parso/python/grammar26.txt @@ -1,4 +1,4 @@ -# Grammar for 2to3. This grammar supports Python 2.x and 3.x. 
+# Grammar for Python # Note: Changing the grammar specified in this file will most likely # require corresponding changes in the parser module @@ -10,41 +10,44 @@ # NOTE WELL: You should also follow all the steps listed in PEP 306, # "How to Change Python's Grammar" +# Commands for Kees Blom's railroad program +#diagram:token NAME +#diagram:token NUMBER +#diagram:token STRING +#diagram:token NEWLINE +#diagram:token ENDMARKER +#diagram:token INDENT +#diagram:output\input python.bla +#diagram:token DEDENT +#diagram:output\textwidth 20.04cm\oddsidemargin 0.0cm\evensidemargin 0.0cm +#diagram:rules # Start symbols for the grammar: -# file_input is a module or sequence of commands read from an input file; -# single_input is a single interactive statement; -# eval_input is the input for the eval() and input() functions. +# single_input is a single interactive statement; +# file_input is a module or sequence of commands read from an input file; +# eval_input is the input for the eval() and input() functions. # NB: compound_stmt in single_input is followed by extra NEWLINE! -file_input: (NEWLINE | stmt)* ENDMARKER single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: (NEWLINE | stmt)* ENDMARKER eval_input: testlist NEWLINE* ENDMARKER decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE decorators: decorator+ decorated: decorators (classdef | funcdef) -funcdef: 'def' NAME parameters ['->' test] ':' suite -parameters: '(' [typedargslist] ')' -typedargslist: ((tfpdef ['=' test] ',')* - ('*' [tname] (',' tname ['=' test])* [',' '**' tname] | '**' tname) - | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) -tname: NAME [':' test] -tfpdef: tname | '(' tfplist ')' -tfplist: tfpdef (',' tfpdef)* [','] -varargslist: ((vfpdef ['=' test] ',')* - ('*' [vname] (',' vname ['=' test])* [',' '**' vname] | '**' vname) - | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) -vname: NAME -vfpdef: vname | '(' vfplist ')' -vfplist: vfpdef (',' vfpdef)* [','] +funcdef: 'def' NAME parameters ':' suite +parameters: '(' [varargslist] ')' +varargslist: ((fpdef ['=' test] ',')* + ('*' NAME [',' '**' NAME] | '**' NAME) | + fpdef ['=' test] (',' fpdef ['=' test])* [',']) +fpdef: NAME | '(' fplist ')' +fplist: fpdef (',' fpdef)* [','] stmt: simple_stmt | compound_stmt simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | exec_stmt | assert_stmt) -expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) | - ('=' (yield_expr|testlist_star_expr))*) -testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] +expr_stmt: testlist (augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist))*) augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=' | '**=' | '//=') # For normal assignments, additional restrictions enforced by the interpreter @@ -57,18 +60,17 @@ break_stmt: 'break' continue_stmt: 'continue' return_stmt: 'return' [testlist] yield_stmt: yield_expr -raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]] +raise_stmt: 'raise' [test [',' test [',' test]]] import_stmt: import_name | import_from import_name: 'import' dotted_as_names -# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS -import_from: ('from' (('.' | '...')* dotted_name | ('.' 
| '...')+) +import_from: ('from' ('.'* dotted_name | '.'+) 'import' ('*' | '(' import_as_names ')' | import_as_names)) import_as_name: NAME ['as' NAME] dotted_as_name: dotted_name ['as' NAME] import_as_names: import_as_name (',' import_as_name)* [','] dotted_as_names: dotted_as_name (',' dotted_as_name)* dotted_name: NAME ('.' NAME)* -global_stmt: ('global' | 'nonlocal') NAME (',' NAME)* +global_stmt: 'global' NAME (',' NAME)* exec_stmt: 'exec' expr ['in' test [',' test]] assert_stmt: 'assert' test [',' test] @@ -78,17 +80,17 @@ while_stmt: 'while' test ':' suite ['else' ':' suite] for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] try_stmt: ('try' ':' suite ((except_clause ':' suite)+ - ['else' ':' suite] - ['finally' ':' suite] | - 'finally' ':' suite)) -with_stmt: 'with' with_item (',' with_item)* ':' suite + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' with_item ':' suite +# Dave: Python2.6 actually defines a little bit of a different label called +# 'with_var'. However in 2.7+ this is the default. Apply it for +# consistency reasons. with_item: test ['as' expr] -with_var: 'as' expr # NB compile.c makes sure that the default except clause is last -except_clause: 'except' [test [(',' | 'as') test]] -# Edit by David Halter: The stmt is now optional. This reflects how Jedi allows -# classes and functions to be empty, which is beneficial for autocompletion. -suite: simple_stmt | NEWLINE INDENT stmt* DEDENT +except_clause: 'except' [test [('as' | ',') test]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT # Backward compatibility cruft to support: # [ x for x in lambda: True, lambda: False if x() ] @@ -105,7 +107,6 @@ and_test: not_test ('and' not_test)* not_test: 'not' not_test | comparison comparison: expr (comp_op expr)* comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' -star_expr: '*' expr expr: xor_expr ('|' xor_expr)* xor_expr: and_expr ('^' and_expr)* and_expr: shift_expr ('&' shift_expr)* @@ -115,34 +116,39 @@ term: factor (('*'|'/'|'%'|'//') factor)* factor: ('+'|'-'|'~') factor | power power: atom trailer* ['**' factor] atom: ('(' [yield_expr|testlist_comp] ')' | - '[' [testlist_comp] ']' | + '[' [listmaker] ']' | '{' [dictorsetmaker] '}' | '`' testlist1 '`' | - NAME | NUMBER | STRING+ | '.' '.' '.') -# Modification by David Halter, remove `testlist_gexp` and `listmaker` -testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) + NAME | NUMBER | STRING+) +listmaker: test ( list_for | (',' test)* [','] ) +# Dave: Renamed testlist_gexpr to testlist_comp, because in 2.7+ this is the +# default. It's more consistent like this. +testlist_comp: test ( gen_for | (',' test)* [','] ) lambdef: 'lambda' [varargslist] ':' test trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] -subscript: test | [test] ':' [test] [sliceop] +subscript: '.' '.' '.' | test | [test] ':' [test] [sliceop] sliceop: ':' [test] -exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +exprlist: expr (',' expr)* [','] testlist: test (',' test)* [','] -# Modification by David Halter, dictsetmaker -> dictorsetmaker (so that it's -# the same as in the 3.4 grammar). -dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) | - (test (comp_for | (',' test)* [','])) ) +# Dave: Rename from dictmaker to dictorsetmaker, because this is more +# consistent with the following grammars. 
+dictorsetmaker: test ':' test (',' test ':' test)* [','] -classdef: 'class' NAME ['(' [arglist] ')'] ':' suite +classdef: 'class' NAME ['(' [testlist] ')'] ':' suite arglist: (argument ',')* (argument [','] |'*' test (',' argument)* [',' '**' test] |'**' test) -argument: test [comp_for] | test '=' test # Really [keyword '='] test +argument: test [gen_for] | test '=' test # Really [keyword '='] test + +list_iter: list_for | list_if +list_for: 'for' exprlist 'in' testlist_safe [list_iter] +list_if: 'if' old_test [list_iter] -comp_iter: comp_for | comp_if -comp_for: 'for' exprlist 'in' testlist_safe [comp_iter] -comp_if: 'if' old_test [comp_iter] +gen_iter: gen_for | gen_if +gen_for: 'for' exprlist 'in' or_test [gen_iter] +gen_if: 'if' old_test [gen_iter] testlist1: test (',' test)* diff --git a/pythonFiles/preview/jedi/parser/grammar2.7.txt b/pythonFiles/parso/python/grammar27.txt similarity index 65% rename from pythonFiles/preview/jedi/parser/grammar2.7.txt rename to pythonFiles/parso/python/grammar27.txt index 515dea646236..4c3f33da32d5 100644 --- a/pythonFiles/preview/jedi/parser/grammar2.7.txt +++ b/pythonFiles/parso/python/grammar27.txt @@ -1,4 +1,4 @@ -# Grammar for 2to3. This grammar supports Python 2.x and 3.x. +# Grammar for Python # Note: Changing the grammar specified in this file will most likely # require corresponding changes in the parser module @@ -10,41 +10,32 @@ # NOTE WELL: You should also follow all the steps listed in PEP 306, # "How to Change Python's Grammar" - # Start symbols for the grammar: -# file_input is a module or sequence of commands read from an input file; -# single_input is a single interactive statement; -# eval_input is the input for the eval() and input() functions. +# single_input is a single interactive statement; +# file_input is a module or sequence of commands read from an input file; +# eval_input is the input for the eval() and input() functions. # NB: compound_stmt in single_input is followed by extra NEWLINE! 
-file_input: (NEWLINE | stmt)* ENDMARKER single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: (NEWLINE | stmt)* ENDMARKER eval_input: testlist NEWLINE* ENDMARKER decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE decorators: decorator+ decorated: decorators (classdef | funcdef) -funcdef: 'def' NAME parameters ['->' test] ':' suite -parameters: '(' [typedargslist] ')' -typedargslist: ((tfpdef ['=' test] ',')* - ('*' [tname] (',' tname ['=' test])* [',' '**' tname] | '**' tname) - | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) -tname: NAME [':' test] -tfpdef: tname | '(' tfplist ')' -tfplist: tfpdef (',' tfpdef)* [','] -varargslist: ((vfpdef ['=' test] ',')* - ('*' [vname] (',' vname ['=' test])* [',' '**' vname] | '**' vname) - | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) -vname: NAME -vfpdef: vname | '(' vfplist ')' -vfplist: vfpdef (',' vfpdef)* [','] +funcdef: 'def' NAME parameters ':' suite +parameters: '(' [varargslist] ')' +varargslist: ((fpdef ['=' test] ',')* + ('*' NAME [',' '**' NAME] | '**' NAME) | + fpdef ['=' test] (',' fpdef ['=' test])* [',']) +fpdef: NAME | '(' fplist ')' +fplist: fpdef (',' fpdef)* [','] stmt: simple_stmt | compound_stmt simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | exec_stmt | assert_stmt) -expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) | - ('=' (yield_expr|testlist_star_expr))*) -testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] +expr_stmt: testlist (augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist))*) augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=' | '**=' | '//=') # For normal assignments, additional restrictions enforced by the interpreter @@ -60,8 +51,7 @@ yield_stmt: yield_expr raise_stmt: 'raise' [test [',' test [',' test]]] import_stmt: import_name | import_from import_name: 'import' dotted_as_names -# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS -import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) +import_from: ('from' ('.'* dotted_name | '.'+) 'import' ('*' | '(' import_as_names ')' | import_as_names)) import_as_name: NAME ['as' NAME] dotted_as_name: dotted_name ['as' NAME] @@ -78,17 +68,14 @@ while_stmt: 'while' test ':' suite ['else' ':' suite] for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] try_stmt: ('try' ':' suite ((except_clause ':' suite)+ - ['else' ':' suite] - ['finally' ':' suite] | - 'finally' ':' suite)) + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) with_stmt: 'with' with_item (',' with_item)* ':' suite with_item: test ['as' expr] -with_var: 'as' expr # NB compile.c makes sure that the default except clause is last -except_clause: 'except' [test [(',' | 'as') test]] -# Edit by David Halter: The stmt is now optional. This reflects how Jedi allows -# classes and functions to be empty, which is beneficial for autocompletion. 
-suite: simple_stmt | NEWLINE INDENT stmt* DEDENT +except_clause: 'except' [test [('as' | ',') test]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT # Backward compatibility cruft to support: # [ x for x in lambda: True, lambda: False if x() ] @@ -105,7 +92,6 @@ and_test: not_test ('and' not_test)* not_test: 'not' not_test | comparison comparison: expr (comp_op expr)* comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' -star_expr: '*' expr expr: xor_expr ('|' xor_expr)* xor_expr: and_expr ('^' and_expr)* and_expr: shift_expr ('&' shift_expr)* @@ -115,33 +101,37 @@ term: factor (('*'|'/'|'%'|'//') factor)* factor: ('+'|'-'|'~') factor | power power: atom trailer* ['**' factor] atom: ('(' [yield_expr|testlist_comp] ')' | - '[' [testlist_comp] ']' | + '[' [listmaker] ']' | '{' [dictorsetmaker] '}' | '`' testlist1 '`' | - NAME | NUMBER | STRING+ | '.' '.' '.') -# Modification by David Halter, remove `testlist_gexp` and `listmaker` -testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) + NAME | NUMBER | STRING+) +listmaker: test ( list_for | (',' test)* [','] ) +testlist_comp: test ( comp_for | (',' test)* [','] ) lambdef: 'lambda' [varargslist] ':' test trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] -subscript: test | [test] ':' [test] [sliceop] +subscript: '.' '.' '.' | test | [test] ':' [test] [sliceop] sliceop: ':' [test] -exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +exprlist: expr (',' expr)* [','] testlist: test (',' test)* [','] -# Modification by David Halter, dictsetmaker -> dictorsetmaker (so that it's -# the same as in the 3.4 grammar). dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) | - (test (comp_for | (',' test)* [','])) ) + (test (comp_for | (',' test)* [','])) ) -classdef: 'class' NAME ['(' [arglist] ')'] ':' suite +classdef: 'class' NAME ['(' [testlist] ')'] ':' suite arglist: (argument ',')* (argument [','] |'*' test (',' argument)* [',' '**' test] |'**' test) -argument: test [comp_for] | test '=' test # Really [keyword '='] test +# The reason that keywords are test nodes instead of NAME is that using NAME +# results in an ambiguity. ast.c makes sure it's a NAME. +argument: test [comp_for] | test '=' test + +list_iter: list_for | list_if +list_for: 'for' exprlist 'in' testlist_safe [list_iter] +list_if: 'if' old_test [list_iter] comp_iter: comp_for | comp_if -comp_for: 'for' exprlist 'in' testlist_safe [comp_iter] +comp_for: 'for' exprlist 'in' or_test [comp_iter] comp_if: 'if' old_test [comp_iter] testlist1: test (',' test)* diff --git a/pythonFiles/release/jedi/parser/grammar3.4.txt b/pythonFiles/parso/python/grammar33.txt old mode 100755 new mode 100644 similarity index 96% rename from pythonFiles/release/jedi/parser/grammar3.4.txt rename to pythonFiles/parso/python/grammar33.txt index d4a32b8e4ee8..d7aaffd60e14 --- a/pythonFiles/release/jedi/parser/grammar3.4.txt +++ b/pythonFiles/parso/python/grammar33.txt @@ -15,8 +15,8 @@ # file_input is a module or sequence of commands read from an input file; # eval_input is the input for the eval() functions. # NB: compound_stmt in single_input is followed by extra NEWLINE! 
-file_input: (NEWLINE | stmt)* ENDMARKER single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: (NEWLINE | stmt)* ENDMARKER eval_input: testlist NEWLINE* ENDMARKER decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE @@ -78,9 +78,7 @@ with_stmt: 'with' with_item (',' with_item)* ':' suite with_item: test ['as' expr] # NB compile.c makes sure that the default except clause is last except_clause: 'except' [test ['as' NAME]] -# Edit by David Halter: The stmt is now optional. This reflects how Jedi allows -# classes and functions to be empty, which is beneficial for autocompletion. -suite: simple_stmt | NEWLINE INDENT stmt* DEDENT +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT test: or_test ['if' or_test 'else' test] | lambdef test_nocond: or_test | lambdef_nocond diff --git a/pythonFiles/preview/jedi/parser/grammar3.4.txt b/pythonFiles/parso/python/grammar34.txt similarity index 95% rename from pythonFiles/preview/jedi/parser/grammar3.4.txt rename to pythonFiles/parso/python/grammar34.txt index d4a32b8e4ee8..05c3181627db 100644 --- a/pythonFiles/preview/jedi/parser/grammar3.4.txt +++ b/pythonFiles/parso/python/grammar34.txt @@ -7,16 +7,16 @@ # with someone who can; ask around on python-dev for help. Fred # Drake will probably be listening there. -# NOTE WELL: You should also follow all the steps listed in PEP 306, -# "How to Change Python's Grammar" +# NOTE WELL: You should also follow all the steps listed at +# https://docs.python.org/devguide/grammar.html # Start symbols for the grammar: # single_input is a single interactive statement; # file_input is a module or sequence of commands read from an input file; # eval_input is the input for the eval() functions. # NB: compound_stmt in single_input is followed by extra NEWLINE! -file_input: (NEWLINE | stmt)* ENDMARKER single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: (NEWLINE | stmt)* ENDMARKER eval_input: testlist NEWLINE* ENDMARKER decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE @@ -78,9 +78,7 @@ with_stmt: 'with' with_item (',' with_item)* ':' suite with_item: test ['as' expr] # NB compile.c makes sure that the default except clause is last except_clause: 'except' [test ['as' NAME]] -# Edit by David Halter: The stmt is now optional. This reflects how Jedi allows -# classes and functions to be empty, which is beneficial for autocompletion. -suite: simple_stmt | NEWLINE INDENT stmt* DEDENT +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT test: or_test ['if' or_test 'else' test] | lambdef test_nocond: or_test | lambdef_nocond diff --git a/pythonFiles/preview/jedi/parser/grammar3.5.txt b/pythonFiles/parso/python/grammar35.txt similarity index 96% rename from pythonFiles/preview/jedi/parser/grammar3.5.txt rename to pythonFiles/parso/python/grammar35.txt index 96a727187177..c38217f3f97f 100644 --- a/pythonFiles/preview/jedi/parser/grammar3.5.txt +++ b/pythonFiles/parso/python/grammar35.txt @@ -15,8 +15,8 @@ # file_input is a module or sequence of commands read from an input file; # eval_input is the input for the eval() functions. # NB: compound_stmt in single_input is followed by extra NEWLINE! 
-file_input: (NEWLINE | stmt)* ENDMARKER single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: (NEWLINE | stmt)* ENDMARKER eval_input: testlist NEWLINE* ENDMARKER decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE @@ -84,9 +84,7 @@ with_stmt: 'with' with_item (',' with_item)* ':' suite with_item: test ['as' expr] # NB compile.c makes sure that the default except clause is last except_clause: 'except' [test ['as' NAME]] -# Edit by David Halter: The stmt is now optional. This reflects how Jedi allows -# classes and functions to be empty, which is beneficial for autocompletion. -suite: simple_stmt | NEWLINE INDENT stmt* DEDENT +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT test: or_test ['if' or_test 'else' test] | lambdef test_nocond: or_test | lambdef_nocond @@ -136,7 +134,7 @@ arglist: argument (',' argument)* [','] # to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, # we explicitly match '*' here, too, to give it proper precedence. # Illegal combinations and orderings are blocked in ast.c: -# multiple (test comp_for) arguements are blocked; keyword unpackings +# multiple (test comp_for) arguments are blocked; keyword unpackings # that precede iterable unpackings are blocked; etc. argument: ( test [comp_for] | test '=' test | diff --git a/pythonFiles/preview/jedi/parser/grammar3.6.txt b/pythonFiles/parso/python/grammar36.txt similarity index 91% rename from pythonFiles/preview/jedi/parser/grammar3.6.txt rename to pythonFiles/parso/python/grammar36.txt index b44a56981564..e76147e9e4fc 100644 --- a/pythonFiles/preview/jedi/parser/grammar3.6.txt +++ b/pythonFiles/parso/python/grammar36.txt @@ -1,24 +1,16 @@ # Grammar for Python -# Note: Changing the grammar specified in this file will most likely -# require corresponding changes in the parser module -# (../Modules/parsermodule.c). If you can't make the changes to -# that module yourself, please co-ordinate the required changes -# with someone who can; ask around on python-dev for help. Fred -# Drake will probably be listening there. - # NOTE WELL: You should also follow all the steps listed at # https://docs.python.org/devguide/grammar.html # Start symbols for the grammar: -# file_input is a module or sequence of commands read from an input file; # single_input is a single interactive statement; +# file_input is a module or sequence of commands read from an input file; # eval_input is the input for the eval() functions. # NB: compound_stmt in single_input is followed by extra NEWLINE! -file_input: (NEWLINE | stmt)* ENDMARKER single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: (NEWLINE | stmt)* ENDMARKER eval_input: testlist NEWLINE* ENDMARKER - decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE decorators: decorator+ decorated: decorators (classdef | funcdef | async_funcdef) @@ -90,10 +82,7 @@ with_stmt: 'with' with_item (',' with_item)* ':' suite with_item: test ['as' expr] # NB compile.c makes sure that the default except clause is last except_clause: 'except' [test ['as' NAME]] -# Edit by Francisco Souza/David Halter: The stmt is now optional. This reflects -# how Jedi allows classes and functions to be empty, which is beneficial for -# autocompletion. 
-suite: simple_stmt | NEWLINE INDENT stmt* DEDENT +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT test: or_test ['if' or_test 'else' test] | lambdef test_nocond: or_test | lambdef_nocond diff --git a/pythonFiles/parso/python/grammar37.txt b/pythonFiles/parso/python/grammar37.txt new file mode 100644 index 000000000000..e76147e9e4fc --- /dev/null +++ b/pythonFiles/parso/python/grammar37.txt @@ -0,0 +1,150 @@ +# Grammar for Python + +# NOTE WELL: You should also follow all the steps listed at +# https://docs.python.org/devguide/grammar.html + +# Start symbols for the grammar: +# single_input is a single interactive statement; +# file_input is a module or sequence of commands read from an input file; +# eval_input is the input for the eval() functions. +# NB: compound_stmt in single_input is followed by extra NEWLINE! +single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: (NEWLINE | stmt)* ENDMARKER +eval_input: testlist NEWLINE* ENDMARKER +decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef | async_funcdef) + +# NOTE: Francisco Souza/Reinoud Elhorst, using ASYNC/'await' keywords instead of +# skipping python3.5+ compatibility, in favour of 3.7 solution +async_funcdef: 'async' funcdef +funcdef: 'def' NAME parameters ['->' test] ':' suite + +parameters: '(' [typedargslist] ')' +typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [ + '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']]] + | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']) +tfpdef: NAME [':' test] +varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ + '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [','] +) +vfpdef: NAME + +stmt: simple_stmt | compound_stmt +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | nonlocal_stmt | assert_stmt) +expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) +annassign: ':' test ['=' test] +testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] +augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') +# For normal and annotated assignments, additional restrictions enforced by the interpreter +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test]] +import_stmt: import_name | import_from +import_name: 'import' dotted_as_names +# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS +import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) + 'import' ('*' | '(' import_as_names ')' | import_as_names)) +import_as_name: NAME ['as' NAME] +dotted_as_name: dotted_name ['as' NAME] +import_as_names: import_as_name (',' import_as_name)* [','] +dotted_as_names: dotted_as_name (',' dotted_as_name)* +dotted_name: NAME ('.' 
NAME)* +global_stmt: 'global' NAME (',' NAME)* +nonlocal_stmt: 'nonlocal' NAME (',' NAME)* +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt +async_stmt: 'async' (funcdef | with_stmt | for_stmt) +if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] +while_stmt: 'while' test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' with_item (',' with_item)* ':' suite +with_item: test ['as' expr] +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test ['as' NAME]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +test: or_test ['if' or_test 'else' test] | lambdef +test_nocond: or_test | lambdef_nocond +lambdef: 'lambda' [varargslist] ':' test +lambdef_nocond: 'lambda' [varargslist] ':' test_nocond +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +# <> isn't actually a valid comparison operator in Python. It's here for the +# sake of a __future__ import described in PEP 401 (which really works :-) +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +star_expr: '*' expr +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'@'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power +power: atom_expr ['**' factor] +atom_expr: ['await'] atom trailer* +atom: ('(' [yield_expr|testlist_comp] ')' | + '[' [testlist_comp] ']' | + '{' [dictorsetmaker] '}' | + NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False') +testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME +subscriptlist: subscript (',' subscript)* [','] +subscript: test | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +testlist: test (',' test)* [','] +dictorsetmaker: ( ((test ':' test | '**' expr) + (comp_for | (',' (test ':' test | '**' expr))* [','])) | + ((test | star_expr) + (comp_for | (',' (test | star_expr))* [','])) ) + +classdef: 'class' NAME ['(' [arglist] ')'] ':' suite + +arglist: argument (',' argument)* [','] + +# The reason that keywords are test nodes instead of NAME is that using NAME +# results in an ambiguity. ast.c makes sure it's a NAME. +# "test '=' test" is really "keyword '=' test", but we have no such token. +# These need to be in a single rule to avoid grammar that is ambiguous +# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, +# we explicitly match '*' here, too, to give it proper precedence. +# Illegal combinations and orderings are blocked in ast.c: +# multiple (test comp_for) arguments are blocked; keyword unpackings +# that precede iterable unpackings are blocked; etc. 
+argument: ( test [comp_for] | + test '=' test | + '**' test | + '*' test ) + +comp_iter: comp_for | comp_if +comp_for: ['async'] 'for' exprlist 'in' or_test [comp_iter] +comp_if: 'if' test_nocond [comp_iter] + +# not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: NAME + +yield_expr: 'yield' [yield_arg] +yield_arg: 'from' test | testlist diff --git a/pythonFiles/parso/python/issue_list.txt b/pythonFiles/parso/python/issue_list.txt new file mode 100644 index 000000000000..e5e2c9dda764 --- /dev/null +++ b/pythonFiles/parso/python/issue_list.txt @@ -0,0 +1,176 @@ +A list of syntax/indentation errors I've encountered in CPython. + +# Python/compile.c + "'continue' not properly in loop" + "'continue' not supported inside 'finally' clause" # Until loop + "default 'except:' must be last" + "from __future__ imports must occur at the beginning of the file" + "'return' outside function" + "'return' with value in async generator" + "'break' outside loop" + "two starred expressions in assignment" + "asynchronous comprehension outside of an asynchronous function" + "'yield' outside function" # For both yield and yield from + "'yield from' inside async function" + "'await' outside function" + "'await' outside async function" + "starred assignment target must be in a list or tuple" + "can't use starred expression here" + "too many statically nested blocks" # Max. 20 + # This is one of the few places in the cpython code base that I really + # don't understand. It feels a bit hacky if you look at the implementation + # of UNPACK_EX. + "too many expressions in star-unpacking assignment" + + # Just ignore this one, newer versions will not be affected anymore and + # it's a limit of 2^16 - 1. + "too many annotations" # Only python 3.0 - 3.5, 3.6 is not affected. + +# Python/ast.c + # used with_item exprlist expr_stmt + "can't %s %s" % ("assign to" or "delete", + "lambda" + "function call" # foo() + "generator expression" + "list comprehension" + "set comprehension" + "dict comprehension" + "keyword" + "Ellipsis" + "comparison" + Dict: Set: Num: Str: Bytes: JoinedStr: FormattedValue: + "literal" + BoolOp: BinOp: UnaryOp: + "operator" + Yield: YieldFrom: + "yield expression" + Await: + "await expression" + IfExp: + "conditional expression" + "assignment to keyword" # (keywords + __debug__) # None = 2 + "named arguments must follow bare *" # def foo(*): pass + "non-default argument follows default argument" # def f(x=3, y): pass + "iterable unpacking cannot be used in comprehension" # [*[] for a in [1]] + "dict unpacking cannot be used in dict comprehension" # {**{} for a in [1]} + "Generator expression must be parenthesized if not sole argument" # foo(x for x in [], b) + "positional argument follows keyword argument unpacking" # f(**x, y) >= 3.5 + "positional argument follows keyword argument" # f(x=2, y) >= 3.5 + "iterable argument unpacking follows keyword argument unpacking" # foo(**kwargs, *args) + "lambda cannot contain assignment" # f(lambda: 1=1) + "keyword can't be an expression" # f(+x=1) + "keyword argument repeated" # f(x=1, x=2) + "illegal expression for augmented assignment" # x, y += 1 + "only single target (not list) can be annotated" # [x, y]: int + "only single target (not tuple) can be annotated" # x, y: str + "illegal target for annotation" # True: 1` + "trailing comma not allowed without surrounding parentheses" # from foo import a, + "bytes can only contain ASCII literal characters." # b'ä' # prob. 
only python 3 + "cannot mix bytes and nonbytes literals" # 's' b'' + "assignment to yield expression not possible" # x = yield 1 = 3 + + "f-string: empty expression not allowed" # f'{}' + "f-string: single '}' is not allowed" # f'}' + "f-string: expressions nested too deeply" # f'{1:{5:{3}}}' + "f-string expression part cannot include a backslash" # f'{"\"}' or f'{"\\"}' + "f-string expression part cannot include '#'" # f'{#}' + "f-string: unterminated string" # f'{"}' + "f-string: mismatched '(', '{', or '['" + "f-string: invalid conversion character: expected 's', 'r', or 'a'" # f'{1!b}' + "f-string: unexpected end of string" # Doesn't really happen?! + "f-string: expecting '}'" # f'{' + "(unicode error) unknown error + "(value error) unknown error + "(unicode error) MESSAGE + MESSAGES = { + "\\ at end of string" + "truncated \\xXX escape" + "truncated \\uXXXX escape" + "truncated \\UXXXXXXXX escape" + "illegal Unicode character" # '\Uffffffff' + "malformed \\N character escape" # '\N{}' + "unknown Unicode character name" # '\N{foo}' + } + "(value error) MESSAGE # bytes + MESSAGES = { + "Trailing \\ in string" + "invalid \\x escape at position %d" + } + + "invalid escape sequence \\%c" # Only happens when used in `python -W error` + "unexpected node" # Probably irrelevant + "Unexpected node-type in from-import" # Irrelevant, doesn't happen. + "malformed 'try' statement" # Irrelevant, doesn't happen. + +# Python/symtable.c + "duplicate argument '%U' in function definition" + "name '%U' is assigned to before global declaration" + "name '%U' is assigned to before nonlocal declaration" + "name '%U' is used prior to global declaration" + "name '%U' is used prior to nonlocal declaration" + "annotated name '%U' can't be global" + "annotated name '%U' can't be nonlocal" + "import * only allowed at module level" + + "name '%U' is parameter and global", + "name '%U' is nonlocal and global", + "name '%U' is parameter and nonlocal", + + "nonlocal declaration not allowed at module level"); + "no binding for nonlocal '%U' found", + # RecursionError. Not handled. For all human written code, this is probably + # not an issue. eval("()"*x) with x>=2998 for example fails, but that's + # more than 2000 executions on one line. + "maximum recursion depth exceeded during compilation"); + +# Python/future.c + "not a chance" + "future feature %.100s is not defined" + "from __future__ imports must occur at the beginning of the file" # Also in compile.c + +# Parser/tokenizer.c + # All the following issues seem to be irrelevant for parso, because the + # encoding stuff is done before it reaches the tokenizer. It's already + # unicode at that point. + "encoding problem: %s" + "encoding problem: %s with BOM" + "Non-UTF-8 code starting with '\\x%.2x' in file %U on line %i, but no encoding declared; see http://python.org/dev/peps/pep-0263/ for details" + +# Parser/pythonrun.c + E_SYNTAX: "invalid syntax" + E_LINECONT: "unexpected character after line continuation character" + E_IDENTIFIER: "invalid character in identifier" + # Also just use 'invalid syntax'. Happens mostly with stuff like `(`. This + # message doesn't really help the user, because it only appears very + # randomly, e.g. `(or` wouldn't yield this error. + E_EOF: "unexpected EOF while parsing" + # Even in 3.6 this is implemented kind of shaky. Not implemented, I think + # cPython needs to fix this one first. + # e.g. 
`ast.parse('def x():\n\t if 1:\n \t \tpass')` works :/
+    E_TABSPACE: "inconsistent use of tabs and spaces in indentation"
+    # Ignored, just shown as "invalid syntax". The error has mostly to do with
+    # numbers like 0b2 everywhere or 1.6_ in Python3.6.
+    E_TOKEN: "invalid token"
+    E_EOFS: "EOF while scanning triple-quoted string literal"
+    E_EOLS: "EOL while scanning string literal"
+
+    # IndentationError
+    E_DEDENT: "unindent does not match any outer indentation level"
+    E_TOODEEP: "too many levels of indentation" # 100 levels
+    E_SYNTAX: "expected an indented block"
+    "unexpected indent"
+    # I don't think this actually ever happens.
+    "unexpected unindent"
+
+
+    # Irrelevant for parso for now.
+    E_OVERFLOW: "expression too long"
+    E_DECODE: "unknown decode error"
+    E_BADSINGLE: "multiple statements found while compiling a single statement"
+
+
+Version specific:
+Python 3.5:
+    'yield' inside async function
+Python 3.3/3.4:
+    can use starred expression only as assignment target
diff --git a/pythonFiles/parso/python/parser.py b/pythonFiles/parso/python/parser.py
new file mode 100644
index 000000000000..1897f53e8d6f
--- /dev/null
+++ b/pythonFiles/parso/python/parser.py
@@ -0,0 +1,261 @@
+from parso.python import tree
+from parso.python.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER,
+                                STRING, tok_name, NAME)
+from parso.parser import BaseParser
+from parso.pgen2.parse import token_to_ilabel
+
+
+class Parser(BaseParser):
+    """
+    This class is used to parse a Python file; it then divides it into a
+    class structure of different scopes.
+
+    :param pgen_grammar: The grammar object of pgen2. Loaded by load_grammar.
+    """
+
+    node_map = {
+        'expr_stmt': tree.ExprStmt,
+        'classdef': tree.Class,
+        'funcdef': tree.Function,
+        'file_input': tree.Module,
+        'import_name': tree.ImportName,
+        'import_from': tree.ImportFrom,
+        'break_stmt': tree.KeywordStatement,
+        'continue_stmt': tree.KeywordStatement,
+        'return_stmt': tree.ReturnStmt,
+        'raise_stmt': tree.KeywordStatement,
+        'yield_expr': tree.YieldExpr,
+        'del_stmt': tree.KeywordStatement,
+        'pass_stmt': tree.KeywordStatement,
+        'global_stmt': tree.GlobalStmt,
+        'nonlocal_stmt': tree.KeywordStatement,
+        'print_stmt': tree.KeywordStatement,
+        'assert_stmt': tree.AssertStmt,
+        'if_stmt': tree.IfStmt,
+        'with_stmt': tree.WithStmt,
+        'for_stmt': tree.ForStmt,
+        'while_stmt': tree.WhileStmt,
+        'try_stmt': tree.TryStmt,
+        'comp_for': tree.CompFor,
+        # Not sure if this is the best idea, but IMO it's the easiest way to
+        # avoid extreme amounts of work around the subtle difference of 2/3
+        # grammar in list comprehensions.
+        'list_for': tree.CompFor,
+        # Same here. This just exists in Python 2.6.
+        'gen_for': tree.CompFor,
+        'decorator': tree.Decorator,
+        'lambdef': tree.Lambda,
+        'old_lambdef': tree.Lambda,
+        'lambdef_nocond': tree.Lambda,
+    }
+    default_node = tree.PythonNode
+
+    def __init__(self, pgen_grammar, error_recovery=True, start_symbol='file_input'):
+        super(Parser, self).__init__(pgen_grammar, start_symbol, error_recovery=error_recovery)
+
+        self.syntax_errors = []
+        self._omit_dedent_list = []
+        self._indent_counter = 0
+
+        # TODO do print absolute import detection here.
+        # try:
+        #     del python_grammar_no_print_statement.keywords["print"]
+        # except KeyError:
+        #     pass  # Doesn't exist in the Python 3 grammar.
+
+
+        # if self.options["print_function"]:
+        #     python_grammar = pygram.python_grammar_no_print_statement
+        # else:
+
+    def parse(self, tokens):
+        if self._error_recovery:
+            if self._start_symbol != 'file_input':
+                raise NotImplementedError
+
+            tokens = self._recovery_tokenize(tokens)
+
+        node = super(Parser, self).parse(tokens)
+
+        if self._start_symbol == 'file_input' != node.type:
+            # If there's only one statement, we get back a non-module. That's
+            # not what we want; we want a module, so we add it here:
+            node = self.convert_node(
+                self._pgen_grammar,
+                self._pgen_grammar.symbol2number['file_input'],
+                [node]
+            )
+
+        return node
+
+    def convert_node(self, pgen_grammar, type, children):
+        """
+        Convert raw node information to a PythonBaseNode instance.
+
+        This is passed to the parser driver which calls it whenever a reduction of a
+        grammar rule produces a new complete node, so that the tree is built
+        strictly bottom-up.
+        """
+        # TODO REMOVE symbol, we don't want type here.
+        symbol = pgen_grammar.number2symbol[type]
+        try:
+            return self.node_map[symbol](children)
+        except KeyError:
+            if symbol == 'suite':
+                # We don't want the INDENT/DEDENT in our parser tree. Those
+                # leaves are just cancer. They are virtual leaves and not real
+                # ones and therefore have pseudo start/end positions and no
+                # prefixes. Just ignore them.
+                children = [children[0]] + children[2:-1]
+            elif symbol == 'list_if':
+                # Make transitioning from 2 to 3 easier.
+                symbol = 'comp_if'
+            elif symbol == 'listmaker':
+                # Same as list_if above.
+                symbol = 'testlist_comp'
+            return self.default_node(symbol, children)
+
+    def convert_leaf(self, pgen_grammar, type, value, prefix, start_pos):
+        # print('leaf', repr(value), token.tok_name[type])
+        if type == NAME:
+            if value in pgen_grammar.keywords:
+                return tree.Keyword(value, start_pos, prefix)
+            else:
+                return tree.Name(value, start_pos, prefix)
+        elif type == STRING:
+            return tree.String(value, start_pos, prefix)
+        elif type == NUMBER:
+            return tree.Number(value, start_pos, prefix)
+        elif type == NEWLINE:
+            return tree.Newline(value, start_pos, prefix)
+        elif type == ENDMARKER:
+            return tree.EndMarker(value, start_pos, prefix)
+        else:
+            return tree.Operator(value, start_pos, prefix)
+
+    def error_recovery(self, pgen_grammar, stack, arcs, typ, value, start_pos, prefix,
+                       add_token_callback):
+        def get_symbol_and_nodes(stack):
+            for dfa, state, (type_, nodes) in stack:
+                symbol = pgen_grammar.number2symbol[type_]
+                yield symbol, nodes
+
+        tos_nodes = stack.get_tos_nodes()
+        if tos_nodes:
+            last_leaf = tos_nodes[-1].get_last_leaf()
+        else:
+            last_leaf = None
+
+        if self._start_symbol == 'file_input' and \
+                (typ == ENDMARKER or typ == DEDENT and '\n' not in last_leaf.value):
+            def reduce_stack(states, newstate):
+                # reduce
+                state = newstate
+                while states[state] == [(0, state)]:
+                    self.pgen_parser._pop()
+
+                    dfa, state, (type_, nodes) = stack[-1]
+                    states, first = dfa
+
+
+            # In Python, statements need to end with a newline. But since it's
+            # possible (and valid in Python) that there's no newline at the
+            # end of a file, we have to recover even if the user doesn't want
+            # error recovery.
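+            # For example (an illustration, not from the original comment): a
+            # file whose last statement has no trailing newline ends in
+            # DEDENT/ENDMARKER without the NEWLINE the grammar expects; the
+            # code below pretends that NEWLINE was seen so the last statement
+            # can still be reduced.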
+ #print('x', pprint.pprint(stack)) + ilabel = token_to_ilabel(pgen_grammar, NEWLINE, value) + + dfa, state, (type_, nodes) = stack[-1] + symbol = pgen_grammar.number2symbol[type_] + states, first = dfa + arcs = states[state] + # Look for a state with this label + for i, newstate in arcs: + if ilabel == i: + if symbol == 'simple_stmt': + # This is basically shifting + stack[-1] = (dfa, newstate, (type_, nodes)) + + reduce_stack(states, newstate) + add_token_callback(typ, value, start_pos, prefix) + return + # Check if we're at the right point + #for symbol, nodes in get_symbol_and_nodes(stack): + # self.pgen_parser._pop() + + #break + break + #symbol = pgen_grammar.number2symbol[type_] + + if not self._error_recovery: + return super(Parser, self).error_recovery( + pgen_grammar, stack, arcs, typ, value, start_pos, prefix, + add_token_callback) + + def current_suite(stack): + # For now just discard everything that is not a suite or + # file_input, if we detect an error. + for index, (symbol, nodes) in reversed(list(enumerate(get_symbol_and_nodes(stack)))): + # `suite` can sometimes be only simple_stmt, not stmt. + if symbol == 'file_input': + break + elif symbol == 'suite' and len(nodes) > 1: + # suites without an indent in them get discarded. + break + return index, symbol, nodes + + index, symbol, nodes = current_suite(stack) + + # print('err', token.tok_name[typ], repr(value), start_pos, len(stack), index) + if self._stack_removal(pgen_grammar, stack, arcs, index + 1, value, start_pos): + add_token_callback(typ, value, start_pos, prefix) + else: + if typ == INDENT: + # For every deleted INDENT we have to delete a DEDENT as well. + # Otherwise the parser will get into trouble and DEDENT too early. + self._omit_dedent_list.append(self._indent_counter) + + error_leaf = tree.PythonErrorLeaf(tok_name[typ].lower(), value, start_pos, prefix) + stack[-1][2][1].append(error_leaf) + + if symbol == 'suite': + dfa, state, node = stack[-1] + states, first = dfa + arcs = states[state] + intended_label = pgen_grammar.symbol2label['stmt'] + # Introduce a proper state transition. We're basically allowing + # there to be no valid statements inside a suite. + if [x[0] for x in arcs] == [intended_label]: + new_state = arcs[0][1] + stack[-1] = dfa, new_state, node + + def _stack_removal(self, pgen_grammar, stack, arcs, start_index, value, start_pos): + failed_stack = False + found = False + all_nodes = [] + for dfa, state, (type_, nodes) in stack[start_index:]: + if nodes: + found = True + if found: + failed_stack = True + all_nodes += nodes + if failed_stack: + stack[start_index - 1][2][1].append(tree.PythonErrorNode(all_nodes)) + + stack[start_index:] = [] + return failed_stack + + def _recovery_tokenize(self, tokens): + for typ, value, start_pos, prefix in tokens: + # print(tok_name[typ], repr(value), start_pos, repr(prefix)) + if typ == DEDENT: + # We need to count indents, because if we just omit any DEDENT, + # we might omit them in the wrong place. 
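+                # Illustration of the pairing: if error recovery dropped the
+                # INDENT of a mis-indented line, the DEDENT that would close
+                # that same block has to be dropped as well; matching the two
+                # through the indent counter keeps INDENT/DEDENT balanced.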
+                o = self._omit_dedent_list
+                if o and o[-1] == self._indent_counter:
+                    o.pop()
+                    continue
+
+                self._indent_counter -= 1
+            elif typ == INDENT:
+                self._indent_counter += 1
+            yield typ, value, start_pos, prefix
diff --git a/pythonFiles/parso/python/pep8.py b/pythonFiles/parso/python/pep8.py
new file mode 100644
index 000000000000..59fe452d06c4
--- /dev/null
+++ b/pythonFiles/parso/python/pep8.py
@@ -0,0 +1,727 @@
+import re
+from contextlib import contextmanager
+
+from parso.python.errors import ErrorFinder, ErrorFinderConfig
+from parso.normalizer import Rule
+from parso.python.tree import search_ancestor, Flow, Scope
+
+
+_IMPORT_TYPES = ('import_name', 'import_from')
+_SUITE_INTRODUCERS = ('classdef', 'funcdef', 'if_stmt', 'while_stmt',
+                      'for_stmt', 'try_stmt', 'with_stmt')
+_NON_STAR_TYPES = ('term', 'import_from', 'power')
+_OPENING_BRACKETS = '(', '[', '{'
+_CLOSING_BRACKETS = ')', ']', '}'
+_FACTOR = '+', '-', '~'
+_ALLOW_SPACE = '*', '+', '-', '**', '/', '//', '@'
+_BITWISE_OPERATOR = '<<', '>>', '|', '&', '^'
+_NEEDS_SPACE = ('=', '%', '->',
+                '<', '>', '==', '>=', '<=', '<>', '!=',
+                '+=', '-=', '*=', '@=', '/=', '%=', '&=', '|=', '^=', '<<=',
+                '>>=', '**=', '//=')
+_NEEDS_SPACE += _BITWISE_OPERATOR
+_IMPLICIT_INDENTATION_TYPES = ('dictorsetmaker', 'argument')
+_POSSIBLE_SLICE_PARENTS = ('subscript', 'subscriptlist', 'sliceop')
+
+
+class IndentationTypes(object):
+    VERTICAL_BRACKET = object()
+    HANGING_BRACKET = object()
+    BACKSLASH = object()
+    SUITE = object()
+    IMPLICIT = object()
+
+
+class IndentationNode(object):
+    type = IndentationTypes.SUITE
+
+    def __init__(self, config, indentation, parent=None):
+        self.bracket_indentation = self.indentation = indentation
+        self.parent = parent
+
+    def __repr__(self):
+        return '<%s>' % self.__class__.__name__
+
+    def get_latest_suite_node(self):
+        n = self
+        while n is not None:
+            if n.type == IndentationTypes.SUITE:
+                return n
+
+            n = n.parent
+
+
+class BracketNode(IndentationNode):
+    def __init__(self, config, leaf, parent, in_suite_introducer=False):
+        self.leaf = leaf
+
+        # Figure out here what the indentation is. For chained brackets
+        # we can basically use the previous indentation.
+        previous_leaf = leaf
+        n = parent
+        if n.type == IndentationTypes.IMPLICIT:
+            n = n.parent
+        while True:
+            if hasattr(n, 'leaf') and previous_leaf.line != n.leaf.line:
+                break
+
+            previous_leaf = previous_leaf.get_previous_leaf()
+            if not isinstance(n, BracketNode) or previous_leaf != n.leaf:
+                break
+            n = n.parent
+        parent_indentation = n.indentation
+
+
+        next_leaf = leaf.get_next_leaf()
+        if '\n' in next_leaf.prefix:
+            # This implies code like:
+            # foobarbaz(
+            #     a,
+            #     b,
+            # )
+            self.bracket_indentation = parent_indentation \
+                + config.closing_bracket_hanging_indentation
+            self.indentation = parent_indentation + config.indentation
+            self.type = IndentationTypes.HANGING_BRACKET
+        else:
+            # Implies code like:
+            # foobarbaz(a,
+            #           b,
+            #           )
+            expected_end_indent = leaf.end_pos[1]
+            if '\t' in config.indentation:
+                self.indentation = None
+            else:
+                self.indentation = ' ' * expected_end_indent
+            self.bracket_indentation = self.indentation
+            self.type = IndentationTypes.VERTICAL_BRACKET
+
+        if in_suite_introducer and parent.type == IndentationTypes.SUITE \
+                and self.indentation == parent_indentation + config.indentation:
+            self.indentation += config.indentation
+            # The closing bracket should have the same indentation.
+ self.bracket_indentation = self.indentation + self.parent = parent + + +class ImplicitNode(BracketNode): + """ + Implicit indentation after keyword arguments, default arguments, + annotations and dict values. + """ + def __init__(self, config, leaf, parent): + super(ImplicitNode, self).__init__(config, leaf, parent) + self.type = IndentationTypes.IMPLICIT + + next_leaf = leaf.get_next_leaf() + if leaf == ':' and '\n' not in next_leaf.prefix: + self.indentation += ' ' + + +class BackslashNode(IndentationNode): + type = IndentationTypes.BACKSLASH + + def __init__(self, config, parent_indentation, containing_leaf, spacing, parent=None): + expr_stmt = search_ancestor(containing_leaf, 'expr_stmt') + if expr_stmt is not None: + equals = expr_stmt.children[-2] + + if '\t' in config.indentation: + # TODO unite with the code of BracketNode + self.indentation = None + else: + # If the backslash follows the equals, use normal indentation + # otherwise it should align with the equals. + if equals.end_pos == spacing.start_pos: + self.indentation = parent_indentation + config.indentation + else: + # +1 because there is a space. + self.indentation = ' ' * (equals.end_pos[1] + 1) + else: + self.indentation = parent_indentation + config.indentation + self.bracket_indentation = self.indentation + self.parent = parent + + +def _is_magic_name(name): + return name.value.startswith('__') and name.value.endswith('__') + + +class PEP8Normalizer(ErrorFinder): + def __init__(self, *args, **kwargs): + super(PEP8Normalizer, self).__init__(*args, **kwargs) + self._previous_part = None + self._previous_leaf = None + self._on_newline = True + self._newline_count = 0 + self._wanted_newline_count = None + self._max_new_lines_in_prefix = 0 + self._new_statement = True + self._implicit_indentation_possible = False + # The top of stack of the indentation nodes. + self._indentation_tos = self._last_indentation_tos = \ + IndentationNode(self._config, indentation='') + self._in_suite_introducer = False + + if ' ' in self._config.indentation: + self._indentation_type = 'spaces' + self._wrong_indentation_char = '\t' + else: + self._indentation_type = 'tabs' + self._wrong_indentation_char = ' ' + + @contextmanager + def visit_node(self, node): + with super(PEP8Normalizer, self).visit_node(node): + with self._visit_node(node): + yield + + @contextmanager + def _visit_node(self, node): + typ = node.type + + if typ in 'import_name': + names = node.get_defined_names() + if len(names) > 1: + for name in names[:1]: + self.add_issue(name, 401, 'Multiple imports on one line') + elif typ == 'lambdef': + expr_stmt = node.parent + # Check if it's simply defining a single name, not something like + # foo.bar or x[1], where using a lambda could make more sense. + if expr_stmt.type == 'expr_stmt' and any(n.type == 'name' for n in expr_stmt.children[:-2:2]): + self.add_issue(node, 731, 'Do not assign a lambda expression, use a def') + elif typ == 'try_stmt': + for child in node.children: + # Here we can simply check if it's an except, because otherwise + # it would be an except_clause. 
+                if child.type == 'keyword' and child.value == 'except':
+                    self.add_issue(child, 722, 'Do not use bare except, specify exception instead')
+        elif typ == 'comparison':
+            for child in node.children:
+                if child.type not in ('atom_expr', 'power'):
+                    continue
+                if len(child.children) > 2:
+                    continue
+                trailer = child.children[1]
+                atom = child.children[0]
+                if trailer.type == 'trailer' and atom.type == 'name' \
+                        and atom.value == 'type':
+                    self.add_issue(node, 721, "Do not compare types, use 'isinstance()'")
+                    break
+        elif typ == 'file_input':
+            endmarker = node.children[-1]
+            prev = endmarker.get_previous_leaf()
+            prefix = endmarker.prefix
+            if (not prefix.endswith('\n') and (
+                    prefix or prev is None or prev.value != '\n')):
+                self.add_issue(endmarker, 292, "No newline at end of file")
+
+        if typ in _IMPORT_TYPES:
+            simple_stmt = node.parent
+            module = simple_stmt.parent
+            #if module.type == 'simple_stmt':
+            if module.type == 'file_input':
+                index = module.children.index(simple_stmt)
+                for child in module.children[:index]:
+                    children = [child]
+                    if child.type == 'simple_stmt':
+                        # Remove the newline.
+                        children = child.children[:-1]
+
+                    found_docstring = False
+                    for c in children:
+                        if c.type == 'string' and not found_docstring:
+                            continue
+                        found_docstring = True
+
+                        if c.type == 'expr_stmt' and \
+                                all(_is_magic_name(n) for n in c.get_defined_names()):
+                            continue
+
+                        if c.type in _IMPORT_TYPES or isinstance(c, Flow):
+                            continue
+
+                        self.add_issue(node, 402, 'Module level import not at top of file')
+                        break
+                    else:
+                        continue
+                    break
+
+        implicit_indentation_possible = typ in _IMPLICIT_INDENTATION_TYPES
+        in_introducer = typ in _SUITE_INTRODUCERS
+        if in_introducer:
+            self._in_suite_introducer = True
+        elif typ == 'suite':
+            if self._indentation_tos.type == IndentationTypes.BACKSLASH:
+                self._indentation_tos = self._indentation_tos.parent
+
+            self._indentation_tos = IndentationNode(
+                self._config,
+                self._indentation_tos.indentation + self._config.indentation,
+                parent=self._indentation_tos
+            )
+        elif implicit_indentation_possible:
+            self._implicit_indentation_possible = True
+        yield
+        if typ == 'suite':
+            assert self._indentation_tos.type == IndentationTypes.SUITE
+            self._indentation_tos = self._indentation_tos.parent
+            # If we dedent, no lines are needed anymore.
+            self._wanted_newline_count = None
+        elif implicit_indentation_possible:
+            self._implicit_indentation_possible = False
+            if self._indentation_tos.type == IndentationTypes.IMPLICIT:
+                self._indentation_tos = self._indentation_tos.parent
+        elif in_introducer:
+            self._in_suite_introducer = False
+            if typ in ('classdef', 'funcdef'):
+                self._wanted_newline_count = self._get_wanted_blank_lines_count()
+
+    def _check_tabs_spaces(self, spacing):
+        if self._wrong_indentation_char in spacing.value:
+            self.add_issue(spacing, 101, 'Indentation contains ' + self._indentation_type)
+            return True
+        return False
+
+    def _get_wanted_blank_lines_count(self):
+        suite_node = self._indentation_tos.get_latest_suite_node()
+        return int(suite_node.parent is None) + 1
+
+    def _reset_newlines(self, spacing, leaf, is_comment=False):
+        self._max_new_lines_in_prefix = \
+            max(self._max_new_lines_in_prefix, self._newline_count)
+
+        wanted = self._wanted_newline_count
+        if wanted is not None:
+            # Need to subtract one
+            blank_lines = self._newline_count - 1
+            if wanted > blank_lines and leaf.type != 'endmarker':
+                # In case of a comment we don't need to add the issue, yet.
+                if not is_comment:
+                    # TODO end_pos wrong.
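+                    # Codes 301/302 appear to mirror pycodestyle's E301/E302
+                    # (expected blank lines before a nested method vs. a
+                    # top-level definition).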
+                    code = 302 if wanted == 2 else 301
+                    message = "expected %s blank line, found %s" \
+                        % (wanted, blank_lines)
+                    self.add_issue(spacing, code, message)
+                self._wanted_newline_count = None
+            else:
+                self._wanted_newline_count = None
+
+        if not is_comment:
+            wanted = self._get_wanted_blank_lines_count()
+            actual = self._max_new_lines_in_prefix - 1
+
+            val = leaf.value
+            needs_lines = (
+                val == '@' and leaf.parent.type == 'decorator'
+                or (
+                    val == 'class'
+                    or val == 'async' and leaf.get_next_leaf() == 'def'
+                    or val == 'def' and self._previous_leaf != 'async'
+                ) and leaf.parent.parent.type != 'decorated'
+            )
+            if needs_lines and actual < wanted:
+                func_or_cls = leaf.parent
+                suite = func_or_cls.parent
+                if suite.type == 'decorated':
+                    suite = suite.parent
+
+                # The first leaf of a file or a suite should not need blank
+                # lines.
+                if suite.children[int(suite.type == 'suite')] != func_or_cls:
+                    code = 302 if wanted == 2 else 301
+                    message = "expected %s blank line, found %s" \
+                        % (wanted, actual)
+                    self.add_issue(spacing, code, message)
+
+            self._max_new_lines_in_prefix = 0
+
+        self._newline_count = 0
+
+    def visit_leaf(self, leaf):
+        super(PEP8Normalizer, self).visit_leaf(leaf)
+        for part in leaf._split_prefix():
+            if part.type == 'spacing':
+                # This `part` is reused by the `_visit_part` call after the loop.
+                break
+            self._visit_part(part, part.create_spacing_part(), leaf)
+
+        self._analyse_non_prefix(leaf)
+        self._visit_part(leaf, part, leaf)
+
+        # Cleanup
+        self._last_indentation_tos = self._indentation_tos
+
+        self._new_statement = leaf.type == 'newline'
+
+        # TODO does this work? with brackets and stuff?
+        if leaf.type == 'newline' and \
+                self._indentation_tos.type == IndentationTypes.BACKSLASH:
+            self._indentation_tos = self._indentation_tos.parent
+
+        if leaf.value == ':' and leaf.parent.type in _SUITE_INTRODUCERS:
+            self._in_suite_introducer = False
+        elif leaf.value == 'elif':
+            self._in_suite_introducer = True
+
+        if not self._new_statement:
+            self._reset_newlines(part, leaf)
+            self._max_blank_lines = 0
+
+        self._previous_leaf = leaf
+
+        return leaf.value
+
+    def _visit_part(self, part, spacing, leaf):
+        value = part.value
+        type_ = part.type
+        if type_ == 'error_leaf':
+            return
+
+        if value == ',' and part.parent.type == 'dictorsetmaker':
+            self._indentation_tos = self._indentation_tos.parent
+
+        node = self._indentation_tos
+
+        if type_ == 'comment':
+            if value.startswith('##'):
+                # Whole blocks of # should not raise an error.
+                if value.lstrip('#'):
+                    self.add_issue(part, 266, "Too many leading '#' for block comment.")
+            elif self._on_newline:
+                if not re.match('#:? ', value) and not value == '#' \
+                        and not (value.startswith('#!') and part.start_pos == (1, 0)):
+                    self.add_issue(part, 265, "Block comment should start with '# '")
+            else:
+                if not re.match('#:? [^ ]', value):
+                    self.add_issue(part, 262, "Inline comment should start with '# '")
+
+            self._reset_newlines(spacing, leaf, is_comment=True)
+        elif type_ == 'newline':
+            if self._newline_count > self._get_wanted_blank_lines_count():
+                self.add_issue(part, 303, "Too many blank lines (%s)" % self._newline_count)
+            elif leaf in ('def', 'class') \
+                    and leaf.parent.parent.type == 'decorated':
+                self.add_issue(part, 304, "Blank lines found after function decorator")
+
+
+            self._newline_count += 1
+
+        if type_ == 'backslash':
+            # TODO is this enough checking? What about ==?
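+            # For example, a redundant backslash inside brackets:
+            #     foo = (1 + \
+            #            2)
+            # versus a necessary one outside them:
+            #     foo = 1 + \
+            #           2
+            # (the first case is what code 502 below flags).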
+            if node.type != IndentationTypes.BACKSLASH:
+                if node.type != IndentationTypes.SUITE:
+                    self.add_issue(part, 502, 'The backslash is redundant between brackets')
+                else:
+                    indentation = node.indentation
+                    if self._in_suite_introducer and node.type == IndentationTypes.SUITE:
+                        indentation += self._config.indentation
+
+                    self._indentation_tos = BackslashNode(
+                        self._config,
+                        indentation,
+                        part,
+                        spacing,
+                        parent=self._indentation_tos
+                    )
+        elif self._on_newline:
+            indentation = spacing.value
+            if node.type == IndentationTypes.BACKSLASH \
+                    and self._previous_part.type == 'newline':
+                self._indentation_tos = self._indentation_tos.parent
+
+            if not self._check_tabs_spaces(spacing):
+                should_be_indentation = node.indentation
+                if type_ == 'comment':
+                    # Comments can be dedented. So we have to care for that.
+                    n = self._last_indentation_tos
+                    while True:
+                        if len(indentation) > len(n.indentation):
+                            break
+
+                        should_be_indentation = n.indentation
+
+                        self._last_indentation_tos = n
+                        if n == node:
+                            break
+                        n = n.parent
+
+                if self._new_statement:
+                    if type_ == 'newline':
+                        if indentation:
+                            self.add_issue(spacing, 291, 'Trailing whitespace')
+                    elif indentation != should_be_indentation:
+                        s = '%s %s' % (len(self._config.indentation), self._indentation_type)
+                        self.add_issue(part, 111, 'Indentation is not a multiple of ' + s)
+                else:
+                    if value in '])}':
+                        should_be_indentation = node.bracket_indentation
+                    else:
+                        should_be_indentation = node.indentation
+                    if self._in_suite_introducer and indentation == \
+                            node.get_latest_suite_node().indentation \
+                            + self._config.indentation:
+                        self.add_issue(part, 129, "Line with same indent as next logical block")
+                    elif indentation != should_be_indentation:
+                        if not self._check_tabs_spaces(spacing) and part.value != '\n':
+                            if value in '])}':
+                                if node.type == IndentationTypes.VERTICAL_BRACKET:
+                                    self.add_issue(part, 124, "Closing bracket does not match visual indentation")
+                                else:
+                                    self.add_issue(part, 123, "Closing bracket does not match indentation of opening bracket's line")
+                            else:
+                                if len(indentation) < len(should_be_indentation):
+                                    if node.type == IndentationTypes.VERTICAL_BRACKET:
+                                        self.add_issue(part, 128, 'Continuation line under-indented for visual indent')
+                                    elif node.type == IndentationTypes.BACKSLASH:
+                                        self.add_issue(part, 122, 'Continuation line missing indentation or outdented')
+                                    elif node.type == IndentationTypes.IMPLICIT:
+                                        self.add_issue(part, 135, 'xxx')
+                                    else:
+                                        self.add_issue(part, 121, 'Continuation line under-indented for hanging indent')
+                                else:
+                                    if node.type == IndentationTypes.VERTICAL_BRACKET:
+                                        self.add_issue(part, 127, 'Continuation line over-indented for visual indent')
+                                    elif node.type == IndentationTypes.IMPLICIT:
+                                        self.add_issue(part, 136, 'xxx')
+                                    else:
+                                        self.add_issue(part, 126, 'Continuation line over-indented for hanging indent')
+        else:
+            self._check_spacing(part, spacing)
+
+        self._check_line_length(part, spacing)
+        # -------------------------------
+        # Finalizing. Updating the state.
+        # -------------------------------
+        if value and value in '()[]{}' and type_ != 'error_leaf' \
+                and part.parent.type != 'error_node':
+            if value in _OPENING_BRACKETS:
+                self._indentation_tos = BracketNode(
+                    self._config, part,
+                    parent=self._indentation_tos,
+                    in_suite_introducer=self._in_suite_introducer
+                )
+            else:
+                assert node.type != IndentationTypes.IMPLICIT
+                self._indentation_tos = self._indentation_tos.parent
+        elif value in ('=', ':') and self._implicit_indentation_possible \
+                and part.parent.type in _IMPLICIT_INDENTATION_TYPES:
+            indentation = node.indentation
+            self._indentation_tos = ImplicitNode(
+                self._config, part, parent=self._indentation_tos
+            )
+
+        self._on_newline = type_ in ('newline', 'backslash', 'bom')
+
+        self._previous_part = part
+        self._previous_spacing = spacing
+
+    def _check_line_length(self, part, spacing):
+        if part.type == 'backslash':
+            last_column = part.start_pos[1] + 1
+        else:
+            last_column = part.end_pos[1]
+        if last_column > self._config.max_characters \
+                and spacing.start_pos[1] <= self._config.max_characters:
+            # Special case for long URLs in multi-line docstrings or comments,
+            # but still report the error when the first 72 chars are whitespace.
+            report = True
+            if part.type == 'comment':
+                splitted = part.value[1:].split()
+                if len(splitted) == 1 \
+                        and (part.end_pos[1] - len(splitted[0])) < 72:
+                    report = False
+            if report:
+                self.add_issue(
+                    part,
+                    501,
+                    'Line too long (%s > %s characters)' %
+                    (last_column, self._config.max_characters),
+                )
+
+    def _check_spacing(self, part, spacing):
+        def add_if_spaces(*args):
+            if spaces:
+                return self.add_issue(*args)
+
+        def add_not_spaces(*args):
+            if not spaces:
+                return self.add_issue(*args)
+
+        spaces = spacing.value
+        prev = self._previous_part
+        if prev is not None and prev.type == 'error_leaf' or part.type == 'error_leaf':
+            return
+
+        type_ = part.type
+        if '\t' in spaces:
+            self.add_issue(spacing, 223, 'Used tab to separate tokens')
+        elif type_ == 'comment':
+            if len(spaces) < self._config.spaces_before_comment:
+                self.add_issue(spacing, 261, 'At least two spaces before inline comment')
+        elif type_ == 'newline':
+            add_if_spaces(spacing, 291, 'Trailing whitespace')
+        elif len(spaces) > 1:
+            self.add_issue(spacing, 221, 'Multiple spaces used')
+        else:
+            if prev in _OPENING_BRACKETS:
+                message = "Whitespace after '%s'" % part.value
+                add_if_spaces(spacing, 201, message)
+            elif part in _CLOSING_BRACKETS:
+                message = "Whitespace before '%s'" % part.value
+                add_if_spaces(spacing, 202, message)
+            elif part in (',', ';') or part == ':' \
+                    and part.parent.type not in _POSSIBLE_SLICE_PARENTS:
+                message = "Whitespace before '%s'" % part.value
+                add_if_spaces(spacing, 203, message)
+            elif prev == ':' and prev.parent.type in _POSSIBLE_SLICE_PARENTS:
+                pass # TODO
+            elif prev in (',', ';', ':'):
+                add_not_spaces(spacing, 231, "missing whitespace after '%s'" % prev.value)
+            elif part == ':':  # Is a subscript
+                # TODO
+                pass
+            elif part in ('*', '**') and part.parent.type not in _NON_STAR_TYPES \
+                    or prev in ('*', '**') \
+                    and prev.parent.type not in _NON_STAR_TYPES:
+                # TODO
+                pass
+            elif prev in _FACTOR and prev.parent.type == 'factor':
+                pass
+            elif prev == '@' and prev.parent.type == 'decorator':
+                pass  # TODO should probably raise an error if there's a space here
+            elif part in _NEEDS_SPACE or prev in _NEEDS_SPACE:
+                if part == '=' and part.parent.type in ('argument', 'param') \
+                        or prev == '=' and prev.parent.type in ('argument', 'param'):
+                    if part == '=':
+                        param = part.parent
+                    else:
+                        param = prev.parent
+                    if param.type == 'param' and param.annotation:
+                        add_not_spaces(spacing, 252, 'Expected spaces around annotation equals')
+                    else:
+                        add_if_spaces(spacing, 251, 'Unexpected spaces around keyword / parameter equals')
+                elif part in _BITWISE_OPERATOR or prev in _BITWISE_OPERATOR:
+                    add_not_spaces(spacing, 227, 'Missing whitespace around bitwise or shift operator')
+                elif part == '%' or prev == '%':
+                    add_not_spaces(spacing, 228, 'Missing whitespace around modulo operator')
+                else:
+                    message_225 = 'Missing whitespace between tokens'
+                    add_not_spaces(spacing, 225, message_225)
+            elif type_ == 'keyword' or prev.type == 'keyword':
+                add_not_spaces(spacing, 275, 'Missing whitespace around keyword')
+            else:
+                prev_spacing = self._previous_spacing
+                if prev in _ALLOW_SPACE and spaces != prev_spacing.value \
+                        and '\n' not in self._previous_leaf.prefix:
+                    message = "Whitespace before operator doesn't match with whitespace after"
+                    self.add_issue(spacing, 229, message)
+
+                if spaces and part not in _ALLOW_SPACE and prev not in _ALLOW_SPACE:
+                    message_225 = 'Missing whitespace between tokens'
+                    #print('xy', spacing)
+                    #self.add_issue(spacing, 225, message_225)
+                    # TODO why only brackets?
+                    if part in _OPENING_BRACKETS:
+                        message = "Whitespace before '%s'" % part.value
+                        add_if_spaces(spacing, 211, message)
+
+    def _analyse_non_prefix(self, leaf):
+        typ = leaf.type
+        if typ == 'name' and leaf.value in ('l', 'O', 'I'):
+            if leaf.is_definition():
+                message = "Do not define %s named 'l', 'O', or 'I' on one line"
+                if leaf.parent.type == 'classdef' and leaf.parent.name == leaf:
+                    self.add_issue(leaf, 742, message % 'classes')
+                elif leaf.parent.type == 'funcdef' and leaf.parent.name == leaf:
+                    self.add_issue(leaf, 743, message % 'functions')
+                else:
+                    self.add_issue(leaf, 741, message % 'variables')
+        elif leaf.value == ':':
+            if isinstance(leaf.parent, (Flow, Scope)) and leaf.parent.type != 'lambdef':
+                next_leaf = leaf.get_next_leaf()
+                if next_leaf.type != 'newline':
+                    if leaf.parent.type == 'funcdef':
+                        self.add_issue(next_leaf, 704, 'Multiple statements on one line (def)')
+                    else:
+                        self.add_issue(next_leaf, 701, 'Multiple statements on one line (colon)')
+        elif leaf.value == ';':
+            if leaf.get_next_leaf().type in ('newline', 'endmarker'):
+                self.add_issue(leaf, 703, 'Statement ends with a semicolon')
+            else:
+                self.add_issue(leaf, 702, 'Multiple statements on one line (semicolon)')
+        elif leaf.value in ('==', '!='):
+            comparison = leaf.parent
+            index = comparison.children.index(leaf)
+            left = comparison.children[index - 1]
+            right = comparison.children[index + 1]
+            for node in left, right:
+                if node.type == 'keyword' or node.type == 'name':
+                    if node.value == 'None':
+                        message = "comparison to None should be 'if cond is None:'"
+                        self.add_issue(leaf, 711, message)
+                        break
+                    elif node.value in ('True', 'False'):
+                        message = "comparison to False/True should be 'if cond is True:' or 'if cond:'"
+                        self.add_issue(leaf, 712, message)
+                        break
+        elif leaf.value in ('in', 'is'):
+            comparison = leaf.parent
+            if comparison.type == 'comparison' and comparison.parent.type == 'not_test':
+                if leaf.value == 'in':
+                    self.add_issue(leaf, 713, "test for membership should be 'not in'")
+                else:
+                    self.add_issue(leaf, 714, "test for object identity should be 'is not'")
+        elif typ == 'string':
+            # Checking multiline strings
+            for i, line in enumerate(leaf.value.splitlines()[1:]):
+                indentation = re.match('[ \t]*', line).group(0)
+                start_pos = leaf.line + i, len(indentation)
+                # TODO check multiline indentation.
+        elif typ == 'endmarker':
+            if self._newline_count >= 2:
+                self.add_issue(leaf, 391, 'Blank line at end of file')
+
+    def add_issue(self, node, code, message):
+        if self._previous_leaf is not None:
+            if search_ancestor(self._previous_leaf, 'error_node') is not None:
+                return
+            if self._previous_leaf.type == 'error_leaf':
+                return
+        if search_ancestor(node, 'error_node') is not None:
+            return
+        if code in (901, 903):
+            # 901 and 903 are raised by the ErrorFinder.
+            super(PEP8Normalizer, self).add_issue(node, code, message)
+        else:
+            # Skip ErrorFinder here, because it has custom behavior.
+            super(ErrorFinder, self).add_issue(node, code, message)
+
+
+class PEP8NormalizerConfig(ErrorFinderConfig):
+    normalizer_class = PEP8Normalizer
+    """
+    Normalizing to PEP8. Not really implemented, yet.
+    """
+    def __init__(self, indentation=' ' * 4, hanging_indentation=None,
+                 max_characters=79, spaces_before_comment=2):
+        self.indentation = indentation
+        if hanging_indentation is None:
+            hanging_indentation = indentation
+        self.hanging_indentation = hanging_indentation
+        self.closing_bracket_hanging_indentation = ''
+        self.break_after_binary = False
+        self.max_characters = max_characters
+        self.spaces_before_comment = spaces_before_comment
+
+
+# TODO this is not yet ready.
+#@PEP8Normalizer.register_rule(type='endmarker')
+class BlankLineAtEnd(Rule):
+    code = 392
+    message = 'Blank line at end of file'
+
+    def is_issue(self, leaf):
+        return self._newline_count >= 2
diff --git a/pythonFiles/parso/python/prefix.py b/pythonFiles/parso/python/prefix.py
new file mode 100644
index 000000000000..b7f1e1bc4db9
--- /dev/null
+++ b/pythonFiles/parso/python/prefix.py
@@ -0,0 +1,97 @@
+import re
+from codecs import BOM_UTF8
+
+from parso.python.tokenize import group
+
+unicode_bom = BOM_UTF8.decode('utf-8')
+
+
+class PrefixPart(object):
+    def __init__(self, leaf, typ, value, spacing='', start_pos=None):
+        assert start_pos is not None
+        self.parent = leaf
+        self.type = typ
+        self.value = value
+        self.spacing = spacing
+        self.start_pos = start_pos
+
+    @property
+    def end_pos(self):
+        if self.value.endswith('\n'):
+            return self.start_pos[0] + 1, 0
+        if self.value == unicode_bom:
+            # The bom doesn't have a length at the start of a Python file.
+ return self.start_pos + return self.start_pos[0], self.start_pos[1] + len(self.value) + + def create_spacing_part(self): + column = self.start_pos[1] - len(self.spacing) + return PrefixPart( + self.parent, 'spacing', self.spacing, + start_pos=(self.start_pos[0], column) + ) + + def __repr__(self): + return '%s(%s, %s, %s)' % ( + self.__class__.__name__, + self.type, + repr(self.value), + self.start_pos + ) + + +_comment = r'#[^\n\r\f]*' +_backslash = r'\\\r?\n' +_newline = r'\r?\n' +_form_feed = r'\f' +_only_spacing = '$' +_spacing = r'[ \t]*' +_bom = unicode_bom + +_regex = group( + _comment, _backslash, _newline, _form_feed, _only_spacing, _bom, + capture=True +) +_regex = re.compile(group(_spacing, capture=True) + _regex) + + +_types = { + '#': 'comment', + '\\': 'backslash', + '\f': 'formfeed', + '\n': 'newline', + '\r': 'newline', + unicode_bom: 'bom' +} + + +def split_prefix(leaf, start_pos): + line, column = start_pos + start = 0 + value = spacing = '' + bom = False + while start != len(leaf.prefix): + match =_regex.match(leaf.prefix, start) + spacing = match.group(1) + value = match.group(2) + if not value: + break + type_ = _types[value[0]] + yield PrefixPart( + leaf, type_, value, spacing, + start_pos=(line, column + start - int(bom) + len(spacing)) + ) + if type_ == 'bom': + bom = True + + start = match.end(0) + if value.endswith('\n'): + line += 1 + column = -start + + if value: + spacing = '' + yield PrefixPart( + leaf, 'spacing', spacing, + start_pos=(line, column + start) + ) diff --git a/pythonFiles/preview/jedi/parser/token.py b/pythonFiles/parso/python/token.py similarity index 56% rename from pythonFiles/preview/jedi/parser/token.py rename to pythonFiles/parso/python/token.py index 0cb846da11a3..fb590a5f28c6 100644 --- a/pythonFiles/preview/jedi/parser/token.py +++ b/pythonFiles/parso/python/token.py @@ -1,34 +1,36 @@ from __future__ import absolute_import - -from jedi._compatibility import is_py3, is_py35 +from itertools import count from token import * +from parso._compatibility import py_version + + +_counter = count(N_TOKENS) +# Never want to see this thing again. +del N_TOKENS -COMMENT = N_TOKENS +COMMENT = next(_counter) tok_name[COMMENT] = 'COMMENT' -N_TOKENS += 1 -NL = N_TOKENS +NL = next(_counter) tok_name[NL] = 'NL' -N_TOKENS += 1 -if is_py3: - BACKQUOTE = N_TOKENS +# Sets the attributes that don't exist in these tok_name versions. +if py_version >= 30: + BACKQUOTE = next(_counter) tok_name[BACKQUOTE] = 'BACKQUOTE' - N_TOKENS += 1 else: - RARROW = N_TOKENS + RARROW = next(_counter) tok_name[RARROW] = 'RARROW' - N_TOKENS += 1 - ELLIPSIS = N_TOKENS + ELLIPSIS = next(_counter) tok_name[ELLIPSIS] = 'ELLIPSIS' - N_TOKENS += 1 -if not is_py35: - ATEQUAL = N_TOKENS +if py_version < 35: + ATEQUAL = next(_counter) tok_name[ATEQUAL] = 'ATEQUAL' - N_TOKENS += 1 +ERROR_DEDENT = next(_counter) +tok_name[ERROR_DEDENT] = 'ERROR_DEDENT' # Map from operator to number (since tokenize doesn't do this) @@ -88,3 +90,15 @@ for line in opmap_raw.splitlines(): op, name = line.split() opmap[op] = globals()[name] + + +def generate_token_id(string): + """ + Uses a token in the grammar (e.g. `'+'` or `'and'`returns the corresponding + ID for it. The strings are part of the grammar file. 
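+
+ Illustrative usage (the numeric IDs come from the running
+ Python's `token` module, so the exact values vary)::
+
+ generate_token_id('+') # == opmap['+']
+ generate_token_id('NAME') # == NAME, via the globals() fallback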
+ """ + try: + return opmap[string] + except KeyError: + pass + return globals()[string] diff --git a/pythonFiles/parso/python/tokenize.py b/pythonFiles/parso/python/tokenize.py new file mode 100644 index 000000000000..ecd2437f5ebb --- /dev/null +++ b/pythonFiles/parso/python/tokenize.py @@ -0,0 +1,420 @@ +# -*- coding: utf-8 -*- +""" +This tokenizer has been copied from the ``tokenize.py`` standard library +tokenizer. The reason was simple: The standard library tokenizer fails +if the indentation is not right. To make it possible to do error recovery the + tokenizer needed to be rewritten. + +Basically this is a stripped down version of the standard library module, so +you can read the documentation there. Additionally we included some speed and +memory optimizations here. +""" +from __future__ import absolute_import + +import sys +import string +import re +from collections import namedtuple +import itertools as _itertools +from codecs import BOM_UTF8 + +from parso.python.token import (tok_name, ENDMARKER, STRING, NUMBER, opmap, + NAME, ERRORTOKEN, NEWLINE, INDENT, DEDENT, + ERROR_DEDENT) +from parso._compatibility import py_version +from parso.utils import split_lines + + +TokenCollection = namedtuple( + 'TokenCollection', + 'pseudo_token single_quoted triple_quoted endpats always_break_tokens', +) + +BOM_UTF8_STRING = BOM_UTF8.decode('utf-8') + +_token_collection_cache = {} + +if py_version >= 30: + # Python 3 has str.isidentifier() to check if a char is a valid identifier + is_identifier = str.isidentifier +else: + namechars = string.ascii_letters + '_' + is_identifier = lambda s: s in namechars + + +def group(*choices, **kwargs): + capture = kwargs.pop('capture', False) # Python 2, arrghhhhh :( + assert not kwargs + + start = '(' + if not capture: + start += '?:' + return start + '|'.join(choices) + ')' + + +def any(*choices): + return group(*choices) + '*' + + +def maybe(*choices): + return group(*choices) + '?' + + +# Return the empty string, plus all of the valid string prefixes. +def _all_string_prefixes(version_info): + def different_case_versions(prefix): + for s in _itertools.product(*[(c, c.upper()) for c in prefix]): + yield ''.join(s) + # The valid string prefixes. Only contain the lower case versions, + # and don't contain any permuations (include 'fr', but not + # 'rf'). The various permutations will be generated. + _valid_string_prefixes = ['b', 'r', 'u'] + if version_info >= (3, 0): + _valid_string_prefixes.append('br') + + if version_info >= (3, 6): + _valid_string_prefixes += ['f', 'fr'] + + # if we add binary f-strings, add: ['fb', 'fbr'] + result = set(['']) + for prefix in _valid_string_prefixes: + for t in _itertools.permutations(prefix): + # create a list with upper and lower versions of each + # character + result.update(different_case_versions(t)) + if version_info <= (2, 7): + # In Python 2 the order cannot just be random. + result.update(different_case_versions('ur')) + result.update(different_case_versions('br')) + return result + + +def _compile(expr): + return re.compile(expr, re.UNICODE) + + +def _get_token_collection(version_info): + try: + return _token_collection_cache[tuple(version_info)] + except KeyError: + _token_collection_cache[tuple(version_info)] = result = \ + _create_token_collection(version_info) + return result + + +def _create_token_collection(version_info): + # Note: we use unicode matching for names ("\w") but ascii matching for + # number literals. 
+ Whitespace = r'[ \f\t]*' + Comment = r'#[^\r\n]*' + Name = r'\w+' + + if version_info >= (3, 6): + Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+' + Binnumber = r'0[bB](?:_?[01])+' + Octnumber = r'0[oO](?:_?[0-7])+' + Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)' + Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) + Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*' + Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?', + r'\.[0-9](?:_?[0-9])*') + maybe(Exponent) + Expfloat = r'[0-9](?:_?[0-9])*' + Exponent + Floatnumber = group(Pointfloat, Expfloat) + Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]') + else: + Hexnumber = r'0[xX][0-9a-fA-F]+' + Binnumber = r'0[bB][01]+' + if version_info >= (3, 0): + Octnumber = r'0[oO][0-7]+' + else: + Octnumber = '0[oO]?[0-7]+' + Decnumber = r'(?:0+|[1-9][0-9]*)' + Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) + Exponent = r'[eE][-+]?[0-9]+' + Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent) + Expfloat = r'[0-9]+' + Exponent + Floatnumber = group(Pointfloat, Expfloat) + Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]') + Number = group(Imagnumber, Floatnumber, Intnumber) + + # Note that since _all_string_prefixes includes the empty string, + # StringPrefix can be the empty string (making it optional). + possible_prefixes = _all_string_prefixes(version_info) + StringPrefix = group(*possible_prefixes) + + # Tail end of ' string. + Single = r"[^'\\]*(?:\\.[^'\\]*)*'" + # Tail end of " string. + Double = r'[^"\\]*(?:\\.[^"\\]*)*"' + # Tail end of ''' string. + Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" + # Tail end of """ string. + Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' + Triple = group(StringPrefix + "'''", StringPrefix + '"""') + + # Because of leftmost-then-longest match semantics, be sure to put the + # longest operators first (e.g., if = came before ==, == would get + # recognized as two instances of =). + Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", + r"//=?", r"->", + r"[+\-*/%&@`|^=<>]=?", + r"~") + + Bracket = '[][(){}]' + + special_args = [r'\r?\n', r'[:;.,@]'] + if version_info >= (3, 0): + special_args.insert(0, r'\.\.\.') + Special = group(*special_args) + + Funny = group(Operator, Bracket, Special) + + # First (or only) line of ' or " string. + ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + + group("'", r'\\\r?\n'), + StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + + group('"', r'\\\r?\n')) + PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple) + PseudoToken = group(Whitespace, capture=True) + \ + group(PseudoExtras, Number, Funny, ContStr, Name, capture=True) + + # For a given string prefix plus quotes, endpats maps it to a regex + # to match the remainder of that string. _prefix can be empty, for + # a normal single or triple quoted string (with no prefix). + endpats = {} + for _prefix in possible_prefixes: + endpats[_prefix + "'"] = _compile(Single) + endpats[_prefix + '"'] = _compile(Double) + endpats[_prefix + "'''"] = _compile(Single3) + endpats[_prefix + '"""'] = _compile(Double3) + + # A set of all of the single and triple quoted string prefixes, + # including the opening quotes. 
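+ # With the empty prefix plus 'b', 'r', 'u' and friends, this
+ # produces entries such as '"', "'", 'b"' and "R'''".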
+ single_quoted = set() + triple_quoted = set() + for t in possible_prefixes: + for p in (t + '"', t + "'"): + single_quoted.add(p) + for p in (t + '"""', t + "'''"): + triple_quoted.add(p) + + ALWAYS_BREAK_TOKENS = (';', 'import', 'class', 'def', 'try', 'except', + 'finally', 'while', 'with', 'return') + pseudo_token_compiled = _compile(PseudoToken) + return TokenCollection( + pseudo_token_compiled, single_quoted, triple_quoted, endpats, + ALWAYS_BREAK_TOKENS + ) + + +class Token(namedtuple('Token', ['type', 'string', 'start_pos', 'prefix'])): + @property + def end_pos(self): + lines = split_lines(self.string) + if len(lines) > 1: + return self.start_pos[0] + len(lines) - 1, 0 + else: + return self.start_pos[0], self.start_pos[1] + len(self.string) + + +class PythonToken(Token): + def _get_type_name(self, exact=True): + return tok_name[self.type] + + def __repr__(self): + return ('TokenInfo(type=%s, string=%r, start=%r, prefix=%r)' % + self._replace(type=self._get_type_name())) + + +def tokenize(code, version_info, start_pos=(1, 0)): + """Generate tokens from a the source code (string).""" + lines = split_lines(code, keepends=True) + return tokenize_lines(lines, version_info, start_pos=start_pos) + + +def tokenize_lines(lines, version_info, start_pos=(1, 0)): + """ + A heavily modified Python standard library tokenizer. + + Additionally to the default information, yields also the prefix of each + token. This idea comes from lib2to3. The prefix contains all information + that is irrelevant for the parser like newlines in parentheses or comments. + """ + pseudo_token, single_quoted, triple_quoted, endpats, always_break_tokens, = \ + _get_token_collection(version_info) + paren_level = 0 # count parentheses + indents = [0] + max = 0 + numchars = '0123456789' + contstr = '' + contline = None + # We start with a newline. This makes indent at the first position + # possible. It's not valid Python, but still better than an INDENT in the + # second line (and not in the first). This makes quite a few things in + # Jedi's fast parser possible. + new_line = True + prefix = '' # Should never be required, but here for safety + additional_prefix = '' + first = True + lnum = start_pos[0] - 1 + for line in lines: # loop over lines in stream + lnum += 1 + pos = 0 + max = len(line) + if first: + if line.startswith(BOM_UTF8_STRING): + additional_prefix = BOM_UTF8_STRING + line = line[1:] + max = len(line) + + # Fake that the part before was already parsed. + line = '^' * start_pos[1] + line + pos = start_pos[1] + max += start_pos[1] + + first = False + + if contstr: # continued string + endmatch = endprog.match(line) + if endmatch: + pos = endmatch.end(0) + yield PythonToken(STRING, contstr + line[:pos], contstr_start, prefix) + contstr = '' + contline = None + else: + contstr = contstr + line + contline = contline + line + continue + + while pos < max: + pseudomatch = pseudo_token.match(line, pos) + if not pseudomatch: # scan for tokens + txt = line[pos:] + if txt.endswith('\n'): + new_line = True + yield PythonToken(ERRORTOKEN, txt, (lnum, pos), additional_prefix) + additional_prefix = '' + break + + prefix = additional_prefix + pseudomatch.group(1) + additional_prefix = '' + start, pos = pseudomatch.span(2) + spos = (lnum, start) + token = pseudomatch.group(2) + if token == '': + assert prefix + additional_prefix = prefix + # This means that we have a line with whitespace/comments at + # the end, which just results in an endmarker. 
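+ # The collected prefix is carried over and attached to the next
+ # token that is produced, or to the final ENDMARKER, so no input
+ # text is ever lost.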
+ break + initial = token[0] + + if new_line and initial not in '\r\n#': + new_line = False + if paren_level == 0: + i = 0 + while line[i] == '\f': + i += 1 + start -= 1 + if start > indents[-1]: + yield PythonToken(INDENT, '', spos, '') + indents.append(start) + while start < indents[-1]: + if start > indents[-2]: + yield PythonToken(ERROR_DEDENT, '', (lnum, 0), '') + break + yield PythonToken(DEDENT, '', spos, '') + indents.pop() + + if (initial in numchars or # ordinary number + (initial == '.' and token != '.' and token != '...')): + yield PythonToken(NUMBER, token, spos, prefix) + elif initial in '\r\n': + if not new_line and paren_level == 0: + yield PythonToken(NEWLINE, token, spos, prefix) + else: + additional_prefix = prefix + token + new_line = True + elif initial == '#': # Comments + assert not token.endswith("\n") + additional_prefix = prefix + token + elif token in triple_quoted: + endprog = endpats[token] + endmatch = endprog.match(line, pos) + if endmatch: # all on one line + pos = endmatch.end(0) + token = line[start:pos] + yield PythonToken(STRING, token, spos, prefix) + else: + contstr_start = (lnum, start) # multiple lines + contstr = line[start:] + contline = line + break + elif initial in single_quoted or \ + token[:2] in single_quoted or \ + token[:3] in single_quoted: + if token[-1] == '\n': # continued string + contstr_start = lnum, start + endprog = (endpats.get(initial) or endpats.get(token[1]) + or endpats.get(token[2])) + contstr = line[start:] + contline = line + break + else: # ordinary string + yield PythonToken(STRING, token, spos, prefix) + elif is_identifier(initial): # ordinary name + if token in always_break_tokens: + paren_level = 0 + while True: + indent = indents.pop() + if indent > start: + yield PythonToken(DEDENT, '', spos, '') + else: + indents.append(indent) + break + yield PythonToken(NAME, token, spos, prefix) + elif initial == '\\' and line[start:] in ('\\\n', '\\\r\n'): # continued stmt + additional_prefix += prefix + line[start:] + break + else: + if token in '([{': + paren_level += 1 + elif token in ')]}': + paren_level -= 1 + + try: + # This check is needed in any case to check if it's a valid + # operator or just some random unicode character. + typ = opmap[token] + except KeyError: + typ = ERRORTOKEN + yield PythonToken(typ, token, spos, prefix) + + if contstr: + yield PythonToken(ERRORTOKEN, contstr, contstr_start, prefix) + if contstr.endswith('\n'): + new_line = True + + end_pos = lnum, max + # As the last position we just take the maximally possible position. We + # remove -1 for the last new line. + for indent in indents[1:]: + yield PythonToken(DEDENT, '', end_pos, '') + yield PythonToken(ENDMARKER, '', end_pos, additional_prefix) + + +if __name__ == "__main__": + if len(sys.argv) >= 2: + path = sys.argv[1] + with open(path) as f: + code = f.read() + else: + code = sys.stdin.read() + + from parso.utils import python_bytes_to_unicode, parse_version_string + + if isinstance(code, bytes): + code = python_bytes_to_unicode(code) + + for token in tokenize(code, parse_version_string()): + print(token) diff --git a/pythonFiles/parso/python/tree.py b/pythonFiles/parso/python/tree.py new file mode 100644 index 000000000000..eb977800a607 --- /dev/null +++ b/pythonFiles/parso/python/tree.py @@ -0,0 +1,1165 @@ +""" +This is the syntax tree for Python syntaxes (2 & 3). The classes represent +syntax elements like functions and imports. + +All of the nodes can be traced back to the `Python grammar file +`_. 
If you want to know how +a tree is structured, just analyse that file (for each Python version it's a +bit different). + +There's a lot of logic here that makes it easier for Jedi (and other libraries) +to deal with a Python syntax tree. + +By using :py:meth:`parso.tree.NodeOrLeaf.get_code` on a module, you can get +back the 1-to-1 representation of the input given to the parser. This is +important if you want to refactor a parser tree. + +>>> from parso import parse +>>> parser = parse('import os') +>>> module = parser.get_root_node() +>>> module + + +Any subclasses of :class:`Scope`, including :class:`Module` has an attribute +:attr:`iter_imports `: + +>>> list(module.iter_imports()) +[] + +Changes to the Python Grammar +----------------------------- + +A few things have changed when looking at Python grammar files: + +- :class:`Param` does not exist in Python grammar files. It is essentially a + part of a ``parameters`` node. |parso| splits it up to make it easier to + analyse parameters. However this just makes it easier to deal with the syntax + tree, it doesn't actually change the valid syntax. +- A few nodes like `lambdef` and `lambdef_nocond` have been merged in the + syntax tree to make it easier to do deal with them. + +Parser Tree Classes +------------------- +""" + +import re + +from parso._compatibility import utf8_repr, unicode +from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, \ + search_ancestor +from parso.python.prefix import split_prefix + +_FLOW_CONTAINERS = set(['if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', + 'with_stmt', 'async_stmt', 'suite']) +_RETURN_STMT_CONTAINERS = set(['suite', 'simple_stmt']) | _FLOW_CONTAINERS +_FUNC_CONTAINERS = set(['suite', 'simple_stmt', 'decorated']) | _FLOW_CONTAINERS +_GET_DEFINITION_TYPES = set([ + 'expr_stmt', 'comp_for', 'with_stmt', 'for_stmt', 'import_name', + 'import_from', 'param' +]) +_IMPORTS = set(['import_name', 'import_from']) + + + +class DocstringMixin(object): + __slots__ = () + + def get_doc_node(self): + """ + Returns the string leaf of a docstring. e.g. ``r'''foo'''``. + """ + if self.type == 'file_input': + node = self.children[0] + elif self.type in ('funcdef', 'classdef'): + node = self.children[self.children.index(':') + 1] + if node.type == 'suite': # Normally a suite + node = node.children[1] # -> NEWLINE stmt + else: # ExprStmt + simple_stmt = self.parent + c = simple_stmt.parent.children + index = c.index(simple_stmt) + if not index: + return None + node = c[index - 1] + + if node.type == 'simple_stmt': + node = node.children[0] + if node.type == 'string': + return node + return None + + +class PythonMixin(object): + """ + Some Python specific utitilies. + """ + __slots__ = () + + def get_name_of_position(self, position): + """ + Given a (line, column) tuple, returns a :py:class:`Name` or ``None`` if + there is no name at that position. + """ + for c in self.children: + if isinstance(c, Leaf): + if c.type == 'name' and c.start_pos <= position <= c.end_pos: + return c + else: + result = c.get_name_of_position(position) + if result is not None: + return result + return None + + +class PythonLeaf(PythonMixin, Leaf): + __slots__ = () + + def _split_prefix(self): + return split_prefix(self, self.get_start_pos_of_prefix()) + + def get_start_pos_of_prefix(self): + """ + Basically calls :py:meth:`parso.tree.NodeOrLeaf.get_start_pos_of_prefix`. + """ + # TODO it is really ugly that we have to override it. Maybe change + # indent error leafs somehow? No idea how, though. 
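+ # Until then: skip error leaves that were produced for wrong
+ # indentation, because their positions would make the computed
+ # prefix start position wrong.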
+ previous_leaf = self.get_previous_leaf() + if previous_leaf is not None and previous_leaf.type == 'error_leaf' \ + and previous_leaf.original_type in ('indent', 'error_dedent'): + previous_leaf = previous_leaf.get_previous_leaf() + + if previous_leaf is None: + return self.line - self.prefix.count('\n'), 0 # It's the first leaf. + return previous_leaf.end_pos + + + +class _LeafWithoutNewlines(PythonLeaf): + """ + Simply here to optimize performance. + """ + __slots__ = () + + @property + def end_pos(self): + return self.line, self.column + len(self.value) + + +# Python base classes +class PythonBaseNode(PythonMixin, BaseNode): + __slots__ = () + + +class PythonNode(PythonMixin, Node): + __slots__ = () + + +class PythonErrorNode(PythonMixin, ErrorNode): + __slots__ = () + + +class PythonErrorLeaf(ErrorLeaf, PythonLeaf): + __slots__ = () + + +class EndMarker(_LeafWithoutNewlines): + __slots__ = () + type = 'endmarker' + + +class Newline(PythonLeaf): + """Contains NEWLINE and ENDMARKER tokens.""" + __slots__ = () + type = 'newline' + + @utf8_repr + def __repr__(self): + return "<%s: %s>" % (type(self).__name__, repr(self.value)) + + +class Name(_LeafWithoutNewlines): + """ + A string. Sometimes it is important to know if the string belongs to a name + or not. + """ + type = 'name' + __slots__ = () + + def __repr__(self): + return "<%s: %s@%s,%s>" % (type(self).__name__, self.value, + self.line, self.column) + + def is_definition(self): + """ + Returns True if the name is being defined. + """ + return self.get_definition() is not None + + def get_definition(self, import_name_always=False): + """ + Returns None if there's on definition for a name. + + :param import_name_alway: Specifies if an import name is always a + definition. Normally foo in `from foo import bar` is not a + definition. + """ + node = self.parent + type_ = node.type + if type_ in ('power', 'atom_expr'): + # In `self.x = 3` self is not a definition, but x is. + return None + + if type_ in ('funcdef', 'classdef'): + if self == node.name: + return node + return None + + if type_ == 'except_clause': + # TODO in Python 2 this doesn't work correctly. See grammar file. + # I think we'll just let it be. Python 2 will be gone in a few + # years. + if self.get_previous_sibling() == 'as': + return node.parent # The try_stmt. + return None + + while node is not None: + if node.type == 'suite': + return None + if node.type in _GET_DEFINITION_TYPES: + if self in node.get_defined_names(): + return node + if import_name_always and node.type in _IMPORTS: + return node + return None + node = node.parent + return None + + + +class Literal(PythonLeaf): + __slots__ = () + + +class Number(Literal): + type = 'number' + __slots__ = () + + +class String(Literal): + type = 'string' + __slots__ = () + + @property + def string_prefix(self): + return re.match('\w*(?=[\'"])', self.value).group(0) + + def _get_payload(self): + match = re.search( + r'''('{3}|"{3}|'|")(.*)$''', + self.value, + flags=re.DOTALL + ) + return match.group(2)[:-len(match.group(1))] + + +class _StringComparisonMixin(object): + def __eq__(self, other): + """ + Make comparisons with strings easy. + Improves the readability of the parser. 
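+
+ A sketch of the intended use::
+
+ if leaf == ':': # equivalent to: leaf.value == ':'
+ ...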
+ """ + if isinstance(other, (str, unicode)): + return self.value == other + + return self is other + + def __ne__(self, other): + """Python 2 compatibility.""" + return not self.__eq__(other) + + def __hash__(self): + return hash(self.value) + + +class Operator(_LeafWithoutNewlines, _StringComparisonMixin): + type = 'operator' + __slots__ = () + + +class Keyword(_LeafWithoutNewlines, _StringComparisonMixin): + type = 'keyword' + __slots__ = () + + +class Scope(PythonBaseNode, DocstringMixin): + """ + Super class for the parser tree, which represents the state of a python + text file. + A Scope is either a function, class or lambda. + """ + __slots__ = () + + def __init__(self, children): + super(Scope, self).__init__(children) + + def iter_funcdefs(self): + """ + Returns a generator of `funcdef` nodes. + """ + return self._search_in_scope('funcdef') + + def iter_classdefs(self): + """ + Returns a generator of `classdef` nodes. + """ + return self._search_in_scope('classdef') + + def iter_imports(self): + """ + Returns a generator of `import_name` and `import_from` nodes. + """ + return self._search_in_scope('import_name', 'import_from') + + def _search_in_scope(self, *names): + def scan(children): + for element in children: + if element.type in names: + yield element + if element.type in _FUNC_CONTAINERS: + for e in scan(element.children): + yield e + + return scan(self.children) + + def get_suite(self): + """ + Returns the part that is executed by the function. + """ + return self.children[-1] + + def __repr__(self): + try: + name = self.name.value + except AttributeError: + name = '' + + return "<%s: %s@%s-%s>" % (type(self).__name__, name, + self.start_pos[0], self.end_pos[0]) + + +class Module(Scope): + """ + The top scope, which is always a module. + Depending on the underlying parser this may be a full module or just a part + of a module. + """ + __slots__ = ('_used_names',) + type = 'file_input' + + def __init__(self, children): + super(Module, self).__init__(children) + self._used_names = None + + def _iter_future_import_names(self): + """ + :return: A list of future import names. + :rtype: list of str + """ + # In Python it's not allowed to use future imports after the first + # actual (non-future) statement. However this is not a linter here, + # just return all future imports. If people want to scan for issues + # they should use the API. + for imp in self.iter_imports(): + if imp.type == 'import_from' and imp.level == 0: + for path in imp.get_paths(): + names = [name.value for name in path] + if len(names) == 2 and names[0] == '__future__': + yield names[1] + + def _has_explicit_absolute_import(self): + """ + Checks if imports in this module are explicitly absolute, i.e. there + is a ``__future__`` import. + Currently not public, might be in the future. + :return bool: + """ + for name in self._iter_future_import_names(): + if name == 'absolute_import': + return True + return False + + def get_used_names(self): + """ + Returns all the :class:`Name` leafs that exist in this module. This + includes both definitions and references of names. + """ + if self._used_names is None: + # Don't directly use self._used_names to eliminate a lookup. 
+ dct = {} + + def recurse(node): + try: + children = node.children + except AttributeError: + if node.type == 'name': + arr = dct.setdefault(node.value, []) + arr.append(node) + else: + for child in children: + recurse(child) + + recurse(self) + self._used_names = dct + return self._used_names + + +class Decorator(PythonBaseNode): + type = 'decorator' + __slots__ = () + + +class ClassOrFunc(Scope): + __slots__ = () + + @property + def name(self): + """ + Returns the `Name` leaf that defines the function or class name. + """ + return self.children[1] + + def get_decorators(self): + """ + :rtype: list of :class:`Decorator` + """ + decorated = self.parent + if decorated.type == 'decorated': + if decorated.children[0].type == 'decorators': + return decorated.children[0].children + else: + return decorated.children[:1] + else: + return [] + + +class Class(ClassOrFunc): + """ + Used to store the parsed contents of a python class. + """ + type = 'classdef' + __slots__ = () + + def __init__(self, children): + super(Class, self).__init__(children) + + def get_super_arglist(self): + """ + Returns the `arglist` node that defines the super classes. It returns + None if there are no arguments. + """ + if self.children[2] != '(': # Has no parentheses + return None + else: + if self.children[3] == ')': # Empty parentheses + return None + else: + return self.children[3] + + +def _create_params(parent, argslist_list): + """ + `argslist_list` is a list that can contain an argslist as a first item, but + most not. It's basically the items between the parameter brackets (which is + at most one item). + This function modifies the parser structure. It generates `Param` objects + from the normal ast. Those param objects do not exist in a normal ast, but + make the evaluation of the ast tree so much easier. + You could also say that this function replaces the argslist node with a + list of Param objects. + """ + def check_python2_nested_param(node): + """ + Python 2 allows params to look like ``def x(a, (b, c))``, which is + basically a way of unpacking tuples in params. Python 3 has ditched + this behavior. Jedi currently just ignores those constructs. + """ + return node.type == 'fpdef' and node.children[0] == '(' + + try: + first = argslist_list[0] + except IndexError: + return [] + + if first.type in ('name', 'fpdef'): + if check_python2_nested_param(first): + return [first] + else: + return [Param([first], parent)] + elif first == '*': + return [first] + else: # argslist is a `typedargslist` or a `varargslist`. + if first.type == 'tfpdef': + children = [first] + else: + children = first.children + new_children = [] + start = 0 + # Start with offset 1, because the end is higher. + for end, child in enumerate(children + [None], 1): + if child is None or child == ',': + param_children = children[start:end] + if param_children: # Could as well be comma and then end. + if param_children[0] == '*' and param_children[1] == ',' \ + or check_python2_nested_param(param_children[0]): + for p in param_children: + p.parent = parent + new_children += param_children + else: + new_children.append(Param(param_children, parent)) + start = end + return new_children + + +class Function(ClassOrFunc): + """ + Used to store the parsed contents of a python function. + + Children:: + + 0. + 1. + 2. parameter list (including open-paren and close-paren s) + 3. or 5. + 4. or 6. Node() representing function body + 3. -> (if annotation is also present) + 4. 
annotation (if present) + """ + type = 'funcdef' + + def __init__(self, children): + super(Function, self).__init__(children) + parameters = self.children[2] # After `def foo` + parameters.children[1:-1] = _create_params(parameters, parameters.children[1:-1]) + + def _get_param_nodes(self): + return self.children[2].children + + def get_params(self): + """ + Returns a list of `Param()`. + """ + return [p for p in self._get_param_nodes() if p.type == 'param'] + + @property + def name(self): + return self.children[1] # First token after `def` + + def iter_yield_exprs(self): + """ + Returns a generator of `yield_expr`. + """ + def scan(children): + for element in children: + if element.type in ('classdef', 'funcdef', 'lambdef'): + continue + + try: + nested_children = element.children + except AttributeError: + if element.value == 'yield': + if element.parent.type == 'yield_expr': + yield element.parent + else: + yield element + else: + for result in scan(nested_children): + yield result + + return scan(self.children) + + def iter_return_stmts(self): + """ + Returns a generator of `return_stmt`. + """ + def scan(children): + for element in children: + if element.type == 'return_stmt' \ + or element.type == 'keyword' and element.value == 'return': + yield element + if element.type in _RETURN_STMT_CONTAINERS: + for e in scan(element.children): + yield e + + return scan(self.children) + + def iter_raise_stmts(self): + """ + Returns a generator of `raise_stmt`. Includes raise statements inside try-except blocks + """ + def scan(children): + for element in children: + if element.type == 'raise_stmt' \ + or element.type == 'keyword' and element.value == 'raise': + yield element + if element.type in _RETURN_STMT_CONTAINERS: + for e in scan(element.children): + yield e + + return scan(self.children) + + def is_generator(self): + """ + :return bool: Checks if a function is a generator or not. + """ + return next(self.iter_yield_exprs(), None) is not None + + @property + def annotation(self): + """ + Returns the test node after `->` or `None` if there is no annotation. + """ + try: + if self.children[3] == "->": + return self.children[4] + assert self.children[3] == ":" + return None + except IndexError: + return None + +class Lambda(Function): + """ + Lambdas are basically trimmed functions, so give it the same interface. + + Children:: + + 0. + *. for each argument x + -2. + -1. Node() representing body + """ + type = 'lambdef' + __slots__ = () + + def __init__(self, children): + # We don't want to call the Function constructor, call its parent. + super(Function, self).__init__(children) + # Everything between `lambda` and the `:` operator is a parameter. + self.children[1:-2] = _create_params(self, self.children[1:-2]) + + @property + def name(self): + """ + Raises an AttributeError. Lambdas don't have a defined name. + """ + raise AttributeError("lambda is not named.") + + def _get_param_nodes(self): + return self.children[1:-2] + + @property + def annotation(self): + """ + Returns `None`, lambdas don't have annotations. + """ + return None + + def __repr__(self): + return "<%s@%s>" % (self.__class__.__name__, self.start_pos) + + +class Flow(PythonBaseNode): + __slots__ = () + + +class IfStmt(Flow): + type = 'if_stmt' + __slots__ = () + + def get_test_nodes(self): + """ + E.g. 
returns all the `test` nodes that are named as x, below: + + if x: + pass + elif x: + pass + """ + for i, c in enumerate(self.children): + if c in ('elif', 'if'): + yield self.children[i + 1] + + def get_corresponding_test_node(self, node): + """ + Searches for the branch in which the node is and returns the + corresponding test node (see function above). However if the node is in + the test node itself and not in the suite return None. + """ + start_pos = node.start_pos + for check_node in reversed(list(self.get_test_nodes())): + if check_node.start_pos < start_pos: + if start_pos < check_node.end_pos: + return None + # In this case the node is within the check_node itself, + # not in the suite + else: + return check_node + + def is_node_after_else(self, node): + """ + Checks if a node is defined after `else`. + """ + for c in self.children: + if c == 'else': + if node.start_pos > c.start_pos: + return True + else: + return False + + +class WhileStmt(Flow): + type = 'while_stmt' + __slots__ = () + + +class ForStmt(Flow): + type = 'for_stmt' + __slots__ = () + + def get_testlist(self): + """ + Returns the input node ``y`` from: ``for x in y:``. + """ + return self.children[3] + + def get_defined_names(self): + return _defined_names(self.children[1]) + + +class TryStmt(Flow): + type = 'try_stmt' + __slots__ = () + + def get_except_clause_tests(self): + """ + Returns the ``test`` nodes found in ``except_clause`` nodes. + Returns ``[None]`` for except clauses without an exception given. + """ + for node in self.children: + if node.type == 'except_clause': + yield node.children[1] + elif node == 'except': + yield None + + +class WithStmt(Flow): + type = 'with_stmt' + __slots__ = () + + def get_defined_names(self): + """ + Returns the a list of `Name` that the with statement defines. The + defined names are set after `as`. + """ + names = [] + for with_item in self.children[1:-2:2]: + # Check with items for 'as' names. + if with_item.type == 'with_item': + names += _defined_names(with_item.children[2]) + return names + + def get_test_node_from_name(self, name): + node = name.parent + if node.type != 'with_item': + raise ValueError('The name is not actually part of a with statement.') + return node.children[0] + + +class Import(PythonBaseNode): + __slots__ = () + + def get_path_for_name(self, name): + """ + The path is the list of names that leads to the searched name. + + :return list of Name: + """ + try: + # The name may be an alias. If it is, just map it back to the name. + name = self._aliases()[name] + except KeyError: + pass + + for path in self.get_paths(): + if name in path: + return path[:path.index(name) + 1] + raise ValueError('Name should be defined in the import itself') + + def is_nested(self): + return False # By default, sub classes may overwrite this behavior + + def is_star_import(self): + return self.children[-1] == '*' + + +class ImportFrom(Import): + type = 'import_from' + __slots__ = () + + def get_defined_names(self): + """ + Returns the a list of `Name` that the import defines. The + defined names are set after `import` or in case an alias - `as` - is + present that name is returned. 
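+
+ Illustrative examples::
+
+ from foo import bar # defines ``bar``
+ from foo import bar as baz # defines ``baz``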
+ """ + return [alias or name for name, alias in self._as_name_tuples()] + + def _aliases(self): + """Mapping from alias to its corresponding name.""" + return dict((alias, name) for name, alias in self._as_name_tuples() + if alias is not None) + + def get_from_names(self): + for n in self.children[1:]: + if n not in ('.', '...'): + break + if n.type == 'dotted_name': # from x.y import + return n.children[::2] + elif n == 'import': # from . import + return [] + else: # from x import + return [n] + + @property + def level(self): + """The level parameter of ``__import__``.""" + level = 0 + for n in self.children[1:]: + if n in ('.', '...'): + level += len(n.value) + else: + break + return level + + def _as_name_tuples(self): + last = self.children[-1] + if last == ')': + last = self.children[-2] + elif last == '*': + return # No names defined directly. + + if last.type == 'import_as_names': + as_names = last.children[::2] + else: + as_names = [last] + for as_name in as_names: + if as_name.type == 'name': + yield as_name, None + else: + yield as_name.children[::2] # yields x, y -> ``x as y`` + + def get_paths(self): + """ + The import paths defined in an import statement. Typically an array + like this: ``[, ]``. + + :return list of list of Name: + """ + dotted = self.get_from_names() + + if self.children[-1] == '*': + return [dotted] + return [dotted + [name] for name, alias in self._as_name_tuples()] + + +class ImportName(Import): + """For ``import_name`` nodes. Covers normal imports without ``from``.""" + type = 'import_name' + __slots__ = () + + def get_defined_names(self): + """ + Returns the a list of `Name` that the import defines. The defined names + is always the first name after `import` or in case an alias - `as` - is + present that name is returned. + """ + return [alias or path[0] for path, alias in self._dotted_as_names()] + + @property + def level(self): + """The level parameter of ``__import__``.""" + return 0 # Obviously 0 for imports without from. + + def get_paths(self): + return [path for path, alias in self._dotted_as_names()] + + def _dotted_as_names(self): + """Generator of (list(path), alias) where alias may be None.""" + dotted_as_names = self.children[1] + if dotted_as_names.type == 'dotted_as_names': + as_names = dotted_as_names.children[::2] + else: + as_names = [dotted_as_names] + + for as_name in as_names: + if as_name.type == 'dotted_as_name': + alias = as_name.children[2] + as_name = as_name.children[0] + else: + alias = None + if as_name.type == 'name': + yield [as_name], alias + else: + # dotted_names + yield as_name.children[::2], alias + + def is_nested(self): + """ + This checks for the special case of nested imports, without aliases and + from statement:: + + import foo.bar + """ + return bool([1 for path, alias in self._dotted_as_names() + if alias is None and len(path) > 1]) + + def _aliases(self): + """ + :return list of Name: Returns all the alias + """ + return dict((alias, path[-1]) for path, alias in self._dotted_as_names() + if alias is not None) + + +class KeywordStatement(PythonBaseNode): + """ + For the following statements: `assert`, `del`, `global`, `nonlocal`, + `raise`, `return`, `yield`, `return`, `yield`. + + `pass`, `continue` and `break` are not in there, because they are just + simple keywords and the parser reduces it to a keyword. + """ + __slots__ = () + + @property + def type(self): + """ + Keyword statements start with the keyword and end with `_stmt`. You can + crosscheck this with the Python grammar. 
+ """ + return '%s_stmt' % self.keyword + + @property + def keyword(self): + return self.children[0].value + + +class AssertStmt(KeywordStatement): + __slots__ = () + + @property + def assertion(self): + return self.children[1] + + +class GlobalStmt(KeywordStatement): + __slots__ = () + + def get_global_names(self): + return self.children[1::2] + + +class ReturnStmt(KeywordStatement): + __slots__ = () + + +class YieldExpr(PythonBaseNode): + type = 'yield_expr' + __slots__ = () + + +def _defined_names(current): + """ + A helper function to find the defined names in statements, for loops and + list comprehensions. + """ + names = [] + if current.type in ('testlist_star_expr', 'testlist_comp', 'exprlist', 'testlist'): + for child in current.children[::2]: + names += _defined_names(child) + elif current.type in ('atom', 'star_expr'): + names += _defined_names(current.children[1]) + elif current.type in ('power', 'atom_expr'): + if current.children[-2] != '**': # Just if there's no operation + trailer = current.children[-1] + if trailer.children[0] == '.': + names.append(trailer.children[1]) + else: + names.append(current) + return names + + +class ExprStmt(PythonBaseNode, DocstringMixin): + type = 'expr_stmt' + __slots__ = () + + def get_defined_names(self): + """ + Returns a list of `Name` defined before the `=` sign. + """ + names = [] + if self.children[1].type == 'annassign': + names = _defined_names(self.children[0]) + return [ + name + for i in range(0, len(self.children) - 2, 2) + if '=' in self.children[i + 1].value + for name in _defined_names(self.children[i]) + ] + names + + def get_rhs(self): + """Returns the right-hand-side of the equals.""" + return self.children[-1] + + def yield_operators(self): + """ + Returns a generator of `+=`, `=`, etc. or None if there is no operation. + """ + first = self.children[1] + if first.type == 'annassign': + if len(first.children) <= 2: + return # No operator is available, it's just PEP 484. + + first = first.children[2] + yield first + + for operator in self.children[3::2]: + yield operator + + +class Param(PythonBaseNode): + """ + It's a helper class that makes business logic with params much easier. The + Python grammar defines no ``param`` node. It defines it in a different way + that is not really suited to working with parameters. + """ + type = 'param' + + def __init__(self, children, parent): + super(Param, self).__init__(children) + self.parent = parent + for child in children: + child.parent = self + + @property + def star_count(self): + """ + Is `0` in case of `foo`, `1` in case of `*foo` or `2` in case of + `**foo`. + """ + first = self.children[0] + if first in ('*', '**'): + return len(first.value) + return 0 + + @property + def default(self): + """ + The default is the test node that appears after the `=`. Is `None` in + case no default is present. + """ + has_comma = self.children[-1] == ',' + try: + if self.children[-2 - int(has_comma)] == '=': + return self.children[-1 - int(has_comma)] + except IndexError: + return None + + @property + def annotation(self): + """ + The default is the test node that appears after `:`. Is `None` in case + no annotation is present. + """ + tfpdef = self._tfpdef() + if tfpdef.type == 'tfpdef': + assert tfpdef.children[1] == ":" + assert len(tfpdef.children) == 3 + annotation = tfpdef.children[2] + return annotation + else: + return None + + def _tfpdef(self): + """ + tfpdef: see e.g. grammar36.txt. 
+ """ + offset = int(self.children[0] in ('*', '**')) + return self.children[offset] + + @property + def name(self): + """ + The `Name` leaf of the param. + """ + if self._tfpdef().type == 'tfpdef': + return self._tfpdef().children[0] + else: + return self._tfpdef() + + def get_defined_names(self): + return [self.name] + + @property + def position_index(self): + """ + Property for the positional index of a paramter. + """ + index = self.parent.children.index(self) + try: + keyword_only_index = self.parent.children.index('*') + if index > keyword_only_index: + # Skip the ` *, ` + index -= 2 + except ValueError: + pass + return index - 1 + + def get_parent_function(self): + """ + Returns the function/lambda of a parameter. + """ + return search_ancestor(self, 'funcdef', 'lambdef') + + def get_code(self, include_prefix=True, include_comma=True): + """ + Like all the other get_code functions, but includes the param + `include_comma`. + + :param include_comma bool: If enabled includes the comma in the string output. + """ + if include_comma: + return super(Param, self).get_code(include_prefix) + + children = self.children + if children[-1] == ',': + children = children[:-1] + return self._get_code_for_children( + children, + include_prefix=include_prefix + ) + + def __repr__(self): + default = '' if self.default is None else '=%s' % self.default.get_code() + return '<%s: %s>' % (type(self).__name__, str(self._tfpdef()) + default) + + +class CompFor(PythonBaseNode): + type = 'comp_for' + __slots__ = () + + def get_defined_names(self): + """ + Returns the a list of `Name` that the comprehension defines. + """ + # allow async for + return _defined_names(self.children[self.children.index('for') + 1]) diff --git a/pythonFiles/parso/tree.py b/pythonFiles/parso/tree.py new file mode 100644 index 000000000000..72a14945b0f6 --- /dev/null +++ b/pythonFiles/parso/tree.py @@ -0,0 +1,364 @@ +from abc import abstractmethod, abstractproperty +from parso._compatibility import utf8_repr, encoding, py_version + + +def search_ancestor(node, *node_types): + """ + Recursively looks at the parents of a node and returns the first found node + that matches node_types. Returns ``None`` if no matching node is found. + + :param node: The ancestors of this node will be checked. + :param node_types: type names that are searched for. + :type node_types: tuple of str + """ + while True: + node = node.parent + if node is None or node.type in node_types: + return node + + +class NodeOrLeaf(object): + """ + The base class for nodes and leaves. + """ + __slots__ = () + type = None + ''' + The type is a string that typically matches the types of the grammar file. + ''' + + def get_root_node(self): + """ + Returns the root node of a parser tree. The returned node doesn't have + a parent node like all the other nodes/leaves. + """ + scope = self + while scope.parent is not None: + scope = scope.parent + return scope + + def get_next_sibling(self): + """ + Returns the node immediately following this node in this parent's + children list. If this node does not have a next sibling, it is None + """ + # Can't use index(); we need to test by identity + for i, child in enumerate(self.parent.children): + if child is self: + try: + return self.parent.children[i + 1] + except IndexError: + return None + + def get_previous_sibling(self): + """ + Returns the node immediately preceding this node in this parent's + children list. If this node does not have a previous sibling, it is + None. + None. 
+ """ + # Can't use index(); we need to test by identity + for i, child in enumerate(self.parent.children): + if child is self: + if i == 0: + return None + return self.parent.children[i - 1] + + def get_previous_leaf(self): + """ + Returns the previous leaf in the parser tree. + Returns `None` if this is the first element in the parser tree. + """ + node = self + while True: + c = node.parent.children + i = c.index(node) + if i == 0: + node = node.parent + if node.parent is None: + return None + else: + node = c[i - 1] + break + + while True: + try: + node = node.children[-1] + except AttributeError: # A Leaf doesn't have children. + return node + + def get_next_leaf(self): + """ + Returns the next leaf in the parser tree. + Returns None if this is the last element in the parser tree. + """ + node = self + while True: + c = node.parent.children + i = c.index(node) + if i == len(c) - 1: + node = node.parent + if node.parent is None: + return None + else: + node = c[i + 1] + break + + while True: + try: + node = node.children[0] + except AttributeError: # A Leaf doesn't have children. + return node + + @abstractproperty + def start_pos(self): + """ + Returns the starting position of the prefix as a tuple, e.g. `(3, 4)`. + + :return tuple of int: (line, column) + """ + + @abstractproperty + def end_pos(self): + """ + Returns the end position of the prefix as a tuple, e.g. `(3, 4)`. + + :return tuple of int: (line, column) + """ + + @abstractmethod + def get_start_pos_of_prefix(self): + """ + Returns the start_pos of the prefix. This means basically it returns + the end_pos of the last prefix. The `get_start_pos_of_prefix()` of the + prefix `+` in `2 + 1` would be `(1, 1)`, while the start_pos is + `(1, 2)`. + + :return tuple of int: (line, column) + """ + + @abstractmethod + def get_first_leaf(self): + """ + Returns the first leaf of a node or itself if this is a leaf. + """ + + @abstractmethod + def get_last_leaf(self): + """ + Returns the last leaf of a node or itself if this is a leaf. + """ + + @abstractmethod + def get_code(self, include_prefix=True): + """ + Returns the code that was input the input for the parser for this node. + + :param include_prefix: Removes the prefix (whitespace and comments) of + e.g. a statement. + """ + + +class Leaf(NodeOrLeaf): + ''' + Leafs are basically tokens with a better API. Leafs exactly know where they + were defined and what text preceeds them. + ''' + __slots__ = ('value', 'parent', 'line', 'column', 'prefix') + + def __init__(self, value, start_pos, prefix=''): + self.value = value + ''' + :py:func:`str` The value of the current token. + ''' + self.start_pos = start_pos + self.prefix = prefix + ''' + :py:func:`str` Typically a mixture of whitespace and comments. Stuff + that is syntactically irrelevant for the syntax tree. + ''' + self.parent = None + ''' + The parent :class:`BaseNode` of this leaf. + ''' + + @property + def start_pos(self): + return self.line, self.column + + @start_pos.setter + def start_pos(self, value): + self.line = value[0] + self.column = value[1] + + def get_start_pos_of_prefix(self): + previous_leaf = self.get_previous_leaf() + if previous_leaf is None: + return self.line - self.prefix.count('\n'), 0 # It's the first leaf. 
+ return previous_leaf.end_pos + + def get_first_leaf(self): + return self + + def get_last_leaf(self): + return self + + def get_code(self, include_prefix=True): + if include_prefix: + return self.prefix + self.value + else: + return self.value + + @property + def end_pos(self): + lines = self.value.split('\n') + end_pos_line = self.line + len(lines) - 1 + # Check for multiline token + if self.line == end_pos_line: + end_pos_column = self.column + len(lines[-1]) + else: + end_pos_column = len(lines[-1]) + return end_pos_line, end_pos_column + + @utf8_repr + def __repr__(self): + value = self.value + if not value: + value = self.type + return "<%s: %s>" % (type(self).__name__, value) + + +class TypedLeaf(Leaf): + __slots__ = ('type',) + def __init__(self, type, value, start_pos, prefix=''): + super(TypedLeaf, self).__init__(value, start_pos, prefix) + self.type = type + + +class BaseNode(NodeOrLeaf): + """ + The super class for all nodes. + A node has children, a type and possibly a parent node. + """ + __slots__ = ('children', 'parent') + type = None + + def __init__(self, children): + for c in children: + c.parent = self + self.children = children + """ + A list of :class:`NodeOrLeaf` child nodes. + """ + self.parent = None + ''' + The parent :class:`BaseNode` of this leaf. + None if this is the root node. + ''' + + @property + def start_pos(self): + return self.children[0].start_pos + + def get_start_pos_of_prefix(self): + return self.children[0].get_start_pos_of_prefix() + + @property + def end_pos(self): + return self.children[-1].end_pos + + def _get_code_for_children(self, children, include_prefix): + if include_prefix: + return "".join(c.get_code() for c in children) + else: + first = children[0].get_code(include_prefix=False) + return first + "".join(c.get_code() for c in children[1:]) + + def get_code(self, include_prefix=True): + return self._get_code_for_children(self.children, include_prefix) + + def get_leaf_for_position(self, position, include_prefixes=False): + """ + Get the :py:class:`parso.tree.Leaf` at ``position`` + + :param tuple position: A position tuple, row, column. Rows start from 1 + :param bool include_prefixes: If ``False``, ``None`` will be returned if ``position`` falls + on whitespace or comments before a leaf + :return: :py:class:`parso.tree.Leaf` at ``position``, or ``None`` + """ + def binary_search(lower, upper): + if lower == upper: + element = self.children[lower] + if not include_prefixes and position < element.start_pos: + # We're on a prefix. 
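+ # (i.e. the position points into whitespace or comments that
+ # come before the leaf, which the caller asked to exclude)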
+ return None + # In case we have prefixes, a leaf always matches + try: + return element.get_leaf_for_position(position, include_prefixes) + except AttributeError: + return element + + + index = int((lower + upper) / 2) + element = self.children[index] + if position <= element.end_pos: + return binary_search(lower, index) + else: + return binary_search(index + 1, upper) + + if not ((1, 0) <= position <= self.children[-1].end_pos): + raise ValueError('Please provide a position that exists within this node.') + return binary_search(0, len(self.children) - 1) + + def get_first_leaf(self): + return self.children[0].get_first_leaf() + + def get_last_leaf(self): + return self.children[-1].get_last_leaf() + + @utf8_repr + def __repr__(self): + code = self.get_code().replace('\n', ' ').strip() + if not py_version >= 30: + code = code.encode(encoding, 'replace') + return "<%s: %s@%s,%s>" % \ + (type(self).__name__, code, self.start_pos[0], self.start_pos[1]) + + +class Node(BaseNode): + """Concrete implementation for interior nodes.""" + __slots__ = ('type',) + + def __init__(self, type, children): + super(Node, self).__init__(children) + self.type = type + + def __repr__(self): + return "%s(%s, %r)" % (self.__class__.__name__, self.type, self.children) + + +class ErrorNode(BaseNode): + """ + A node that containes valid nodes/leaves that we're follow by a token that + was invalid. This basically means that the leaf after this node is where + Python would mark a syntax error. + """ + __slots__ = () + type = 'error_node' + + +class ErrorLeaf(Leaf): + """ + A leaf that is either completely invalid in a language (like `$` in Python) + or is invalid at that position. Like the star in `1 +* 1`. + """ + __slots__ = ('original_type',) + type = 'error_leaf' + + def __init__(self, original_type, value, start_pos, prefix=''): + super(ErrorLeaf, self).__init__(value, start_pos, prefix) + self.original_type = original_type + + def __repr__(self): + return "<%s: %s:%s, %s>" % \ + (type(self).__name__, self.original_type, repr(self.value), self.start_pos) diff --git a/pythonFiles/parso/utils.py b/pythonFiles/parso/utils.py new file mode 100644 index 000000000000..a4801b996616 --- /dev/null +++ b/pythonFiles/parso/utils.py @@ -0,0 +1,156 @@ +from collections import namedtuple +import re +import sys +from ast import literal_eval + +from parso._compatibility import unicode, total_ordering + + +Version = namedtuple('Version', 'major, minor, micro') + + +def split_lines(string, keepends=False): + r""" + Intended for Python code. In contrast to Python's :py:meth:`str.splitlines`, + looks at form feeds and other special characters as normal text. Just + splits ``\n`` and ``\r\n``. + Also different: Returns ``[""]`` for an empty string input. + + In Python 2.7 form feeds are used as normal characters when using + str.splitlines. However in Python 3 somewhere there was a decision to split + also on form feeds. + """ + if keepends: + lst = string.splitlines(True) + + # We have to merge lines that were broken by form feed characters. + merge = [] + for i, line in enumerate(lst): + if line.endswith('\f'): + merge.append(i) + + for index in reversed(merge): + try: + lst[index] = lst[index] + lst[index + 1] + del lst[index + 1] + except IndexError: + # index + 1 can be empty and therefore there's no need to + # merge. + pass + + # The stdlib's implementation of the end is inconsistent when calling + # it with/without keepends. One time there's an empty string in the + # end, one time there's none. 
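+ # Here the behavior is made uniform, e.g.
+ # split_lines('a\n', keepends=True) returns ['a\n', ''] while
+ # 'a\n'.splitlines(True) returns only ['a\n'].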
+ if string.endswith('\n') or string == '': + lst.append('') + return lst + else: + return re.split('\n|\r\n', string) + + +def python_bytes_to_unicode(source, encoding='utf-8', errors='strict'): + """ + Checks for unicode BOMs and PEP 263 encoding declarations. Then returns a + unicode object like in :py:meth:`bytes.decode`. + + :param encoding: See :py:meth:`bytes.decode` documentation. + :param errors: See :py:meth:`bytes.decode` documentation. ``errors`` can be + ``'strict'``, ``'replace'`` or ``'ignore'``. + """ + def detect_encoding(): + """ + For the implementation of encoding definitions in Python, look at: + - http://www.python.org/dev/peps/pep-0263/ + - http://docs.python.org/2/reference/lexical_analysis.html#encoding-declarations + """ + byte_mark = literal_eval(r"b'\xef\xbb\xbf'") + if source.startswith(byte_mark): + # UTF-8 byte-order mark + return 'utf-8' + + first_two_lines = re.match(br'(?:[^\n]*\n){0,2}', source).group(0) + possible_encoding = re.search(br"coding[=:]\s*([-\w.]+)", + first_two_lines) + if possible_encoding: + return possible_encoding.group(1) + else: + # the default if nothing else has been set -> PEP 263 + return encoding + + if isinstance(source, unicode): + # only cast str/bytes + return source + + encoding = detect_encoding() + if not isinstance(encoding, unicode): + encoding = unicode(encoding, 'utf-8', 'replace') + + # Cast to unicode + return unicode(source, encoding, errors) + + +def version_info(): + """ + Returns a namedtuple of parso's version, similar to Python's + ``sys.version_info``. + """ + from parso import __version__ + tupl = re.findall(r'[a-z]+|\d+', __version__) + return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)]) + + +def _parse_version(version): + match = re.match(r'(\d+)(?:\.(\d)(?:\.\d+)?)?$', version) + if match is None: + raise ValueError('The given version is not in the right format. ' + 'Use something like "3.2" or "3".') + + major = int(match.group(1)) + minor = match.group(2) + if minor is None: + # Use the latest Python in case it's not exactly defined, because the + # grammars are typically backwards compatible? + if major == 2: + minor = "7" + elif major == 3: + minor = "6" + else: + raise NotImplementedError("Sorry, no support yet for those fancy new/old versions.") + minor = int(minor) + return PythonVersionInfo(major, minor) + + +@total_ordering +class PythonVersionInfo(namedtuple('Version', 'major, minor')): + def __gt__(self, other): + if isinstance(other, tuple): + if len(other) != 2: + raise ValueError("Can only compare to tuples of length 2.") + return (self.major, self.minor) > other + super(PythonVersionInfo, self).__gt__(other) + + return (self.major, self.minor) + + def __eq__(self, other): + if isinstance(other, tuple): + if len(other) != 2: + raise ValueError("Can only compare to tuples of length 2.") + return (self.major, self.minor) == other + super(PythonVersionInfo, self).__eq__(other) + + def __ne__(self, other): + return not self.__eq__(other) + + +def parse_version_string(version=None): + """ + Checks for a valid version number (e.g. `3.2` or `2.7.1` or `3`) and + returns a corresponding version info that is always two characters long in + decimal. 
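+
+ For example, both ``parse_version_string('3.6')`` and
+ ``parse_version_string('3.6.1')`` yield
+ ``PythonVersionInfo(major=3, minor=6)``.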
+    """
+    if version is None:
+        version = '%s.%s' % sys.version_info[:2]
+    if not isinstance(version, (unicode, str)):
+        raise TypeError("version must be a string like 3.2.")
+
+    return _parse_version(version)
diff --git a/pythonFiles/preview/jedi/__init__.py b/pythonFiles/preview/jedi/__init__.py
deleted file mode 100644
index c2782760c0b0..000000000000
--- a/pythonFiles/preview/jedi/__init__.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""
-Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its
-historic focus is autocompletion, but does static analysis for now as well.
-Jedi is fast and is very well tested. It understands Python on a deeper level
-than all other static analysis frameworks for Python.
-
-Jedi has support for two different goto functions. It's possible to search for
-related names and to list all names in a Python file and infer them. Jedi
-understands docstrings and you can use Jedi autocompletion in your REPL as
-well.
-
-Jedi uses a very simple API to connect with IDE's. There's a reference
-implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
-which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
-It's really easy.
-
-To give you a simple example how you can use the Jedi library, here is an
-example for the autocompletion feature:
-
->>> import jedi
->>> source = '''
-... import datetime
-... datetime.da'''
->>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py')
->>> script
-<Script: 'example.py'>
->>> completions = script.completions()
->>> completions #doctest: +ELLIPSIS
-[<Completion: date>, <Completion: datetime>, ...]
->>> print(completions[0].complete)
-te
->>> print(completions[0].name)
-date
-
-As you see Jedi is pretty simple and allows you to concentrate on writing a
-good text editor, while still having very good IDE features for Python.
-"""
-
-__version__ = '0.10.0'
-
-from jedi.api import Script, Interpreter, NotFoundError, set_debug_function
-from jedi.api import preload_module, defined_names, names
-from jedi import settings
diff --git a/pythonFiles/preview/jedi/_compatibility.py b/pythonFiles/preview/jedi/_compatibility.py
deleted file mode 100644
index 352c2d6b5da9..000000000000
--- a/pythonFiles/preview/jedi/_compatibility.py
+++ /dev/null
@@ -1,267 +0,0 @@
-"""
-To ensure compatibility from Python ``2.6`` - ``3.3``, a module has been
-created. Clearly there is huge need to use conforming syntax.
-"""
-import sys
-import imp
-import os
-import re
-import pkgutil
-try:
-    import importlib
-except ImportError:
-    pass
-
-# Cannot use sys.version.major and minor names, because in Python 2.6 it's not
-# a namedtuple.
-is_py3 = sys.version_info[0] >= 3
-is_py33 = is_py3 and sys.version_info[1] >= 3
-is_py34 = is_py3 and sys.version_info[1] >= 4
-is_py35 = is_py3 and sys.version_info[1] >= 5
-is_py26 = not is_py3 and sys.version_info[1] < 7
-py_version = int(str(sys.version_info[0]) + str(sys.version_info[1]))
-
-
-class DummyFile(object):
-    def __init__(self, loader, string):
-        self.loader = loader
-        self.string = string
-
-    def read(self):
-        return self.loader.get_source(self.string)
-
-    def close(self):
-        del self.loader
-
-
-def find_module_py33(string, path=None):
-    loader = importlib.machinery.PathFinder.find_module(string, path)
-
-    if loader is None and path is None:  # Fallback to find builtins
-        try:
-            loader = importlib.find_loader(string)
-        except ValueError as e:
-            # See #491. Importlib might raise a ValueError, to avoid this, we
-            # just raise an ImportError to fix the issue.
- raise ImportError("Originally " + repr(e)) - - if loader is None: - raise ImportError("Couldn't find a loader for {0}".format(string)) - - try: - is_package = loader.is_package(string) - if is_package: - if hasattr(loader, 'path'): - module_path = os.path.dirname(loader.path) - else: - # At least zipimporter does not have path attribute - module_path = os.path.dirname(loader.get_filename(string)) - if hasattr(loader, 'archive'): - module_file = DummyFile(loader, string) - else: - module_file = None - else: - module_path = loader.get_filename(string) - module_file = DummyFile(loader, string) - except AttributeError: - # ExtensionLoader has not attribute get_filename, instead it has a - # path attribute that we can use to retrieve the module path - try: - module_path = loader.path - module_file = DummyFile(loader, string) - except AttributeError: - module_path = string - module_file = None - finally: - is_package = False - - if hasattr(loader, 'archive'): - module_path = loader.archive - - return module_file, module_path, is_package - - -def find_module_pre_py33(string, path=None): - try: - module_file, module_path, description = imp.find_module(string, path) - module_type = description[2] - return module_file, module_path, module_type is imp.PKG_DIRECTORY - except ImportError: - pass - - if path is None: - path = sys.path - for item in path: - loader = pkgutil.get_importer(item) - if loader: - try: - loader = loader.find_module(string) - if loader: - is_package = loader.is_package(string) - is_archive = hasattr(loader, 'archive') - try: - module_path = loader.get_filename(string) - except AttributeError: - # fallback for py26 - try: - module_path = loader._get_filename(string) - except AttributeError: - continue - if is_package: - module_path = os.path.dirname(module_path) - if is_archive: - module_path = loader.archive - file = None - if not is_package or is_archive: - file = DummyFile(loader, string) - return (file, module_path, is_package) - except ImportError: - pass - raise ImportError("No module named {0}".format(string)) - - -find_module = find_module_py33 if is_py33 else find_module_pre_py33 -find_module.__doc__ = """ -Provides information about a module. - -This function isolates the differences in importing libraries introduced with -python 3.3 on; it gets a module name and optionally a path. It will return a -tuple containin an open file for the module (if not builtin), the filename -or the name of the module if it is a builtin one and a boolean indicating -if the module is contained in a package. -""" - - -# unicode function -try: - unicode = unicode -except NameError: - unicode = str - -if is_py3: - u = lambda s: s -else: - u = lambda s: s.decode('utf-8') - -u.__doc__ = """ -Decode a raw string into unicode object. Do nothing in Python 3. -""" - -# exec function -if is_py3: - def exec_function(source, global_map): - exec(source, global_map) -else: - eval(compile("""def exec_function(source, global_map): - exec source in global_map """, 'blub', 'exec')) - -# re-raise function -if is_py3: - def reraise(exception, traceback): - raise exception.with_traceback(traceback) -else: - eval(compile(""" -def reraise(exception, traceback): - raise exception, None, traceback -""", 'blub', 'exec')) - -reraise.__doc__ = """ -Re-raise `exception` with a `traceback` object. 
- -Usage:: - - reraise(Exception, sys.exc_info()[2]) - -""" - -class Python3Method(object): - def __init__(self, func): - self.func = func - - def __get__(self, obj, objtype): - if obj is None: - return lambda *args, **kwargs: self.func(*args, **kwargs) - else: - return lambda *args, **kwargs: self.func(obj, *args, **kwargs) - - -def use_metaclass(meta, *bases): - """ Create a class with a metaclass. """ - if not bases: - bases = (object,) - return meta("HackClass", bases, {}) - - -try: - encoding = sys.stdout.encoding - if encoding is None: - encoding = 'utf-8' -except AttributeError: - encoding = 'ascii' - - -def u(string): - """Cast to unicode DAMMIT! - Written because Python2 repr always implicitly casts to a string, so we - have to cast back to a unicode (and we now that we always deal with valid - unicode, because we check that in the beginning). - """ - if is_py3: - return str(string) - - if not isinstance(string, unicode): - return unicode(str(string), 'UTF-8') - return string - -try: - import builtins # module name in python 3 -except ImportError: - import __builtin__ as builtins - - -import ast - - -def literal_eval(string): - # py3.0, py3.1 and py32 don't support unicode literals. Support those, I - # don't want to write two versions of the tokenizer. - if is_py3 and sys.version_info.minor < 3: - if re.match('[uU][\'"]', string): - string = string[1:] - return ast.literal_eval(string) - - -try: - from itertools import zip_longest -except ImportError: - from itertools import izip_longest as zip_longest # Python 2 - - -def no_unicode_pprint(dct): - """ - Python 2/3 dict __repr__ may be different, because of unicode differens - (with or without a `u` prefix). Normally in doctests we could use `pprint` - to sort dicts and check for equality, but here we have to write a separate - function to do that. - """ - import pprint - s = pprint.pformat(dct) - print(re.sub("u'", "'", s)) - - -def utf8_repr(func): - """ - ``__repr__`` methods in Python 2 don't allow unicode objects to be - returned. Therefore cast them to utf-8 bytes in this decorator. - """ - def wrapper(self): - result = func(self) - if isinstance(result, unicode): - return result.encode('utf-8') - else: - return result - - if is_py3: - return func - else: - return wrapper diff --git a/pythonFiles/preview/jedi/api/__init__.py b/pythonFiles/preview/jedi/api/__init__.py deleted file mode 100644 index ab91934be6bd..000000000000 --- a/pythonFiles/preview/jedi/api/__init__.py +++ /dev/null @@ -1,467 +0,0 @@ -""" -The API basically only provides one class. You can create a :class:`Script` and -use its methods. - -Additionally you can add a debug function with :func:`set_debug_function`. -Alternatively, if you don't need a custom function and are happy with printing -debug messages to stdout, simply call :func:`set_debug_function` without -arguments. - -.. warning:: Please, note that Jedi is **not thread safe**. 
-""" -import os -import warnings -import sys - -from jedi._compatibility import unicode -from jedi.parser import load_grammar -from jedi.parser import tree -from jedi.parser.fast import FastParser -from jedi.parser.utils import save_parser -from jedi import debug -from jedi import settings -from jedi import common -from jedi import cache -from jedi.api import classes -from jedi.api import interpreter -from jedi.api import usages -from jedi.api import helpers -from jedi.api.completion import Completion -from jedi.evaluate import Evaluator -from jedi.evaluate import representation as er -from jedi.evaluate import imports -from jedi.evaluate.param import try_iter_content -from jedi.evaluate.helpers import get_module_names -from jedi.evaluate.sys_path import get_venv_path -from jedi.evaluate.iterable import unpack_tuple_to_dict - -# Jedi uses lots and lots of recursion. By setting this a little bit higher, we -# can remove some "maximum recursion depth" errors. -sys.setrecursionlimit(2000) - - -class NotFoundError(Exception): - """A custom error to avoid catching the wrong exceptions. - - .. deprecated:: 0.9.0 - Not in use anymore, Jedi just returns no goto result if you're not on a - valid name. - .. todo:: Remove! - """ - - -class Script(object): - """ - A Script is the base for completions, goto or whatever you want to do with - |jedi|. - - You can either use the ``source`` parameter or ``path`` to read a file. - Usually you're going to want to use both of them (in an editor). - - The script might be analyzed in a different ``sys.path`` than |jedi|: - - - if `sys_path` parameter is not ``None``, it will be used as ``sys.path`` - for the script; - - - if `sys_path` parameter is ``None`` and ``VIRTUAL_ENV`` environment - variable is defined, ``sys.path`` for the specified environment will be - guessed (see :func:`jedi.evaluate.sys_path.get_venv_path`) and used for - the script; - - - otherwise ``sys.path`` will match that of |jedi|. - - :param source: The source code of the current file, separated by newlines. - :type source: str - :param line: The line to perform actions on (starting with 1). - :type line: int - :param column: The column of the cursor (starting with 0). - :type column: int - :param path: The path of the file in the file system, or ``''`` if - it hasn't been saved yet. - :type path: str or None - :param encoding: The encoding of ``source``, if it is not a - ``unicode`` object (default ``'utf-8'``). - :type encoding: str - :param source_encoding: The encoding of ``source``, if it is not a - ``unicode`` object (default ``'utf-8'``). - :type encoding: str - :param sys_path: ``sys.path`` to use during analysis of the script - :type sys_path: list - - """ - def __init__(self, source=None, line=None, column=None, path=None, - encoding='utf-8', source_path=None, source_encoding=None, - sys_path=None): - if source_path is not None: - warnings.warn("Use path instead of source_path.", DeprecationWarning) - path = source_path - if source_encoding is not None: - warnings.warn("Use encoding instead of source_encoding.", DeprecationWarning) - encoding = source_encoding - - self._orig_path = path - # An empty path (also empty string) should always result in no path. - self.path = os.path.abspath(path) if path else None - - if source is None: - # TODO add a better warning than the traceback! 
- try: - with open(path) as f: - source = f.read() - except UnicodeDecodeError: - with open(path, encoding=encoding) as f: - source = f.read() - - self._source = common.source_to_unicode(source, encoding) - self._code_lines = common.splitlines(self._source) - line = max(len(self._code_lines), 1) if line is None else line - if not (0 < line <= len(self._code_lines)): - raise ValueError('`line` parameter is not in a valid range.') - - line_len = len(self._code_lines[line - 1]) - column = line_len if column is None else column - if not (0 <= column <= line_len): - raise ValueError('`column` parameter is not in a valid range.') - self._pos = line, column - self._path = path - - cache.clear_time_caches() - debug.reset_time() - self._grammar = load_grammar(version='%s.%s' % sys.version_info[:2]) - if sys_path is None: - venv = os.getenv('VIRTUAL_ENV') - if venv: - sys_path = list(get_venv_path(venv)) - self._evaluator = Evaluator(self._grammar, sys_path=sys_path) - debug.speed('init') - - def _get_module(self): - cache.invalidate_star_import_cache(self._path) - parser = FastParser(self._grammar, self._source, self.path) - save_parser(self.path, parser, pickling=False) - - module = self._evaluator.wrap(parser.module) - imports.add_module(self._evaluator, unicode(module.name), module) - return parser.module - - @property - def source_path(self): - """ - .. deprecated:: 0.7.0 - Use :attr:`.path` instead. - .. todo:: Remove! - """ - warnings.warn("Use path instead of source_path.", DeprecationWarning) - return self.path - - def __repr__(self): - return '<%s: %s>' % (self.__class__.__name__, repr(self._orig_path)) - - def completions(self): - """ - Return :class:`classes.Completion` objects. Those objects contain - information about the completions, more than just names. - - :return: Completion objects, sorted by name and __ comes last. - :rtype: list of :class:`classes.Completion` - """ - debug.speed('completions start') - completion = Completion( - self._evaluator, self._get_module(), self._code_lines, - self._pos, self.call_signatures - ) - completions = completion.completions() - debug.speed('completions end') - return completions - - def goto_definitions(self): - """ - Return the definitions of a the path under the cursor. goto function! - This follows complicated paths and returns the end, not the first - definition. The big difference between :meth:`goto_assignments` and - :meth:`goto_definitions` is that :meth:`goto_assignments` doesn't - follow imports and statements. Multiple objects may be returned, - because Python itself is a dynamic language, which means depending on - an option you can have two different versions of a function. - - :rtype: list of :class:`classes.Definition` - """ - leaf = self._get_module().name_for_position(self._pos) - if leaf is None: - leaf = self._get_module().get_leaf_for_position(self._pos) - if leaf is None: - return [] - definitions = helpers.evaluate_goto_definition(self._evaluator, leaf) - - names = [s.name for s in definitions] - defs = [classes.Definition(self._evaluator, name) for name in names] - # The additional set here allows the definitions to become unique in an - # API sense. In the internals we want to separate more things than in - # the API. - return helpers.sorted_definitions(set(defs)) - - def goto_assignments(self, follow_imports=False): - """ - Return the first definition found, while optionally following imports. 
- Multiple objects may be returned, because Python itself is a - dynamic language, which means depending on an option you can have two - different versions of a function. - - :rtype: list of :class:`classes.Definition` - """ - def filter_follow_imports(names): - for name in names: - definition = name.get_definition() - if definition.type in ('import_name', 'import_from'): - imp = imports.ImportWrapper(self._evaluator, name) - for name in filter_follow_imports(imp.follow(is_goto=True)): - yield name - else: - yield name - - names = self._goto() - if follow_imports: - names = filter_follow_imports(names) - - defs = [classes.Definition(self._evaluator, d) for d in set(names)] - return helpers.sorted_definitions(defs) - - def _goto(self): - """ - Used for goto_assignments and usages. - """ - name = self._get_module().name_for_position(self._pos) - if name is None: - return [] - return list(self._evaluator.goto(name)) - - def usages(self, additional_module_paths=()): - """ - Return :class:`classes.Definition` objects, which contain all - names that point to the definition of the name under the cursor. This - is very useful for refactoring (renaming), or to show all usages of a - variable. - - .. todo:: Implement additional_module_paths - - :rtype: list of :class:`classes.Definition` - """ - temp, settings.dynamic_flow_information = \ - settings.dynamic_flow_information, False - try: - user_stmt = self._get_module().get_statement_for_position(self._pos) - definitions = self._goto() - if not definitions and isinstance(user_stmt, tree.Import): - # For not defined imports (goto doesn't find something, we take - # the name as a definition. This is enough, because every name - # points to it. - name = user_stmt.name_for_position(self._pos) - if name is None: - # Must be syntax - return [] - definitions = [name] - - if not definitions: - # Without a definition for a name we cannot find references. - return [] - - if not isinstance(user_stmt, tree.Import): - # import case is looked at with add_import_name option - definitions = usages.usages_add_import_modules(self._evaluator, - definitions) - - module = set([d.get_parent_until() for d in definitions]) - module.add(self._get_module()) - names = usages.usages(self._evaluator, definitions, module) - - for d in set(definitions): - names.append(classes.Definition(self._evaluator, d)) - finally: - settings.dynamic_flow_information = temp - - return helpers.sorted_definitions(set(names)) - - def call_signatures(self): - """ - Return the function object of the call you're currently in. - - E.g. if the cursor is here:: - - abs(# <-- cursor is here - - This would return the ``abs`` function. On the other hand:: - - abs()# <-- cursor is here - - This would return an empty list.. 
- - :rtype: list of :class:`classes.CallSignature` - """ - call_signature_details = \ - helpers.get_call_signature_details(self._get_module(), self._pos) - if call_signature_details is None: - return [] - - with common.scale_speed_settings(settings.scale_call_signatures): - definitions = helpers.cache_call_signatures( - self._evaluator, - call_signature_details.bracket_leaf, - self._code_lines, - self._pos - ) - debug.speed('func_call followed') - - return [classes.CallSignature(self._evaluator, d.name, - call_signature_details.bracket_leaf.start_pos, - call_signature_details.call_index, - call_signature_details.keyword_name_str) - for d in definitions if hasattr(d, 'py__call__')] - - def _analysis(self): - self._evaluator.is_analysis = True - self._evaluator.analysis_modules = [self._get_module()] - try: - for node in self._get_module().nodes_to_execute(): - if node.type in ('funcdef', 'classdef'): - if node.type == 'classdef': - continue - raise NotImplementedError - er.Function(self._evaluator, node).get_decorated_func() - elif isinstance(node, tree.Import): - import_names = set(node.get_defined_names()) - if node.is_nested(): - import_names |= set(path[-1] for path in node.paths()) - for n in import_names: - imports.ImportWrapper(self._evaluator, n).follow() - elif node.type == 'expr_stmt': - types = self._evaluator.eval_element(node) - for testlist in node.children[:-1:2]: - # Iterate tuples. - unpack_tuple_to_dict(self._evaluator, types, testlist) - else: - try_iter_content(self._evaluator.goto_definitions(node)) - self._evaluator.reset_recursion_limitations() - - ana = [a for a in self._evaluator.analysis if self.path == a.path] - return sorted(set(ana), key=lambda x: x.line) - finally: - self._evaluator.is_analysis = False - - -class Interpreter(Script): - """ - Jedi API for Python REPLs. - - In addition to completion of simple attribute access, Jedi - supports code completion based on static code analysis. - Jedi can complete attributes of object which is not initialized - yet. - - >>> from os.path import join - >>> namespace = locals() - >>> script = Interpreter('join("").up', [namespace]) - >>> print(script.completions()[0].name) - upper - """ - - def __init__(self, source, namespaces, **kwds): - """ - Parse `source` and mixin interpreted Python objects from `namespaces`. - - :type source: str - :arg source: Code to parse. - :type namespaces: list of dict - :arg namespaces: a list of namespace dictionaries such as the one - returned by :func:`locals`. - - Other optional arguments are same as the ones for :class:`Script`. - If `line` and `column` are None, they are assumed be at the end of - `source`. - """ - try: - namespaces = [dict(n) for n in namespaces] - except Exception: - raise TypeError("namespaces must be a non-empty list of dicts.") - - super(Interpreter, self).__init__(source, **kwds) - self.namespaces = namespaces - - parser_module = super(Interpreter, self)._get_module() - self._module = interpreter.MixedModule(self._evaluator, parser_module, self.namespaces) - - def _get_module(self): - return self._module - - -def defined_names(source, path=None, encoding='utf-8'): - """ - Get all definitions in `source` sorted by its position. - - This functions can be used for listing functions, classes and - data defined in a file. This can be useful if you want to list - them in "sidebar". Each element in the returned list also has - `defined_names` method which can be used to get sub-definitions - (e.g., methods in class). - - :rtype: list of classes.Definition - - .. 
deprecated:: 0.9.0 - Use :func:`names` instead. - .. todo:: Remove! - """ - warnings.warn("Use call_signatures instead.", DeprecationWarning) - return names(source, path, encoding) - - -def names(source=None, path=None, encoding='utf-8', all_scopes=False, - definitions=True, references=False): - """ - Returns a list of `Definition` objects, containing name parts. - This means you can call ``Definition.goto_assignments()`` and get the - reference of a name. - The parameters are the same as in :py:class:`Script`, except or the - following ones: - - :param all_scopes: If True lists the names of all scopes instead of only - the module namespace. - :param definitions: If True lists the names that have been defined by a - class, function or a statement (``a = b`` returns ``a``). - :param references: If True lists all the names that are not listed by - ``definitions=True``. E.g. ``a = b`` returns ``b``. - """ - def def_ref_filter(_def): - is_def = _def.is_definition() - return definitions and is_def or references and not is_def - - # Set line/column to a random position, because they don't matter. - script = Script(source, line=1, column=0, path=path, encoding=encoding) - defs = [classes.Definition(script._evaluator, name_part) - for name_part in get_module_names(script._get_module(), all_scopes)] - return sorted(filter(def_ref_filter, defs), key=lambda x: (x.line, x.column)) - - -def preload_module(*modules): - """ - Preloading modules tells Jedi to load a module now, instead of lazy parsing - of modules. Usful for IDEs, to control which modules to load on startup. - - :param modules: different module names, list of string. - """ - for m in modules: - s = "import %s as x; x." % m - Script(s, 1, len(s), None).completions() - - -def set_debug_function(func_cb=debug.print_to_stdout, warnings=True, - notices=True, speed=True): - """ - Define a callback debug function to get all the debug messages. - - If you don't specify any arguments, debug messages will be printed to stdout. - - :param func_cb: The callback function for debug messages, with n params. - """ - debug.debug_function = func_cb - debug.enable_warning = warnings - debug.enable_notice = notices - debug.enable_speed = speed diff --git a/pythonFiles/preview/jedi/api/classes.py b/pythonFiles/preview/jedi/api/classes.py deleted file mode 100644 index a1d3ce8a3412..000000000000 --- a/pythonFiles/preview/jedi/api/classes.py +++ /dev/null @@ -1,764 +0,0 @@ -""" -The :mod:`jedi.api.classes` module contains the return classes of the API. -These classes are the much bigger part of the whole API, because they contain -the interesting information about completion and goto operations. -""" -import warnings -from itertools import chain -import re - -from jedi._compatibility import unicode, use_metaclass -from jedi import settings -from jedi import common -from jedi.parser import tree -from jedi.parser.utils import load_parser -from jedi.evaluate.cache import memoize_default, CachedMetaClass -from jedi.evaluate import representation as er -from jedi.evaluate import iterable -from jedi.evaluate import imports -from jedi.evaluate import compiled -from jedi.evaluate.compiled import mixed -from jedi.api import keywords -from jedi.evaluate.finder import filter_definition_names - - -def defined_names(evaluator, scope): - """ - List sub-definitions (e.g., methods in class). 
- - :type scope: Scope - :rtype: list of Definition - """ - dct = scope.names_dict - names = list(chain.from_iterable(dct.values())) - names = filter_definition_names(names, scope) - return [Definition(evaluator, d) for d in sorted(names, key=lambda s: s.start_pos)] - - -class BaseDefinition(object): - _mapping = { - 'posixpath': 'os.path', - 'riscospath': 'os.path', - 'ntpath': 'os.path', - 'os2emxpath': 'os.path', - 'macpath': 'os.path', - 'genericpath': 'os.path', - 'posix': 'os', - '_io': 'io', - '_functools': 'functools', - '_sqlite3': 'sqlite3', - '__builtin__': '', - 'builtins': '', - } - - _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in { - 'argparse._ActionsContainer': 'argparse.ArgumentParser', - '_sre.SRE_Match': 're.MatchObject', - '_sre.SRE_Pattern': 're.RegexObject', - }.items()) - - def __init__(self, evaluator, name): - self._evaluator = evaluator - self._name = name - """ - An instance of :class:`jedi.parser.reprsentation.Name` subclass. - """ - self._definition = evaluator.wrap(self._name.get_definition()) - self.is_keyword = isinstance(self._definition, keywords.Keyword) - - # generate a path to the definition - self._module = name.get_parent_until() - if self.in_builtin_module(): - self.module_path = None - else: - self.module_path = self._module.path - """Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``""" - - @property - def name(self): - """ - Name of variable/function/class/module. - - For example, for ``x = None`` it returns ``'x'``. - - :rtype: str or None - """ - return unicode(self._name) - - @property - def start_pos(self): - """ - .. deprecated:: 0.7.0 - Use :attr:`.line` and :attr:`.column` instead. - .. todo:: Remove! - """ - warnings.warn("Use line/column instead.", DeprecationWarning) - return self._name.start_pos - - @property - def type(self): - """ - The type of the definition. - - Here is an example of the value of this attribute. Let's consider - the following source. As what is in ``variable`` is unambiguous - to Jedi, :meth:`jedi.Script.goto_definitions` should return a list of - definition for ``sys``, ``f``, ``C`` and ``x``. - - >>> from jedi import Script - >>> source = ''' - ... import keyword - ... - ... class C: - ... pass - ... - ... class D: - ... pass - ... - ... x = D() - ... - ... def f(): - ... pass - ... - ... for variable in [keyword, f, C, x]: - ... variable''' - - >>> script = Script(source) - >>> defs = script.goto_definitions() - - Before showing what is in ``defs``, let's sort it by :attr:`line` - so that it is easy to relate the result to the source code. 
-
-        >>> defs = sorted(defs, key=lambda d: d.line)
-        >>> defs # doctest: +NORMALIZE_WHITESPACE
-        [<Definition module keyword>, <Definition class C>,
-         <Definition instance D>, <Definition def f>]
-
-        Finally, here is what you can get from :attr:`type`:
-
-        >>> defs[0].type
-        'module'
-        >>> defs[1].type
-        'class'
-        >>> defs[2].type
-        'instance'
-        >>> defs[3].type
-        'function'
-
-        """
-        stripped = self._definition
-        if isinstance(stripped, er.InstanceElement):
-            stripped = stripped.var
-
-        if isinstance(stripped, (compiled.CompiledObject, mixed.MixedObject)):
-            return stripped.api_type()
-        elif isinstance(stripped, iterable.Array):
-            return 'instance'
-        elif isinstance(stripped, tree.Import):
-            return 'import'
-
-        string = type(stripped).__name__.lower().replace('wrapper', '')
-        if string == 'exprstmt':
-            return 'statement'
-        else:
-            return string
-
-    def _path(self):
-        """The path to a module/class/function definition."""
-        path = []
-        par = self._definition
-        while par is not None:
-            if isinstance(par, tree.Import):
-                path += imports.ImportWrapper(self._evaluator, self._name).import_path
-                break
-            try:
-                name = par.name
-            except AttributeError:
-                pass
-            else:
-                if isinstance(par, er.ModuleWrapper):
-                    # TODO just make the path dotted from the beginning, we
-                    # shouldn't really split here.
-                    path[0:0] = par.py__name__().split('.')
-                    break
-                else:
-                    path.insert(0, unicode(name))
-            par = par.parent
-        return path
-
-    @property
-    def module_name(self):
-        """
-        The module name.
-
-        >>> from jedi import Script
-        >>> source = 'import json'
-        >>> script = Script(source, path='example.py')
-        >>> d = script.goto_definitions()[0]
-        >>> print(d.module_name) # doctest: +ELLIPSIS
-        json
-        """
-        return str(self._module.name)
-
-    def in_builtin_module(self):
-        """Whether this is a builtin module."""
-        return isinstance(self._module, compiled.CompiledObject)
-
-    @property
-    def line(self):
-        """The line where the definition occurs (starting with 1)."""
-        if self.in_builtin_module():
-            return None
-        return self._name.start_pos[0]
-
-    @property
-    def column(self):
-        """The column where the definition occurs (starting with 0)."""
-        if self.in_builtin_module():
-            return None
-        return self._name.start_pos[1]
-
-    def docstring(self, raw=False):
-        r"""
-        Return a document string for this completion object.
-
-        Example:
-
-        >>> from jedi import Script
-        >>> source = '''\
-        ... def f(a, b=1):
-        ...     "Document for function f."
-        ... '''
-        >>> script = Script(source, 1, len('def f'), 'example.py')
-        >>> doc = script.goto_definitions()[0].docstring()
-        >>> print(doc)
-        f(a, b=1)
-        <BLANKLINE>
-        Document for function f.
-
-        Notice that useful extra information is added to the actual
-        docstring. For function, it is call signature. If you need
-        actual docstring, use ``raw=True`` instead.
-
-        >>> print(script.goto_definitions()[0].docstring(raw=True))
-        Document for function f.
-
-        """
-        if raw:
-            return _Help(self._definition).raw()
-        else:
-            return _Help(self._definition).full()
-
-    @property
-    def doc(self):
-        """
-        .. deprecated:: 0.8.0
-           Use :meth:`.docstring` instead.
-        .. todo:: Remove!
-        """
-        warnings.warn("Use docstring() instead.", DeprecationWarning)
-        return self.docstring()
-
-    @property
-    def raw_doc(self):
-        """
-        .. deprecated:: 0.8.0
-           Use :meth:`.docstring` instead.
-        .. todo:: Remove!
-        """
-        warnings.warn("Use docstring() instead.", DeprecationWarning)
-        return self.docstring(raw=True)
-
-    @property
-    def description(self):
-        """A textual description of the object."""
-        return unicode(self._name)
-
-    @property
-    def full_name(self):
-        """
-        Dot-separated path of this object.
-
-        It is in the form of ``<module>[.<submodule>[...]][.<object>]``.
-        It is useful when you want to look up Python manual of the
-        object at hand.
-
-        Example:
-
-        >>> from jedi import Script
-        >>> source = '''
-        ... import os
-        ... os.path.join'''
-        >>> script = Script(source, 3, len('os.path.join'), 'example.py')
-        >>> print(script.goto_definitions()[0].full_name)
-        os.path.join
-
-        Notice that it correctly returns ``'os.path.join'`` instead of
-        (for example) ``'posixpath.join'``.
-
-        """
-        path = [unicode(p) for p in self._path()]
-        # TODO add further checks, the mapping should only occur on stdlib.
-        if not path:
-            return None  # for keywords the path is empty
-
-        with common.ignored(KeyError):
-            path[0] = self._mapping[path[0]]
-        for key, repl in self._tuple_mapping.items():
-            if tuple(path[:len(key)]) == key:
-                path = [repl] + path[len(key):]
-
-        return '.'.join(path if path[0] else path[1:])
-
-    def goto_assignments(self):
-        defs = self._evaluator.goto(self._name)
-        return [Definition(self._evaluator, d) for d in defs]
-
-    @memoize_default()
-    def _follow_statements_imports(self):
-        """
-        Follow both statements and imports, as far as possible.
-        """
-        if self._definition.isinstance(tree.ExprStmt):
-            return self._evaluator.eval_statement(self._definition)
-        elif self._definition.isinstance(tree.Import):
-            return imports.ImportWrapper(self._evaluator, self._name).follow()
-        else:
-            return set([self._definition])
-
-    @property
-    @memoize_default()
-    def params(self):
-        """
-        Raises an ``AttributeError``if the definition is not callable.
-        Otherwise returns a list of `Definition` that represents the params.
-        """
-        followed = list(self._follow_statements_imports())
-        if not followed or not hasattr(followed[0], 'py__call__'):
-            raise AttributeError()
-        followed = followed[0]  # only check the first one.
-
-        if followed.type in ('funcdef', 'lambda'):
-            if isinstance(followed, er.InstanceElement):
-                params = followed.params[1:]
-            else:
-                params = followed.params
-        elif followed.isinstance(er.compiled.CompiledObject):
-            params = followed.params
-        elif isinstance(followed, er.Class):
-            try:
-                sub = followed.get_subscope_by_name('__init__')
-                params = sub.params[1:]  # ignore self
-            except KeyError:
-                return []
-        elif isinstance(followed, er.Instance):
-            try:
-                sub = followed.get_subscope_by_name('__call__')
-                params = sub.params[1:]  # ignore self
-            except KeyError:
-                return []
-        else:
-            return []
-        return [_Param(self._evaluator, p.name) for p in params]
-
-    def parent(self):
-        scope = self._definition.get_parent_scope()
-        scope = self._evaluator.wrap(scope)
-        return Definition(self._evaluator, scope.name)
-
-    def __repr__(self):
-        return "<%s %s>" % (type(self).__name__, self.description)
-
-    def get_line_code(self, before=0, after=0):
-        """
-        Returns the line of code where this object was defined.
-
-        :param before: Add n lines before the current line to the output.
-        :param after: Add n lines after the current line to the output.
-
-        :return str: Returns the line(s) of code or an empty string if it's a
-            builtin.
-        """
-        if self.in_builtin_module():
-            return ''
-
-        path = self._definition.get_parent_until().path
-        parser = load_parser(path)
-        lines = common.splitlines(parser.source)
-
-        line_nr = self._name.start_pos[0]
-        start_line_nr = line_nr - before
-        return '\n'.join(lines[start_line_nr:line_nr + after + 1])
-
-
-class Completion(BaseDefinition):
-    """
-    `Completion` objects are returned from :meth:`api.Script.completions`. They
-    provide additional information about a completion.
- """ - def __init__(self, evaluator, name, stack, like_name_length): - super(Completion, self).__init__(evaluator, name) - - self._like_name_length = like_name_length - self._stack = stack - - # Completion objects with the same Completion name (which means - # duplicate items in the completion) - self._same_name_completions = [] - - def _complete(self, like_name): - append = '' - if settings.add_bracket_after_function \ - and self.type == 'Function': - append = '(' - - if isinstance(self._definition, tree.Param) and self._stack is not None: - node_names = list(self._stack.get_node_names(self._evaluator.grammar)) - if 'trailer' in node_names and 'argument' not in node_names: - append += '=' - - name = str(self._name) - if like_name: - name = name[self._like_name_length:] - return name + append - - @property - def complete(self): - """ - Return the rest of the word, e.g. completing ``isinstance``:: - - isinstan# <-- Cursor is here - - would return the string 'ce'. It also adds additional stuff, depending - on your `settings.py`. - """ - return self._complete(True) - - @property - def name_with_symbols(self): - """ - Similar to :attr:`name`, but like :attr:`name` - returns also the symbols, for example:: - - list() - - would return ``.append`` and others (which means it adds a dot). - """ - return self._complete(False) - - @property - def description(self): - """Provide a description of the completion object.""" - if self._definition is None: - return '' - t = self.type - if t == 'statement' or t == 'import': - desc = self._definition.get_code() - else: - desc = '.'.join(unicode(p) for p in self._path()) - - line = '' if self.in_builtin_module else '@%s' % self.line - return '%s: %s%s' % (t, desc, line) - - def __repr__(self): - return '<%s: %s>' % (type(self).__name__, self._name) - - def docstring(self, raw=False, fast=True): - """ - :param fast: Don't follow imports that are only one level deep like - ``import foo``, but follow ``from foo import bar``. This makes - sense for speed reasons. Completing `import a` is slow if you use - the ``foo.docstring(fast=False)`` on every object, because it - parses all libraries starting with ``a``. - """ - definition = self._definition - if isinstance(definition, tree.Import): - i = imports.ImportWrapper(self._evaluator, self._name) - if len(i.import_path) > 1 or not fast: - followed = self._follow_statements_imports() - if followed: - # TODO: Use all of the followed objects as input to Documentation. - definition = list(followed)[0] - - if raw: - return _Help(definition).raw() - else: - return _Help(definition).full() - - @property - def type(self): - """ - The type of the completion objects. Follows imports. For a further - description, look at :attr:`jedi.api.classes.BaseDefinition.type`. - """ - if isinstance(self._definition, tree.Import): - i = imports.ImportWrapper(self._evaluator, self._name) - if len(i.import_path) <= 1: - return 'module' - - followed = self.follow_definition() - if followed: - # Caveat: Only follows the first one, ignore the other ones. - # This is ok, since people are almost never interested in - # variations. - return followed[0].type - return super(Completion, self).type - - @memoize_default() - def _follow_statements_imports(self): - # imports completion is very complicated and needs to be treated - # separately in Completion. 
-        definition = self._definition
-        if definition.isinstance(tree.Import):
-            i = imports.ImportWrapper(self._evaluator, self._name)
-            return i.follow()
-        return super(Completion, self)._follow_statements_imports()
-
-    @memoize_default()
-    def follow_definition(self):
-        """
-        Return the original definitions. I strongly recommend not using it for
-        your completions, because it might slow down |jedi|. If you want to
-        read only a few objects (<=20), it might be useful, especially to get
-        the original docstrings. The basic problem of this function is that it
-        follows all results. This means with 1000 completions (e.g. numpy),
-        it's just PITA-slow.
-        """
-        defs = self._follow_statements_imports()
-        return [Definition(self._evaluator, d.name) for d in defs]
-
-
-class Definition(use_metaclass(CachedMetaClass, BaseDefinition)):
-    """
-    *Definition* objects are returned from :meth:`api.Script.goto_assignments`
-    or :meth:`api.Script.goto_definitions`.
-    """
-    def __init__(self, evaluator, definition):
-        super(Definition, self).__init__(evaluator, definition)
-
-    @property
-    def description(self):
-        """
-        A description of the :class:`.Definition` object, which is heavily used
-        in testing. e.g. for ``isinstance`` it returns ``def isinstance``.
-
-        Example:
-
-        >>> from jedi import Script
-        >>> source = '''
-        ... def f():
-        ...     pass
-        ...
-        ... class C:
-        ...     pass
-        ...
-        ... variable = f if random.choice([0,1]) else C'''
-        >>> script = Script(source, column=3)  # line is maximum by default
-        >>> defs = script.goto_definitions()
-        >>> defs = sorted(defs, key=lambda d: d.line)
-        >>> defs
-        [<Definition def f>, <Definition class C>]
-        >>> str(defs[0].description)  # strip literals in python2
-        'def f'
-        >>> str(defs[1].description)
-        'class C'
-
-        """
-        d = self._definition
-        if isinstance(d, er.InstanceElement):
-            d = d.var
-
-        if isinstance(d, compiled.CompiledObject):
-            typ = d.api_type()
-            if typ == 'instance':
-                typ = 'class'  # The description should be similar to Py objects.
-            d = typ + ' ' + d.name.get_code()
-        elif isinstance(d, iterable.Array):
-            d = 'class ' + d.type
-        elif isinstance(d, (tree.Class, er.Class, er.Instance)):
-            d = 'class ' + unicode(d.name)
-        elif isinstance(d, (er.Function, tree.Function)):
-            d = 'def ' + unicode(d.name)
-        elif isinstance(d, tree.Module):
-            # only show module name
-            d = 'module %s' % self.module_name
-        elif isinstance(d, tree.Param):
-            d = d.get_code().strip()
-            if d.endswith(','):
-                d = d[:-1]  # Remove the comma.
-        else:  # ExprStmt
-            try:
-                first_leaf = d.first_leaf()
-            except AttributeError:
-                # `d` is already a Leaf (Name).
-                first_leaf = d
-            # Remove the prefix, because that's not what we want for get_code
-            # here.
-            old, first_leaf.prefix = first_leaf.prefix, ''
-            try:
-                d = d.get_code()
-            finally:
-                first_leaf.prefix = old
-        # Delete comments:
-        d = re.sub('#[^\n]+\n', ' ', d)
-        # Delete multi spaces/newlines
-        return re.sub('\s+', ' ', d).strip()
-
-    @property
-    def desc_with_module(self):
-        """
-        In addition to the definition, also return the module.
-
-        .. warning:: Don't use this function yet, its behaviour may change. If
-            you really need it, talk to me.
-
-        .. todo:: Add full path. This function is should return a
-            `module.class.function` path.
-        """
-        position = '' if self.in_builtin_module else '@%s' % (self.line)
-        return "%s:%s%s" % (self.module_name, self.description, position)
-
-    @memoize_default()
-    def defined_names(self):
-        """
-        List sub-definitions (e.g., methods in class).
- - :rtype: list of Definition - """ - defs = self._follow_statements_imports() - # For now we don't want base classes or evaluate decorators. - defs = [d.base if isinstance(d, (er.Class, er.Function)) else d for d in defs] - iterable = (defined_names(self._evaluator, d) for d in defs) - iterable = list(iterable) - return list(chain.from_iterable(iterable)) - - def is_definition(self): - """ - Returns True, if defined as a name in a statement, function or class. - Returns False, if it's a reference to such a definition. - """ - return self._name.is_definition() - - def __eq__(self, other): - return self._name.start_pos == other._name.start_pos \ - and self.module_path == other.module_path \ - and self.name == other.name \ - and self._evaluator == other._evaluator - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - return hash((self._name.start_pos, self.module_path, self.name, self._evaluator)) - - -class CallSignature(Definition): - """ - `CallSignature` objects is the return value of `Script.function_definition`. - It knows what functions you are currently in. e.g. `isinstance(` would - return the `isinstance` function. without `(` it would return nothing. - """ - def __init__(self, evaluator, executable_name, bracket_start_pos, index, key_name_str): - super(CallSignature, self).__init__(evaluator, executable_name) - self._index = index - self._key_name_str = key_name_str - self._bracket_start_pos = bracket_start_pos - - @property - def index(self): - """ - The Param index of the current call. - Returns None if the index cannot be found in the curent call. - """ - if self._key_name_str is not None: - for i, param in enumerate(self.params): - if self._key_name_str == param.name: - return i - if self.params and self.params[-1]._name.get_definition().stars == 2: - return i - else: - return None - - if self._index >= len(self.params): - - for i, param in enumerate(self.params): - # *args case - if param._name.get_definition().stars == 1: - return i - return None - return self._index - - @property - def bracket_start(self): - """ - The indent of the bracket that is responsible for the last function - call. - """ - return self._bracket_start_pos - - @property - def call_name(self): - """ - .. deprecated:: 0.8.0 - Use :attr:`.name` instead. - .. todo:: Remove! - - The name (e.g. 'isinstance') as a string. - """ - warnings.warn("Use name instead.", DeprecationWarning) - return unicode(self.name) - - @property - def module(self): - """ - .. deprecated:: 0.8.0 - Use :attr:`.module_name` for the module name. - .. todo:: Remove! - """ - return self._executable.get_parent_until() - - def __repr__(self): - return '<%s: %s index %s>' % (type(self).__name__, self._name, - self.index) - - -class _Param(Definition): - """ - Just here for backwards compatibility. - """ - def get_code(self): - """ - .. deprecated:: 0.8.0 - Use :attr:`.description` and :attr:`.name` instead. - .. todo:: Remove! - - A function to get the whole code of the param. - """ - warnings.warn("Use description instead.", DeprecationWarning) - return self.description - - -class _Help(object): - """ - Temporary implementation, will be used as `Script.help() or something in - the future. - """ - def __init__(self, definition): - self._name = definition - - def full(self): - try: - return self._name.doc - except AttributeError: - return self.raw() - - def raw(self): - """ - The raw docstring ``__doc__`` for any object. - - See :attr:`doc` for example. 
- """ - try: - return self._name.raw_doc - except AttributeError: - return '' diff --git a/pythonFiles/preview/jedi/api/completion.py b/pythonFiles/preview/jedi/api/completion.py deleted file mode 100644 index cc362dacbfa8..000000000000 --- a/pythonFiles/preview/jedi/api/completion.py +++ /dev/null @@ -1,256 +0,0 @@ -from itertools import chain - -from jedi.parser import token -from jedi.parser import tree -from jedi import debug -from jedi import settings -from jedi.api import classes -from jedi.api import helpers -from jedi.evaluate import imports -from jedi.api import keywords -from jedi.evaluate import compiled -from jedi.evaluate.helpers import call_of_leaf -from jedi.evaluate.finder import global_names_dict_generator, filter_definition_names - - -def get_call_signature_param_names(call_signatures): - # add named params - for call_sig in call_signatures: - # Allow protected access, because it's a public API. - module = call_sig._name.get_parent_until() - # Compiled modules typically don't allow keyword arguments. - if not isinstance(module, compiled.CompiledObject): - for p in call_sig.params: - # Allow access on _definition here, because it's a - # public API and we don't want to make the internal - # Name object public. - if p._definition.stars == 0: # no *args/**kwargs - yield p._name - - -def filter_names(evaluator, completion_names, stack, like_name): - comp_dct = {} - for name in set(completion_names): - if settings.case_insensitive_completion \ - and str(name).lower().startswith(like_name.lower()) \ - or str(name).startswith(like_name): - - if isinstance(name.parent, (tree.Function, tree.Class)): - # TODO I think this is a hack. It should be an - # er.Function/er.Class before that. - name = evaluator.wrap(name.parent).name - new = classes.Completion( - evaluator, - name, - stack, - len(like_name) - ) - k = (new.name, new.complete) # key - if k in comp_dct and settings.no_completion_duplicates: - comp_dct[k]._same_name_completions.append(new) - else: - comp_dct[k] = new - yield new - - -def get_user_scope(module, position): - """ - Returns the scope in which the user resides. This includes flows. - """ - user_stmt = module.get_statement_for_position(position) - if user_stmt is None: - def scan(scope): - for s in scope.children: - if s.start_pos <= position <= s.end_pos: - if isinstance(s, (tree.Scope, tree.Flow)): - return scan(s) or s - elif s.type in ('suite', 'decorated'): - return scan(s) - return None - - return scan(module) or module - else: - return user_stmt.get_parent_scope(include_flows=True) - - -class Completion: - def __init__(self, evaluator, module, code_lines, position, call_signatures_method): - self._evaluator = evaluator - self._module = evaluator.wrap(module) - self._code_lines = code_lines - - # The first step of completions is to get the name - self._like_name = helpers.get_on_completion_name(module, code_lines, position) - # The actual cursor position is not what we need to calculate - # everything. We want the start of the name we're on. 
- self._position = position[0], position[1] - len(self._like_name) - self._call_signatures_method = call_signatures_method - - def completions(self): - completion_names = self._get_context_completions() - - completions = filter_names(self._evaluator, completion_names, - self.stack, self._like_name) - - return sorted(completions, key=lambda x: (x.name.startswith('__'), - x.name.startswith('_'), - x.name.lower())) - - def _get_context_completions(self): - """ - Analyzes the context that a completion is made in and decides what to - return. - - Technically this works by generating a parser stack and analysing the - current stack for possible grammar nodes. - - Possible enhancements: - - global/nonlocal search global - - yield from / raise from <- could be only exceptions/generators - - In args: */**: no completion - - In params (also lambda): no completion before = - """ - - grammar = self._evaluator.grammar - - try: - self.stack = helpers.get_stack_at_position( - grammar, self._code_lines, self._module, self._position - ) - except helpers.OnErrorLeaf as e: - self.stack = None - if e.error_leaf.value == '.': - # After ErrorLeaf's that are dots, we will not do any - # completions since this probably just confuses the user. - return [] - # If we don't have a context, just use global completion. - - return self._global_completions() - - allowed_keywords, allowed_tokens = \ - helpers.get_possible_completion_types(grammar, self.stack) - - completion_names = list(self._get_keyword_completion_names(allowed_keywords)) - - if token.NAME in allowed_tokens: - # This means that we actually have to do type inference. - - symbol_names = list(self.stack.get_node_names(grammar)) - - nodes = list(self.stack.get_nodes()) - - if "import_stmt" in symbol_names: - level = 0 - only_modules = True - level, names = self._parse_dotted_names(nodes) - if "import_from" in symbol_names: - if 'import' in nodes: - only_modules = False - else: - assert "import_name" in symbol_names - - completion_names += self._get_importer_names( - names, - level, - only_modules - ) - elif nodes and nodes[-1] in ('as', 'def', 'class'): - # No completions for ``with x as foo`` and ``import x as foo``. - # Also true for defining names as a class or function. - return list(self._get_class_context_completions(is_function=True)) - elif symbol_names[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.': - dot = self._module.get_leaf_for_position(self._position) - atom_expr = call_of_leaf(dot.get_previous_leaf()) - completion_names += self._trailer_completions(atom_expr) - else: - completion_names += self._global_completions() - completion_names += self._get_class_context_completions(is_function=False) - - if 'trailer' in symbol_names: - call_signatures = self._call_signatures_method() - completion_names += get_call_signature_param_names(call_signatures) - - return completion_names - - def _get_keyword_completion_names(self, keywords_): - for k in keywords_: - yield keywords.keyword(self._evaluator, k).name - - def _global_completions(self): - scope = get_user_scope(self._module, self._position) - if not scope.is_scope(): # Might be a flow (if/while/etc). 
- scope = scope.get_parent_scope() - scope = self._evaluator.wrap(scope) - debug.dbg('global completion scope: %s', scope) - names_dicts = global_names_dict_generator( - self._evaluator, - scope, - self._position - ) - completion_names = [] - for names_dict, pos in names_dicts: - names = list(chain.from_iterable(names_dict.values())) - if not names: - continue - completion_names += filter_definition_names( - names, self._module.get_statement_for_position(self._position), pos - ) - return completion_names - - def _trailer_completions(self, atom_expr): - scopes = self._evaluator.eval_element(atom_expr) - completion_names = [] - debug.dbg('trailer completion scopes: %s', scopes) - for s in scopes: - names = [] - for names_dict in s.names_dicts(search_global=False): - names += chain.from_iterable(names_dict.values()) - - completion_names += filter_definition_names( - names, self._module.get_statement_for_position(self._position) - ) - return completion_names - - def _parse_dotted_names(self, nodes): - level = 0 - names = [] - for node in nodes[1:]: - if node in ('.', '...'): - if not names: - level += len(node.value) - elif node.type == 'dotted_name': - names += node.children[::2] - elif node.type == 'name': - names.append(node) - else: - break - return level, names - - def _get_importer_names(self, names, level=0, only_modules=True): - names = [str(n) for n in names] - i = imports.Importer(self._evaluator, names, self._module, level) - return i.completion_names(self._evaluator, only_modules=only_modules) - - def _get_class_context_completions(self, is_function=True): - """ - Autocomplete inherited methods when overriding in child class. - """ - leaf = self._module.get_leaf_for_position(self._position, include_prefixes=True) - cls = leaf.get_parent_until(tree.Class) - if isinstance(cls, (tree.Class, tree.Function)): - # Complete the methods that are defined in the super classes. - cls = self._evaluator.wrap(cls) - else: - return - - if cls.start_pos[1] >= leaf.start_pos[1]: - return - - names_dicts = cls.names_dicts(search_global=False, is_instance=True) - # The first dict is the dictionary of class itself. - next(names_dicts) - for names_dict in names_dicts: - for values in names_dict.values(): - for value in values: - if (value.parent.type == 'funcdef') == is_function: - yield value diff --git a/pythonFiles/preview/jedi/api/helpers.py b/pythonFiles/preview/jedi/api/helpers.py deleted file mode 100644 index 8102cc7f2e23..000000000000 --- a/pythonFiles/preview/jedi/api/helpers.py +++ /dev/null @@ -1,315 +0,0 @@ -""" -Helpers for the API -""" -import re -from collections import namedtuple - -from jedi._compatibility import u -from jedi.evaluate.helpers import call_of_leaf -from jedi import parser -from jedi.parser import tokenize -from jedi.cache import time_cache -from jedi import common - - -CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name']) - - -def sorted_definitions(defs): - # Note: `or ''` below is required because `module_path` could be - return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0)) - - -def get_on_completion_name(module, lines, position): - leaf = module.get_leaf_for_position(position) - if leaf is None or leaf.type in ('string', 'error_leaf'): - # Completions inside strings are a bit special, we need to parse the - # string. The same is true for comments and error_leafs. 
- line = lines[position[0] - 1] - # The first step of completions is to get the name - return re.search(r'(?!\d)\w+$|$', line[:position[1]]).group(0) - elif leaf.type not in ('name', 'keyword'): - return '' - - return leaf.value[:position[1] - leaf.start_pos[1]] - - -def _get_code(code_lines, start_pos, end_pos): - # Get relevant lines. - lines = code_lines[start_pos[0] - 1:end_pos[0]] - # Remove the parts at the end of the line. - lines[-1] = lines[-1][:end_pos[1]] - # Remove first line indentation. - lines[0] = lines[0][start_pos[1]:] - return '\n'.join(lines) - - -class OnErrorLeaf(Exception): - @property - def error_leaf(self): - return self.args[0] - - -def _is_on_comment(leaf, position): - # We might be on a comment. - if leaf.type == 'endmarker': - try: - dedent = leaf.get_previous_leaf() - if dedent.type == 'dedent' and dedent.prefix: - # TODO This is needed because the fast parser uses multiple - # endmarker tokens within a file which is obviously ugly. - # This is so ugly that I'm not even commenting how it exactly - # happens, but let me tell you that I want to get rid of it. - leaf = dedent - except IndexError: - pass - - comment_lines = common.splitlines(leaf.prefix) - difference = leaf.start_pos[0] - position[0] - prefix_start_pos = leaf.get_start_pos_of_prefix() - if difference == 0: - indent = leaf.start_pos[1] - elif position[0] == prefix_start_pos[0]: - indent = prefix_start_pos[1] - else: - indent = 0 - line = comment_lines[-difference - 1][:position[1] - indent] - return '#' in line - - -def _get_code_for_stack(code_lines, module, position): - leaf = module.get_leaf_for_position(position, include_prefixes=True) - # It might happen that we're on whitespace or on a comment. This means - # that we would not get the right leaf. - if leaf.start_pos >= position: - if _is_on_comment(leaf, position): - return u('') - - # If we're not on a comment simply get the previous leaf and proceed. - try: - leaf = leaf.get_previous_leaf() - except IndexError: - return u('') # At the beginning of the file. - - is_after_newline = leaf.type == 'newline' - while leaf.type == 'newline': - try: - leaf = leaf.get_previous_leaf() - except IndexError: - return u('') - - if leaf.type in ('indent', 'dedent'): - return u('') - elif leaf.type == 'error_leaf' or leaf.type == 'string': - # Error leafs cannot be parsed, completion in strings is also - # impossible. - raise OnErrorLeaf(leaf) - else: - if leaf == ';': - user_stmt = leaf.parent - else: - user_stmt = leaf.get_definition() - if user_stmt.parent.type == 'simple_stmt': - user_stmt = user_stmt.parent - - if is_after_newline: - if user_stmt.start_pos[1] > position[1]: - # This means that it's actually a dedent and that means that we - # start without context (part of a suite). - return u('') - - # This is basically getting the relevant lines. - return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position) - - -def get_stack_at_position(grammar, code_lines, module, pos): - """ - Returns the possible node names (e.g. import_from, xor_test or yield_stmt). - """ - class EndMarkerReached(Exception): - pass - - def tokenize_without_endmarker(code): - tokens = tokenize.source_tokens(code, use_exact_op_types=True) - for token_ in tokens: - if token_.string == safeword: - raise EndMarkerReached() - else: - yield token_ - - code = _get_code_for_stack(code_lines, module, pos) - # We use a word to tell Jedi when we have reached the start of the - # completion. - # Use Z as a prefix because it's not part of a number suffix. 
- safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI' - # Remove as many indents from **all** code lines as possible. - code = code + safeword - - p = parser.ParserWithRecovery(grammar, code, start_parsing=False) - try: - p.parse(tokenizer=tokenize_without_endmarker(code)) - except EndMarkerReached: - return Stack(p.stack) - raise SystemError("This really shouldn't happen. There's a bug in Jedi.") - - -class Stack(list): - def get_node_names(self, grammar): - for dfa, state, (node_number, nodes) in self: - yield grammar.number2symbol[node_number] - - def get_nodes(self): - for dfa, state, (node_number, nodes) in self: - for node in nodes: - yield node - - -def get_possible_completion_types(grammar, stack): - def add_results(label_index): - try: - grammar_labels.append(inversed_tokens[label_index]) - except KeyError: - try: - keywords.append(inversed_keywords[label_index]) - except KeyError: - t, v = grammar.labels[label_index] - assert t >= 256 - # See if it's a symbol and if we're in its first set - inversed_keywords - itsdfa = grammar.dfas[t] - itsstates, itsfirst = itsdfa - for first_label_index in itsfirst.keys(): - add_results(first_label_index) - - inversed_keywords = dict((v, k) for k, v in grammar.keywords.items()) - inversed_tokens = dict((v, k) for k, v in grammar.tokens.items()) - - keywords = [] - grammar_labels = [] - - def scan_stack(index): - dfa, state, node = stack[index] - states, first = dfa - arcs = states[state] - - for label_index, new_state in arcs: - if label_index == 0: - # An accepting state, check the stack below. - scan_stack(index - 1) - else: - add_results(label_index) - - scan_stack(-1) - - return keywords, grammar_labels - - -def evaluate_goto_definition(evaluator, leaf): - if leaf.type == 'name': - # In case of a name we can just use goto_definition which does all the - # magic itself. - return evaluator.goto_definitions(leaf) - - node = None - parent = leaf.parent - if parent.type == 'atom': - node = leaf.parent - elif parent.type == 'trailer': - node = call_of_leaf(leaf) - - if node is None: - return [] - return evaluator.eval_element(node) - - -CallSignatureDetails = namedtuple( - 'CallSignatureDetails', - ['bracket_leaf', 'call_index', 'keyword_name_str'] -) - - -def _get_index_and_key(nodes, position): - """ - Returns the amount of commas and the keyword argument string. - """ - nodes_before = [c for c in nodes if c.start_pos < position] - if nodes_before[-1].type == 'arglist': - nodes_before = [c for c in nodes_before[-1].children if c.start_pos < position] - - key_str = None - - if nodes_before: - last = nodes_before[-1] - if last.type == 'argument' and last.children[1].end_pos <= position: - # Checked if the argument - key_str = last.children[0].value - elif last == '=': - key_str = nodes_before[-2].value - - return nodes_before.count(','), key_str - - -def _get_call_signature_details_from_error_node(node, position): - for index, element in reversed(list(enumerate(node.children))): - # `index > 0` means that it's a trailer and not an atom. - if element == '(' and element.end_pos <= position and index > 0: - # It's an error node, we don't want to match too much, just - # until the parentheses is enough. 
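-            # Worked example (an assumption, not from the original code):
-            # for the unfinished call "foo(a, b=" this branch matches the
-            # '(' and _get_index_and_key() reports call_index=1 (one comma
-            # before the cursor) and keyword_name_str='b'.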
- children = node.children[index:] - name = element.get_previous_leaf() - if name.type == 'name' or name.parent.type in ('trailer', 'atom'): - return CallSignatureDetails( - element, - *_get_index_and_key(children, position) - ) - - -def get_call_signature_details(module, position): - leaf = module.get_leaf_for_position(position, include_prefixes=True) - if leaf == ')': - if leaf.end_pos == position: - leaf = leaf.get_next_leaf() - # Now that we know where we are in the syntax tree, we start to look at - # parents for possible function definitions. - node = leaf.parent - while node is not None: - if node.type in ('funcdef', 'classdef'): - # Don't show call signatures if there's stuff before it that just - # makes it feel strange to have a call signature. - return None - - for n in node.children[::-1]: - if n.start_pos < position and n.type == 'error_node': - result = _get_call_signature_details_from_error_node(n, position) - if result is not None: - return result - - if node.type == 'trailer' and node.children[0] == '(': - leaf = node.get_previous_leaf() - return CallSignatureDetails( - node.children[0], *_get_index_and_key(node.children, position)) - - node = node.parent - - return None - - -@time_cache("call_signatures_validity") -def cache_call_signatures(evaluator, bracket_leaf, code_lines, user_pos): - """This function calculates the cache key.""" - index = user_pos[0] - 1 - - before_cursor = code_lines[index][:user_pos[1]] - other_lines = code_lines[bracket_leaf.start_pos[0]:index] - whole = '\n'.join(other_lines + [before_cursor]) - before_bracket = re.match(r'.*\(', whole, re.DOTALL) - - module_path = bracket_leaf.get_parent_until().path - if module_path is None: - yield None # Don't cache! - else: - yield (module_path, before_bracket, bracket_leaf.start_pos) - yield evaluate_goto_definition( - evaluator, - bracket_leaf.get_previous_leaf() - ) diff --git a/pythonFiles/preview/jedi/api/interpreter.py b/pythonFiles/preview/jedi/api/interpreter.py deleted file mode 100644 index a6778a6cd65a..000000000000 --- a/pythonFiles/preview/jedi/api/interpreter.py +++ /dev/null @@ -1,67 +0,0 @@ -""" -TODO Some parts of this module are still not well documented. -""" -import copy - -from jedi.cache import underscore_memoization -from jedi.evaluate import helpers -from jedi.evaluate.representation import ModuleWrapper -from jedi.evaluate.compiled import mixed - - -class MixedModule(object): - resets_positions = True - type = 'mixed_module' - - def __init__(self, evaluator, parser_module, namespaces): - self._evaluator = evaluator - self._namespaces = namespaces - - self._namespace_objects = [type('jedi_namespace', (), n) for n in namespaces] - self._wrapped_module = ModuleWrapper(evaluator, parser_module) - # Usually we are dealing with very small code sizes when it comes to - # interpreter modules. In this case we just copy the whole syntax tree - # to be able to modify it. 
-        self._parser_module = copy.deepcopy(parser_module)
-
-        for child in self._parser_module.children:
-            child.parent = self
-
-    def names_dicts(self, search_global):
-        for names_dict in self._wrapped_module.names_dicts(search_global):
-            yield names_dict
-
-        for namespace_obj in self._namespace_objects:
-            m = mixed.MixedObject(self._evaluator, namespace_obj, self._parser_module.name)
-            for names_dict in m.names_dicts(False):
-                yield names_dict
-
-    def __getattr__(self, name):
-        return getattr(self._parser_module, name)
-
-
-class LazyName(helpers.FakeName):
-    def __init__(self, evaluator, module, name, value):
-        super(LazyName, self).__init__(name)
-        self._module = module
-        self._evaluator = evaluator
-        self._value = value
-        self._name = name
-
-    def is_definition(self):
-        return True
-
-    @property
-    @underscore_memoization
-    def parent(self):
-        """
-        Creating fake statements for the interpreter.
-
-        Here we are trying to link back to Python code, if possible. This means
-        we try to find the Python module for a name (not the builtin).
-        """
-        return mixed.create(self._evaluator, self._value)
-
-    @parent.setter
-    def parent(self, value):
-        """Needed because the super class tries to set parent."""
diff --git a/pythonFiles/preview/jedi/api/keywords.py b/pythonFiles/preview/jedi/api/keywords.py
deleted file mode 100644
index d6a72b1db98e..000000000000
--- a/pythonFiles/preview/jedi/api/keywords.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import pydoc
-import keyword
-
-from jedi._compatibility import is_py3, is_py35
-from jedi import common
-from jedi.evaluate.helpers import FakeName
-from jedi.parser.tree import Leaf
-try:
-    from pydoc_data import topics as pydoc_topics
-except ImportError:
-    # Python 2
-    try:
-        import pydoc_topics
-    except ImportError:
-        # This is for the Python 3 embeddable distribution, which doesn't
-        # ship the pydoc_data module in its python3x.zip file.
- pydoc_topics = None - -if is_py3: - if is_py35: - # in python 3.5 async and await are not proper keywords, but for - # completion pursposes should as as though they are - keys = keyword.kwlist + ["async", "await"] - else: - keys = keyword.kwlist -else: - keys = keyword.kwlist + ['None', 'False', 'True'] - - -def has_inappropriate_leaf_keyword(pos, module): - relevant_errors = filter( - lambda error: error.first_pos[0] == pos[0], - module.error_statement_stacks) - - for error in relevant_errors: - if error.next_token in keys: - return True - - return False - - -def completion_names(evaluator, stmt, pos, module): - keyword_list = all_keywords(evaluator) - - if not isinstance(stmt, Leaf) or has_inappropriate_leaf_keyword(pos, module): - keyword_list = filter( - lambda keyword: not keyword.only_valid_as_leaf, - keyword_list - ) - return [keyword.name for keyword in keyword_list] - - -def all_keywords(evaluator, pos=(0, 0)): - return set([Keyword(evaluator, k, pos) for k in keys]) - - -def keyword(evaluator, string, pos=(0, 0)): - if string in keys: - return Keyword(evaluator, string, pos) - else: - return None - - -def get_operator(evaluator, string, pos): - return Keyword(evaluator, string, pos) - - -keywords_only_valid_as_leaf = ( - 'continue', - 'break', -) - - -class Keyword(object): - type = 'completion_keyword' - - def __init__(self, evaluator, name, pos): - self.name = FakeName(name, self, pos) - self.start_pos = pos - self.parent = evaluator.BUILTINS - - def get_parent_until(self): - return self.parent - - @property - def only_valid_as_leaf(self): - return self.name.value in keywords_only_valid_as_leaf - - @property - def names(self): - """ For a `parsing.Name` like comparision """ - return [self.name] - - @property - def docstr(self): - return imitate_pydoc(self.name) - - def __repr__(self): - return '<%s: %s>' % (type(self).__name__, self.name) - - -def imitate_pydoc(string): - """ - It's not possible to get the pydoc's without starting the annoying pager - stuff. - """ - if pydoc_topics is None: - return '' - - # str needed because of possible unicode stuff in py2k (pydoc doesn't work - # with unicode strings) - string = str(string) - h = pydoc.help - with common.ignored(KeyError): - # try to access symbols - string = h.symbols[string] - string, _, related = string.partition(' ') - - get_target = lambda s: h.topics.get(s, h.keywords.get(s)) - while isinstance(string, str): - string = get_target(string) - - try: - # is a tuple now - label, related = string - except TypeError: - return '' - - try: - return pydoc_topics.topics[label] if pydoc_topics else '' - except KeyError: - return '' diff --git a/pythonFiles/preview/jedi/api/usages.py b/pythonFiles/preview/jedi/api/usages.py deleted file mode 100644 index ecb885639032..000000000000 --- a/pythonFiles/preview/jedi/api/usages.py +++ /dev/null @@ -1,49 +0,0 @@ -from jedi._compatibility import unicode -from jedi.api import classes -from jedi.parser import tree -from jedi.evaluate import imports - - -def usages(evaluator, definition_names, mods): - """ - :param definitions: list of Name - """ - def compare_array(definitions): - """ `definitions` are being compared by module/start_pos, because - sometimes the id's of the objects change (e.g. executions). 
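-
-        Illustrative consequence: two distinct wrapper objects for the same
-        definition compare equal here, since both reduce to the same
-        (module, (line, column)) pair.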
- """ - result = [] - for d in definitions: - module = d.get_parent_until() - result.append((module, d.start_pos)) - return result - - search_name = unicode(list(definition_names)[0]) - compare_definitions = compare_array(definition_names) - mods |= set([d.get_parent_until() for d in definition_names]) - definitions = [] - for m in imports.get_modules_containing_name(evaluator, mods, search_name): - try: - check_names = m.used_names[search_name] - except KeyError: - continue - for name in check_names: - - result = evaluator.goto(name) - if [c for c in compare_array(result) if c in compare_definitions]: - definitions.append(classes.Definition(evaluator, name)) - # Previous definitions might be imports, so include them - # (because goto might return that import name). - compare_definitions += compare_array([name]) - return definitions - - -def usages_add_import_modules(evaluator, definitions): - """ Adds the modules of the imports """ - new = set() - for d in definitions: - imp_or_stmt = d.get_definition() - if isinstance(imp_or_stmt, tree.Import): - s = imports.ImportWrapper(evaluator, d) - new |= set(s.follow(is_goto=True)) - return set(definitions) | new diff --git a/pythonFiles/preview/jedi/cache.py b/pythonFiles/preview/jedi/cache.py deleted file mode 100644 index 8dc82544a87b..000000000000 --- a/pythonFiles/preview/jedi/cache.py +++ /dev/null @@ -1,127 +0,0 @@ -""" -This caching is very important for speed and memory optimizations. There's -nothing really spectacular, just some decorators. The following cache types are -available: - -- module caching (`load_parser` and `save_parser`), which uses pickle and is - really important to assure low load times of modules like ``numpy``. -- ``time_cache`` can be used to cache something for just a limited time span, - which can be useful if there's user interaction and the user cannot react - faster than a certain time. - -This module is one of the reasons why |jedi| is not thread-safe. As you can see -there are global variables, which are holding the cache information. Some of -these variables are being cleaned after every API usage. -""" -import time - -from jedi import settings -from jedi.parser.utils import parser_cache -from jedi.parser.utils import underscore_memoization - -_time_caches = {} - - -def clear_time_caches(delete_all=False): - """ Jedi caches many things, that should be completed after each completion - finishes. - - :param delete_all: Deletes also the cache that is normally not deleted, - like parser cache, which is important for faster parsing. - """ - global _time_caches - - if delete_all: - for cache in _time_caches.values(): - cache.clear() - parser_cache.clear() - else: - # normally just kill the expired entries, not all - for tc in _time_caches.values(): - # check time_cache for expired entries - for key, (t, value) in list(tc.items()): - if t < time.time(): - # delete expired entries - del tc[key] - - -def time_cache(time_add_setting): - """ - This decorator works as follows: Call it with a setting and after that - use the function with a callable that returns the key. - But: This function is only called if the key is not available. After a - certain amount of time (`time_add_setting`) the cache is invalid. - - If the given key is None, the function will not be cached. 
- """ - def _temp(key_func): - dct = {} - _time_caches[time_add_setting] = dct - - def wrapper(*args, **kwargs): - generator = key_func(*args, **kwargs) - key = next(generator) - try: - expiry, value = dct[key] - if expiry > time.time(): - return value - except KeyError: - pass - - value = next(generator) - time_add = getattr(settings, time_add_setting) - if key is not None: - dct[key] = time.time() + time_add, value - return value - return wrapper - return _temp - - -def memoize_method(method): - """A normal memoize function.""" - def wrapper(self, *args, **kwargs): - dct = self.__dict__.setdefault('_memoize_method_dct', {}) - key = (args, frozenset(kwargs.items())) - try: - return dct[key] - except KeyError: - result = method(self, *args, **kwargs) - dct[key] = result - return result - return wrapper - - -def cache_star_import(func): - @time_cache("star_import_cache_validity") - def wrapper(self): - yield self.base # The cache key - yield func(self) - return wrapper - - -def _invalidate_star_import_cache_module(module, only_main=False): - """ Important if some new modules are being reparsed """ - try: - t, modules = _time_caches['star_import_cache_validity'][module] - except KeyError: - pass - else: - del _time_caches['star_import_cache_validity'][module] - - # This stuff was part of load_parser. However since we're most likely - # not going to use star import caching anymore, just ignore it. - #else: - # In case there is already a module cached and this module - # has to be reparsed, we also need to invalidate the import - # caches. - # _invalidate_star_import_cache_module(parser_cache_item.parser.module) - - -def invalidate_star_import_cache(path): - """On success returns True.""" - try: - parser_cache_item = parser_cache[path] - except KeyError: - pass - else: - _invalidate_star_import_cache_module(parser_cache_item.parser.module) diff --git a/pythonFiles/preview/jedi/common.py b/pythonFiles/preview/jedi/common.py deleted file mode 100644 index ae10f423f393..000000000000 --- a/pythonFiles/preview/jedi/common.py +++ /dev/null @@ -1,169 +0,0 @@ -""" A universal module with functions / classes without dependencies. """ -import sys -import contextlib -import functools -import re -from itertools import chain -from ast import literal_eval - -from jedi._compatibility import unicode, reraise -from jedi import settings - - -class UncaughtAttributeError(Exception): - """ - Important, because `__getattr__` and `hasattr` catch AttributeErrors - implicitly. This is really evil (mainly because of `__getattr__`). - `hasattr` in Python 2 is even more evil, because it catches ALL exceptions. - Therefore this class originally had to be derived from `BaseException` - instead of `Exception`. But because I removed relevant `hasattr` from - the code base, we can now switch back to `Exception`. - - :param base: return values of sys.exc_info(). - """ - - -def safe_property(func): - return property(reraise_uncaught(func)) - - -def reraise_uncaught(func): - """ - Re-throw uncaught `AttributeError`. - - Usage: Put ``@rethrow_uncaught`` in front of the function - which does **not** suppose to raise `AttributeError`. - - AttributeError is easily get caught by `hasattr` and another - ``except AttributeError`` clause. This becomes problem when you use - a lot of "dynamic" attributes (e.g., using ``@property``) because you - can't distinguish if the property does not exist for real or some code - inside of the "dynamic" attribute through that error. 
In a well - written code, such error should not exist but getting there is very - difficult. This decorator is to help us getting there by changing - `AttributeError` to `UncaughtAttributeError` to avoid unexpected catch. - This helps us noticing bugs earlier and facilitates debugging. - - .. note:: Treating StopIteration here is easy. - Add that feature when needed. - """ - @functools.wraps(func) - def wrapper(*args, **kwds): - try: - return func(*args, **kwds) - except AttributeError: - exc_info = sys.exc_info() - reraise(UncaughtAttributeError(exc_info[1]), exc_info[2]) - return wrapper - - -class PushBackIterator(object): - def __init__(self, iterator): - self.pushes = [] - self.iterator = iterator - self.current = None - - def push_back(self, value): - self.pushes.append(value) - - def __iter__(self): - return self - - def next(self): - """ Python 2 Compatibility """ - return self.__next__() - - def __next__(self): - if self.pushes: - self.current = self.pushes.pop() - else: - self.current = next(self.iterator) - return self.current - - -@contextlib.contextmanager -def scale_speed_settings(factor): - a = settings.max_executions - b = settings.max_until_execution_unique - settings.max_executions *= factor - settings.max_until_execution_unique *= factor - try: - yield - finally: - settings.max_executions = a - settings.max_until_execution_unique = b - - -def indent_block(text, indention=' '): - """This function indents a text block with a default of four spaces.""" - temp = '' - while text and text[-1] == '\n': - temp += text[-1] - text = text[:-1] - lines = text.split('\n') - return '\n'.join(map(lambda s: indention + s, lines)) + temp - - -@contextlib.contextmanager -def ignored(*exceptions): - """ - Context manager that ignores all of the specified exceptions. This will - be in the standard library starting with Python 3.4. - """ - try: - yield - except exceptions: - pass - - -def source_to_unicode(source, encoding=None): - def detect_encoding(): - """ - For the implementation of encoding definitions in Python, look at: - - http://www.python.org/dev/peps/pep-0263/ - - http://docs.python.org/2/reference/lexical_analysis.html#encoding-declarations - """ - byte_mark = literal_eval(r"b'\xef\xbb\xbf'") - if source.startswith(byte_mark): - # UTF-8 byte-order mark - return 'utf-8' - - first_two_lines = re.match(br'(?:[^\n]*\n){0,2}', source).group(0) - possible_encoding = re.search(br"coding[=:]\s*([-\w.]+)", - first_two_lines) - if possible_encoding: - return possible_encoding.group(1) - else: - # the default if nothing else has been set -> PEP 263 - return encoding if encoding is not None else 'utf-8' - - if isinstance(source, unicode): - # only cast str/bytes - return source - - encoding = detect_encoding() - if not isinstance(encoding, unicode): - encoding = unicode(encoding, 'utf-8', 'replace') - # cast to unicode by default - return unicode(source, encoding, 'replace') - - -def splitlines(string): - """ - A splitlines for Python code. In contrast to Python's ``str.splitlines``, - looks at form feeds and other special characters as normal text. Just - splits ``\n`` and ``\r\n``. - Also different: Returns ``['']`` for an empty string input. 
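-
-    Illustrative examples::
-
-        splitlines('a\nb\r\nc')  ->  ['a', 'b', 'c']
-        splitlines('')           ->  ['']
-        splitlines('a\x0cb')     ->  ['a\x0cb']   # form feed kept as text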
- """ - return re.split('\n|\r\n', string) - - -def unite(iterable): - """Turns a two dimensional array into a one dimensional.""" - return set(chain.from_iterable(iterable)) - - -def to_list(func): - def wrapper(*args, **kwargs): - return list(func(*args, **kwargs)) - return wrapper diff --git a/pythonFiles/preview/jedi/debug.py b/pythonFiles/preview/jedi/debug.py deleted file mode 100644 index 692f9c82842b..000000000000 --- a/pythonFiles/preview/jedi/debug.py +++ /dev/null @@ -1,133 +0,0 @@ -from jedi._compatibility import encoding, is_py3, u -import inspect -import os -import time - -def _lazy_colorama_init(): - """ - Lazily init colorama if necessary, not to screw up stdout is debug not - enabled. - - This version of the function does nothing. - """ - pass - -_inited=False - -try: - if os.name == 'nt': - # Does not work on Windows, as pyreadline and colorama interfere - raise ImportError - else: - # Use colorama for nicer console output. - from colorama import Fore, init - from colorama import initialise - def _lazy_colorama_init(): - """ - Lazily init colorama if necessary, not to screw up stdout is - debug not enabled. - - This version of the function does init colorama. - """ - global _inited - if not _inited: - # pytest resets the stream at the end - causes troubles. Since - # after every output the stream is reset automatically we don't - # need this. - initialise.atexit_done = True - try: - init() - except Exception: - # Colorama fails with initializing under vim and is buggy in - # version 0.3.6. - pass - _inited = True - -except ImportError: - class Fore(object): - RED = '' - GREEN = '' - YELLOW = '' - MAGENTA = '' - RESET = '' - -NOTICE = object() -WARNING = object() -SPEED = object() - -enable_speed = False -enable_warning = False -enable_notice = False - -# callback, interface: level, str -debug_function = None -ignored_modules = ['jedi.parser'] -_debug_indent = 0 -_start_time = time.time() - - -def reset_time(): - global _start_time, _debug_indent - _start_time = time.time() - _debug_indent = 0 - - -def increase_indent(func): - """Decorator for makin """ - def wrapper(*args, **kwargs): - global _debug_indent - _debug_indent += 1 - try: - return func(*args, **kwargs) - finally: - _debug_indent -= 1 - return wrapper - - -def dbg(message, *args, **kwargs): - """ Looks at the stack, to see if a debug message should be printed. """ - # Python 2 compatibility, because it doesn't understand default args - color = kwargs.pop('color', 'GREEN') - assert color - - if debug_function and enable_notice: - frm = inspect.stack()[1] - mod = inspect.getmodule(frm[0]) - if not (mod.__name__ in ignored_modules): - i = ' ' * _debug_indent - _lazy_colorama_init() - debug_function(color, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args)) - - -def warning(message, *args, **kwargs): - format = kwargs.pop('format', True) - assert not kwargs - - if debug_function and enable_warning: - i = ' ' * _debug_indent - if format: - message = message % tuple(u(repr(a)) for a in args) - debug_function('RED', i + 'warning: ' + message) - - -def speed(name): - if debug_function and enable_speed: - now = time.time() - i = ' ' * _debug_indent - debug_function('YELLOW', i + 'speed: ' + '%s %s' % (name, now - _start_time)) - - -def print_to_stdout(color, str_out): - """ - The default debug function that prints to standard out. - - :param str color: A string that is an attribute of ``colorama.Fore``. 
- """ - col = getattr(Fore, color) - _lazy_colorama_init() - if not is_py3: - str_out = str_out.encode(encoding, 'replace') - print(col + str_out + Fore.RESET) - - -# debug_function = print_to_stdout diff --git a/pythonFiles/preview/jedi/evaluate/__init__.py b/pythonFiles/preview/jedi/evaluate/__init__.py deleted file mode 100644 index fe2c70237afd..000000000000 --- a/pythonFiles/preview/jedi/evaluate/__init__.py +++ /dev/null @@ -1,536 +0,0 @@ -""" -Evaluation of Python code in |jedi| is based on three assumptions: - -* The code uses as least side effects as possible. Jedi understands certain - list/tuple/set modifications, but there's no guarantee that Jedi detects - everything (list.append in different modules for example). -* No magic is being used: - - - metaclasses - - ``setattr()`` / ``__import__()`` - - writing to ``globals()``, ``locals()``, ``object.__dict__`` -* The programmer is not a total dick, e.g. like `this - `_ :-) - -The actual algorithm is based on a principle called lazy evaluation. If you -don't know about it, google it. That said, the typical entry point for static -analysis is calling ``eval_statement``. There's separate logic for -autocompletion in the API, the evaluator is all about evaluating an expression. - -Now you need to understand what follows after ``eval_statement``. Let's -make an example:: - - import datetime - datetime.date.toda# <-- cursor here - -First of all, this module doesn't care about completion. It really just cares -about ``datetime.date``. At the end of the procedure ``eval_statement`` will -return the ``date`` class. - -To *visualize* this (simplified): - -- ``Evaluator.eval_statement`` doesn't do much, because there's no assignment. -- ``Evaluator.eval_element`` cares for resolving the dotted path -- ``Evaluator.find_types`` searches for global definitions of datetime, which - it finds in the definition of an import, by scanning the syntax tree. -- Using the import logic, the datetime module is found. -- Now ``find_types`` is called again by ``eval_element`` to find ``date`` - inside the datetime module. - -Now what would happen if we wanted ``datetime.date.foo.bar``? Two more -calls to ``find_types``. However the second call would be ignored, because the -first one would return nothing (there's no foo attribute in ``date``). - -What if the import would contain another ``ExprStmt`` like this:: - - from foo import bar - Date = bar.baz - -Well... You get it. Just another ``eval_statement`` recursion. It's really -easy. Python can obviously get way more complicated then this. To understand -tuple assignments, list comprehensions and everything else, a lot more code had -to be written. - -Jedi has been tested very well, so you can just start modifying code. It's best -to write your own test first for your "new" feature. Don't be scared of -breaking stuff. As long as the tests pass, you're most likely to be fine. - -I need to mention now that lazy evaluation is really good because it -only *evaluates* what needs to be *evaluated*. All the statements and modules -that are not used are just being ignored. 
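-
-A compressed sketch of the walk-through above (illustrative; ``node_for`` is
-a placeholder for obtaining a parse node, not a real helper)::
-
-    evaluator = Evaluator(grammar)
-    # eval_element -> find_types('datetime') -> import logic
-    # -> find_types('date') on the datetime module -> the date class
-    types = evaluator.eval_element(node_for('datetime.date'))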
-""" - -import copy -import sys -from itertools import chain - -from jedi.parser import tree -from jedi import debug -from jedi.evaluate import representation as er -from jedi.evaluate import imports -from jedi.evaluate import recursion -from jedi.evaluate import iterable -from jedi.evaluate.cache import memoize_default -from jedi.evaluate import stdlib -from jedi.evaluate import finder -from jedi.evaluate import compiled -from jedi.evaluate import precedence -from jedi.evaluate import param -from jedi.evaluate import helpers - - -class Evaluator(object): - def __init__(self, grammar, sys_path=None): - self.grammar = grammar - self.memoize_cache = {} # for memoize decorators - # To memorize modules -> equals `sys.modules`. - self.modules = {} # like `sys.modules`. - self.compiled_cache = {} # see `evaluate.compiled.create()` - self.mixed_cache = {} # see `evaluate.compiled.mixed.create()` - self.analysis = [] - self.predefined_if_name_dict_dict = {} - self.dynamic_params_depth = 0 - self.is_analysis = False - - if sys_path is None: - sys_path = sys.path - self.sys_path = copy.copy(sys_path) - try: - self.sys_path.remove('') - except ValueError: - pass - - self.reset_recursion_limitations() - - # Constants - self.BUILTINS = compiled.get_special_object(self, 'BUILTINS') - - def reset_recursion_limitations(self): - self.recursion_detector = recursion.RecursionDetector(self) - self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self) - - def wrap(self, element): - if isinstance(element, (er.Wrapper, er.InstanceElement, - er.ModuleWrapper, er.FunctionExecution, er.Instance, compiled.CompiledObject)) or element is None: - # TODO this is so ugly, please refactor. - return element - - if element.type == 'classdef': - return er.Class(self, element) - elif element.type == 'funcdef': - return er.Function(self, element) - elif element.type == 'lambda': - return er.LambdaWrapper(self, element) - elif element.type == 'file_input': - return er.ModuleWrapper(self, element) - else: - return element - - def find_types(self, scope, name_str, position=None, search_global=False, - is_goto=False): - """ - This is the search function. The most important part to debug. - `remove_statements` and `filter_statements` really are the core part of - this completion. - - :param position: Position of the last statement -> tuple of line, column - :return: List of Names. Their parents are the types. - """ - f = finder.NameFinder(self, scope, name_str, position) - scopes = f.scopes(search_global) - if is_goto: - return f.filter_name(scopes) - return f.find(scopes, attribute_lookup=not search_global) - - #@memoize_default(default=[], evaluator_is_first_arg=True) - #@recursion.recursion_decorator - @debug.increase_indent - def eval_statement(self, stmt, seek_name=None): - """ - The starting point of the completion. A statement always owns a call - list, which are the calls, that a statement does. In case multiple - names are defined in the statement, `seek_name` returns the result for - this name. - - :param stmt: A `tree.ExprStmt`. - """ - debug.dbg('eval_statement %s (%s)', stmt, seek_name) - rhs = stmt.get_rhs() - types = self.eval_element(rhs) - - if seek_name: - types = finder.check_tuple_assignments(self, types, seek_name) - - first_operation = stmt.first_operation() - if first_operation not in ('=', None) and not isinstance(stmt, er.InstanceElement) and first_operation.type == 'operator': # TODO don't check for this. 
- # `=` is always the last character in aug assignments -> -1 - operator = copy.copy(first_operation) - operator.value = operator.value[:-1] - name = str(stmt.get_defined_names()[0]) - parent = self.wrap(stmt.get_parent_scope()) - left = self.find_types(parent, name, stmt.start_pos, search_global=True) - - for_stmt = stmt.get_parent_until(tree.ForStmt) - if isinstance(for_stmt, tree.ForStmt) and types \ - and for_stmt.defines_one_name(): - # Iterate through result and add the values, that's possible - # only in for loops without clutter, because they are - # predictable. Also only do it, if the variable is not a tuple. - node = for_stmt.get_input_node() - for_iterables = self.eval_element(node) - ordered = list(iterable.py__iter__(self, for_iterables, node)) - - for index_types in ordered: - dct = {str(for_stmt.children[1]): index_types} - self.predefined_if_name_dict_dict[for_stmt] = dct - t = self.eval_element(rhs) - left = precedence.calculate(self, left, operator, t) - types = left - if ordered: - # If there are no for entries, we cannot iterate and the - # types are defined by += entries. Therefore the for loop - # is never called. - del self.predefined_if_name_dict_dict[for_stmt] - else: - types = precedence.calculate(self, left, operator, types) - debug.dbg('eval_statement result %s', types) - return types - - def eval_element(self, element): - if isinstance(element, iterable.AlreadyEvaluated): - return set(element) - elif isinstance(element, iterable.MergedNodes): - return iterable.unite(self.eval_element(e) for e in element) - - if_stmt = element.get_parent_until((tree.IfStmt, tree.ForStmt, tree.IsScope)) - predefined_if_name_dict = self.predefined_if_name_dict_dict.get(if_stmt) - if predefined_if_name_dict is None and isinstance(if_stmt, tree.IfStmt): - if_stmt_test = if_stmt.children[1] - name_dicts = [{}] - # If we already did a check, we don't want to do it again -> If - # predefined_if_name_dict_dict is filled, we stop. - # We don't want to check the if stmt itself, it's just about - # the content. - if element.start_pos > if_stmt_test.end_pos: - # Now we need to check if the names in the if_stmt match the - # names in the suite. - if_names = helpers.get_names_of_node(if_stmt_test) - element_names = helpers.get_names_of_node(element) - str_element_names = [str(e) for e in element_names] - if any(str(i) in str_element_names for i in if_names): - for if_name in if_names: - definitions = self.goto_definitions(if_name) - # Every name that has multiple different definitions - # causes the complexity to rise. The complexity should - # never fall below 1. - if len(definitions) > 1: - if len(name_dicts) * len(definitions) > 16: - debug.dbg('Too many options for if branch evaluation %s.', if_stmt) - # There's only a certain amount of branches - # Jedi can evaluate, otherwise it will take to - # long. 
- name_dicts = [{}] - break - - original_name_dicts = list(name_dicts) - name_dicts = [] - for definition in definitions: - new_name_dicts = list(original_name_dicts) - for i, name_dict in enumerate(new_name_dicts): - new_name_dicts[i] = name_dict.copy() - new_name_dicts[i][str(if_name)] = [definition] - - name_dicts += new_name_dicts - else: - for name_dict in name_dicts: - name_dict[str(if_name)] = definitions - if len(name_dicts) > 1: - result = set() - for name_dict in name_dicts: - self.predefined_if_name_dict_dict[if_stmt] = name_dict - try: - result |= self._eval_element_not_cached(element) - finally: - del self.predefined_if_name_dict_dict[if_stmt] - return result - else: - return self._eval_element_if_evaluated(element) - return self._eval_element_cached(element) - else: - if predefined_if_name_dict: - return self._eval_element_not_cached(element) - else: - return self._eval_element_if_evaluated(element) - return self._eval_element_cached(element) - - def _eval_element_if_evaluated(self, element): - """ - TODO This function is temporary: Merge with eval_element. - """ - parent = element - while parent is not None: - parent = parent.parent - predefined_if_name_dict = self.predefined_if_name_dict_dict.get(parent) - if predefined_if_name_dict is not None: - return self._eval_element_not_cached(element) - return self._eval_element_cached(element) - - @memoize_default(default=set(), evaluator_is_first_arg=True) - def _eval_element_cached(self, element): - return self._eval_element_not_cached(element) - - @debug.increase_indent - def _eval_element_not_cached(self, element): - debug.dbg('eval_element %s@%s', element, element.start_pos) - types = set() - if isinstance(element, (tree.Name, tree.Literal)) or tree.is_node(element, 'atom'): - types = self._eval_atom(element) - elif isinstance(element, tree.Keyword): - # For False/True/None - if element.value in ('False', 'True', 'None'): - types.add(compiled.builtin_from_name(self, element.value)) - # else: print e.g. could be evaluated like this in Python 2.7 - elif element.isinstance(tree.Lambda): - types = set([er.LambdaWrapper(self, element)]) - elif element.isinstance(er.LambdaWrapper): - types = set([element]) # TODO this is no real evaluation. - elif element.type == 'expr_stmt': - types = self.eval_statement(element) - elif element.type in ('power', 'atom_expr'): - types = self._eval_atom(element.children[0]) - for trailer in element.children[1:]: - if trailer == '**': # has a power operation. - right = self.eval_element(element.children[2]) - types = set(precedence.calculate(self, types, trailer, right)) - break - types = self.eval_trailer(types, trailer) - elif element.type in ('testlist_star_expr', 'testlist',): - # The implicit tuple in statements. - types = set([iterable.ImplicitTuple(self, element)]) - elif element.type in ('not_test', 'factor'): - types = self.eval_element(element.children[-1]) - for operator in element.children[:-1]: - types = set(precedence.factor_calculate(self, types, operator)) - elif element.type == 'test': - # `x if foo else y` case. - types = (self.eval_element(element.children[0]) | - self.eval_element(element.children[-1])) - elif element.type == 'operator': - # Must be an ellipsis, other operators are not evaluated. - assert element.value == '...' 
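-            # e.g. the "..." subscript in "arr[..., 0]" (illustrative): it
-            # becomes the compiled Ellipsis object on the next line.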
- types = set([compiled.create(self, Ellipsis)]) - elif element.type == 'dotted_name': - types = self._eval_atom(element.children[0]) - for next_name in element.children[2::2]: - types = set(chain.from_iterable(self.find_types(typ, next_name) - for typ in types)) - types = types - elif element.type == 'eval_input': - types = self._eval_element_not_cached(element.children[0]) - elif element.type == 'annassign': - types = self.eval_element(element.children[1]) - else: - types = precedence.calculate_children(self, element.children) - debug.dbg('eval_element result %s', types) - return types - - def _eval_atom(self, atom): - """ - Basically to process ``atom`` nodes. The parser sometimes doesn't - generate the node (because it has just one child). In that case an atom - might be a name or a literal as well. - """ - if isinstance(atom, tree.Name): - # This is the first global lookup. - stmt = atom.get_definition() - scope = stmt.get_parent_until(tree.IsScope, include_current=True) - if isinstance(scope, (tree.Function, er.FunctionExecution)): - # Adjust scope: If the name is not in the suite, it's a param - # default or annotation and will be resolved as part of the - # parent scope. - colon = scope.children.index(':') - if atom.start_pos < scope.children[colon + 1].start_pos: - scope = scope.get_parent_scope() - if isinstance(stmt, tree.CompFor): - stmt = stmt.get_parent_until((tree.ClassOrFunc, tree.ExprStmt)) - if stmt.type != 'expr_stmt': - # We only need to adjust the start_pos for statements, because - # there the name cannot be used. - stmt = atom - return self.find_types(scope, atom, stmt.start_pos, search_global=True) - elif isinstance(atom, tree.Literal): - return set([compiled.create(self, atom.eval())]) - else: - c = atom.children - if c[0].type == 'string': - # Will be one string. - types = self._eval_atom(c[0]) - for string in c[1:]: - right = self._eval_atom(string) - types = precedence.calculate(self, types, '+', right) - return types - # Parentheses without commas are not tuples. - elif c[0] == '(' and not len(c) == 2 \ - and not(tree.is_node(c[1], 'testlist_comp') - and len(c[1].children) > 1): - return self.eval_element(c[1]) - - try: - comp_for = c[1].children[1] - except (IndexError, AttributeError): - pass - else: - if comp_for == ':': - # Dict comprehensions have a colon at the 3rd index. - try: - comp_for = c[1].children[3] - except IndexError: - pass - - if comp_for.type == 'comp_for': - return set([iterable.Comprehension.from_atom(self, atom)]) - return set([iterable.Array(self, atom)]) - - def eval_trailer(self, types, trailer): - trailer_op, node = trailer.children[:2] - if node == ')': # `arglist` is optional. - node = () - - new_types = set() - if trailer_op == '[': - new_types |= iterable.py__getitem__(self, types, trailer) - else: - for typ in types: - debug.dbg('eval_trailer: %s in scope %s', trailer, typ) - if trailer_op == '.': - new_types |= self.find_types(typ, node) - elif trailer_op == '(': - new_types |= self.execute(typ, node, trailer) - return new_types - - def execute_evaluated(self, obj, *args): - """ - Execute a function with already executed arguments. 
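-
-        Sketch (illustrative): execute_evaluated(func, obj) wraps obj in
-        iterable.AlreadyEvaluated so that execute() treats it as a finished
-        value instead of a syntax node that still needs evaluation.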
- """ - args = [iterable.AlreadyEvaluated([arg]) for arg in args] - return self.execute(obj, args) - - @debug.increase_indent - def execute(self, obj, arguments=(), trailer=None): - if not isinstance(arguments, param.Arguments): - arguments = param.Arguments(self, arguments, trailer) - - if self.is_analysis: - arguments.eval_all() - - if obj.isinstance(er.Function): - obj = obj.get_decorated_func() - - debug.dbg('execute: %s %s', obj, arguments) - try: - # Some stdlib functions like super(), namedtuple(), etc. have been - # hard-coded in Jedi to support them. - return stdlib.execute(self, obj, arguments) - except stdlib.NotInStdLib: - pass - - try: - func = obj.py__call__ - except AttributeError: - debug.warning("no execution possible %s", obj) - return set() - else: - types = func(arguments) - debug.dbg('execute result: %s in %s', types, obj) - return types - - def goto_definitions(self, name): - def_ = name.get_definition() - is_simple_name = name.parent.type not in ('power', 'trailer') - if is_simple_name: - if name.parent.type == 'classdef' and name.parent.name == name: - return [self.wrap(name.parent)] - if name.parent.type in ('file_input', 'funcdef'): - return [self.wrap(name.parent)] - if def_.type == 'expr_stmt' and name in def_.get_defined_names(): - return self.eval_statement(def_, name) - elif def_.type == 'for_stmt': - container_types = self.eval_element(def_.children[3]) - for_types = iterable.py__iter__types(self, container_types, def_.children[3]) - return finder.check_tuple_assignments(self, for_types, name) - elif def_.type in ('import_from', 'import_name'): - return imports.ImportWrapper(self, name).follow() - - call = helpers.call_of_leaf(name) - return self.eval_element(call) - - def goto(self, name): - def resolve_implicit_imports(names): - for name in names: - if isinstance(name.parent, helpers.FakeImport): - # Those are implicit imports. - s = imports.ImportWrapper(self, name) - for n in s.follow(is_goto=True): - yield n - else: - yield name - - stmt = name.get_definition() - par = name.parent - if par.type == 'argument' and par.children[1] == '=' and par.children[0] == name: - # Named param goto. - trailer = par.parent - if trailer.type == 'arglist': - trailer = trailer.parent - if trailer.type != 'classdef': - if trailer.type == 'decorator': - types = self.eval_element(trailer.children[1]) - else: - i = trailer.parent.children.index(trailer) - to_evaluate = trailer.parent.children[:i] - types = self.eval_element(to_evaluate[0]) - for trailer in to_evaluate[1:]: - types = self.eval_trailer(types, trailer) - param_names = [] - for typ in types: - try: - params = typ.params - except AttributeError: - pass - else: - param_names += [param.name for param in params - if param.name.value == name.value] - return param_names - elif isinstance(par, tree.ExprStmt) and name in par.get_defined_names(): - # Only take the parent, because if it's more complicated than just - # a name it's something you can "goto" again. - return [name] - elif isinstance(par, (tree.Param, tree.Function, tree.Class)) and par.name is name: - return [name] - elif isinstance(stmt, tree.Import): - modules = imports.ImportWrapper(self, name).follow(is_goto=True) - return list(resolve_implicit_imports(modules)) - elif par.type == 'dotted_name': # Is a decorator. 
- index = par.children.index(name) - if index > 0: - new_dotted = helpers.deep_ast_copy(par) - new_dotted.children[index - 1:] = [] - types = self.eval_element(new_dotted) - return resolve_implicit_imports(iterable.unite( - self.find_types(typ, name, is_goto=True) for typ in types - )) - - scope = name.get_parent_scope() - if tree.is_node(par, 'trailer') and par.children[0] == '.': - call = helpers.call_of_leaf(name, cut_own_trailer=True) - types = self.eval_element(call) - return resolve_implicit_imports(iterable.unite( - self.find_types(typ, name, is_goto=True) for typ in types - )) - else: - if stmt.type != 'expr_stmt': - # We only need to adjust the start_pos for statements, because - # there the name cannot be used. - stmt = name - return self.find_types(scope, name, stmt.start_pos, - search_global=True, is_goto=True) diff --git a/pythonFiles/preview/jedi/evaluate/analysis.py b/pythonFiles/preview/jedi/evaluate/analysis.py deleted file mode 100644 index 407bc7da464c..000000000000 --- a/pythonFiles/preview/jedi/evaluate/analysis.py +++ /dev/null @@ -1,216 +0,0 @@ -""" -Module for statical analysis. -""" -from jedi import debug -from jedi.parser import tree -from jedi.evaluate.compiled import CompiledObject - -from jedi.common import unite - - -CODES = { - 'attribute-error': (1, AttributeError, 'Potential AttributeError.'), - 'name-error': (2, NameError, 'Potential NameError.'), - 'import-error': (3, ImportError, 'Potential ImportError.'), - 'type-error-too-many-arguments': (4, TypeError, None), - 'type-error-too-few-arguments': (5, TypeError, None), - 'type-error-keyword-argument': (6, TypeError, None), - 'type-error-multiple-values': (7, TypeError, None), - 'type-error-star-star': (8, TypeError, None), - 'type-error-star': (9, TypeError, None), - 'type-error-operation': (10, TypeError, None), - 'type-error-not-iterable': (11, TypeError, None), - 'type-error-isinstance': (12, TypeError, None), - 'type-error-not-subscriptable': (13, TypeError, None), - 'value-error-too-many-values': (14, ValueError, None), - 'value-error-too-few-values': (15, ValueError, None), -} - - -class Error(object): - def __init__(self, name, module_path, start_pos, message=None): - self.path = module_path - self._start_pos = start_pos - self.name = name - if message is None: - message = CODES[self.name][2] - self.message = message - - @property - def line(self): - return self._start_pos[0] - - @property - def column(self): - return self._start_pos[1] - - @property - def code(self): - # The class name start - first = self.__class__.__name__[0] - return first + str(CODES[self.name][0]) - - def __unicode__(self): - return '%s:%s:%s: %s %s' % (self.path, self.line, self.column, - self.code, self.message) - - def __str__(self): - return self.__unicode__() - - def __eq__(self, other): - return (self.path == other.path and self.name == other.name - and self._start_pos == other._start_pos) - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - return hash((self.path, self._start_pos, self.name)) - - def __repr__(self): - return '<%s %s: %s@%s,%s>' % (self.__class__.__name__, - self.name, self.path, - self._start_pos[0], self._start_pos[1]) - - -class Warning(Error): - pass - - -def add(evaluator, name, jedi_obj, message=None, typ=Error, payload=None): - from jedi.evaluate.iterable import MergedNodes - while isinstance(jedi_obj, MergedNodes): - if len(jedi_obj) != 1: - # TODO is this kosher? 
- return - jedi_obj = list(jedi_obj)[0] - - exception = CODES[name][1] - if _check_for_exception_catch(evaluator, jedi_obj, exception, payload): - return - - module_path = jedi_obj.get_parent_until().path - instance = typ(name, module_path, jedi_obj.start_pos, message) - debug.warning(str(instance), format=False) - evaluator.analysis.append(instance) - - -def _check_for_setattr(instance): - """ - Check if there's any setattr method inside an instance. If so, return True. - """ - module = instance.get_parent_until() - try: - stmts = module.used_names['setattr'] - except KeyError: - return False - - return any(instance.start_pos < stmt.start_pos < instance.end_pos - for stmt in stmts) - - -def add_attribute_error(evaluator, scope, name): - message = ('AttributeError: %s has no attribute %s.' % (scope, name)) - from jedi.evaluate.representation import Instance - # Check for __getattr__/__getattribute__ existance and issue a warning - # instead of an error, if that happens. - if isinstance(scope, Instance): - typ = Warning - try: - scope.get_subscope_by_name('__getattr__') - except KeyError: - try: - scope.get_subscope_by_name('__getattribute__') - except KeyError: - if not _check_for_setattr(scope): - typ = Error - else: - typ = Error - - payload = scope, name - add(evaluator, 'attribute-error', name, message, typ, payload) - - -def _check_for_exception_catch(evaluator, jedi_obj, exception, payload=None): - """ - Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and - doesn't count as an error (if equal to `exception`). - Also checks `hasattr` for AttributeErrors and uses the `payload` to compare - it. - Returns True if the exception was catched. - """ - def check_match(cls, exception): - try: - return isinstance(cls, CompiledObject) and issubclass(exception, cls.obj) - except TypeError: - return False - - def check_try_for_except(obj, exception): - # Only nodes in try - iterator = iter(obj.children) - for branch_type in iterator: - colon = next(iterator) - suite = next(iterator) - if branch_type == 'try' \ - and not (branch_type.start_pos < jedi_obj.start_pos <= suite.end_pos): - return False - - for node in obj.except_clauses(): - if node is None: - return True # An exception block that catches everything. 
- else: - except_classes = evaluator.eval_element(node) - for cls in except_classes: - from jedi.evaluate import iterable - if isinstance(cls, iterable.Array) and cls.type == 'tuple': - # multiple exceptions - for typ in unite(cls.py__iter__()): - if check_match(typ, exception): - return True - else: - if check_match(cls, exception): - return True - - def check_hasattr(node, suite): - try: - assert suite.start_pos <= jedi_obj.start_pos < suite.end_pos - assert node.type in ('power', 'atom_expr') - base = node.children[0] - assert base.type == 'name' and base.value == 'hasattr' - trailer = node.children[1] - assert trailer.type == 'trailer' - arglist = trailer.children[1] - assert arglist.type == 'arglist' - from jedi.evaluate.param import Arguments - args = list(Arguments(evaluator, arglist).unpack()) - # Arguments should be very simple - assert len(args) == 2 - - # Check name - key, values = args[1] - assert len(values) == 1 - names = list(evaluator.eval_element(values[0])) - assert len(names) == 1 and isinstance(names[0], CompiledObject) - assert names[0].obj == str(payload[1]) - - # Check objects - key, values = args[0] - assert len(values) == 1 - objects = evaluator.eval_element(values[0]) - return payload[0] in objects - except AssertionError: - return False - - obj = jedi_obj - while obj is not None and not obj.isinstance(tree.Function, tree.Class): - if obj.isinstance(tree.Flow): - # try/except catch check - if obj.isinstance(tree.TryStmt) and check_try_for_except(obj, exception): - return True - # hasattr check - if exception == AttributeError and obj.isinstance(tree.IfStmt, tree.WhileStmt): - if check_hasattr(obj.children[1], obj.children[3]): - return True - obj = obj.parent - - return False diff --git a/pythonFiles/preview/jedi/evaluate/cache.py b/pythonFiles/preview/jedi/evaluate/cache.py deleted file mode 100644 index b44f812accd2..000000000000 --- a/pythonFiles/preview/jedi/evaluate/cache.py +++ /dev/null @@ -1,58 +0,0 @@ -""" -- the popular ``memoize_default`` works like a typical memoize and returns the - default otherwise. -- ``CachedMetaClass`` uses ``memoize_default`` to do the same with classes. -""" - -import inspect - -NO_DEFAULT = object() - - -def memoize_default(default=NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False): - """ This is a typical memoization decorator, BUT there is one difference: - To prevent recursion it sets defaults. - - Preventing recursion is in this case the much bigger use than speed. I - don't think, that there is a big speed difference, but there are many cases - where recursion could happen (think about a = b; b = a). - """ - def func(function): - def wrapper(obj, *args, **kwargs): - if evaluator_is_first_arg: - cache = obj.memoize_cache - elif second_arg_is_evaluator: # needed for meta classes - cache = args[0].memoize_cache - else: - cache = obj._evaluator.memoize_cache - - try: - memo = cache[function] - except KeyError: - memo = {} - cache[function] = memo - - key = (obj, args, frozenset(kwargs.items())) - if key in memo: - return memo[key] - else: - if default is not NO_DEFAULT: - memo[key] = default - rv = function(obj, *args, **kwargs) - if inspect.isgenerator(rv): - rv = list(rv) - memo[key] = rv - return rv - return wrapper - return func - - -class CachedMetaClass(type): - """ - This is basically almost the same than the decorator above, it just caches - class initializations. Either you do it this way or with decorators, but - with decorators you lose class access (isinstance, etc). 
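-
-    Usage sketch (illustrative; py3 spelling, and the evaluator has to be
-    the first constructor argument so memoize_default finds its cache)::
-
-        class Wrapped(metaclass=CachedMetaClass):
-            def __init__(self, evaluator, node):
-                self.node = node
-
-        Wrapped(evaluator, node) is Wrapped(evaluator, node)   # -> True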
- """ - @memoize_default(None, second_arg_is_evaluator=True) - def __call__(self, *args, **kwargs): - return super(CachedMetaClass, self).__call__(*args, **kwargs) diff --git a/pythonFiles/preview/jedi/evaluate/compiled/__init__.py b/pythonFiles/preview/jedi/evaluate/compiled/__init__.py deleted file mode 100644 index f004a142a7e4..000000000000 --- a/pythonFiles/preview/jedi/evaluate/compiled/__init__.py +++ /dev/null @@ -1,544 +0,0 @@ -""" -Imitate the parser representation. -""" -import inspect -import re -import sys -import os -from functools import partial - -from jedi._compatibility import builtins as _builtins, unicode -from jedi import debug -from jedi.cache import underscore_memoization, memoize_method -from jedi.parser.tree import Param, Base, Operator, zero_position_modifier -from jedi.evaluate.helpers import FakeName -from . import fake - - -_sep = os.path.sep -if os.path.altsep is not None: - _sep += os.path.altsep -_path_re = re.compile('(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep))) -del _sep - - -class CheckAttribute(object): - """Raises an AttributeError if the attribute X isn't available.""" - def __init__(self, func): - self.func = func - # Remove the py in front of e.g. py__call__. - self.check_name = func.__name__[2:] - - def __get__(self, instance, owner): - # This might raise an AttributeError. That's wanted. - getattr(instance.obj, self.check_name) - return partial(self.func, instance) - - -class CompiledObject(Base): - # comply with the parser - start_pos = 0, 0 - path = None # modules have this attribute - set it to None. - used_names = {} # To be consistent with modules. - - def __init__(self, evaluator, obj, parent=None): - self._evaluator = evaluator - self.obj = obj - self.parent = parent - - @CheckAttribute - def py__call__(self, params): - if inspect.isclass(self.obj): - from jedi.evaluate.representation import Instance - return set([Instance(self._evaluator, self, params)]) - else: - return set(self._execute_function(params)) - - @CheckAttribute - def py__class__(self): - return create(self._evaluator, self.obj.__class__) - - @CheckAttribute - def py__mro__(self): - return tuple(create(self._evaluator, cls) for cls in self.obj.__mro__) - - @CheckAttribute - def py__bases__(self): - return tuple(create(self._evaluator, cls) for cls in self.obj.__bases__) - - def py__bool__(self): - return bool(self.obj) - - def py__file__(self): - return self.obj.__file__ - - def is_class(self): - return inspect.isclass(self.obj) - - @property - def doc(self): - return inspect.getdoc(self.obj) or '' - - @property - def params(self): - params_str, ret = self._parse_function_doc() - tokens = params_str.split(',') - if inspect.ismethoddescriptor(self.obj): - tokens.insert(0, 'self') - params = [] - for p in tokens: - parts = [FakeName(part) for part in p.strip().split('=')] - if len(parts) > 1: - parts.insert(1, Operator(zero_position_modifier, '=', (0, 0))) - params.append(Param(parts, self)) - return params - - def __repr__(self): - return '<%s: %s>' % (type(self).__name__, repr(self.obj)) - - @underscore_memoization - def _parse_function_doc(self): - if self.doc is None: - return '', '' - - return _parse_function_doc(self.doc) - - def api_type(self): - obj = self.obj - if inspect.isclass(obj): - return 'class' - elif inspect.ismodule(obj): - return 'module' - elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \ - or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj): - return 'function' - # Everything else... 
- return 'instance' - - @property - def type(self): - """Imitate the tree.Node.type values.""" - cls = self._get_class() - if inspect.isclass(cls): - return 'classdef' - elif inspect.ismodule(cls): - return 'file_input' - elif inspect.isbuiltin(cls) or inspect.ismethod(cls) or \ - inspect.ismethoddescriptor(cls): - return 'funcdef' - - @underscore_memoization - def _cls(self): - """ - We used to limit the lookups for instantiated objects like list(), but - this is not the case anymore. Python itself - """ - # Ensures that a CompiledObject is returned that is not an instance (like list) - return self - - def _get_class(self): - if not fake.is_class_instance(self.obj) or \ - inspect.ismethoddescriptor(self.obj): # slots - return self.obj - - try: - return self.obj.__class__ - except AttributeError: - # happens with numpy.core.umath._UFUNC_API (you get it - # automatically by doing `import numpy`. - return type - - @property - def names_dict(self): - # For compatibility with `representation.Class`. - return self.names_dicts(False)[0] - - def names_dicts(self, search_global, is_instance=False): - return self._names_dict_ensure_one_dict(is_instance) - - @memoize_method - def _names_dict_ensure_one_dict(self, is_instance): - """ - search_global shouldn't change the fact that there's one dict, this way - there's only one `object`. - """ - return [LazyNamesDict(self._evaluator, self, is_instance)] - - def get_subscope_by_name(self, name): - if name in dir(self.obj): - return CompiledName(self._evaluator, self, name).parent - else: - raise KeyError("CompiledObject doesn't have an attribute '%s'." % name) - - @CheckAttribute - def py__getitem__(self, index): - if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): - # Get rid of side effects, we won't call custom `__getitem__`s. - return set() - - return set([create(self._evaluator, self.obj[index])]) - - @CheckAttribute - def py__iter__(self): - if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): - # Get rid of side effects, we won't call custom `__getitem__`s. - return - - for part in self.obj: - yield set([create(self._evaluator, part)]) - - @property - def name(self): - try: - name = self._get_class().__name__ - except AttributeError: - name = repr(self.obj) - return FakeName(name, self) - - def _execute_function(self, params): - if self.type != 'funcdef': - return - - for name in self._parse_function_doc()[1].split(): - try: - bltn_obj = getattr(_builtins, name) - except AttributeError: - continue - else: - if bltn_obj is None: - # We want to evaluate everything except None. - # TODO do we? - continue - bltn_obj = create(self._evaluator, bltn_obj) - for result in self._evaluator.execute(bltn_obj, params): - yield result - - @property - @underscore_memoization - def subscopes(self): - """ - Returns only the faked scopes - the other ones are not important for - internal analysis. 
- """ - module = self.get_parent_until() - faked_subscopes = [] - for name in dir(self.obj): - try: - faked_subscopes.append( - fake.get_faked(module.obj, self.obj, parent=self, name=name) - ) - except fake.FakeDoesNotExist: - pass - return faked_subscopes - - def is_scope(self): - return True - - def get_self_attributes(self): - return [] # Instance compatibility - - def get_imports(self): - return [] # Builtins don't have imports - - -class CompiledName(FakeName): - def __init__(self, evaluator, compiled_obj, name): - super(CompiledName, self).__init__(name) - self._evaluator = evaluator - self._compiled_obj = compiled_obj - self.name = name - - def __repr__(self): - try: - name = self._compiled_obj.name # __name__ is not defined all the time - except AttributeError: - name = None - return '<%s: (%s).%s>' % (type(self).__name__, name, self.name) - - def is_definition(self): - return True - - @property - @underscore_memoization - def parent(self): - module = self._compiled_obj.get_parent_until() - return _create_from_name(self._evaluator, module, self._compiled_obj, self.name) - - @parent.setter - def parent(self, value): - pass # Just ignore this, FakeName tries to overwrite the parent attribute. - - -class LazyNamesDict(object): - """ - A names_dict instance for compiled objects, resembles the parser.tree. - """ - name_class = CompiledName - - def __init__(self, evaluator, compiled_obj, is_instance=False): - self._evaluator = evaluator - self._compiled_obj = compiled_obj - self._is_instance = is_instance - - def __iter__(self): - return (v[0].value for v in self.values()) - - @memoize_method - def __getitem__(self, name): - try: - getattr(self._compiled_obj.obj, name) - except AttributeError: - raise KeyError('%s in %s not found.' % (name, self._compiled_obj)) - except Exception: - # This is a bit ugly. We're basically returning this to make - # lookups possible without having the actual attribute. However - # this makes proper completion possible. - return [FakeName(name, create(self._evaluator, None), is_definition=True)] - return [self.name_class(self._evaluator, self._compiled_obj, name)] - - def values(self): - obj = self._compiled_obj.obj - - values = [] - for name in dir(obj): - try: - values.append(self[name]) - except KeyError: - # The dir function can be wrong. - pass - - is_instance = self._is_instance or fake.is_class_instance(obj) - # ``dir`` doesn't include the type names. - if not inspect.ismodule(obj) and obj != type and not is_instance: - values += create(self._evaluator, type).names_dict.values() - return values - - -def dotted_from_fs_path(fs_path, sys_path): - """ - Changes `/usr/lib/python3.4/email/utils.py` to `email.utils`. I.e. - compares the path with sys.path and then returns the dotted_path. If the - path is not in the sys.path, just returns None. - """ - if os.path.basename(fs_path).startswith('__init__.'): - # We are calculating the path. __init__ files are not interesting. 
- fs_path = os.path.dirname(fs_path) - - # prefer - # - UNIX - # /path/to/pythonX.Y/lib-dynload - # /path/to/pythonX.Y/site-packages - # - Windows - # C:\path\to\DLLs - # C:\path\to\Lib\site-packages - # over - # - UNIX - # /path/to/pythonX.Y - # - Windows - # C:\path\to\Lib - path = '' - for s in sys_path: - if (fs_path.startswith(s) and len(path) < len(s)): - path = s - - # - Window - # X:\path\to\lib-dynload/datetime.pyd => datetime - module_path = fs_path[len(path):].lstrip(os.path.sep).lstrip('/') - # - Window - # Replace like X:\path\to\something/foo/bar.py - return _path_re.sub('', module_path).replace(os.path.sep, '.').replace('/', '.') - - -def load_module(evaluator, path=None, name=None): - sys_path = evaluator.sys_path - if path is not None: - dotted_path = dotted_from_fs_path(path, sys_path=sys_path) - else: - dotted_path = name - - if dotted_path is None: - p, _, dotted_path = path.partition(os.path.sep) - sys_path.insert(0, p) - - temp, sys.path = sys.path, sys_path - try: - __import__(dotted_path) - except RuntimeError: - if 'PySide' in dotted_path or 'PyQt' in dotted_path: - # RuntimeError: the PyQt4.QtCore and PyQt5.QtCore modules both wrap - # the QObject class. - # See https://github.com/davidhalter/jedi/pull/483 - return None - raise - except ImportError: - # If a module is "corrupt" or not really a Python module or whatever. - debug.warning('Module %s not importable.', path) - return None - finally: - sys.path = temp - - # Just access the cache after import, because of #59 as well as the very - # complicated import structure of Python. - module = sys.modules[dotted_path] - - return create(evaluator, module) - - -docstr_defaults = { - 'floating point number': 'float', - 'character': 'str', - 'integer': 'int', - 'dictionary': 'dict', - 'string': 'str', -} - - -def _parse_function_doc(doc): - """ - Takes a function and returns the params and return value as a tuple. - This is nothing more than a docstring parser. 
-
-    TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None
-    TODO docstrings like 'tuple of integers'
-    """
-    # parse round parentheses: def func(a, (b,c))
-    try:
-        count = 0
-        start = doc.index('(')
-        for i, s in enumerate(doc[start:]):
-            if s == '(':
-                count += 1
-            elif s == ')':
-                count -= 1
-                if count == 0:
-                    end = start + i
-                    break
-        param_str = doc[start + 1:end]
-    except (ValueError, UnboundLocalError):
-        # ValueError for doc.index
-        # UnboundLocalError for undefined end in last line
-        debug.dbg('no brackets found - no param')
-        end = 0
-        param_str = ''
-    else:
-        # Remove square brackets that mark an optional param (= None).
-        def change_options(m):
-            args = m.group(1).split(',')
-            for i, a in enumerate(args):
-                if a and '=' not in a:
-                    args[i] += '=None'
-            return ','.join(args)
-
-        while True:
-            param_str, changes = re.subn(r' ?\[([^\[\]]+)\]',
-                                         change_options, param_str)
-            if changes == 0:
-                break
-    param_str = param_str.replace('-', '_')  # see: isinstance.__doc__
-
-    # parse return value
-    r = re.search('-[>-]* ', doc[end:end + 7])
-    if r is None:
-        ret = ''
-    else:
-        index = end + r.end()
-        # get result type, which can contain newlines
-        pattern = re.compile(r'(,\n|[^\n-])+')
-        ret_str = pattern.match(doc, index).group(0).strip()
-        # New object -> object()
-        ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str)
-
-        ret = docstr_defaults.get(ret_str, ret_str)
-
-    return param_str, ret
-
-
-def _create_from_name(evaluator, module, parent, name):
-    try:
-        return fake.get_faked(module.obj, parent.obj, parent=parent, name=name)
-    except fake.FakeDoesNotExist:
-        pass
-
-    try:
-        obj = getattr(parent.obj, name)
-    except AttributeError:
-        # Happens e.g. in properties of
-        # PyQt4.QtGui.QStyleOptionComboBox.currentText
-        # -> just set it to None
-        obj = None
-    return create(evaluator, obj, parent)
-
-
-def builtin_from_name(evaluator, string):
-    bltn_obj = getattr(_builtins, string)
-    return create(evaluator, bltn_obj)
-
-
-def _a_generator(foo):
-    """Used to have an object to return for generators."""
-    yield 42
-    yield foo
-
-
-_SPECIAL_OBJECTS = {
-    'FUNCTION_CLASS': type(load_module),
-    'METHOD_CLASS': type(CompiledObject.is_class),
-    'MODULE_CLASS': type(os),
-    'GENERATOR_OBJECT': _a_generator(1.0),
-    'BUILTINS': _builtins,
-}
-
-
-def get_special_object(evaluator, identifier):
-    obj = _SPECIAL_OBJECTS[identifier]
-    return create(evaluator, obj, parent=create(evaluator, _builtins))
-
-
-def compiled_objects_cache(attribute_name):
-    def decorator(func):
-        """
-        This decorator caches just the ids, as opposed to caching the object
-        itself. Caching the id has the advantage that an object doesn't need
-        to be hashable.
-        """
-        def wrapper(evaluator, obj, parent=None, module=None):
-            cache = getattr(evaluator, attribute_name)
-            # Do a very cheap form of caching here.
-            key = id(obj), id(parent)
-            try:
-                return cache[key][0]
-            except KeyError:
-                # TODO this whole decorator looks way too ugly and this if
-                # doesn't make it better. Find a more generic solution.
-                if parent or module:
-                    result = func(evaluator, obj, parent, module)
-                else:
-                    result = func(evaluator, obj)
-                # Need to cache all of them, otherwise the id could be overwritten.
-                cache[key] = result, obj, parent, module
-                return result
-        return wrapper
-
-    return decorator
-
-
-@compiled_objects_cache('compiled_cache')
-def create(evaluator, obj, parent=None, module=None):
-    """
-    A very weird interface to this module. The more options are provided, the
-    more accurately compiled objects can be loaded.
- """ - if inspect.ismodule(obj): - if parent is not None: - # Modules don't have parents, be careful with caching: recurse. - return create(evaluator, obj) - else: - if parent is None and obj != _builtins: - return create(evaluator, obj, create(evaluator, _builtins)) - - try: - return fake.get_faked(module and module.obj, obj, parent=parent) - except fake.FakeDoesNotExist: - pass - - return CompiledObject(evaluator, obj, parent) diff --git a/pythonFiles/preview/jedi/evaluate/compiled/fake.py b/pythonFiles/preview/jedi/evaluate/compiled/fake.py deleted file mode 100644 index 0338b962335c..000000000000 --- a/pythonFiles/preview/jedi/evaluate/compiled/fake.py +++ /dev/null @@ -1,203 +0,0 @@ -""" -Loads functions that are mixed in to the standard library. E.g. builtins are -written in C (binaries), but my autocompletion only understands Python code. By -mixing in Python code, the autocompletion should work much better for builtins. -""" - -import os -import inspect -import types - -from jedi._compatibility import is_py3, builtins, unicode, is_py34 -from jedi.parser import ParserWithRecovery, load_grammar -from jedi.parser import tree as pt -from jedi.evaluate.helpers import FakeName - -modules = {} - - -MethodDescriptorType = type(str.replace) -# These are not considered classes and access is granted even though they have -# a __class__ attribute. -NOT_CLASS_TYPES = ( - types.BuiltinFunctionType, - types.CodeType, - types.FrameType, - types.FunctionType, - types.GeneratorType, - types.GetSetDescriptorType, - types.LambdaType, - types.MemberDescriptorType, - types.MethodType, - types.ModuleType, - types.TracebackType, - MethodDescriptorType -) - -if is_py3: - NOT_CLASS_TYPES += ( - types.MappingProxyType, - types.SimpleNamespace - ) - if is_py34: - NOT_CLASS_TYPES += (types.DynamicClassAttribute,) - - -class FakeDoesNotExist(Exception): - pass - - -def _load_faked_module(module): - module_name = module.__name__ - if module_name == '__builtin__' and not is_py3: - module_name = 'builtins' - - try: - return modules[module_name] - except KeyError: - path = os.path.dirname(os.path.abspath(__file__)) - try: - with open(os.path.join(path, 'fake', module_name) + '.pym') as f: - source = f.read() - except IOError: - modules[module_name] = None - return - grammar = load_grammar(version='3.4') - module = ParserWithRecovery(grammar, unicode(source), module_name).module - modules[module_name] = module - - if module_name == 'builtins' and not is_py3: - # There are two implementations of `open` for either python 2/3. - # -> Rename the python2 version (`look at fake/builtins.pym`). - open_func = search_scope(module, 'open') - open_func.children[1] = FakeName('open_python3') - open_func = search_scope(module, 'open_python2') - open_func.children[1] = FakeName('open') - return module - - -def search_scope(scope, obj_name): - for s in scope.subscopes: - if str(s.name) == obj_name: - return s - - -def get_module(obj): - if inspect.ismodule(obj): - return obj - try: - obj = obj.__objclass__ - except AttributeError: - pass - - try: - imp_plz = obj.__module__ - except AttributeError: - # Unfortunately in some cases like `int` there's no __module__ - return builtins - else: - if imp_plz is None: - # Happens for example in `(_ for _ in []).send.__module__`. - return builtins - else: - try: - return __import__(imp_plz) - except ImportError: - # __module__ can be something arbitrary that doesn't exist. 
- return builtins - - -def _faked(module, obj, name): - # Crazy underscore actions to try to escape all the internal madness. - if module is None: - module = get_module(obj) - - faked_mod = _load_faked_module(module) - if faked_mod is None: - return None - - # Having the module as a `parser.representation.module`, we need to scan - # for methods. - if name is None: - if inspect.isbuiltin(obj): - return search_scope(faked_mod, obj.__name__) - elif not inspect.isclass(obj): - # object is a method or descriptor - try: - objclass = obj.__objclass__ - except AttributeError: - return None - else: - cls = search_scope(faked_mod, objclass.__name__) - if cls is None: - return None - return search_scope(cls, obj.__name__) - else: - if obj == module: - return search_scope(faked_mod, name) - else: - try: - cls_name = obj.__name__ - except AttributeError: - return None - cls = search_scope(faked_mod, cls_name) - if cls is None: - return None - return search_scope(cls, name) - - -def memoize_faked(obj): - """ - A typical memoize function that ignores issues with non hashable results. - """ - cache = obj.cache = {} - - def memoizer(*args, **kwargs): - key = (obj, args, frozenset(kwargs.items())) - try: - result = cache[key] - except TypeError: - return obj(*args, **kwargs) - except KeyError: - result = obj(*args, **kwargs) - if result is not None: - cache[key] = obj(*args, **kwargs) - return result - else: - return result - return memoizer - - -@memoize_faked -def _get_faked(module, obj, name=None): - obj = type(obj) if is_class_instance(obj) else obj - result = _faked(module, obj, name) - if result is None or isinstance(result, pt.Class): - # We're not interested in classes. What we want is functions. - raise FakeDoesNotExist - else: - # Set the docstr which was previously not set (faked modules don't - # contain it). - doc = '"""%s"""' % obj.__doc__ # TODO need escapes. - suite = result.children[-1] - string = pt.String(pt.zero_position_modifier, doc, (0, 0), '') - new_line = pt.Newline('\n', (0, 0), '') - docstr_node = pt.Node('simple_stmt', [string, new_line]) - suite.children.insert(2, docstr_node) - return result - - -def get_faked(module, obj, name=None, parent=None): - faked = _get_faked(module, obj, name) - faked.parent = parent - return faked - - -def is_class_instance(obj): - """Like inspect.* methods.""" - try: - cls = obj.__class__ - except AttributeError: - return False - else: - return cls != type and not issubclass(cls, NOT_CLASS_TYPES) diff --git a/pythonFiles/preview/jedi/evaluate/compiled/fake/_weakref.pym b/pythonFiles/preview/jedi/evaluate/compiled/fake/_weakref.pym deleted file mode 100644 index 8d21a2c4a7c6..000000000000 --- a/pythonFiles/preview/jedi/evaluate/compiled/fake/_weakref.pym +++ /dev/null @@ -1,8 +0,0 @@ -def proxy(object, callback=None): - return object - -class weakref(): - def __init__(self, object, callback=None): - self.__object = object - def __call__(self): - return self.__object diff --git a/pythonFiles/preview/jedi/evaluate/compiled/fake/builtins.pym b/pythonFiles/preview/jedi/evaluate/compiled/fake/builtins.pym deleted file mode 100644 index 1d5314bde97a..000000000000 --- a/pythonFiles/preview/jedi/evaluate/compiled/fake/builtins.pym +++ /dev/null @@ -1,262 +0,0 @@ -""" -Pure Python implementation of some builtins. -This code is not going to be executed anywhere. -These implementations are not always correct, but should work as good as -possible for the auto completion. 
-""" - - -def next(iterator, default=None): - if random.choice([0, 1]): - if hasattr("next"): - return iterator.next() - else: - return iterator.__next__() - else: - if default is not None: - return default - - -def iter(collection, sentinel=None): - if sentinel: - yield collection() - else: - for c in collection: - yield c - - -def range(start, stop=None, step=1): - return [0] - - -class file(): - def __iter__(self): - yield '' - def next(self): - return '' - - -class xrange(): - # Attention: this function doesn't exist in Py3k (there it is range). - def __iter__(self): - yield 1 - - def count(self): - return 1 - - def index(self): - return 1 - - -def open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True): - import io - return io.TextIOWrapper(file, mode, buffering, encoding, errors, newline, closefd) - - -def open_python2(name, mode=None, buffering=None): - return file(name, mode, buffering) - - -#-------------------------------------------------------- -# descriptors -#-------------------------------------------------------- -class property(): - def __init__(self, fget, fset=None, fdel=None, doc=None): - self.fget = fget - self.fset = fset - self.fdel = fdel - self.__doc__ = doc - - def __get__(self, obj, cls): - return self.fget(obj) - - def __set__(self, obj, value): - self.fset(obj, value) - - def __delete__(self, obj): - self.fdel(obj) - - def setter(self, func): - self.fset = func - return self - - def getter(self, func): - self.fget = func - return self - - def deleter(self, func): - self.fdel = func - return self - - -class staticmethod(): - def __init__(self, func): - self.__func = func - - def __get__(self, obj, cls): - return self.__func - - -class classmethod(): - def __init__(self, func): - self.__func = func - - def __get__(self, obj, cls): - def _method(*args, **kwargs): - return self.__func(cls, *args, **kwargs) - return _method - - -#-------------------------------------------------------- -# array stuff -#-------------------------------------------------------- -class list(): - def __init__(self, iterable=[]): - self.__iterable = [] - for i in iterable: - self.__iterable += [i] - - def __iter__(self): - for i in self.__iterable: - yield i - - def __getitem__(self, y): - return self.__iterable[y] - - def pop(self): - return self.__iterable[int()] - - -class tuple(): - def __init__(self, iterable=[]): - self.__iterable = [] - for i in iterable: - self.__iterable += [i] - - def __iter__(self): - for i in self.__iterable: - yield i - - def __getitem__(self, y): - return self.__iterable[y] - - def index(self): - return 1 - - def count(self): - return 1 - - -class set(): - def __init__(self, iterable=[]): - self.__iterable = iterable - - def __iter__(self): - for i in self.__iterable: - yield i - - def pop(self): - return list(self.__iterable)[-1] - - def copy(self): - return self - - def difference(self, other): - return self - other - - def intersection(self, other): - return self & other - - def symmetric_difference(self, other): - return self ^ other - - def union(self, other): - return self | other - - -class frozenset(): - def __init__(self, iterable=[]): - self.__iterable = iterable - - def __iter__(self): - for i in self.__iterable: - yield i - - def copy(self): - return self - - -class dict(): - def __init__(self, **elements): - self.__elements = elements - - def clear(self): - # has a strange docstr - pass - - def get(self, k, d=None): - # TODO implement - try: - #return self.__elements[k] - pass - except KeyError: - return d - - 
def values(self):
-        return self.__elements.values()
-
-    def setdefault(self, k, d):
-        # TODO maybe also return the content
-        return d
-
-
-class enumerate():
-    def __init__(self, sequence, start=0):
-        self.__sequence = sequence
-
-    def __iter__(self):
-        for i in self.__sequence:
-            yield 1, i
-
-    def __next__(self):
-        return next(self.__iter__())
-
-    def next(self):
-        return next(self.__iter__())
-
-
-class reversed():
-    def __init__(self, sequence):
-        self.__sequence = sequence
-
-    def __iter__(self):
-        for i in self.__sequence:
-            yield i
-
-    def __next__(self):
-        return next(self.__iter__())
-
-    def next(self):
-        return next(self.__iter__())
-
-
-def sorted(iterable, cmp=None, key=None, reverse=False):
-    return iterable
-
-
-#--------------------------------------------------------
-# basic types
-#--------------------------------------------------------
-class int():
-    def __init__(self, x, base=None):
-        pass
-
-
-class str():
-    def __init__(self, obj):
-        pass
-
-
-class type():
-    def mro():
-        return [object]
diff --git a/pythonFiles/preview/jedi/evaluate/compiled/fake/io.pym b/pythonFiles/preview/jedi/evaluate/compiled/fake/io.pym
deleted file mode 100644
index 87b02eed4321..000000000000
--- a/pythonFiles/preview/jedi/evaluate/compiled/fake/io.pym
+++ /dev/null
@@ -1,6 +0,0 @@
-class TextIOWrapper():
-    def __next__(self):
-        return str()
-
-    def __iter__(self):
-        yield str()
diff --git a/pythonFiles/preview/jedi/evaluate/compiled/mixed.py b/pythonFiles/preview/jedi/evaluate/compiled/mixed.py
deleted file mode 100644
index 69119f181f6d..000000000000
--- a/pythonFiles/preview/jedi/evaluate/compiled/mixed.py
+++ /dev/null
@@ -1,158 +0,0 @@
-"""
-Used only for REPL completion.
-"""
-
-import inspect
-import os
-
-from jedi import common
-from jedi.parser.fast import FastParser
-from jedi.evaluate import compiled
-from jedi.cache import underscore_memoization
-
-
-class MixedObject(object):
-    """
-    A ``MixedObject`` is used in two ways:
-
-    1. It uses the default logic of ``parser.tree`` objects,
-    2. except for getattr calls. The names dicts are generated in a fashion
-       like ``CompiledObject``.
-
-    This combined logic makes it possible to provide more powerful REPL
-    completion. It allows side effects that are not noticeable with the
-    default parser structure to still be completable.
-
-    The biggest difference between CompiledObject and MixedObject is that we
-    are generally dealing with Python code and not with C code. This leads to
-    fewer special cases, because in Python you don't have the same freedom to
-    modify the runtime.
-    """
-    def __init__(self, evaluator, obj, node_name):
-        self._evaluator = evaluator
-        self.obj = obj
-        self.node_name = node_name
-        self.definition = node_name.get_definition()
-
-    @property
-    def names_dict(self):
-        return LazyMixedNamesDict(self._evaluator, self)
-
-    def names_dicts(self, search_global):
-        # TODO is this needed?
-        assert search_global is False
-        return [self.names_dict]
-
-    def api_type(self):
-        mappings = {
-            'expr_stmt': 'statement',
-            'classdef': 'class',
-            'funcdef': 'function',
-            'file_input': 'module',
-        }
-        return mappings[self.definition.type]
-
-    def __repr__(self):
-        return '<%s: %s>' % (type(self).__name__, repr(self.obj))
-
-    def __getattr__(self, name):
-        return getattr(self.definition, name)
-
-
-class MixedName(compiled.CompiledName):
-    """
-    The ``CompiledName._compiled_obj`` is our MixedObject.
-    """
-    @property
-    @underscore_memoization
-    def parent(self):
-        return create(self._evaluator, getattr(self._compiled_obj.obj, self.name))
-
-    @parent.setter
-    def parent(self, value):
-        pass  # Just ignore this, Name tries to overwrite the parent attribute.
-
-    @property
-    def start_pos(self):
-        if isinstance(self.parent, MixedObject):
-            return self.parent.node_name.start_pos
-
-        # This means a start_pos that doesn't exist (compiled objects).
-        return (0, 0)
-
-
-class LazyMixedNamesDict(compiled.LazyNamesDict):
-    name_class = MixedName
-
-
-def parse(grammar, path):
-    with open(path) as f:
-        source = f.read()
-    source = common.source_to_unicode(source)
-    return FastParser(grammar, source, path)
-
-
-def _load_module(evaluator, path, python_object):
-    module = parse(evaluator.grammar, path).module
-    python_module = inspect.getmodule(python_object)
-
-    evaluator.modules[python_module.__name__] = module
-    return module
-
-
-def find_syntax_node_name(evaluator, python_object):
-    try:
-        path = inspect.getsourcefile(python_object)
-    except TypeError:
-        # The type might not be known (e.g. class_with_dict.__weakref__)
-        return None
-    if path is None or not os.path.exists(path):
-        # The path might not exist or be e.g. <stdin>.
-        return None
-
-    module = _load_module(evaluator, path, python_object)
-
-    if inspect.ismodule(python_object):
-        # We don't need to check names for modules, because there's not really
-        # a way to write a module in a module in Python (and also __name__ can
-        # be something like ``email.utils``).
-        return module
-
-    name_str = python_object.__name__
-    if name_str == '<lambda>':
-        return None  # It's too hard to find lambdas.
-
-    names = module.used_names[name_str]
-    names = [n for n in names if n.is_definition()]
-
-    try:
-        code = python_object.__code__
-        # By using the line number of a code object we make the lookup in a
-        # file pretty easy. There's still a possibility of people defining
-        # stuff like ``a = 3; foo(a); a = 4`` on the same line, but if people
-        # do so we just don't care.
-        line_nr = code.co_firstlineno
-    except AttributeError:
-        pass
-    else:
-        line_names = [name for name in names if name.start_pos[0] == line_nr]
-        # There's a chance that the object is not available anymore, because
-        # the code has changed in the background.
-        if line_names:
-            return line_names[-1]
-
-    # It's really hard to actually get the right definition, so as a last
-    # resort we just return the last one. This guess might lead to odd
-    # completions at some points but will give mostly correct type inference,
-    # because people tend to define a public name in a module only once.
-    return names[-1]
-
-
-@compiled.compiled_objects_cache('mixed_cache')
-def create(evaluator, obj):
-    name = find_syntax_node_name(evaluator, obj)
-    if name is None:
-        return compiled.create(evaluator, obj)
-    else:
-        return MixedObject(evaluator, obj, name)
diff --git a/pythonFiles/preview/jedi/evaluate/docstrings.py b/pythonFiles/preview/jedi/evaluate/docstrings.py
deleted file mode 100644
index d2ab34ed10c6..000000000000
--- a/pythonFiles/preview/jedi/evaluate/docstrings.py
+++ /dev/null
@@ -1,204 +0,0 @@
-"""
-Docstrings are another source of information for functions and classes.
-:mod:`jedi.evaluate.dynamic` tries to find all executions of functions, while
-the docstring parsing is much easier. There are two different types of
-docstrings that |jedi| understands:
-
-- Sphinx
-- Epydoc
-
-For example, the Sphinx annotation ``:type foo: str`` clearly states that the
-type of ``foo`` is ``str``.
- -As an addition to parameter searching, this module also provides return -annotations. -""" - -from ast import literal_eval -import re -from itertools import chain -from textwrap import dedent - -from jedi.evaluate.cache import memoize_default -from jedi.parser import ParserWithRecovery, load_grammar -from jedi.parser.tree import Class -from jedi.common import indent_block -from jedi.evaluate.iterable import Array, FakeSequence, AlreadyEvaluated - - -DOCSTRING_PARAM_PATTERNS = [ - r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx - r'\s*:param\s+(\w+)\s+%s:[^\n]+', # Sphinx param with type - r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc -] - -DOCSTRING_RETURN_PATTERNS = [ - re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx - re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc -] - -REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`') - - -try: - from numpydoc.docscrape import NumpyDocString -except ImportError: - def _search_param_in_numpydocstr(docstr, param_str): - return [] -else: - def _search_param_in_numpydocstr(docstr, param_str): - """Search `docstr` (in numpydoc format) for type(-s) of `param_str`.""" - params = NumpyDocString(docstr)._parsed_data['Parameters'] - for p_name, p_type, p_descr in params: - if p_name == param_str: - m = re.match('([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type) - if m: - p_type = m.group(1) - - if p_type.startswith('{'): - types = set(type(x).__name__ for x in literal_eval(p_type)) - return list(types) - else: - return [p_type] - return [] - - -def _search_param_in_docstr(docstr, param_str): - """ - Search `docstr` for type(-s) of `param_str`. - - >>> _search_param_in_docstr(':type param: int', 'param') - ['int'] - >>> _search_param_in_docstr('@type param: int', 'param') - ['int'] - >>> _search_param_in_docstr( - ... ':type param: :class:`threading.Thread`', 'param') - ['threading.Thread'] - >>> bool(_search_param_in_docstr('no document', 'param')) - False - >>> _search_param_in_docstr(':param int param: some description', 'param') - ['int'] - - """ - # look at #40 to see definitions of those params - patterns = [re.compile(p % re.escape(param_str)) - for p in DOCSTRING_PARAM_PATTERNS] - for pattern in patterns: - match = pattern.search(docstr) - if match: - return [_strip_rst_role(match.group(1))] - - return (_search_param_in_numpydocstr(docstr, param_str) or - []) - - -def _strip_rst_role(type_str): - """ - Strip off the part looks like a ReST role in `type_str`. - - >>> _strip_rst_role(':class:`ClassName`') # strip off :class: - 'ClassName' - >>> _strip_rst_role(':py:obj:`module.Object`') # works with domain - 'module.Object' - >>> _strip_rst_role('ClassName') # do nothing when not ReST role - 'ClassName' - - See also: - http://sphinx-doc.org/domains.html#cross-referencing-python-objects - - """ - match = REST_ROLE_PATTERN.match(type_str) - if match: - return match.group(1) - else: - return type_str - - -def _evaluate_for_statement_string(evaluator, string, module): - code = dedent(""" - def pseudo_docstring_stuff(): - # Create a pseudo function for docstring statements. - %s - """) - if string is None: - return [] - - for element in re.findall('((?:\w+\.)*\w+)\.', string): - # Try to import module part in dotted name. - # (e.g., 'threading' in 'threading.Thread'). - string = 'import %s\n' % element + string - - # Take the default grammar here, if we load the Python 2.7 grammar here, it - # will be impossible to use `...` (Ellipsis) as a token. Docstring types - # don't need to conform with the current grammar. 
-    p = ParserWithRecovery(load_grammar(), code % indent_block(string))
-    try:
-        pseudo_cls = p.module.subscopes[0]
-        # First pick suite, then simple_stmt (-2 for DEDENT) and then the node,
-        # which is also not the last item, because there's a newline.
-        stmt = pseudo_cls.children[-1].children[-2].children[-2]
-    except (AttributeError, IndexError):
-        return []
-
-    # Use the module of the param.
-    # TODO this module is not the module of the param in case of a function
-    # call. In that case it's the module of the function call, stuffed with
-    # content from that call.
-    pseudo_cls.parent = module
-    return list(_execute_types_in_stmt(evaluator, stmt))
-
-
-def _execute_types_in_stmt(evaluator, stmt):
-    """
-    Executes all types or general elements that we find in a statement. This
-    doesn't include tuple, list and dict literals; the values they contain
-    are evaluated separately. (Used as type information.)
-    """
-    definitions = evaluator.eval_element(stmt)
-    return chain.from_iterable(_execute_array_values(evaluator, d) for d in definitions)
-
-
-def _execute_array_values(evaluator, array):
-    """
-    Tuples indicate that there's not just one return value, but the listed
-    ones. `(str, int)` means that it returns a tuple with both types.
-    """
-    if isinstance(array, Array):
-        values = []
-        for types in array.py__iter__():
-            objects = set(chain.from_iterable(_execute_array_values(evaluator, typ) for typ in types))
-            values.append(AlreadyEvaluated(objects))
-        return [FakeSequence(evaluator, values, array.type)]
-    else:
-        return evaluator.execute(array)
-
-
-@memoize_default(None, evaluator_is_first_arg=True)
-def follow_param(evaluator, param):
-    def eval_docstring(docstring):
-        return set(
-            [p for param_str in _search_param_in_docstr(docstring, str(param.name))
-             for p in _evaluate_for_statement_string(evaluator, param_str, module)]
-        )
-    func = param.parent_function
-    module = param.get_parent_until()
-
-    types = eval_docstring(func.raw_doc)
-    if func.name.value == '__init__':
-        cls = func.get_parent_until(Class)
-        if cls.type == 'classdef':
-            types |= eval_docstring(cls.raw_doc)
-
-    return types
-
-
-@memoize_default(None, evaluator_is_first_arg=True)
-def find_return_types(evaluator, func):
-    def search_return_in_docstr(code):
-        for p in DOCSTRING_RETURN_PATTERNS:
-            match = p.search(code)
-            if match:
-                return _strip_rst_role(match.group(1))
-
-    type_str = search_return_in_docstr(func.raw_doc)
-    return _evaluate_for_statement_string(evaluator, type_str, func.get_parent_until())
diff --git a/pythonFiles/preview/jedi/evaluate/dynamic.py b/pythonFiles/preview/jedi/evaluate/dynamic.py
deleted file mode 100644
index d0570b59a7c5..000000000000
--- a/pythonFiles/preview/jedi/evaluate/dynamic.py
+++ /dev/null
@@ -1,149 +0,0 @@
-"""
-One of the really important features of |jedi| is to have an option to
-understand code like this::
-
-    def foo(bar):
-        bar.  # completion here
-    foo(1)
-
-There's no doubt whether bar is an ``int`` or not, but if there's also a call
-like ``foo('str')``, what would happen? Well, we'll just show both. Because
-that's what a human would expect.
-
-It works as follows:
-
-- |Jedi| sees a param
-- search for function calls named ``foo``
-- execute these calls and check the input. This works with a ``ParamListener``.
-""" -from itertools import chain - -from jedi._compatibility import unicode -from jedi.parser import tree -from jedi import settings -from jedi import debug -from jedi.evaluate.cache import memoize_default -from jedi.evaluate import imports - - -MAX_PARAM_SEARCHES = 20 - - -class ParamListener(object): - """ - This listener is used to get the params for a function. - """ - def __init__(self): - self.param_possibilities = [] - - def execute(self, params): - self.param_possibilities += params - - -@debug.increase_indent -def search_params(evaluator, param): - """ - A dynamic search for param values. If you try to complete a type: - - >>> def func(foo): - ... foo - >>> func(1) - >>> func("") - - It is not known what the type ``foo`` without analysing the whole code. You - have to look for all calls to ``func`` to find out what ``foo`` possibly - is. - """ - if not settings.dynamic_params: - return set() - - evaluator.dynamic_params_depth += 1 - try: - func = param.get_parent_until(tree.Function) - debug.dbg('Dynamic param search for %s in %s.', param, str(func.name), color='MAGENTA') - # Compare the param names. - names = [n for n in search_function_call(evaluator, func) - if n.value == param.name.value] - # Evaluate the ExecutedParams to types. - result = set(chain.from_iterable(n.parent.eval(evaluator) for n in names)) - debug.dbg('Dynamic param result %s', result, color='MAGENTA') - return result - finally: - evaluator.dynamic_params_depth -= 1 - - -@memoize_default([], evaluator_is_first_arg=True) -def search_function_call(evaluator, func): - """ - Returns a list of param names. - """ - from jedi.evaluate import representation as er - - def get_possible_nodes(module, func_name): - try: - names = module.used_names[func_name] - except KeyError: - return - - for name in names: - bracket = name.get_next_leaf() - trailer = bracket.parent - if trailer.type == 'trailer' and bracket == '(': - yield name, trailer - - def undecorate(typ): - # We have to remove decorators, because they are not the - # "original" functions, this way we can easily compare. - # At the same time we also have to remove InstanceElements. - if typ.isinstance(er.Function, er.Instance) \ - and typ.decorates is not None: - return typ.decorates - elif isinstance(typ, er.InstanceElement): - return typ.var - else: - return typ - - current_module = func.get_parent_until() - func_name = unicode(func.name) - compare = func - if func_name == '__init__': - cls = func.get_parent_scope() - if isinstance(cls, tree.Class): - func_name = unicode(cls.name) - compare = cls - - # add the listener - listener = ParamListener() - func.listeners.add(listener) - - try: - result = [] - i = 0 - for mod in imports.get_modules_containing_name(evaluator, [current_module], func_name): - for name, trailer in get_possible_nodes(mod, func_name): - i += 1 - - # This is a simple way to stop Jedi's dynamic param recursion - # from going wild: The deeper Jedi's in the recursin, the less - # code should be evaluated. - if i * evaluator.dynamic_params_depth > MAX_PARAM_SEARCHES: - return listener.param_possibilities - - for typ in evaluator.goto_definitions(name): - undecorated = undecorate(typ) - if evaluator.wrap(compare) == undecorated: - # Only if we have the correct function we execute - # it, otherwise just ignore it. - evaluator.eval_trailer([typ], trailer) - - result = listener.param_possibilities - - # If there are results after processing a module, we're probably - # good to process. 
-            if result:
-                return result
-    finally:
-        # cleanup: remove the listener; important: should not stick.
-        func.listeners.remove(listener)
-
-    return set()
diff --git a/pythonFiles/preview/jedi/evaluate/finder.py b/pythonFiles/preview/jedi/evaluate/finder.py
deleted file mode 100644
index 2095959e93aa..000000000000
--- a/pythonFiles/preview/jedi/evaluate/finder.py
+++ /dev/null
@@ -1,632 +0,0 @@
-"""
-Searching for names with given scope and name. This is very central in Jedi and
-Python. The name resolution is quite complicated with descriptors,
-``__getattribute__``, ``__getattr__``, ``global``, etc.
-
-If you want to understand name resolution, please read the first few chapters
-in http://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/.
-
-Flow checks
-+++++++++++
-
-Flow checks are not really mature. There's only a check for ``isinstance``. It
-would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``.
-Unfortunately everything else is ignored (e.g. ``a == ''`` would be easy to
-check for -> a is a string). There's big potential in these checks.
-"""
-from itertools import chain
-
-from jedi._compatibility import unicode
-from jedi.parser import tree
-from jedi import debug
-from jedi import common
-from jedi.common import unite
-from jedi import settings
-from jedi.evaluate import representation as er
-from jedi.evaluate import dynamic
-from jedi.evaluate import compiled
-from jedi.evaluate import docstrings
-from jedi.evaluate import pep0484
-from jedi.evaluate import iterable
-from jedi.evaluate import imports
-from jedi.evaluate import analysis
-from jedi.evaluate import flow_analysis
-from jedi.evaluate import param
-from jedi.evaluate import helpers
-from jedi.evaluate.cache import memoize_default
-
-
-def filter_after_position(names, position):
-    """
-    Removes all names after a certain position. If position is None, just
-    returns the names list.
-    """
-    if position is None:
-        return names
-
-    names_new = []
-    for n in names:
-        # Filter positions and also allow list comprehensions and lambdas.
-        if n.start_pos[0] is not None and n.start_pos < position \
-                or isinstance(n.get_definition(), (tree.CompFor, tree.Lambda)):
-            names_new.append(n)
-    return names_new
-
-
-def filter_definition_names(names, origin, position=None):
-    """
-    Filter names that are actual definitions in a scope. Names that are just
-    used will be ignored.
-    """
-    if not names:
-        return []
-
-    # Just calculate the scope from the first name.
-    stmt = names[0].get_definition()
-    scope = stmt.get_parent_scope()
-
-    if not (isinstance(scope, er.FunctionExecution) and
-            isinstance(scope.base, er.LambdaWrapper)):
-        names = filter_after_position(names, position)
-    names = [name for name in names if name.is_definition()]
-
-    # Private name mangling (compile.c) disallows access on names
-    # preceded by two underscores `__` if used outside of the class. Names
-    # that also end with two underscores (e.g. __id__) are not affected.
-    for name in list(names):
-        if name.value.startswith('__') and not name.value.endswith('__'):
-            if filter_private_variable(scope, origin):
-                names.remove(name)
-    return names
-
-
-class NameFinder(object):
-    def __init__(self, evaluator, scope, name_str, position=None):
-        self._evaluator = evaluator
-        # Make sure that it's not just a syntax tree node.
- self.scope = evaluator.wrap(scope) - self.name_str = name_str - self.position = position - self._found_predefined_if_name = None - - @debug.increase_indent - def find(self, scopes, attribute_lookup): - """ - :params bool attribute_lookup: Tell to logic if we're accessing the - attribute or the contents of e.g. a function. - """ - # TODO rename scopes to names_dicts - - names = self.filter_name(scopes) - if self._found_predefined_if_name is not None: - return self._found_predefined_if_name - - types = self._names_to_types(names, attribute_lookup) - - if not names and not types \ - and not (isinstance(self.name_str, tree.Name) and - isinstance(self.name_str.parent.parent, tree.Param)): - if not isinstance(self.name_str, (str, unicode)): # TODO Remove? - if attribute_lookup: - analysis.add_attribute_error(self._evaluator, - self.scope, self.name_str) - else: - message = ("NameError: name '%s' is not defined." - % self.name_str) - analysis.add(self._evaluator, 'name-error', self.name_str, - message) - - debug.dbg('finder._names_to_types: %s -> %s', names, types) - return types - - def scopes(self, search_global=False): - if search_global: - return global_names_dict_generator(self._evaluator, self.scope, self.position) - else: - return ((n, None) for n in self.scope.names_dicts(search_global)) - - def names_dict_lookup(self, names_dict, position): - def get_param(scope, el): - if isinstance(el.get_parent_until(tree.Param), tree.Param): - return scope.param_by_name(str(el)) - return el - - search_str = str(self.name_str) - try: - names = names_dict[search_str] - if not names: # We want names, otherwise stop. - return [] - except KeyError: - return [] - - names = filter_definition_names(names, self.name_str, position) - - name_scope = None - # Only the names defined in the last position are valid definitions. - last_names = [] - for name in reversed(sorted(names, key=lambda name: name.start_pos)): - stmt = name.get_definition() - name_scope = self._evaluator.wrap(stmt.get_parent_scope()) - - if isinstance(self.scope, er.Instance) and not isinstance(name_scope, er.Instance): - # Instances should not be checked for positioning, because we - # don't know in which order the functions are called. - last_names.append(name) - continue - - if isinstance(name_scope, compiled.CompiledObject): - # Let's test this. TODO need comment. shouldn't this be - # filtered before? - last_names.append(name) - continue - - if isinstance(stmt, er.ModuleWrapper): - # In case of REPL completion, we can infer modules names that - # don't really have a definition (because they are really just - # namespaces). In this case we can just add it. - last_names.append(name) - continue - - if isinstance(name, compiled.CompiledName) \ - or isinstance(name, er.InstanceName) and isinstance(name._origin_name, compiled.CompiledName): - last_names.append(name) - continue - - if isinstance(self.name_str, tree.Name): - origin_scope = self.name_str.get_parent_until(tree.Scope, reverse=True) - scope = self.name_str - check = None - while True: - scope = scope.parent - if scope.type in ("if_stmt", "for_stmt", "comp_for"): - try: - name_dict = self._evaluator.predefined_if_name_dict_dict[scope] - types = set(name_dict[str(self.name_str)]) - except KeyError: - continue - else: - if self.name_str.start_pos < scope.children[1].end_pos: - # It doesn't make any sense to check if - # statements in the if statement itself, just - # deliver types. 
- self._found_predefined_if_name = types - else: - check = flow_analysis.break_check(self._evaluator, self.scope, - origin_scope) - if check is flow_analysis.UNREACHABLE: - self._found_predefined_if_name = set() - else: - self._found_predefined_if_name = types - break - if isinstance(scope, tree.IsScope) or scope is None: - break - else: - origin_scope = None - - if isinstance(stmt.parent, compiled.CompiledObject): - # TODO seriously? this is stupid. - continue - check = flow_analysis.break_check(self._evaluator, name_scope, - stmt, origin_scope) - if check is not flow_analysis.UNREACHABLE: - last_names.append(name) - - if check is flow_analysis.REACHABLE: - break - - if isinstance(name_scope, er.FunctionExecution): - # Replace params - return [get_param(name_scope, n) for n in last_names] - return last_names - - def filter_name(self, names_dicts): - """ - Searches names that are defined in a scope (the different - `names_dicts`), until a name fits. - """ - names = [] - for names_dict, position in names_dicts: - names = self.names_dict_lookup(names_dict, position) - if names: - break - - debug.dbg('finder.filter_name "%s" in (%s): %s@%s', self.name_str, - self.scope, names, self.position) - return list(self._clean_names(names)) - - def _clean_names(self, names): - """ - ``NameFinder.filter_name`` should only output names with correct - wrapper parents. We don't want to see AST classes out in the - evaluation, so remove them already here! - """ - for n in names: - definition = n.parent - if isinstance(definition, (compiled.CompiledObject, - iterable.BuiltinMethod)): - # TODO this if should really be removed by changing the type of - # those classes. - yield n - elif definition.type in ('funcdef', 'classdef', 'file_input'): - yield self._evaluator.wrap(definition).name - else: - yield n - - def _check_getattr(self, inst): - """Checks for both __getattr__ and __getattribute__ methods""" - result = set() - # str is important, because it shouldn't be `Name`! - name = compiled.create(self._evaluator, str(self.name_str)) - with common.ignored(KeyError): - result = inst.execute_subscope_by_name('__getattr__', name) - if not result: - # This is a little bit special. `__getattribute__` is in Python - # executed before `__getattr__`. But: I know no use case, where - # this could be practical and where jedi would return wrong types. - # If you ever find something, let me know! - # We are inversing this, because a hand-crafted `__getattribute__` - # could still call another hand-crafted `__getattr__`, but not the - # other way around. - with common.ignored(KeyError): - result = inst.execute_subscope_by_name('__getattribute__', name) - return result - - def _names_to_types(self, names, attribute_lookup): - types = set() - - # Add isinstance and other if/assert knowledge. - if isinstance(self.name_str, tree.Name): - # Ignore FunctionExecution parents for now. 
- flow_scope = self.name_str - until = flow_scope.get_parent_until(er.FunctionExecution) - while not isinstance(until, er.FunctionExecution): - flow_scope = flow_scope.get_parent_scope(include_flows=True) - if flow_scope is None: - break - # TODO check if result is in scope -> no evaluation necessary - n = check_flow_information(self._evaluator, flow_scope, - self.name_str, self.position) - if n: - return n - - for name in names: - new_types = _name_to_types(self._evaluator, name, self.scope) - if isinstance(self.scope, (er.Class, er.Instance)) and attribute_lookup: - types |= set(self._resolve_descriptors(name, new_types)) - else: - types |= set(new_types) - if not names and isinstance(self.scope, er.Instance): - # handling __getattr__ / __getattribute__ - return self._check_getattr(self.scope) - - return types - - def _resolve_descriptors(self, name, types): - # The name must not be in the dictionary, but part of the class - # definition. __get__ is only called if the descriptor is defined in - # the class dictionary. - name_scope = name.get_definition().get_parent_scope() - if not isinstance(name_scope, (er.Instance, tree.Class)): - return types - - result = set() - for r in types: - try: - desc_return = r.get_descriptor_returns - except AttributeError: - result.add(r) - else: - result |= desc_return(self.scope) - return result - - -def _get_global_stmt_scopes(evaluator, global_stmt, name): - global_stmt_scope = global_stmt.get_parent_scope() - module = global_stmt_scope.get_parent_until() - for used_name in module.used_names[str(name)]: - if used_name.parent.type == 'global_stmt': - yield evaluator.wrap(used_name.get_parent_scope()) - - -@memoize_default(set(), evaluator_is_first_arg=True) -def _name_to_types(evaluator, name, scope): - types = [] - typ = name.get_definition() - if typ.isinstance(tree.ForStmt): - types = pep0484.find_type_from_comment_hint_for(evaluator, typ, name) - if types: - return types - if typ.isinstance(tree.WithStmt): - types = pep0484.find_type_from_comment_hint_with(evaluator, typ, name) - if types: - return types - if typ.isinstance(tree.ForStmt, tree.CompFor): - container_types = evaluator.eval_element(typ.children[3]) - for_types = iterable.py__iter__types(evaluator, container_types, typ.children[3]) - types = check_tuple_assignments(evaluator, for_types, name) - elif isinstance(typ, tree.Param): - types = _eval_param(evaluator, typ, scope) - elif typ.isinstance(tree.ExprStmt): - types = _remove_statements(evaluator, typ, name) - elif typ.isinstance(tree.WithStmt): - types = evaluator.eval_element(typ.node_from_name(name)) - elif isinstance(typ, tree.Import): - types = imports.ImportWrapper(evaluator, name).follow() - elif typ.type == 'global_stmt': - for s in _get_global_stmt_scopes(evaluator, typ, name): - finder = NameFinder(evaluator, s, str(name)) - names_dicts = finder.scopes(search_global=True) - # For global_stmt lookups, we only need the first possible scope, - # which means the function itself. - names_dicts = [next(names_dicts)] - types += finder.find(names_dicts, attribute_lookup=False) - elif isinstance(typ, tree.TryStmt): - # TODO an exception can also be a tuple. Check for those. - # TODO check for types that are not classes and add it to - # the static analysis report. 
- exceptions = evaluator.eval_element(name.get_previous_sibling().get_previous_sibling()) - types = set(chain.from_iterable(evaluator.execute(t) for t in exceptions)) - else: - if typ.isinstance(er.Function): - typ = typ.get_decorated_func() - types = set([typ]) - return types - - -def _remove_statements(evaluator, stmt, name): - """ - This is the part where statements are being stripped. - - Due to lazy evaluation, statements like a = func; b = a; b() have to be - evaluated. - """ - types = set() - # Remove the statement docstr stuff for now, that has to be - # implemented with the evaluator class. - #if stmt.docstr: - #res_new.append(stmt) - - check_instance = None - if isinstance(stmt, er.InstanceElement) and stmt.is_class_var: - check_instance = stmt.instance - stmt = stmt.var - - pep0484types = \ - pep0484.find_type_from_comment_hint_assign(evaluator, stmt, name) - if pep0484types: - return pep0484types - types |= evaluator.eval_statement(stmt, seek_name=name) - - if check_instance is not None: - # class renames - types = set([er.get_instance_el(evaluator, check_instance, a, True) - if isinstance(a, (er.Function, tree.Function)) - else a for a in types]) - return types - - -def _eval_param(evaluator, param, scope): - res_new = set() - func = param.get_parent_scope() - - cls = func.parent.get_parent_until((tree.Class, tree.Function)) - - from jedi.evaluate.param import ExecutedParam, Arguments - if isinstance(cls, tree.Class) and param.position_nr == 0 \ - and not isinstance(param, ExecutedParam): - # This is where we add self - if it has never been - # instantiated. - if isinstance(scope, er.InstanceElement): - res_new.add(scope.instance) - else: - inst = er.Instance(evaluator, evaluator.wrap(cls), - Arguments(evaluator, ()), is_generated=True) - res_new.add(inst) - return res_new - - # Instances are typically faked, if the instance is not called from - # outside. Here we check it for __init__ functions and return. - if isinstance(func, er.InstanceElement) \ - and func.instance.is_generated and str(func.name) == '__init__': - param = func.var.params[param.position_nr] - - # Add pep0484 and docstring knowledge. - pep0484_hints = pep0484.follow_param(evaluator, param) - doc_params = docstrings.follow_param(evaluator, param) - if pep0484_hints or doc_params: - return list(set(pep0484_hints) | set(doc_params)) - - if isinstance(param, ExecutedParam): - return res_new | param.eval(evaluator) - else: - # Param owns no information itself. - res_new |= dynamic.search_params(evaluator, param) - if not res_new: - if param.stars: - t = 'tuple' if param.stars == 1 else 'dict' - typ = list(evaluator.find_types(evaluator.BUILTINS, t))[0] - res_new = evaluator.execute(typ) - if param.default: - res_new |= evaluator.eval_element(param.default) - return res_new - - -def check_flow_information(evaluator, flow, search_name, pos): - """ Try to find out the type of a variable just with the information that - is given by the flows: e.g. It is also responsible for assert checks.:: - - if isinstance(k, str): - k. # <- completion here - - ensures that `k` is a string. - """ - if not settings.dynamic_flow_information: - return None - - result = set() - if flow.is_scope(): - # Check for asserts. 
- try: - names = reversed(flow.names_dict[search_name.value]) - except (KeyError, AttributeError): - names = [] - - for name in names: - ass = name.get_parent_until(tree.AssertStmt) - if isinstance(ass, tree.AssertStmt) and pos is not None and ass.start_pos < pos: - result = _check_isinstance_type(evaluator, ass.assertion(), search_name) - if result: - break - - if isinstance(flow, (tree.IfStmt, tree.WhileStmt)): - potential_ifs = [c for c in flow.children[1::4] if c != ':'] - for if_test in reversed(potential_ifs): - if search_name.start_pos > if_test.end_pos: - return _check_isinstance_type(evaluator, if_test, search_name) - return result - - -def _check_isinstance_type(evaluator, element, search_name): - try: - assert element.type in ('power', 'atom_expr') - # this might be removed if we analyze and, etc - assert len(element.children) == 2 - first, trailer = element.children - assert isinstance(first, tree.Name) and first.value == 'isinstance' - assert trailer.type == 'trailer' and trailer.children[0] == '(' - assert len(trailer.children) == 3 - - # arglist stuff - arglist = trailer.children[1] - args = param.Arguments(evaluator, arglist, trailer) - lst = list(args.unpack()) - # Disallow keyword arguments - assert len(lst) == 2 and lst[0][0] is None and lst[1][0] is None - name = lst[0][1][0] # first argument, values, first value - # Do a simple get_code comparison. They should just have the same code, - # and everything will be all right. - classes = lst[1][1][0] - call = helpers.call_of_leaf(search_name) - assert name.get_code(normalized=True) == call.get_code(normalized=True) - except AssertionError: - return set() - - result = set() - for cls_or_tup in evaluator.eval_element(classes): - if isinstance(cls_or_tup, iterable.Array) and cls_or_tup.type == 'tuple': - for typ in unite(cls_or_tup.py__iter__()): - result |= evaluator.execute(typ) - else: - result |= evaluator.execute(cls_or_tup) - return result - - -def global_names_dict_generator(evaluator, scope, position): - """ - For global name lookups. Yields tuples of (names_dict, position). If the - position is None, the position does not matter anymore in that scope. - - This function is used to include names from outer scopes. For example, when - the current scope is function: - - >>> from jedi._compatibility import u, no_unicode_pprint - >>> from jedi.parser import ParserWithRecovery, load_grammar - >>> parser = ParserWithRecovery(load_grammar(), u(''' - ... x = ['a', 'b', 'c'] - ... def func(): - ... y = None - ... ''')) - >>> scope = parser.module.subscopes[0] - >>> scope - - - `global_names_dict_generator` is a generator. First it yields names from - most inner scope. - - >>> from jedi.evaluate import Evaluator - >>> evaluator = Evaluator(load_grammar()) - >>> scope = evaluator.wrap(scope) - >>> pairs = list(global_names_dict_generator(evaluator, scope, (4, 0))) - >>> no_unicode_pprint(pairs[0]) - ({'func': [], 'y': []}, (4, 0)) - - Then it yields the names from one level "lower". In this example, this - is the most outer scope. As you can see, the position in the tuple is now - None, because typically the whole module is loaded before the function is - called. - - >>> no_unicode_pprint(pairs[1]) - ({'func': [], 'x': []}, None) - - After that we have a few underscore names that are part of the module. - - >>> sorted(pairs[2][0].keys()) - ['__doc__', '__file__', '__name__', '__package__'] - >>> pairs[3] # global names -> there are none in our example. - ({}, None) - >>> pairs[4] # package modules -> Also none. 
- ({}, None) - - Finally, it yields names from builtin, if `include_builtin` is - true (default). - - >>> pairs[5][0].values() #doctest: +ELLIPSIS - [[], ...] - """ - in_func = False - while scope is not None: - if not (scope.type == 'classdef' and in_func): - # Names in methods cannot be resolved within the class. - - for names_dict in scope.names_dicts(True): - yield names_dict, position - if hasattr(scope, 'resets_positions'): - # TODO This is so ugly, seriously. However there's - # currently no good way of influencing - # global_names_dict_generator when it comes to certain - # objects. - position = None - if scope.type == 'funcdef': - # The position should be reset if the current scope is a function. - in_func = True - position = None - scope = evaluator.wrap(scope.get_parent_scope()) - - # Add builtins to the global scope. - for names_dict in evaluator.BUILTINS.names_dicts(True): - yield names_dict, None - - -def check_tuple_assignments(evaluator, types, name): - """ - Checks if tuples are assigned. - """ - for index, node in name.assignment_indexes(): - iterated = iterable.py__iter__(evaluator, types, node) - for _ in range(index + 1): - try: - types = next(iterated) - except StopIteration: - # We could do this with the default param in next. But this - # would allow this loop to run for a very long time if the - # index number is high. Therefore break if the loop is - # finished. - types = set() - break - return types - - -def filter_private_variable(scope, origin_node): - """Check if a variable is defined inside the same class or outside.""" - instance = scope.get_parent_scope() - coming_from = origin_node - while coming_from is not None \ - and not isinstance(coming_from, (tree.Class, compiled.CompiledObject)): - coming_from = coming_from.get_parent_scope() - - # CompiledObjects don't have double underscore attributes, but Jedi abuses - # those for fakes (builtins.pym -> list). - if isinstance(instance, compiled.CompiledObject): - return instance != coming_from - else: - return isinstance(instance, er.Instance) and instance.base.base != coming_from diff --git a/pythonFiles/preview/jedi/evaluate/flow_analysis.py b/pythonFiles/preview/jedi/evaluate/flow_analysis.py deleted file mode 100644 index e188264bc0d8..000000000000 --- a/pythonFiles/preview/jedi/evaluate/flow_analysis.py +++ /dev/null @@ -1,91 +0,0 @@ -from jedi.parser import tree - - -class Status(object): - lookup_table = {} - - def __init__(self, value, name): - self._value = value - self._name = name - Status.lookup_table[value] = self - - def invert(self): - if self is REACHABLE: - return UNREACHABLE - elif self is UNREACHABLE: - return REACHABLE - else: - return UNSURE - - def __and__(self, other): - if UNSURE in (self, other): - return UNSURE - else: - return REACHABLE if self._value and other._value else UNREACHABLE - - def __repr__(self): - return '<%s: %s>' % (type(self).__name__, self._name) - - -REACHABLE = Status(True, 'reachable') -UNREACHABLE = Status(False, 'unreachable') -UNSURE = Status(None, 'unsure') - - -def break_check(evaluator, base_scope, stmt, origin_scope=None): - element_scope = evaluator.wrap(stmt.get_parent_scope(include_flows=True)) - # Direct parents get resolved, we filter scopes that are separate branches. - # This makes sense for autocompletion and static analysis. For actual - # Python it doesn't matter, because we're talking about potentially - # unreachable code. - # e.g. `if 0:` would cause all name lookup within the flow make - # unaccessible. 
This is not a "problem" in Python, because the code is - # never called. In Jedi though, we still want to infer types. - while origin_scope is not None: - if element_scope == origin_scope: - return REACHABLE - origin_scope = origin_scope.parent - x = _break_check(evaluator, stmt, base_scope, element_scope) - return x - - -def _break_check(evaluator, stmt, base_scope, element_scope): - element_scope = evaluator.wrap(element_scope) - base_scope = evaluator.wrap(base_scope) - - reachable = REACHABLE - if isinstance(element_scope, tree.IfStmt): - if element_scope.node_after_else(stmt): - for check_node in element_scope.check_nodes(): - reachable = _check_if(evaluator, check_node) - if reachable in (REACHABLE, UNSURE): - break - reachable = reachable.invert() - else: - node = element_scope.node_in_which_check_node(stmt) - if node is not None: - reachable = _check_if(evaluator, node) - elif isinstance(element_scope, (tree.TryStmt, tree.WhileStmt)): - return UNSURE - - # Only reachable branches need to be examined further. - if reachable in (UNREACHABLE, UNSURE): - return reachable - - if element_scope.type == 'file_input': - # The definition is in another module and therefore just return what we - # have generated. - return reachable - if base_scope != element_scope and base_scope != element_scope.parent: - return reachable & _break_check(evaluator, stmt, base_scope, element_scope.parent) - else: - return reachable - - -def _check_if(evaluator, node): - types = evaluator.eval_element(node) - values = set(x.py__bool__() for x in types) - if len(values) == 1: - return Status.lookup_table[values.pop()] - else: - return UNSURE diff --git a/pythonFiles/preview/jedi/evaluate/helpers.py b/pythonFiles/preview/jedi/evaluate/helpers.py deleted file mode 100644 index 27293edaf875..000000000000 --- a/pythonFiles/preview/jedi/evaluate/helpers.py +++ /dev/null @@ -1,200 +0,0 @@ -import copy -from itertools import chain - -from jedi.parser import tree - - -def deep_ast_copy(obj, parent=None, new_elements=None): - """ - Much, much faster than copy.deepcopy, but just for Parser elements (Doesn't - copy parents). - """ - - if new_elements is None: - new_elements = {} - - def copy_node(obj): - # If it's already in the cache, just return it. - try: - return new_elements[obj] - except KeyError: - # Actually copy and set attributes. - new_obj = copy.copy(obj) - new_elements[obj] = new_obj - - # Copy children - new_children = [] - for child in obj.children: - typ = child.type - if typ in ('newline', 'operator', 'keyword', 'number', 'string', - 'indent', 'dedent', 'endmarker', 'error_leaf'): - # At the moment we're not actually copying those primitive - # elements, because there's really no need to. The parents are - # obviously wrong, but that's not an issue. - new_child = child - elif typ == 'name': - new_elements[child] = new_child = copy.copy(child) - new_child.parent = new_obj - else: # Is a BaseNode. - new_child = copy_node(child) - new_child.parent = new_obj - new_children.append(new_child) - new_obj.children = new_children - - # Copy the names_dict (if there is one). - try: - names_dict = obj.names_dict - except AttributeError: - pass - else: - try: - new_obj.names_dict = new_names_dict = {} - except AttributeError: # Impossible to set CompFor.names_dict - pass - else: - for string, names in names_dict.items(): - new_names_dict[string] = [new_elements[n] for n in names] - return new_obj - - if isinstance(obj, tree.BaseNode): - new_obj = copy_node(obj) - else: - # Special case of a Name object. 
- new_elements[obj] = new_obj = copy.copy(obj) - - if parent is not None: - new_obj.parent = parent - return new_obj - - -def call_of_leaf(leaf, cut_own_trailer=False): - """ - Creates a "call" node that consist of all ``trailer`` and ``power`` - objects. E.g. if you call it with ``append``:: - - list([]).append(3) or None - - You would get a node with the content ``list([]).append`` back. - - This generates a copy of the original ast node. - - If you're using the leaf, e.g. the bracket `)` it will return ``list([])``. - - # TODO remove cut_own_trailer option, since its always used with it. Just - # ignore it, It's not what we want anyway. Or document it better? - """ - trailer = leaf.parent - # The leaf may not be the last or first child, because there exist three - # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples - # we should not match anything more than x. - if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]): - if trailer.type == 'atom': - return trailer - return leaf - - power = trailer.parent - index = power.children.index(trailer) - power = deep_ast_copy(power) - if cut_own_trailer: - cut = index - else: - cut = index + 1 - power.children[cut:] = [] - - if power.type == 'error_node': - start = index - while True: - start -= 1 - if power.children[start].type != 'trailer': - break - transformed = tree.Node('power', power.children[start:]) - transformed.parent = power.parent - return transformed - - return power - - -def get_names_of_node(node): - try: - children = node.children - except AttributeError: - if node.type == 'name': - return [node] - else: - return [] - else: - return list(chain.from_iterable(get_names_of_node(c) for c in children)) - - -def get_module_names(module, all_scopes): - """ - Returns a dictionary with name parts as keys and their call paths as - values. - """ - if all_scopes: - dct = module.used_names - else: - dct = module.names_dict - return chain.from_iterable(dct.values()) - - -class FakeImport(tree.ImportName): - def __init__(self, name, parent, level=0): - super(FakeImport, self).__init__([]) - self.parent = parent - self._level = level - self.name = name - - def get_defined_names(self): - return [self.name] - - def aliases(self): - return {} - - @property - def level(self): - return self._level - - @property - def start_pos(self): - return 0, 0 - - def paths(self): - return [[self.name]] - - def is_definition(self): - return True - - -class FakeName(tree.Name): - def __init__(self, name_str, parent=None, start_pos=(0, 0), is_definition=None): - """ - In case is_definition is defined (not None), that bool value will be - returned. - """ - super(FakeName, self).__init__(tree.zero_position_modifier, name_str, start_pos) - self.parent = parent - self._is_definition = is_definition - - def get_definition(self): - return self.parent - - def is_definition(self): - if self._is_definition is None: - return super(FakeName, self).is_definition() - else: - return self._is_definition - - -class LazyName(FakeName): - def __init__(self, name, parent_callback, is_definition=None): - super(LazyName, self).__init__(name, is_definition=is_definition) - self._parent_callback = parent_callback - - @property - def parent(self): - return self._parent_callback() - - @parent.setter - def parent(self, value): - pass # Do nothing, super classes can try to set the parent. 
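The `deep_ast_copy` helper removed above copies a parse tree while caching every copied node in `new_elements`, so a node reachable through two paths is copied once and stays shared. A minimal standalone sketch of that memoized-copy pattern (toy `Node` class, not jedi's tree API):

```python
import copy

class Node:
    def __init__(self, value, children=()):
        self.value = value
        self.children = list(children)
        self.parent = None

def deep_copy(node, cache=None):
    """Recursively copy `node`; `cache` maps originals to their copies."""
    if cache is None:
        cache = {}
    try:
        return cache[node]              # already copied -> reuse the copy
    except KeyError:
        new = cache[node] = copy.copy(node)
        new.children = [deep_copy(c, cache) for c in node.children]
        for child in new.children:
            child.parent = new          # re-point parents at the copies
        return new

shared = Node('leaf')
root = Node('root', [Node('a', [shared]), Node('b', [shared])])
clone = deep_copy(root)
# The leaf shared by both branches is still shared in the copy.
assert clone.children[0].children[0] is clone.children[1].children[0]
```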
diff --git a/pythonFiles/preview/jedi/evaluate/imports.py b/pythonFiles/preview/jedi/evaluate/imports.py deleted file mode 100644 index d65d897b5413..000000000000 --- a/pythonFiles/preview/jedi/evaluate/imports.py +++ /dev/null @@ -1,517 +0,0 @@ -""" -:mod:`jedi.evaluate.imports` is here to resolve import statements and return -the modules/classes/functions/whatever, which they stand for. However there's -not any actual importing done. This module is about finding modules in the -filesystem. This can be quite tricky sometimes, because Python imports are not -always that simple. - -This module uses imp for python up to 3.2 and importlib for python 3.3 on; the -correct implementation is delegated to _compatibility. - -This module also supports import autocompletion, which means to complete -statements like ``from datetim`` (curser at the end would return ``datetime``). -""" -import imp -import os -import pkgutil -import sys -from itertools import chain - -from jedi._compatibility import find_module, unicode -from jedi import common -from jedi import debug -from jedi.parser import fast -from jedi.parser import tree -from jedi.parser.utils import save_parser, load_parser, parser_cache -from jedi.evaluate import sys_path -from jedi.evaluate import helpers -from jedi import settings -from jedi.common import source_to_unicode -from jedi.evaluate import compiled -from jedi.evaluate import analysis -from jedi.evaluate.cache import memoize_default, NO_DEFAULT - - -def completion_names(evaluator, imp, pos): - name = imp.name_for_position(pos) - module = evaluator.wrap(imp.get_parent_until()) - if name is None: - level = 0 - for node in imp.children: - if node.end_pos <= pos: - if node in ('.', '...'): - level += len(node.value) - import_path = [] - else: - # Completion on an existing name. - - # The import path needs to be reduced by one, because we're completing. - import_path = imp.path_for_name(name)[:-1] - level = imp.level - - importer = Importer(evaluator, tuple(import_path), module, level) - if isinstance(imp, tree.ImportFrom): - c = imp.children - only_modules = c[c.index('import')].start_pos >= pos - else: - only_modules = True - return importer.completion_names(evaluator, only_modules) - - -class ImportWrapper(tree.Base): - def __init__(self, evaluator, name): - self._evaluator = evaluator - self._name = name - - self._import = name.get_parent_until(tree.Import) - self.import_path = self._import.path_for_name(name) - - @memoize_default() - def follow(self, is_goto=False): - module = self._evaluator.wrap(self._import.get_parent_until()) - import_path = self._import.path_for_name(self._name) - from_import_name = None - try: - from_names = self._import.get_from_names() - except AttributeError: - # Is an import_name - pass - else: - if len(from_names) + 1 == len(import_path): - # We have to fetch the from_names part first and then check - # if from_names exists in the modules. 
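- # (e.g. for ``from os import path`` the import path is ``['os', 'path']``
- # and ``from_names`` is ``['os']``.)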
- from_import_name = import_path[-1] - import_path = from_names - - importer = Importer(self._evaluator, tuple(import_path), - module, self._import.level) - - types = importer.follow() - - #if self._import.is_nested() and not self.nested_resolve: - # scopes = [NestedImportModule(module, self._import)] - - if from_import_name is not None: - types = set(chain.from_iterable( - self._evaluator.find_types(t, unicode(from_import_name), - is_goto=is_goto) - for t in types)) - - if not types: - path = import_path + [from_import_name] - importer = Importer(self._evaluator, tuple(path), - module, self._import.level) - types = importer.follow() - # goto only accepts `Name` - if is_goto: - types = set(s.name for s in types) - else: - # goto only accepts `Name` - if is_goto: - types = set(s.name for s in types) - - debug.dbg('after import: %s', types) - return types - - -class NestedImportModule(tree.Module): - """ - TODO while there's no use case for nested import module right now, we might - be able to use them for static analysis checks later on. - """ - def __init__(self, module, nested_import): - self._module = module - self._nested_import = nested_import - - def _get_nested_import_name(self): - """ - Generates an Import statement, that can be used to fake nested imports. - """ - i = self._nested_import - # This is not an existing Import statement. Therefore, set position to - # 0 (0 is not a valid line number). - zero = (0, 0) - names = [unicode(name) for name in i.namespace_names[1:]] - name = helpers.FakeName(names, self._nested_import) - new = tree.Import(i._sub_module, zero, zero, name) - new.parent = self._module - debug.dbg('Generated a nested import: %s', new) - return helpers.FakeName(str(i.namespace_names[1]), new) - - def __getattr__(self, name): - return getattr(self._module, name) - - def __repr__(self): - return "<%s: %s of %s>" % (self.__class__.__name__, self._module, - self._nested_import) - - -def _add_error(evaluator, name, message=None): - if hasattr(name, 'parent'): - # Should be a name, not a string! - analysis.add(evaluator, 'import-error', name, message) - - -def get_init_path(directory_path): - """ - The __init__ file can be searched in a directory. If found return it, else - None. - """ - for suffix, _, _ in imp.get_suffixes(): - path = os.path.join(directory_path, '__init__' + suffix) - if os.path.exists(path): - return path - return None - - -class Importer(object): - def __init__(self, evaluator, import_path, module, level=0): - """ - An implementation similar to ``__import__``. Use `follow` - to actually follow the imports. - - *level* specifies whether to use absolute or relative imports. 0 (the - default) means only perform absolute imports. Positive values for level - indicate the number of parent directories to search relative to the - directory of the module calling ``__import__()`` (see PEP 328 for the - details). - - :param import_path: List of namespaces (strings or Names). - """ - debug.speed('import %s' % (import_path,)) - self._evaluator = evaluator - self.level = level - self.module = module - try: - self.file_path = module.py__file__() - except AttributeError: - # Can be None for certain compiled modules like 'builtins'. 
- self.file_path = None - - if level: - base = module.py__package__().split('.') - if base == ['']: - base = [] - if level > len(base): - path = module.py__file__() - if path is not None: - import_path = list(import_path) - for i in range(level): - path = os.path.dirname(path) - dir_name = os.path.basename(path) - # This is not the proper way to do relative imports. However, since - # Jedi cannot be sure about the entry point, we just calculate an - # absolute path here. - if dir_name: - import_path.insert(0, dir_name) - else: - _add_error(self._evaluator, import_path[-1]) - import_path = [] - # TODO add import error. - debug.warning('Attempted relative import beyond top-level package.') - else: - # Here we basically rewrite the level to 0. - import_path = tuple(base) + tuple(import_path) - self.import_path = import_path - - @property - def str_import_path(self): - """Returns the import path as pure strings instead of `Name`.""" - return tuple(str(name) for name in self.import_path) - - @memoize_default() - def sys_path_with_modifications(self): - in_path = [] - sys_path_mod = list(sys_path.sys_path_with_modifications(self._evaluator, self.module)) - if self.file_path is not None: - # If you edit e.g. gunicorn, there will be imports like this: - # `from gunicorn import something`. But gunicorn is not in the - # sys.path. Therefore look if gunicorn is a parent directory, #56. - if self.import_path: # TODO is this check really needed? - for path in sys_path.traverse_parents(self.file_path): - if os.path.basename(path) == self.str_import_path[0]: - in_path.append(os.path.dirname(path)) - - # Since we know nothing about the call location of the sys.path, - # it's a possibility that the current directory is the origin of - # the Python execution. - sys_path_mod.insert(0, os.path.dirname(self.file_path)) - - return in_path + sys_path_mod - - @memoize_default(NO_DEFAULT) - def follow(self): - if not self.import_path: - return set() - return self._do_import(self.import_path, self.sys_path_with_modifications()) - - def _do_import(self, import_path, sys_path): - """ - This method is very similar to importlib's `_gcd_import`. - """ - import_parts = [str(i) for i in import_path] - - # Handle "magic" Flask extension imports: - # ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``. - if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']: - # New style. - ipath = ('flask_' + str(import_parts[2]),) + import_path[3:] - modules = self._do_import(ipath, sys_path) - if modules: - return modules - else: - # Old style - return self._do_import(('flaskext',) + import_path[2:], sys_path) - - module_name = '.'.join(import_parts) - try: - return set([self._evaluator.modules[module_name]]) - except KeyError: - pass - - if len(import_path) > 1: - # This is a recursive way of importing that works great with - # the module cache. - bases = self._do_import(import_path[:-1], sys_path) - if not bases: - return set() - # We can take the first element, because only the os special - # case yields multiple modules, which is not important for - # further imports. - parent_module = list(bases)[0] - - # This is a huge exception, we follow a nested import - # ``os.path``, because it's a very important one in Python - # that is being achieved by messing with ``sys.modules`` in - # ``os``. - if [str(i) for i in import_path] == ['os', 'path']: - return self._evaluator.find_types(parent_module, 'path') - - try: - paths = parent_module.py__path__() - except AttributeError: - # The module is not a package. 
- _add_error(self._evaluator, import_path[-1]) - return set() - else: - debug.dbg('search_module %s in paths %s', module_name, paths) - for path in paths: - # At the moment we are only using one path. So this is - # not important to be correct. - try: - module_file, module_path, is_pkg = \ - find_module(import_parts[-1], [path]) - break - except ImportError: - module_path = None - if module_path is None: - _add_error(self._evaluator, import_path[-1]) - return set() - else: - parent_module = None - try: - debug.dbg('search_module %s in %s', import_parts[-1], self.file_path) - # Override the sys.path. It works only good that way. - # Injecting the path directly into `find_module` did not work. - sys.path, temp = sys_path, sys.path - try: - module_file, module_path, is_pkg = \ - find_module(import_parts[-1]) - finally: - sys.path = temp - except ImportError: - # The module is not a package. - _add_error(self._evaluator, import_path[-1]) - return set() - - source = None - if is_pkg: - # In this case, we don't have a file yet. Search for the - # __init__ file. - if module_path.endswith(('.zip', '.egg')): - source = module_file.loader.get_source(module_name) - else: - module_path = get_init_path(module_path) - elif module_file: - source = module_file.read() - module_file.close() - - if module_file is None and not module_path.endswith(('.py', '.zip', '.egg')): - module = compiled.load_module(self._evaluator, module_path) - else: - module = _load_module(self._evaluator, module_path, source, sys_path, parent_module) - - if module is None: - # The file might raise an ImportError e.g. and therefore not be - # importable. - return set() - - self._evaluator.modules[module_name] = module - return set([module]) - - def _generate_name(self, name): - # Create a pseudo import to be able to follow them. - name = helpers.FakeName(name) - imp = helpers.FakeImport(name, parent=self.module) - name.parent = imp - return name - - def _get_module_names(self, search_path=None): - """ - Get the names of all modules in the search_path. This means file names - and not names defined in the files. - """ - - names = [] - # add builtin module names - if search_path is None: - names += [self._generate_name(name) for name in sys.builtin_module_names] - - if search_path is None: - search_path = self.sys_path_with_modifications() - for module_loader, name, is_pkg in pkgutil.iter_modules(search_path): - names.append(self._generate_name(name)) - return names - - def completion_names(self, evaluator, only_modules=False): - """ - :param only_modules: Indicates wheter it's possible to import a - definition that is not defined in a module. - """ - from jedi.evaluate import finder - names = [] - if self.import_path: - # flask - if self.str_import_path == ('flask', 'ext'): - # List Flask extensions like ``flask_foo`` - for mod in self._get_module_names(): - modname = str(mod) - if modname.startswith('flask_'): - extname = modname[len('flask_'):] - names.append(self._generate_name(extname)) - # Now the old style: ``flaskext.foo`` - for dir in self.sys_path_with_modifications(): - flaskext = os.path.join(dir, 'flaskext') - if os.path.isdir(flaskext): - names += self._get_module_names([flaskext]) - - for scope in self.follow(): - # Non-modules are not completable. 
- if not scope.type == 'file_input': # not a module - continue - - # namespace packages - if isinstance(scope, tree.Module) and scope.path.endswith('__init__.py'): - paths = scope.py__path__() - names += self._get_module_names(paths) - - if only_modules: - # In the case of an import like `from x.` we don't need to - # add all the variables. - if ('os',) == self.str_import_path and not self.level: - # os.path is a hardcoded exception, because it's a - # ``sys.modules`` modification. - names.append(self._generate_name('path')) - - continue - - for names_dict in scope.names_dicts(search_global=False): - _names = list(chain.from_iterable(names_dict.values())) - if not _names: - continue - _names = finder.filter_definition_names(_names, scope) - names += _names - else: - # Empty import path=completion after import - if not self.level: - names += self._get_module_names() - - if self.file_path is not None: - path = os.path.abspath(self.file_path) - for i in range(self.level - 1): - path = os.path.dirname(path) - names += self._get_module_names([path]) - - return names - - -def _load_module(evaluator, path=None, source=None, sys_path=None, parent_module=None): - def load(source): - dotted_path = path and compiled.dotted_from_fs_path(path, sys_path) - if path is not None and path.endswith(('.py', '.zip', '.egg')) \ - and dotted_path not in settings.auto_import_modules: - if source is None: - with open(path, 'rb') as f: - source = f.read() - else: - return compiled.load_module(evaluator, path) - p = path - p = fast.FastParser(evaluator.grammar, common.source_to_unicode(source), p) - save_parser(path, p) - from jedi.evaluate.representation import ModuleWrapper - return ModuleWrapper(evaluator, p.module, parent_module) - - if sys_path is None: - sys_path = evaluator.sys_path - - cached = load_parser(path) - module = load(source) if cached is None else cached.module - module = evaluator.wrap(module) - return module - - -def add_module(evaluator, module_name, module): - if '.' not in module_name: - # We cannot add paths with dots, because that would collide with - # the sepatator dots for nested packages. Therefore we return - # `__main__` in ModuleWrapper.py__name__(), which is similar to - # Python behavior. - evaluator.modules[module_name] = module - - -def get_modules_containing_name(evaluator, mods, name): - """ - Search a name in the directories of modules. - """ - def check_python_file(path): - try: - return parser_cache[path].parser.module - except KeyError: - try: - return check_fs(path) - except IOError: - return None - - def check_fs(path): - with open(path, 'rb') as f: - source = source_to_unicode(f.read()) - if name in source: - module_name = os.path.basename(path)[:-3] # Remove `.py`. - module = _load_module(evaluator, path, source) - add_module(evaluator, module_name, module) - return module - - # skip non python modules - mods = set(m for m in mods if not isinstance(m, compiled.CompiledObject)) - mod_paths = set() - for m in mods: - mod_paths.add(m.path) - yield m - - if settings.dynamic_params_for_other_modules: - paths = set(settings.additional_dynamic_modules) - for p in mod_paths: - if p is not None: - # We need abspath, because the seetings paths might not already - # have been converted to absolute paths. 
- d = os.path.dirname(os.path.abspath(p))
- for entry in os.listdir(d):
- if entry not in mod_paths:
- if entry.endswith('.py'):
- paths.add(d + os.path.sep + entry)
-
- for p in sorted(paths):
- # make testing easier, sort it - same results on every interpreter
- c = check_python_file(p)
- if c is not None and c not in mods and not isinstance(c, compiled.CompiledObject):
- yield c
diff --git a/pythonFiles/preview/jedi/evaluate/iterable.py b/pythonFiles/preview/jedi/evaluate/iterable.py
deleted file mode 100644
index 79a1a0e26d22..000000000000
--- a/pythonFiles/preview/jedi/evaluate/iterable.py
+++ /dev/null
@@ -1,863 +0,0 @@
-"""
-Contains all classes and functions to deal with lists, dicts, generators and
-iterators in general.
-
-Array modifications
-*******************
-
-If the content of an array (``set``/``list``) is requested somewhere, the
-current module will be checked for appearances of ``arr.append``,
-``arr.insert``, etc. If the ``arr`` name points to an actual array, the
-content will be added.
-
-This can be really CPU intensive, as you can imagine. Because |jedi| has to
-follow **every** ``append`` and check whether it's the right array. However
-this works pretty well, because in *slow* cases, the recursion detector and
-other settings will stop this process.
-
-It is important to note that:
-
-1. Array modifications work only in the current module.
-2. Jedi only checks Array additions; ``list.pop``, etc. are ignored.
-"""
-from jedi.common import unite, safe_property
-from jedi import debug
-from jedi import settings
-from jedi._compatibility import use_metaclass, unicode, zip_longest
-from jedi.parser import tree
-from jedi.evaluate import compiled
-from jedi.evaluate import helpers
-from jedi.evaluate.cache import CachedMetaClass, memoize_default
-from jedi.evaluate import analysis
-from jedi.evaluate import pep0484
-from jedi import common
-
-
-class IterableWrapper(tree.Base):
- def is_class(self):
- return False
-
- @memoize_default()
- def _get_names_dict(self, names_dict):
- builtin_methods = {}
- for cls in reversed(type(self).mro()):
- try:
- builtin_methods.update(cls.builtin_methods)
- except AttributeError:
- pass
-
- if not builtin_methods:
- return names_dict
-
- dct = {}
- for names in names_dict.values():
- for name in names:
- name_str = name.value
- try:
- method = builtin_methods[name_str, self.type]
- except KeyError:
- dct[name_str] = [name]
- else:
- parent = BuiltinMethod(self, method, name.parent)
- dct[name_str] = [helpers.FakeName(name_str, parent, is_definition=True)]
- return dct
-
-
-class BuiltinMethod(IterableWrapper):
- """``Generator.__next__`` ``dict.values`` methods and so on."""
- def __init__(self, builtin, method, builtin_func):
- self._builtin = builtin
- self._method = method
- self._builtin_func = builtin_func
-
- def py__call__(self, params):
- return self._method(self._builtin)
-
- def __getattr__(self, name):
- return getattr(self._builtin_func, name)
-
-
-def has_builtin_methods(cls):
- cls.builtin_methods = {}
- for func in cls.__dict__.values():
- try:
- cls.builtin_methods.update(func.registered_builtin_methods)
- except AttributeError:
- pass
- return cls
-
-
-def register_builtin_method(method_name, type=None):
- def wrapper(func):
- dct = func.__dict__.setdefault('registered_builtin_methods', {})
- dct[method_name, type] = func
- return func
- return wrapper
-
-
-@has_builtin_methods
-class GeneratorMixin(object):
- type = None
-
- @register_builtin_method('send')
- @register_builtin_method('next')
- 
@register_builtin_method('__next__') - def py__next__(self): - # TODO add TypeError if params are given. - return unite(self.py__iter__()) - - @memoize_default() - def names_dicts(self, search_global=False): # is always False - gen_obj = compiled.get_special_object(self._evaluator, 'GENERATOR_OBJECT') - yield self._get_names_dict(gen_obj.names_dict) - - def py__bool__(self): - return True - - def py__class__(self): - gen_obj = compiled.get_special_object(self._evaluator, 'GENERATOR_OBJECT') - return gen_obj.py__class__() - - -class Generator(use_metaclass(CachedMetaClass, IterableWrapper, GeneratorMixin)): - """Handling of `yield` functions.""" - - def __init__(self, evaluator, func, var_args): - super(Generator, self).__init__() - self._evaluator = evaluator - self.func = func - self.var_args = var_args - - def py__iter__(self): - from jedi.evaluate.representation import FunctionExecution - f = FunctionExecution(self._evaluator, self.func, self.var_args) - return f.get_yield_types() - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'parent', 'get_imports', - 'doc', 'docstr', 'get_parent_until', - 'get_code', 'subscopes']: - raise AttributeError("Accessing %s of %s is not allowed." - % (self, name)) - return getattr(self.func, name) - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.func) - - -class Comprehension(IterableWrapper): - @staticmethod - def from_atom(evaluator, atom): - bracket = atom.children[0] - if bracket == '{': - if atom.children[1].children[1] == ':': - cls = DictComprehension - else: - cls = SetComprehension - elif bracket == '(': - cls = GeneratorComprehension - elif bracket == '[': - cls = ListComprehension - return cls(evaluator, atom) - - def __init__(self, evaluator, atom): - self._evaluator = evaluator - self._atom = atom - - def _get_comprehension(self): - # The atom contains a testlist_comp - return self._atom.children[1] - - def _get_comp_for(self): - # The atom contains a testlist_comp - return self._get_comprehension().children[1] - - @memoize_default() - def _eval_node(self, index=0): - """ - The first part `x + 1` of the list comprehension: - - [x + 1 for x in foo] - """ - comp_for = self._get_comp_for() - # For nested comprehensions we need to search the last one. - from jedi.evaluate.representation import InstanceElement - node = self._get_comprehension().children[index] - if isinstance(node, InstanceElement): - # This seems to be a strange case that I haven't found a way to - # write tests against. However since it's my new goal to get rid of - # InstanceElement anyway, I don't care. 
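- # ``InstanceElement.var`` is the wrapped parser node; unwrap it here.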
- node = node.var - last_comp = list(comp_for.get_comp_fors())[-1] - return helpers.deep_ast_copy(node, parent=last_comp) - - def _nested(self, comp_fors): - evaluator = self._evaluator - comp_for = comp_fors[0] - input_node = comp_for.children[3] - input_types = evaluator.eval_element(input_node) - - iterated = py__iter__(evaluator, input_types, input_node) - exprlist = comp_for.children[1] - for i, types in enumerate(iterated): - evaluator.predefined_if_name_dict_dict[comp_for] = \ - unpack_tuple_to_dict(evaluator, types, exprlist) - try: - for result in self._nested(comp_fors[1:]): - yield result - except IndexError: - iterated = evaluator.eval_element(self._eval_node()) - if self.type == 'dict': - yield iterated, evaluator.eval_element(self._eval_node(2)) - else: - yield iterated - finally: - del evaluator.predefined_if_name_dict_dict[comp_for] - - @memoize_default(default=[]) - @common.to_list - def _iterate(self): - comp_fors = tuple(self._get_comp_for().get_comp_fors()) - for result in self._nested(comp_fors): - yield result - - def py__iter__(self): - return self._iterate() - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self._atom) - - -@has_builtin_methods -class ArrayMixin(object): - @memoize_default() - def names_dicts(self, search_global=False): # Always False. - # `array.type` is a string with the type, e.g. 'list'. - scope = compiled.builtin_from_name(self._evaluator, self.type) - # builtins only have one class -> [0] - scopes = self._evaluator.execute_evaluated(scope, self) - names_dicts = list(scopes)[0].names_dicts(search_global) - #yield names_dicts[0] - yield self._get_names_dict(names_dicts[1]) - - def py__bool__(self): - return None # We don't know the length, because of appends. - - def py__class__(self): - return compiled.builtin_from_name(self._evaluator, self.type) - - @safe_property - def parent(self): - return self._evaluator.BUILTINS - - @property - def name(self): - return FakeSequence(self._evaluator, [], self.type).name - - @memoize_default() - def dict_values(self): - return unite(self._evaluator.eval_element(v) for k, v in self._items()) - - @register_builtin_method('values', type='dict') - def _imitate_values(self): - items = self.dict_values() - return create_evaluated_sequence_set(self._evaluator, items, sequence_type='list') - #return set([FakeSequence(self._evaluator, [AlreadyEvaluated(items)], 'tuple')]) - - @register_builtin_method('items', type='dict') - def _imitate_items(self): - items = [set([FakeSequence(self._evaluator, (k, v), 'tuple')]) - for k, v in self._items()] - - return create_evaluated_sequence_set(self._evaluator, *items, sequence_type='list') - - -class ListComprehension(Comprehension, ArrayMixin): - type = 'list' - - def py__getitem__(self, index): - all_types = list(self.py__iter__()) - result = all_types[index] - if isinstance(index, slice): - return create_evaluated_sequence_set( - self._evaluator, - unite(result), - sequence_type='list' - ) - return result - - -class SetComprehension(Comprehension, ArrayMixin): - type = 'set' - - -@has_builtin_methods -class DictComprehension(Comprehension, ArrayMixin): - type = 'dict' - - def _get_comp_for(self): - return self._get_comprehension().children[3] - - def py__iter__(self): - for keys, values in self._iterate(): - yield keys - - def py__getitem__(self, index): - for keys, values in self._iterate(): - for k in keys: - if isinstance(k, compiled.CompiledObject): - if k.obj == index: - return values - return self.dict_values() - - def dict_values(self): - return 
unite(values for keys, values in self._iterate()) - - @register_builtin_method('items', type='dict') - def _imitate_items(self): - items = set(FakeSequence(self._evaluator, - (AlreadyEvaluated(keys), AlreadyEvaluated(values)), 'tuple') - for keys, values in self._iterate()) - - return create_evaluated_sequence_set(self._evaluator, items, sequence_type='list') - - -class GeneratorComprehension(Comprehension, GeneratorMixin): - pass - - -class Array(IterableWrapper, ArrayMixin): - mapping = {'(': 'tuple', - '[': 'list', - '{': 'dict'} - - def __init__(self, evaluator, atom): - self._evaluator = evaluator - self.atom = atom - self.type = Array.mapping[atom.children[0]] - """The builtin name of the array (list, set, tuple or dict).""" - - c = self.atom.children - array_node = c[1] - if self.type == 'dict' and array_node != '}' \ - and (not hasattr(array_node, 'children') - or ':' not in array_node.children): - self.type = 'set' - - @property - def name(self): - return helpers.FakeName(self.type, parent=self) - - def py__getitem__(self, index): - """Here the index is an int/str. Raises IndexError/KeyError.""" - if self.type == 'dict': - for key, value in self._items(): - for k in self._evaluator.eval_element(key): - if isinstance(k, compiled.CompiledObject) \ - and index == k.obj: - return self._evaluator.eval_element(value) - raise KeyError('No key found in dictionary %s.' % self) - - # Can raise an IndexError - if isinstance(index, slice): - return set([self]) - else: - return self._evaluator.eval_element(self._items()[index]) - - def __getattr__(self, name): - if name not in ['start_pos', 'get_only_subelement', 'parent', - 'get_parent_until', 'items']: - raise AttributeError('Strange access on %s: %s.' % (self, name)) - return getattr(self.atom, name) - - # @memoize_default() - def py__iter__(self): - """ - While values returns the possible values for any array field, this - function returns the value for a certain index. - """ - if self.type == 'dict': - # Get keys. - types = set() - for k, _ in self._items(): - types |= self._evaluator.eval_element(k) - # We don't know which dict index comes first, therefore always - # yield all the types. - for _ in types: - yield types - else: - for value in self._items(): - yield self._evaluator.eval_element(value) - - additions = check_array_additions(self._evaluator, self) - if additions: - yield additions - - def _values(self): - """Returns a list of a list of node.""" - if self.type == 'dict': - return unite(v for k, v in self._items()) - else: - return self._items() - - def _items(self): - c = self.atom.children - array_node = c[1] - if array_node in (']', '}', ')'): - return [] # Direct closing bracket, doesn't contain items. - - if tree.is_node(array_node, 'testlist_comp'): - return array_node.children[::2] - elif tree.is_node(array_node, 'dictorsetmaker'): - kv = [] - iterator = iter(array_node.children) - for key in iterator: - op = next(iterator, None) - if op is None or op == ',': - kv.append(key) # A set. - else: - assert op == ':' # A dict. - kv.append((key, next(iterator))) - next(iterator, None) # Possible comma. 
- return kv - else: - return [array_node] - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.atom) - - -class _FakeArray(Array): - def __init__(self, evaluator, container, type): - self.type = type - self._evaluator = evaluator - self.atom = container - - -class ImplicitTuple(_FakeArray): - def __init__(self, evaluator, testlist): - super(ImplicitTuple, self).__init__(evaluator, testlist, 'tuple') - self._testlist = testlist - - def _items(self): - return self._testlist.children[::2] - - -class FakeSequence(_FakeArray): - def __init__(self, evaluator, sequence_values, type): - """ - type should be one of "tuple", "list" - """ - super(FakeSequence, self).__init__(evaluator, sequence_values, type) - self._sequence_values = sequence_values - - def _items(self): - return self._sequence_values - - -def create_evaluated_sequence_set(evaluator, *types_order, **kwargs): - """ - ``sequence_type`` is a named argument, that doesn't work in Python2. For backwards - compatibility reasons, we're now using kwargs. - """ - sequence_type = kwargs.pop('sequence_type') - assert not kwargs - - sets = tuple(AlreadyEvaluated(types) for types in types_order) - return set([FakeSequence(evaluator, sets, sequence_type)]) - - -class AlreadyEvaluated(frozenset): - """A simple container to add already evaluated objects to an array.""" - def get_code(self, normalized=False): - # For debugging purposes. - return str(self) - - -class MergedNodes(frozenset): - pass - - -class FakeDict(_FakeArray): - def __init__(self, evaluator, dct): - super(FakeDict, self).__init__(evaluator, dct, 'dict') - self._dct = dct - - def py__iter__(self): - yield set(compiled.create(self._evaluator, key) for key in self._dct) - - def py__getitem__(self, index): - return unite(self._evaluator.eval_element(v) for v in self._dct[index]) - - def _items(self): - for key, values in self._dct.items(): - # TODO this is not proper. The values could be multiple values?! - yield key, values[0] - - -class MergedArray(_FakeArray): - def __init__(self, evaluator, arrays): - super(MergedArray, self).__init__(evaluator, arrays, arrays[-1].type) - self._arrays = arrays - - def py__iter__(self): - for array in self._arrays: - for types in array.py__iter__(): - yield types - - def py__getitem__(self, index): - return unite(self.py__iter__()) - - def _items(self): - for array in self._arrays: - for a in array._items(): - yield a - - def __len__(self): - return sum(len(a) for a in self._arrays) - - -def unpack_tuple_to_dict(evaluator, types, exprlist): - """ - Unpacking tuple assignments in for statements and expr_stmts. 
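-
- For example ``a, (b, c) = x`` maps ``a``, ``b`` and ``c`` to the types
- iterated out of ``x`` at the matching positions.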
- """ - if exprlist.type == 'name': - return {exprlist.value: types} - elif exprlist.type == 'atom' and exprlist.children[0] in '([': - return unpack_tuple_to_dict(evaluator, types, exprlist.children[1]) - elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist', - 'testlist_star_expr'): - dct = {} - parts = iter(exprlist.children[::2]) - n = 0 - for iter_types in py__iter__(evaluator, types, exprlist): - n += 1 - try: - part = next(parts) - except StopIteration: - analysis.add(evaluator, 'value-error-too-many-values', part, - message="ValueError: too many values to unpack (expected %s)" % n) - else: - dct.update(unpack_tuple_to_dict(evaluator, iter_types, part)) - has_parts = next(parts, None) - if types and has_parts is not None: - analysis.add(evaluator, 'value-error-too-few-values', has_parts, - message="ValueError: need more than %s values to unpack" % n) - return dct - elif exprlist.type == 'power' or exprlist.type == 'atom_expr': - # Something like ``arr[x], var = ...``. - # This is something that is not yet supported, would also be difficult - # to write into a dict. - return {} - elif exprlist.type == 'star_expr': # `a, *b, c = x` type unpackings - # Currently we're not supporting them. - return {} - raise NotImplementedError - - -def py__iter__(evaluator, types, node=None): - debug.dbg('py__iter__') - type_iters = [] - for typ in types: - try: - iter_method = typ.py__iter__ - except AttributeError: - if node is not None: - analysis.add(evaluator, 'type-error-not-iterable', node, - message="TypeError: '%s' object is not iterable" % typ) - else: - type_iters.append(iter_method()) - #for result in iter_method(): - #yield result - - for t in zip_longest(*type_iters, fillvalue=set()): - yield unite(t) - - -def py__iter__types(evaluator, types, node=None): - """ - Calls `py__iter__`, but ignores the ordering in the end and just returns - all types that it contains. - """ - return unite(py__iter__(evaluator, types, node)) - - -def py__getitem__(evaluator, types, trailer): - from jedi.evaluate.representation import Class - result = set() - - trailer_op, node, trailer_cl = trailer.children - assert trailer_op == "[" - assert trailer_cl == "]" - - # special case: PEP0484 typing module, see - # https://github.com/davidhalter/jedi/issues/663 - for typ in list(types): - if isinstance(typ, Class): - typing_module_types = \ - pep0484.get_types_for_typing_module(evaluator, typ, node) - if typing_module_types is not None: - types.remove(typ) - result |= typing_module_types - - if not types: - # all consumed by special cases - return result - - for index in create_index_types(evaluator, node): - if isinstance(index, (compiled.CompiledObject, Slice)): - index = index.obj - - if type(index) not in (float, int, str, unicode, slice): - # If the index is not clearly defined, we have to get all the - # possiblities. - for typ in list(types): - if isinstance(typ, Array) and typ.type == 'dict': - types.remove(typ) - result |= typ.dict_values() - return result | py__iter__types(evaluator, types) - - for typ in types: - # The actual getitem call. - try: - getitem = typ.py__getitem__ - except AttributeError: - analysis.add(evaluator, 'type-error-not-subscriptable', trailer_op, - message="TypeError: '%s' object is not subscriptable" % typ) - else: - try: - result |= getitem(index) - except IndexError: - result |= py__iter__types(evaluator, set([typ])) - except KeyError: - # Must be a dict. Lists don't raise KeyErrors. 
- result |= typ.dict_values() - return result - - -def check_array_additions(evaluator, array): - """ Just a mapper function for the internal _check_array_additions """ - if array.type not in ('list', 'set'): - # TODO also check for dict updates - return set() - - is_list = array.type == 'list' - try: - current_module = array.atom.get_parent_until() - except AttributeError: - # If there's no get_parent_until, it's a FakeSequence or another Fake - # type. Those fake types are used inside Jedi's engine. No values may - # be added to those after their creation. - return set() - return _check_array_additions(evaluator, array, current_module, is_list) - - -@memoize_default(default=set(), evaluator_is_first_arg=True) -@debug.increase_indent -def _check_array_additions(evaluator, compare_array, module, is_list): - """ - Checks if a `Array` has "add" (append, insert, extend) statements: - - >>> a = [""] - >>> a.append(1) - """ - debug.dbg('Dynamic array search for %s' % compare_array, color='MAGENTA') - if not settings.dynamic_array_additions or isinstance(module, compiled.CompiledObject): - debug.dbg('Dynamic array search aborted.', color='MAGENTA') - return set() - - def check_additions(arglist, add_name): - params = list(param.Arguments(evaluator, arglist).unpack()) - result = set() - if add_name in ['insert']: - params = params[1:] - if add_name in ['append', 'add', 'insert']: - for key, nodes in params: - result |= unite(evaluator.eval_element(node) for node in nodes) - elif add_name in ['extend', 'update']: - for key, nodes in params: - for node in nodes: - types = evaluator.eval_element(node) - result |= py__iter__types(evaluator, types, node) - return result - - from jedi.evaluate import representation as er, param - - def get_execution_parent(element): - """ Used to get an Instance/FunctionExecution parent """ - if isinstance(element, Array): - node = element.atom - else: - # Is an Instance with an - # Arguments([AlreadyEvaluated([_ArrayInstance])]) inside - # Yeah... I know... It's complicated ;-) - node = list(element.var_args.argument_node[0])[0].var_args.trailer - if isinstance(node, er.InstanceElement) or node is None: - return node - return node.get_parent_until(er.FunctionExecution) - - temp_param_add, settings.dynamic_params_for_other_modules = \ - settings.dynamic_params_for_other_modules, False - - search_names = ['append', 'extend', 'insert'] if is_list else ['add', 'update'] - comp_arr_parent = get_execution_parent(compare_array) - - added_types = set() - for add_name in search_names: - try: - possible_names = module.used_names[add_name] - except KeyError: - continue - else: - for name in possible_names: - # Check if the original scope is an execution. If it is, one - # can search for the same statement, that is in the module - # dict. Executions are somewhat special in jedi, since they - # literally copy the contents of a function. - if isinstance(comp_arr_parent, er.FunctionExecution): - if comp_arr_parent.start_pos < name.start_pos < comp_arr_parent.end_pos: - name = comp_arr_parent.name_for_position(name.start_pos) - else: - # Don't check definitions that are not defined in the - # same function. This is not "proper" anyway. It also - # improves Jedi's speed for array lookups, since we - # don't have to check the whole source tree anymore. 
- continue - trailer = name.parent - power = trailer.parent - trailer_pos = power.children.index(trailer) - try: - execution_trailer = power.children[trailer_pos + 1] - except IndexError: - continue - else: - if execution_trailer.type != 'trailer' \ - or execution_trailer.children[0] != '(' \ - or execution_trailer.children[1] == ')': - continue - power = helpers.call_of_leaf(name, cut_own_trailer=True) - # InstanceElements are special, because they don't get copied, - # but have this wrapper around them. - if isinstance(comp_arr_parent, er.InstanceElement): - power = er.get_instance_el(evaluator, comp_arr_parent.instance, power) - - if evaluator.recursion_detector.push_stmt(power): - # Check for recursion. Possible by using 'extend' in - # combination with function calls. - continue - try: - if compare_array in evaluator.eval_element(power): - # The arrays match. Now add the results - added_types |= check_additions(execution_trailer.children[1], add_name) - finally: - evaluator.recursion_detector.pop_stmt() - # reset settings - settings.dynamic_params_for_other_modules = temp_param_add - debug.dbg('Dynamic array result %s' % added_types, color='MAGENTA') - return added_types - - -def check_array_instances(evaluator, instance): - """Used for set() and list() instances.""" - if not settings.dynamic_array_additions: - return instance.var_args - - ai = _ArrayInstance(evaluator, instance) - from jedi.evaluate import param - return param.Arguments(evaluator, [AlreadyEvaluated([ai])]) - - -class _ArrayInstance(IterableWrapper): - """ - Used for the usage of set() and list(). - This is definitely a hack, but a good one :-) - It makes it possible to use set/list conversions. - - In contrast to Array, ListComprehension and all other iterable types, this - is something that is only used inside `evaluate/compiled/fake/builtins.py` - and therefore doesn't need `names_dicts`, `py__bool__` and so on, because - we don't use these operations in `builtins.py`. - """ - def __init__(self, evaluator, instance): - self._evaluator = evaluator - self.instance = instance - self.var_args = instance.var_args - - def py__iter__(self): - try: - _, first_nodes = next(self.var_args.unpack()) - except StopIteration: - types = set() - else: - types = unite(self._evaluator.eval_element(node) for node in first_nodes) - for types in py__iter__(self._evaluator, types, first_nodes[0]): - yield types - - module = self.var_args.get_parent_until() - if module is None: - return - is_list = str(self.instance.name) == 'list' - additions = _check_array_additions(self._evaluator, self.instance, module, is_list) - if additions: - yield additions - - -class Slice(object): - def __init__(self, evaluator, start, stop, step): - self._evaluator = evaluator - # all of them are either a Precedence or None. - self._start = start - self._stop = stop - self._step = step - - @property - def obj(self): - """ - Imitate CompiledObject.obj behavior and return a ``builtin.slice()`` - object. - """ - def get(element): - if element is None: - return None - - result = self._evaluator.eval_element(element) - if len(result) != 1: - # For simplicity, we want slices to be clear defined with just - # one type. Otherwise we will return an empty slice object. 
- raise IndexError - try: - return list(result)[0].obj - except AttributeError: - return None - - try: - return slice(get(self._start), get(self._stop), get(self._step)) - except IndexError: - return slice(None, None, None) - - -def create_index_types(evaluator, index): - """ - Handles slices in subscript nodes. - """ - if index == ':': - # Like array[:] - return set([Slice(evaluator, None, None, None)]) - elif tree.is_node(index, 'subscript'): # subscript is a slice operation. - # Like array[:3] - result = [] - for el in index.children: - if el == ':': - if not result: - result.append(None) - elif tree.is_node(el, 'sliceop'): - if len(el.children) == 2: - result.append(el.children[1]) - else: - result.append(el) - result += [None] * (3 - len(result)) - - return set([Slice(evaluator, *result)]) - - # No slices - return evaluator.eval_element(index) diff --git a/pythonFiles/preview/jedi/evaluate/param.py b/pythonFiles/preview/jedi/evaluate/param.py deleted file mode 100644 index 997e07992b24..000000000000 --- a/pythonFiles/preview/jedi/evaluate/param.py +++ /dev/null @@ -1,438 +0,0 @@ -from collections import defaultdict -from itertools import chain - -from jedi._compatibility import unicode, zip_longest -from jedi import debug -from jedi import common -from jedi.parser import tree -from jedi.evaluate import iterable -from jedi.evaluate import analysis -from jedi.evaluate import precedence -from jedi.evaluate.helpers import FakeName -from jedi.cache import underscore_memoization - - -def try_iter_content(types, depth=0): - """Helper method for static analysis.""" - if depth > 10: - # It's possible that a loop has references on itself (especially with - # CompiledObject). Therefore don't loop infinitely. - return - - for typ in types: - try: - f = typ.py__iter__ - except AttributeError: - pass - else: - for iter_types in f(): - try_iter_content(iter_types, depth + 1) - - -class Arguments(tree.Base): - def __init__(self, evaluator, argument_node, trailer=None): - """ - The argument_node is either a parser node or a list of evaluated - objects. Those evaluated objects may be lists of evaluated objects - themselves (one list for the first argument, one for the second, etc). - - :param argument_node: May be an argument_node or a list of nodes. - """ - self.argument_node = argument_node - self._evaluator = evaluator - self.trailer = trailer # Can be None, e.g. in a class definition. 
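-
- # ``_split`` below tags each argument value with its star count: for
- # ``f(a, *args, **kwargs)`` it yields (0, a), (1, args) and (2, kwargs).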
- - def _split(self): - if isinstance(self.argument_node, (tuple, list)): - for el in self.argument_node: - yield 0, el - else: - if not (tree.is_node(self.argument_node, 'arglist') or ( - # in python 3.5 **arg is an argument, not arglist - (tree.is_node(self.argument_node, 'argument') and - self.argument_node.children[0] in ('*', '**')))): - yield 0, self.argument_node - return - - iterator = iter(self.argument_node.children) - for child in iterator: - if child == ',': - continue - elif child in ('*', '**'): - yield len(child.value), next(iterator) - elif tree.is_node(child, 'argument') and \ - child.children[0] in ('*', '**'): - assert len(child.children) == 2 - yield len(child.children[0].value), child.children[1] - else: - yield 0, child - - def get_parent_until(self, *args, **kwargs): - if self.trailer is None: - try: - element = self.argument_node[0] - from jedi.evaluate.iterable import AlreadyEvaluated - if isinstance(element, AlreadyEvaluated): - element = list(self._evaluator.eval_element(element))[0] - except IndexError: - return None - else: - return element.get_parent_until(*args, **kwargs) - else: - return self.trailer.get_parent_until(*args, **kwargs) - - def as_tuple(self): - for stars, argument in self._split(): - if tree.is_node(argument, 'argument'): - argument, default = argument.children[::2] - else: - default = None - yield argument, default, stars - - def unpack(self, func=None): - named_args = [] - for stars, el in self._split(): - if stars == 1: - arrays = self._evaluator.eval_element(el) - iterators = [_iterate_star_args(self._evaluator, a, el, func) - for a in arrays] - iterators = list(iterators) - for values in list(zip_longest(*iterators)): - yield None, [v for v in values if v is not None] - elif stars == 2: - arrays = self._evaluator.eval_element(el) - dicts = [_star_star_dict(self._evaluator, a, el, func) - for a in arrays] - for dct in dicts: - for key, values in dct.items(): - yield key, values - else: - if tree.is_node(el, 'argument'): - c = el.children - if len(c) == 3: # Keyword argument. - named_args.append((c[0].value, (c[2],))) - else: # Generator comprehension. - # Include the brackets with the parent. - comp = iterable.GeneratorComprehension( - self._evaluator, self.argument_node.parent) - yield None, (iterable.AlreadyEvaluated([comp]),) - elif isinstance(el, (list, tuple)): - yield None, el - else: - yield None, (el,) - - # Reordering var_args is necessary, because star args sometimes appear - # after named argument, but in the actual order it's prepended. 
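- # (e.g. in ``f(x=1, *args)`` the positional values from ``*args`` are
- # matched before the keyword ``x`` is applied.)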
- for key_arg in named_args: - yield key_arg - - def _reorder_var_args(var_args): - named_index = None - new_args = [] - for i, stmt in enumerate(var_args): - if isinstance(stmt, tree.ExprStmt): - if named_index is None and stmt.assignment_details: - named_index = i - - if named_index is not None: - expression_list = stmt.expression_list() - if expression_list and expression_list[0] == '*': - new_args.insert(named_index, stmt) - named_index += 1 - continue - - new_args.append(stmt) - return new_args - - def eval_argument_clinic(self, arguments): - """Uses a list with argument clinic information (see PEP 436).""" - iterator = self.unpack() - for i, (name, optional, allow_kwargs) in enumerate(arguments): - key, va_values = next(iterator, (None, [])) - if key is not None: - raise NotImplementedError - if not va_values and not optional: - debug.warning('TypeError: %s expected at least %s arguments, got %s', - name, len(arguments), i) - raise ValueError - values = set(chain.from_iterable(self._evaluator.eval_element(el) - for el in va_values)) - if not values and not optional: - # For the stdlib we always want values. If we don't get them, - # that's ok, maybe something is too hard to resolve, however, - # we will not proceed with the evaluation of that function. - debug.warning('argument_clinic "%s" not resolvable.', name) - raise ValueError - yield values - - def scope(self): - # Returns the scope in which the arguments are used. - return (self.trailer or self.argument_node).get_parent_until(tree.IsScope) - - def eval_args(self): - # TODO this method doesn't work with named args and a lot of other - # things. Use unpack. - return [self._evaluator.eval_element(el) for stars, el in self._split()] - - def __repr__(self): - return '<%s: %s>' % (type(self).__name__, self.argument_node) - - def get_calling_var_args(self): - if tree.is_node(self.argument_node, 'arglist', 'argument') \ - or self.argument_node == () and self.trailer is not None: - return _get_calling_var_args(self._evaluator, self) - else: - return None - - def eval_all(self, func=None): - """ - Evaluates all arguments as a support for static analysis - (normally Jedi). - """ - for key, element_values in self.unpack(): - for element in element_values: - types = self._evaluator.eval_element(element) - try_iter_content(types) - - -class ExecutedParam(tree.Param): - """Fake a param and give it values.""" - def __init__(self, original_param, var_args, values): - self._original_param = original_param - self.var_args = var_args - self._values = values - - def eval(self, evaluator): - types = set() - for v in self._values: - types |= evaluator.eval_element(v) - return types - - @property - def position_nr(self): - # Need to use the original logic here, because it uses the parent. 
- return self._original_param.position_nr - - @property - @underscore_memoization - def name(self): - return FakeName(str(self._original_param.name), self, self.start_pos) - - def __getattr__(self, name): - return getattr(self._original_param, name) - - -def _get_calling_var_args(evaluator, var_args): - old_var_args = None - while var_args != old_var_args: - old_var_args = var_args - for name, default, stars in reversed(list(var_args.as_tuple())): - if not stars or not isinstance(name, tree.Name): - continue - - names = evaluator.goto(name) - if len(names) != 1: - break - param = names[0].get_definition() - if not isinstance(param, ExecutedParam): - if isinstance(param, tree.Param): - # There is no calling var_args in this case - there's just - # a param without any input. - return None - break - # We never want var_args to be a tuple. This should be enough for - # now, we can change it later, if we need to. - if isinstance(param.var_args, Arguments): - var_args = param.var_args - return var_args.argument_node or var_args.trailer - - -def get_params(evaluator, func, var_args): - param_names = [] - param_dict = {} - for param in func.params: - param_dict[str(param.name)] = param - unpacked_va = list(var_args.unpack(func)) - from jedi.evaluate.representation import InstanceElement - if isinstance(func, InstanceElement): - # Include self at this place. - unpacked_va.insert(0, (None, [iterable.AlreadyEvaluated([func.instance])])) - var_arg_iterator = common.PushBackIterator(iter(unpacked_va)) - - non_matching_keys = defaultdict(lambda: []) - keys_used = {} - keys_only = False - had_multiple_value_error = False - for param in func.params: - # The value and key can both be null. There, the defaults apply. - # args / kwargs will just be empty arrays / dicts, respectively. - # Wrong value count is just ignored. If you try to test cases that are - # not allowed in Python, Jedi will maybe not show any completions. - default = [] if param.default is None else [param.default] - key, va_values = next(var_arg_iterator, (None, default)) - while key is not None: - keys_only = True - k = unicode(key) - try: - key_param = param_dict[unicode(key)] - except KeyError: - non_matching_keys[key] = va_values - else: - param_names.append(ExecutedParam(key_param, var_args, va_values).name) - - if k in keys_used: - had_multiple_value_error = True - m = ("TypeError: %s() got multiple values for keyword argument '%s'." - % (func.name, k)) - calling_va = _get_calling_var_args(evaluator, var_args) - if calling_va is not None: - analysis.add(evaluator, 'type-error-multiple-values', - calling_va, message=m) - else: - try: - keys_used[k] = param_names[-1] - except IndexError: - # TODO this is wrong stupid and whatever. - pass - key, va_values = next(var_arg_iterator, (None, ())) - - values = [] - if param.stars == 1: - # *args param - lst_values = [iterable.MergedNodes(va_values)] if va_values else [] - for key, va_values in var_arg_iterator: - # Iterate until a key argument is found. 
- if key: - var_arg_iterator.push_back((key, va_values)) - break - if va_values: - lst_values.append(iterable.MergedNodes(va_values)) - seq = iterable.FakeSequence(evaluator, lst_values, 'tuple') - values = [iterable.AlreadyEvaluated([seq])] - elif param.stars == 2: - # **kwargs param - dct = iterable.FakeDict(evaluator, dict(non_matching_keys)) - values = [iterable.AlreadyEvaluated([dct])] - non_matching_keys = {} - else: - # normal param - if va_values: - values = va_values - else: - # No value: Return an empty container - values = [] - if not keys_only: - calling_va = var_args.get_calling_var_args() - if calling_va is not None: - m = _error_argument_count(func, len(unpacked_va)) - analysis.add(evaluator, 'type-error-too-few-arguments', - calling_va, message=m) - - # Now add to result if it's not one of the previously covered cases. - if (not keys_only or param.stars == 2): - param_names.append(ExecutedParam(param, var_args, values).name) - keys_used[unicode(param.name)] = param_names[-1] - - if keys_only: - # All arguments should be handed over to the next function. It's not - # about the values inside, it's about the names. Jedi needs to now that - # there's nothing to find for certain names. - for k in set(param_dict) - set(keys_used): - param = param_dict[k] - values = [] if param.default is None else [param.default] - param_names.append(ExecutedParam(param, var_args, values).name) - - if not (non_matching_keys or had_multiple_value_error - or param.stars or param.default): - # add a warning only if there's not another one. - calling_va = _get_calling_var_args(evaluator, var_args) - if calling_va is not None: - m = _error_argument_count(func, len(unpacked_va)) - analysis.add(evaluator, 'type-error-too-few-arguments', - calling_va, message=m) - - for key, va_values in non_matching_keys.items(): - m = "TypeError: %s() got an unexpected keyword argument '%s'." \ - % (func.name, key) - for value in va_values: - analysis.add(evaluator, 'type-error-keyword-argument', value.parent, message=m) - - remaining_params = list(var_arg_iterator) - if remaining_params: - m = _error_argument_count(func, len(unpacked_va)) - # Just report an error for the first param that is not needed (like - # cPython). - first_key, first_values = remaining_params[0] - for v in first_values: - if first_key is not None: - # Is a keyword argument, return the whole thing instead of just - # the value node. - v = v.parent - try: - non_kw_param = keys_used[first_key] - except KeyError: - pass - else: - origin_args = non_kw_param.parent.var_args.argument_node - # TODO calculate the var_args tree and check if it's in - # the tree (if not continue). - # print('\t\tnonkw', non_kw_param.parent.var_args.argument_node, ) - if origin_args not in [f.parent.parent for f in first_values]: - continue - analysis.add(evaluator, 'type-error-too-many-arguments', - v, message=m) - return param_names - - -def _iterate_star_args(evaluator, array, input_node, func=None): - from jedi.evaluate.representation import Instance - if isinstance(array, iterable.Array): - # TODO ._items is not the call we want here. Replace in the future. 
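# An illustrative aside, not part of jedi: the iteration here mirrors
# how the interpreter itself unpacks a *-argument, i.e. plain iteration
# over the sequence (Python 3.5+ for multiple unpackings):
#
#     def f(*args):
#         return args
#
#     print(f(*[1, 2], *'ab'))    # (1, 2, 'a', 'b')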
- for node in array._items(): - yield node - elif isinstance(array, iterable.Generator): - for types in array.py__iter__(): - yield iterable.AlreadyEvaluated(types) - elif isinstance(array, Instance) and array.name.get_code() == 'tuple': - debug.warning('Ignored a tuple *args input %s' % array) - else: - if func is not None: - m = "TypeError: %s() argument after * must be a sequence, not %s" \ - % (func.name.value, array) - analysis.add(evaluator, 'type-error-star', input_node, message=m) - - -def _star_star_dict(evaluator, array, input_node, func): - dct = defaultdict(lambda: []) - from jedi.evaluate.representation import Instance - if isinstance(array, Instance) and array.name.get_code() == 'dict': - # For now ignore this case. In the future add proper iterators and just - # make one call without crazy isinstance checks. - return {} - - if isinstance(array, iterable.FakeDict): - return array._dct - elif isinstance(array, iterable.Array) and array.type == 'dict': - # TODO bad call to non-public API - for key_node, value in array._items(): - for key in evaluator.eval_element(key_node): - if precedence.is_string(key): - dct[key.obj].append(value) - - else: - if func is not None: - m = "TypeError: %s argument after ** must be a mapping, not %s" \ - % (func.name.value, array) - analysis.add(evaluator, 'type-error-star-star', input_node, message=m) - return dict(dct) - - -def _error_argument_count(func, actual_count): - default_arguments = sum(1 for p in func.params if p.default or p.stars) - - if default_arguments == 0: - before = 'exactly ' - else: - before = 'from %s to ' % (len(func.params) - default_arguments) - return ('TypeError: %s() takes %s%s arguments (%s given).' - % (func.name, before, len(func.params), actual_count)) diff --git a/pythonFiles/preview/jedi/evaluate/pep0484.py b/pythonFiles/preview/jedi/evaluate/pep0484.py deleted file mode 100644 index 2387fe64a1e1..000000000000 --- a/pythonFiles/preview/jedi/evaluate/pep0484.py +++ /dev/null @@ -1,195 +0,0 @@ -""" -PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints -through function annotations. There is a strong suggestion in this document -that only the type of type hinting defined in PEP0484 should be allowed -as annotations in future python versions. - -The (initial / probably incomplete) implementation todo list for pep-0484: -v Function parameter annotations with builtin/custom type classes -v Function returntype annotations with builtin/custom type classes -v Function parameter annotations with strings (forward reference) -v Function return type annotations with strings (forward reference) -v Local variable type hints -v Assigned types: `Url = str\ndef get(url:Url) -> str:` -v Type hints in `with` statements -x Stub files support -x support `@no_type_check` and `@no_type_check_decorator` -x support for typing.cast() operator -x support for type hint comments for functions, `# type: (int, str) -> int`. 
- See comment from Guido https://github.com/davidhalter/jedi/issues/662 -""" - -import itertools - -import os -from jedi.parser import \ - Parser, load_grammar, ParseError, ParserWithRecovery, tree -from jedi.evaluate.cache import memoize_default -from jedi.common import unite -from jedi.evaluate import compiled -from jedi import debug -from jedi import _compatibility -import re - - -def _evaluate_for_annotation(evaluator, annotation, index=None): - """ - Evaluates a string-node, looking for an annotation - If index is not None, the annotation is expected to be a tuple - and we're interested in that index - """ - if annotation is not None: - definitions = evaluator.eval_element( - _fix_forward_reference(evaluator, annotation)) - if index is not None: - definitions = list(itertools.chain.from_iterable( - definition.py__getitem__(index) for definition in definitions - if definition.type == 'tuple' and - len(list(definition.py__iter__())) >= index)) - return list(itertools.chain.from_iterable( - evaluator.execute(d) for d in definitions)) - else: - return [] - - -def _fix_forward_reference(evaluator, node): - evaled_nodes = evaluator.eval_element(node) - if len(evaled_nodes) != 1: - debug.warning("Eval'ed typing index %s should lead to 1 object, " - " not %s" % (node, evaled_nodes)) - return node - evaled_node = list(evaled_nodes)[0] - if isinstance(evaled_node, compiled.CompiledObject) and \ - isinstance(evaled_node.obj, str): - try: - p = Parser(load_grammar(), _compatibility.unicode(evaled_node.obj), - start_symbol='eval_input') - newnode = p.get_parsed_node() - except ParseError: - debug.warning('Annotation not parsed: %s' % evaled_node.obj) - return node - else: - module = node.get_parent_until() - p.position_modifier.line = module.end_pos[0] - newnode.parent = module - return newnode - else: - return node - - -@memoize_default(None, evaluator_is_first_arg=True) -def follow_param(evaluator, param): - annotation = param.annotation() - return _evaluate_for_annotation(evaluator, annotation) - - -@memoize_default(None, evaluator_is_first_arg=True) -def find_return_types(evaluator, func): - annotation = func.py__annotations__().get("return", None) - return _evaluate_for_annotation(evaluator, annotation) - - -_typing_module = None - - -def _get_typing_replacement_module(): - """ - The idea is to return our jedi replacement for the PEP-0484 typing module - as discussed at https://github.com/davidhalter/jedi/issues/663 - """ - global _typing_module - if _typing_module is None: - typing_path = \ - os.path.abspath(os.path.join(__file__, "../jedi_typing.py")) - with open(typing_path) as f: - code = _compatibility.unicode(f.read()) - p = ParserWithRecovery(load_grammar(), code) - _typing_module = p.module - return _typing_module - - -def get_types_for_typing_module(evaluator, typ, node): - from jedi.evaluate.iterable import FakeSequence - if not typ.base.get_parent_until().name.value == "typing": - return None - # we assume that any class using [] in a module called - # "typing" with a name for which we have a replacement - # should be replaced by that class. 
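# An illustrative aside, not part of jedi: the forward-reference
# handling in _fix_forward_reference above corresponds to what the
# runtime typing module does when resolving string annotations:
#
#     from typing import get_type_hints
#
#     class Url(str):
#         pass
#
#     def get(url: 'Url') -> str:
#         return url
#
#     print(get_type_hints(get))
#     # {'url': <class 'Url'>, 'return': <class 'str'>}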
This is not 100% - # airtight but I don't have a better idea to check that it's - # actually the PEP-0484 typing module and not some other - if tree.is_node(node, "subscriptlist"): - nodes = node.children[::2] # skip the commas - else: - nodes = [node] - del node - - nodes = [_fix_forward_reference(evaluator, node) for node in nodes] - - # hacked in Union and Optional, since it's hard to do nicely in parsed code - if typ.name.value == "Union": - return unite(evaluator.eval_element(node) for node in nodes) - if typ.name.value == "Optional": - return evaluator.eval_element(nodes[0]) - - typing = _get_typing_replacement_module() - factories = evaluator.find_types(typing, "factory") - assert len(factories) == 1 - factory = list(factories)[0] - assert factory - function_body_nodes = factory.children[4].children - valid_classnames = set(child.name.value - for child in function_body_nodes - if isinstance(child, tree.Class)) - if typ.name.value not in valid_classnames: - return None - compiled_classname = compiled.create(evaluator, typ.name.value) - - args = FakeSequence(evaluator, nodes, "tuple") - - result = evaluator.execute_evaluated(factory, compiled_classname, args) - return result - - -def find_type_from_comment_hint_for(evaluator, node, name): - return \ - _find_type_from_comment_hint(evaluator, node, node.children[1], name) - - -def find_type_from_comment_hint_with(evaluator, node, name): - assert len(node.children[1].children) == 3, \ - "Can only be here when children[1] is 'foo() as f'" - return _find_type_from_comment_hint( - evaluator, node, node.children[1].children[2], name) - - -def find_type_from_comment_hint_assign(evaluator, node, name): - return \ - _find_type_from_comment_hint(evaluator, node, node.children[0], name) - - -def _find_type_from_comment_hint(evaluator, node, varlist, name): - index = None - if varlist.type in ("testlist_star_expr", "exprlist"): - # something like "a, b = 1, 2" - index = 0 - for child in varlist.children: - if child == name: - break - if child.type == "operator": - continue - index += 1 - else: - return [] - - comment = node.get_following_comment_same_line() - if comment is None: - return [] - match = re.match(r"^#\s*type:\s*([^#]*)", comment) - if not match: - return [] - annotation = tree.String( - tree.zero_position_modifier, - repr(str(match.group(1).strip())), - node.start_pos) - annotation.parent = node.parent - return _evaluate_for_annotation(evaluator, annotation, index) diff --git a/pythonFiles/preview/jedi/evaluate/precedence.py b/pythonFiles/preview/jedi/evaluate/precedence.py deleted file mode 100644 index 5225aa6818ff..000000000000 --- a/pythonFiles/preview/jedi/evaluate/precedence.py +++ /dev/null @@ -1,178 +0,0 @@ -""" -Handles operator precedence. -""" -import operator - -from jedi._compatibility import unicode -from jedi.parser import tree -from jedi import debug -from jedi.evaluate.compiled import CompiledObject, create, builtin_from_name -from jedi.evaluate import analysis - -# Maps Python syntax to the operator module. -COMPARISON_OPERATORS = { - '==': operator.eq, - '!=': operator.ne, - 'is': operator.is_, - 'is not': operator.is_not, - '<': operator.lt, - '<=': operator.le, - '>': operator.gt, - '>=': operator.ge, -} - - -def literals_to_types(evaluator, result): - # Changes literals ('a', 1, 1.0, etc) to its type instances (str(), - # int(), float(), etc). - new_result = set() - for typ in result: - if is_literal(typ): - # Literals are only valid as long as the operations are - # correct. Otherwise add a value-free instance. 
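# An illustrative aside, not part of jedi: "value-free instance" means
# the literal is swapped for a fresh instance of its builtin class.
# A hypothetical runtime equivalent (Python 3):
#
#     import builtins
#
#     def value_free(literal):
#         return getattr(builtins, type(literal).__name__)()
#
#     print(value_free(3.14), repr(value_free('abc')))    # 0.0 ''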
- cls = builtin_from_name(evaluator, typ.name.value) - new_result |= evaluator.execute(cls) - else: - new_result.add(typ) - return new_result - - -def calculate_children(evaluator, children): - """ - Calculate a list of children with operators. - """ - iterator = iter(children) - types = evaluator.eval_element(next(iterator)) - for operator in iterator: - right = next(iterator) - if tree.is_node(operator, 'comp_op'): # not in / is not - operator = ' '.join(str(c.value) for c in operator.children) - - # handle lazy evaluation of and/or here. - if operator in ('and', 'or'): - left_bools = set([left.py__bool__() for left in types]) - if left_bools == set([True]): - if operator == 'and': - types = evaluator.eval_element(right) - elif left_bools == set([False]): - if operator != 'and': - types = evaluator.eval_element(right) - # Otherwise continue, because of uncertainty. - else: - types = calculate(evaluator, types, operator, - evaluator.eval_element(right)) - debug.dbg('calculate_children types %s', types) - return types - - -def calculate(evaluator, left_result, operator, right_result): - result = set() - if not left_result or not right_result: - # illegal slices e.g. cause left/right_result to be None - result = (left_result or set()) | (right_result or set()) - result = literals_to_types(evaluator, result) - else: - # I don't think there's a reasonable chance that a string - # operation is still correct, once we pass something like six - # objects. - if len(left_result) * len(right_result) > 6: - result = literals_to_types(evaluator, left_result | right_result) - else: - for left in left_result: - for right in right_result: - result |= _element_calculate(evaluator, left, operator, right) - return result - - -def factor_calculate(evaluator, types, operator): - """ - Calculates `+`, `-`, `~` and `not` prefixes. - """ - for typ in types: - if operator == '-': - if _is_number(typ): - yield create(evaluator, -typ.obj) - elif operator == 'not': - value = typ.py__bool__() - if value is None: # Uncertainty. - return - yield create(evaluator, not value) - else: - yield typ - - -def _is_number(obj): - return isinstance(obj, CompiledObject) \ - and isinstance(obj.obj, (int, float)) - - -def is_string(obj): - return isinstance(obj, CompiledObject) \ - and isinstance(obj.obj, (str, unicode)) - - -def is_literal(obj): - return _is_number(obj) or is_string(obj) - - -def _is_tuple(obj): - from jedi.evaluate import iterable - return isinstance(obj, iterable.Array) and obj.type == 'tuple' - - -def _is_list(obj): - from jedi.evaluate import iterable - return isinstance(obj, iterable.Array) and obj.type == 'list' - - -def _element_calculate(evaluator, left, operator, right): - from jedi.evaluate import iterable, representation as er - l_is_num = _is_number(left) - r_is_num = _is_number(right) - if operator == '*': - # for iterables, ignore * operations - if isinstance(left, iterable.Array) or is_string(left): - return set([left]) - elif isinstance(right, iterable.Array) or is_string(right): - return set([right]) - elif operator == '+': - if l_is_num and r_is_num or is_string(left) and is_string(right): - return set([create(evaluator, left.obj + right.obj)]) - elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right): - return set([iterable.MergedArray(evaluator, (left, right))]) - elif operator == '-': - if l_is_num and r_is_num: - return set([create(evaluator, left.obj - right.obj)]) - elif operator == '%': - # With strings and numbers the left type typically remains. 
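# An illustrative aside, not part of jedi; both the rule and the
# exception noted just below are easy to check at runtime:
#
#     print(type('x: %s' % 1))    # <class 'str'>   (left type kept)
#     print(type(1 % 2.0))        # <class 'float'> (int() % float())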
Except for - # `int() % float()`. - return set([left]) - elif operator in COMPARISON_OPERATORS: - operation = COMPARISON_OPERATORS[operator] - if isinstance(left, CompiledObject) and isinstance(right, CompiledObject): - # Possible, because the return is not an option. Just compare. - left = left.obj - right = right.obj - - try: - result = operation(left, right) - except TypeError: - # Could be True or False. - return set([create(evaluator, True), create(evaluator, False)]) - else: - return set([create(evaluator, result)]) - elif operator == 'in': - return set() - - def check(obj): - """Checks if a Jedi object is either a float or an int.""" - return isinstance(obj, er.Instance) and obj.name.get_code() in ('int', 'float') - - # Static analysis, one is a number, the other one is not. - if operator in ('+', '-') and l_is_num != r_is_num \ - and not (check(left) or check(right)): - message = "TypeError: unsupported operand type(s) for +: %s and %s" - analysis.add(evaluator, 'type-error-operation', operator, - message % (left, right)) - - return set([left, right]) diff --git a/pythonFiles/preview/jedi/evaluate/recursion.py b/pythonFiles/preview/jedi/evaluate/recursion.py deleted file mode 100644 index 9b95fad5adfd..000000000000 --- a/pythonFiles/preview/jedi/evaluate/recursion.py +++ /dev/null @@ -1,157 +0,0 @@ -""" -Recursions are the recipe of |jedi| to conquer Python code. However, someone -must stop recursions going mad. Some settings are here to make |jedi| stop at -the right time. You can read more about them :ref:`here `. - -Next to :mod:`jedi.evaluate.cache` this module also makes |jedi| not -thread-safe. Why? ``execution_recursion_decorator`` uses class variables to -count the function calls. -""" -from jedi import debug -from jedi import settings -from jedi.evaluate import iterable - - -def recursion_decorator(func): - def run(evaluator, stmt, *args, **kwargs): - rec_detect = evaluator.recursion_detector - if rec_detect.push_stmt(stmt): - return set() - else: - result = func(evaluator, stmt, *args, **kwargs) - rec_detect.pop_stmt() - return result - return run - - -class RecursionDetector(object): - """ - A decorator to detect recursions in statements. In a recursion a statement - at the same place, in the same module may not be executed two times. - """ - def __init__(self, evaluator): - self.top = None - self.current = None - self._evaluator = evaluator - - def push_stmt(self, stmt): - self.current = _RecursionNode(self._evaluator, stmt, self.current) - check = self._check_recursion() - if check: - debug.warning('catched stmt recursion: %s against %s @%s', stmt, - check.stmt, stmt.start_pos) - self.pop_stmt() - return True - return False - - def pop_stmt(self): - if self.current is not None: - # I don't know how current can be None, but sometimes it happens - # with Python3. - self.current = self.current.parent - - def _check_recursion(self): - test = self.current - while True: - test = test.parent - if self.current == test: - return test - if not test: - return False - - def node_statements(self): - result = [] - n = self.current - while n: - result.insert(0, n.stmt) - n = n.parent - return result - - -class _RecursionNode(object): - """ A node of the RecursionDecorator. 
""" - def __init__(self, evaluator, stmt, parent): - self._evaluator = evaluator - self.script = stmt.get_parent_until() - self.position = stmt.start_pos - self.parent = parent - self.stmt = stmt - - # Don't check param instances, they are not causing recursions - # The same's true for the builtins, because the builtins are really - # simple. - self.is_ignored = self.script == self._evaluator.BUILTINS - - def __eq__(self, other): - if not other: - return None - - return self.script == other.script \ - and self.position == other.position \ - and not self.is_ignored and not other.is_ignored - - -def execution_recursion_decorator(func): - def run(execution, **kwargs): - detector = execution._evaluator.execution_recursion_detector - if detector.push_execution(execution): - result = set() - else: - result = func(execution, **kwargs) - detector.pop_execution() - return result - - return run - - -class ExecutionRecursionDetector(object): - """ - Catches recursions of executions. - """ - def __init__(self, evaluator): - self.recursion_level = 0 - self.parent_execution_funcs = [] - self.execution_funcs = set() - self.execution_count = 0 - self._evaluator = evaluator - - def __call__(self, execution): - debug.dbg('Execution recursions: %s', execution, self.recursion_level, - self.execution_count, len(self.execution_funcs)) - if self.check_recursion(execution): - result = set() - else: - result = self.func(execution) - self.pop_execution() - return result - - def pop_execution(self): - self.parent_execution_funcs.pop() - self.recursion_level -= 1 - - def push_execution(self, execution): - in_par_execution_funcs = execution.base in self.parent_execution_funcs - in_execution_funcs = execution.base in self.execution_funcs - self.recursion_level += 1 - self.execution_count += 1 - self.execution_funcs.add(execution.base) - self.parent_execution_funcs.append(execution.base) - - if self.execution_count > settings.max_executions: - return True - - if isinstance(execution.base, (iterable.Array, iterable.Generator)): - return False - module = execution.get_parent_until() - if module == self._evaluator.BUILTINS: - return False - - if in_par_execution_funcs: - if self.recursion_level > settings.max_function_recursion_level: - return True - if in_execution_funcs and \ - len(self.execution_funcs) > settings.max_until_execution_unique: - return True - if self.execution_count > settings.max_executions_without_builtins: - return True - return False diff --git a/pythonFiles/preview/jedi/evaluate/representation.py b/pythonFiles/preview/jedi/evaluate/representation.py deleted file mode 100644 index 8f3d642cc762..000000000000 --- a/pythonFiles/preview/jedi/evaluate/representation.py +++ /dev/null @@ -1,974 +0,0 @@ -""" -Like described in the :mod:`jedi.parser.tree` module, -there's a need for an ast like module to represent the states of parsed -modules. - -But now there are also structures in Python that need a little bit more than -that. An ``Instance`` for example is only a ``Class`` before it is -instantiated. This class represents these cases. - -So, why is there also a ``Class`` class here? Well, there are decorators and -they change classes in Python 3. - -Representation modules also define "magic methods". Those methods look like -``py__foo__`` and are typically mappable to the Python equivalents ``__call__`` -and others. 
Here's a list: - -====================================== ======================================== -**Method** **Description** --------------------------------------- ---------------------------------------- -py__call__(params: Array) On callable objects, returns types. -py__bool__() Returns True/False/None; None means that - there's no certainty. -py__bases__() Returns a list of base classes. -py__mro__() Returns a list of classes (the mro). -py__iter__() Returns a generator of a set of types. -py__class__() Returns the class of an instance. -py__getitem__(index: int/str) Returns a a set of types of the index. - Can raise an IndexError/KeyError. -py__file__() Only on modules. -py__package__() Only on modules. For the import system. -py__path__() Only on modules. For the import system. -====================================== ======================================== - -__ -""" -import os -import pkgutil -import imp -import re -from itertools import chain - -from jedi._compatibility import use_metaclass, unicode, Python3Method, is_py3 -from jedi.parser import tree -from jedi import debug -from jedi import common -from jedi.cache import underscore_memoization, cache_star_import -from jedi.evaluate.cache import memoize_default, CachedMetaClass, NO_DEFAULT -from jedi.evaluate import compiled -from jedi.evaluate.compiled import mixed -from jedi.evaluate import recursion -from jedi.evaluate import iterable -from jedi.evaluate import docstrings -from jedi.evaluate import pep0484 -from jedi.evaluate import helpers -from jedi.evaluate import param -from jedi.evaluate import flow_analysis -from jedi.evaluate import imports - - -class Executed(tree.Base): - """ - An instance is also an executable - because __init__ is called - :param var_args: The param input array, consist of a parser node or a list. - """ - def __init__(self, evaluator, base, var_args=()): - self._evaluator = evaluator - self.base = base - self.var_args = var_args - - def is_scope(self): - return True - - def get_parent_until(self, *args, **kwargs): - return tree.Base.get_parent_until(self, *args, **kwargs) - - @common.safe_property - def parent(self): - return self.base.parent - - -class Instance(use_metaclass(CachedMetaClass, Executed)): - """ - This class is used to evaluate instances. - """ - def __init__(self, evaluator, base, var_args, is_generated=False): - super(Instance, self).__init__(evaluator, base, var_args) - self.decorates = None - # Generated instances are classes that are just generated by self - # (No var_args) used. - self.is_generated = is_generated - - if base.name.get_code() in ['list', 'set'] \ - and evaluator.BUILTINS == base.get_parent_until(): - # compare the module path with the builtin name. - self.var_args = iterable.check_array_instances(evaluator, self) - elif not is_generated: - # Need to execute the __init__ function, because the dynamic param - # searching needs it. - try: - method = self.get_subscope_by_name('__init__') - except KeyError: - pass - else: - evaluator.execute(method, self.var_args) - - def is_class(self): - return False - - @property - def py__call__(self): - def actual(params): - return self._evaluator.execute(method, params) - - try: - method = self.get_subscope_by_name('__call__') - except KeyError: - # Means the Instance is not callable. - raise AttributeError - - return actual - - def py__class__(self): - return self.base - - def py__bool__(self): - # Signalize that we don't know about the bool type. 
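# An illustrative aside, not jedi's actual API: the py__* convention
# from the module docstring above uses this tri-state bool throughout;
# True/False when certain, None when the truthiness is unknown.
# A hypothetical minimal object following the convention:
#
#     class FakeValue(object):
#         def py__bool__(self):
#             return None         # truthiness unknown
#         def py__call__(self, params):
#             return set()        # callable, but no known return types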
- return None - - @memoize_default() - def _get_method_execution(self, func): - func = get_instance_el(self._evaluator, self, func, True) - return FunctionExecution(self._evaluator, func, self.var_args) - - def _get_func_self_name(self, func): - """ - Returns the name of the first param in a class method (which is - normally self. - """ - try: - return str(func.params[0].name) - except IndexError: - return None - - def _self_names_dict(self, add_mro=True): - names = {} - # This loop adds the names of the self object, copies them and removes - # the self. - for sub in self.base.subscopes: - if isinstance(sub, tree.Class): - continue - # Get the self name, if there's one. - self_name = self._get_func_self_name(sub) - if self_name is None: - continue - - if sub.name.value == '__init__' and not self.is_generated: - # ``__init__`` is special because the params need are injected - # this way. Therefore an execution is necessary. - if not sub.get_decorators(): - # __init__ decorators should generally just be ignored, - # because to follow them and their self variables is too - # complicated. - sub = self._get_method_execution(sub) - for name_list in sub.names_dict.values(): - for name in name_list: - if name.value == self_name and name.get_previous_sibling() is None: - trailer = name.get_next_sibling() - if tree.is_node(trailer, 'trailer') \ - and len(trailer.children) == 2 \ - and trailer.children[0] == '.': - name = trailer.children[1] # After dot. - if name.is_definition(): - arr = names.setdefault(name.value, []) - arr.append(get_instance_el(self._evaluator, self, name)) - return names - - def get_subscope_by_name(self, name): - sub = self.base.get_subscope_by_name(name) - return get_instance_el(self._evaluator, self, sub, True) - - def execute_subscope_by_name(self, name, *args): - method = self.get_subscope_by_name(name) - return self._evaluator.execute_evaluated(method, *args) - - def get_descriptor_returns(self, obj): - """ Throws a KeyError if there's no method. """ - # Arguments in __get__ descriptors are obj, class. - # `method` is the new parent of the array, don't know if that's good. - none_obj = compiled.create(self._evaluator, None) - args = [obj, obj.base] if isinstance(obj, Instance) else [none_obj, obj] - try: - return self.execute_subscope_by_name('__get__', *args) - except KeyError: - return set([self]) - - @memoize_default() - def names_dicts(self, search_global): - yield self._self_names_dict() - - for s in self.base.py__mro__()[1:]: - if not isinstance(s, compiled.CompiledObject): - # Compiled objects don't have `self.` names. - for inst in self._evaluator.execute(s): - yield inst._self_names_dict(add_mro=False) - - for names_dict in self.base.names_dicts(search_global=False, is_instance=True): - yield LazyInstanceDict(self._evaluator, self, names_dict) - - def py__getitem__(self, index): - try: - method = self.get_subscope_by_name('__getitem__') - except KeyError: - debug.warning('No __getitem__, cannot access the array.') - return set() - else: - index_obj = compiled.create(self._evaluator, index) - return self._evaluator.execute_evaluated(method, index_obj) - - def py__iter__(self): - try: - method = self.get_subscope_by_name('__iter__') - except KeyError: - debug.warning('No __iter__ on %s.' % self) - return - else: - iters = self._evaluator.execute(method) - for generator in iters: - if isinstance(generator, Instance): - # `__next__` logic. 
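# An illustrative aside: the runtime protocol emulated here is simply
#
#     it = iter(obj)      # obj.__iter__()
#     next(it)            # it.__next__() on Python 3, it.next() on Python 2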
- name = '__next__' if is_py3 else 'next' - try: - yield generator.execute_subscope_by_name(name) - except KeyError: - debug.warning('Instance has no __next__ function in %s.', generator) - else: - for typ in generator.py__iter__(): - yield typ - - @property - @underscore_memoization - def name(self): - name = self.base.name - return helpers.FakeName(unicode(name), self, name.start_pos) - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'get_imports', 'type', - 'doc', 'raw_doc']: - raise AttributeError("Instance %s: Don't touch this (%s)!" - % (self, name)) - return getattr(self.base, name) - - def __repr__(self): - dec = '' - if self.decorates is not None: - dec = " decorates " + repr(self.decorates) - return "<%s of %s(%s)%s>" % (type(self).__name__, self.base, - self.var_args, dec) - - -class LazyInstanceDict(object): - def __init__(self, evaluator, instance, dct): - self._evaluator = evaluator - self._instance = instance - self._dct = dct - - def __getitem__(self, name): - return [get_instance_el(self._evaluator, self._instance, var, True) - for var in self._dct[name]] - - def values(self): - return [self[key] for key in self._dct] - - -class InstanceName(tree.Name): - def __init__(self, origin_name, parent): - super(InstanceName, self).__init__(tree.zero_position_modifier, - origin_name.value, - origin_name.start_pos) - self._origin_name = origin_name - self.parent = parent - - def is_definition(self): - return self._origin_name.is_definition() - - -def get_instance_el(evaluator, instance, var, is_class_var=False): - """ - Returns an InstanceElement if it makes sense, otherwise leaves the object - untouched. - - Basically having an InstanceElement is context information. That is needed - in quite a lot of cases, which includes Nodes like ``power``, that need to - know where a self name comes from for example. - """ - if isinstance(var, tree.Name): - parent = get_instance_el(evaluator, instance, var.parent, is_class_var) - return InstanceName(var, parent) - elif var.type != 'funcdef' \ - and isinstance(var, (Instance, compiled.CompiledObject, tree.Leaf, - tree.Module, FunctionExecution)): - return var - - var = evaluator.wrap(var) - return InstanceElement(evaluator, instance, var, is_class_var) - - -class InstanceElement(use_metaclass(CachedMetaClass, tree.Base)): - """ - InstanceElement is a wrapper for any object, that is used as an instance - variable (e.g. self.variable or class methods). 
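    For example, given the hypothetical::

        class A(object):
            def m(self):
                self.variable = 1

    the name ``variable`` is wrapped so that later lookups still know
    which instance it belongs to.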
- """ - def __init__(self, evaluator, instance, var, is_class_var): - self._evaluator = evaluator - self.instance = instance - self.var = var - self.is_class_var = is_class_var - - @common.safe_property - @memoize_default() - def parent(self): - par = self.var.parent - if isinstance(par, Class) and par == self.instance.base \ - or isinstance(par, tree.Class) \ - and par == self.instance.base.base: - par = self.instance - else: - par = get_instance_el(self._evaluator, self.instance, par, - self.is_class_var) - return par - - def get_parent_until(self, *args, **kwargs): - return tree.BaseNode.get_parent_until(self, *args, **kwargs) - - def get_definition(self): - return self.get_parent_until((tree.ExprStmt, tree.IsScope, tree.Import)) - - def get_decorated_func(self): - """ Needed because the InstanceElement should not be stripped """ - func = self.var.get_decorated_func() - func = get_instance_el(self._evaluator, self.instance, func) - return func - - def get_rhs(self): - return get_instance_el(self._evaluator, self.instance, - self.var.get_rhs(), self.is_class_var) - - def is_definition(self): - return self.var.is_definition() - - @property - def children(self): - # Copy and modify the array. - return [get_instance_el(self._evaluator, self.instance, command, self.is_class_var) - for command in self.var.children] - - @property - @memoize_default() - def name(self): - name = self.var.name - return helpers.FakeName(unicode(name), self, name.start_pos) - - def __iter__(self): - for el in self.var.__iter__(): - yield get_instance_el(self._evaluator, self.instance, el, - self.is_class_var) - - def __getitem__(self, index): - return get_instance_el(self._evaluator, self.instance, self.var[index], - self.is_class_var) - - def __getattr__(self, name): - return getattr(self.var, name) - - def isinstance(self, *cls): - return isinstance(self.var, cls) - - def is_scope(self): - """ - Since we inherit from Base, it would overwrite the action we want here. - """ - return self.var.is_scope() - - def py__call__(self, params): - if isinstance(self.var, compiled.CompiledObject): - # This check is a bit strange, but CompiledObject itself is a bit - # more complicated than we would it actually like to be. - return self.var.py__call__(params) - else: - return Function.py__call__(self, params) - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.var) - - -class Wrapper(tree.Base): - def is_scope(self): - return True - - def is_class(self): - return False - - def py__bool__(self): - """ - Since Wrapper is a super class for classes, functions and modules, - the return value will always be true. - """ - return True - - @property - @underscore_memoization - def name(self): - name = self.base.name - return helpers.FakeName(unicode(name), self, name.start_pos) - - -class Class(use_metaclass(CachedMetaClass, Wrapper)): - """ - This class is not only important to extend `tree.Class`, it is also a - important for descriptors (if the descriptor methods are evaluated or not). - """ - def __init__(self, evaluator, base): - self._evaluator = evaluator - self.base = base - - @memoize_default(default=()) - def py__mro__(self): - def add(cls): - if cls not in mro: - mro.append(cls) - - mro = [self] - # TODO Do a proper mro resolution. Currently we are just listing - # classes. However, it's a complicated algorithm. - for cls in self.py__bases__(): - # TODO detect for TypeError: duplicate base class str, - # e.g. 
`class X(str, str): pass` - try: - mro_method = cls.py__mro__ - except AttributeError: - # TODO add a TypeError like: - """ - >>> class Y(lambda: test): pass - Traceback (most recent call last): - File "", line 1, in - TypeError: function() argument 1 must be code, not str - >>> class Y(1): pass - Traceback (most recent call last): - File "", line 1, in - TypeError: int() takes at most 2 arguments (3 given) - """ - pass - else: - add(cls) - for cls_new in mro_method(): - add(cls_new) - return tuple(mro) - - @memoize_default(default=()) - def py__bases__(self): - arglist = self.base.get_super_arglist() - if arglist: - args = param.Arguments(self._evaluator, arglist) - return list(chain.from_iterable(args.eval_args())) - else: - return [compiled.create(self._evaluator, object)] - - def py__call__(self, params): - return set([Instance(self._evaluator, self, params)]) - - def py__class__(self): - return compiled.create(self._evaluator, type) - - @property - def params(self): - try: - return self.get_subscope_by_name('__init__').params - except KeyError: - return [] # object.__init__ - - def names_dicts(self, search_global, is_instance=False): - if search_global: - yield self.names_dict - else: - for scope in self.py__mro__(): - if isinstance(scope, compiled.CompiledObject): - yield scope.names_dicts(False, is_instance)[0] - else: - yield scope.names_dict - - def is_class(self): - return True - - def get_subscope_by_name(self, name): - for s in self.py__mro__(): - for sub in reversed(s.subscopes): - if sub.name.value == name: - return sub - raise KeyError("Couldn't find subscope.") - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'parent', 'raw_doc', - 'doc', 'get_imports', 'get_parent_until', 'get_code', - 'subscopes', 'names_dict', 'type']: - raise AttributeError("Don't touch this: %s of %s !" % (name, self)) - return getattr(self.base, name) - - def __repr__(self): - return "" % (type(self).__name__, self.base) - - -class Function(use_metaclass(CachedMetaClass, Wrapper)): - """ - Needed because of decorators. Decorators are evaluated here. - """ - def __init__(self, evaluator, func, is_decorated=False): - """ This should not be called directly """ - self._evaluator = evaluator - self.base = self.base_func = func - self.is_decorated = is_decorated - # A property that is set by the decorator resolution. - self.decorates = None - - @memoize_default() - def get_decorated_func(self): - """ - Returns the function, that should to be executed in the end. - This is also the places where the decorators are processed. - """ - f = self.base_func - decorators = self.base_func.get_decorators() - - if not decorators or self.is_decorated: - return self - - # Only enter it, if has not already been processed. - if not self.is_decorated: - for dec in reversed(decorators): - debug.dbg('decorator: %s %s', dec, f) - dec_results = self._evaluator.eval_element(dec.children[1]) - trailer = dec.children[2:-1] - if trailer: - # Create a trailer and evaluate it. - trailer = tree.Node('trailer', trailer) - trailer.parent = dec - dec_results = self._evaluator.eval_trailer(dec_results, trailer) - - if not len(dec_results): - debug.warning('decorator not found: %s on %s', dec, self.base_func) - return self - decorator = dec_results.pop() - if dec_results: - debug.warning('multiple decorators found %s %s', - self.base_func, dec_results) - - # Create param array. - if isinstance(f, Function): - old_func = f # TODO this is just hacky. change. 
- elif f.type == 'funcdef': - old_func = Function(self._evaluator, f, is_decorated=True) - else: - old_func = f - - wrappers = self._evaluator.execute_evaluated(decorator, old_func) - if not len(wrappers): - debug.warning('no wrappers found %s', self.base_func) - return self - if len(wrappers) > 1: - # TODO resolve issue with multiple wrappers -> multiple types - debug.warning('multiple wrappers found %s %s', - self.base_func, wrappers) - f = list(wrappers)[0] - if isinstance(f, (Instance, Function)): - f.decorates = self - - debug.dbg('decorator end %s', f) - return f - - def names_dicts(self, search_global): - if search_global: - yield self.names_dict - else: - scope = self.py__class__() - for names_dict in scope.names_dicts(False): - yield names_dict - - @Python3Method - def py__call__(self, params): - if self.base.is_generator(): - return set([iterable.Generator(self._evaluator, self, params)]) - else: - return FunctionExecution(self._evaluator, self, params).get_return_types() - - @memoize_default() - def py__annotations__(self): - parser_func = self.base - return_annotation = parser_func.annotation() - if return_annotation: - dct = {'return': return_annotation} - else: - dct = {} - for function_param in parser_func.params: - param_annotation = function_param.annotation() - if param_annotation is not None: - dct[function_param.name.value] = param_annotation - return dct - - def py__class__(self): - # This differentiation is only necessary for Python2. Python3 does not - # use a different method class. - if isinstance(self.base.get_parent_scope(), tree.Class): - name = 'METHOD_CLASS' - else: - name = 'FUNCTION_CLASS' - return compiled.get_special_object(self._evaluator, name) - - def __getattr__(self, name): - return getattr(self.base_func, name) - - def __repr__(self): - dec = '' - if self.decorates is not None: - dec = " decorates " + repr(self.decorates) - return "" % (type(self).__name__, self.base_func, dec) - - -class LambdaWrapper(Function): - def get_decorated_func(self): - return self - - -class FunctionExecution(Executed): - """ - This class is used to evaluate functions and their returns. - - This is the most complicated class, because it contains the logic to - transfer parameters. It is even more complicated, because there may be - multiple calls to functions and recursion has to be avoided. But this is - responsibility of the decorators. - """ - type = 'funcdef' - - def __init__(self, evaluator, base, *args, **kwargs): - super(FunctionExecution, self).__init__(evaluator, base, *args, **kwargs) - self._copy_dict = {} - funcdef = base.base_func - if isinstance(funcdef, mixed.MixedObject): - # The extra information in mixed is not needed anymore. We can just - # unpack it and give it the tree object. - funcdef = funcdef.definition - - # Just overwrite the old version. We don't need it anymore. - funcdef = helpers.deep_ast_copy(funcdef, new_elements=self._copy_dict) - for child in funcdef.children: - if child.type not in ('operator', 'keyword'): - # Not all nodes are properly copied by deep_ast_copy. - child.parent = self - self.children = funcdef.children - self.names_dict = funcdef.names_dict - - @memoize_default(default=set()) - @recursion.execution_recursion_decorator - def get_return_types(self, check_yields=False): - func = self.base - - if func.isinstance(LambdaWrapper): - return self._evaluator.eval_element(self.children[-1]) - - if func.listeners: - # Feed the listeners, with the params. 
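# An illustrative note, inferred from the branch below rather than new
# behaviour: listeners back jedi's dynamic-parameter search. When the
# function is not being executed normally, jedi instead feeds the
# arguments found at call sites through these listeners to learn what
# the parameters could be.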
- for listener in func.listeners: - listener.execute(self._get_params()) - # If we do have listeners, that means that there's not a regular - # execution ongoing. In this case Jedi is interested in the - # inserted params, not in the actual execution of the function. - return set() - - if check_yields: - types = set() - returns = self.yields - else: - returns = self.returns - types = set(docstrings.find_return_types(self._evaluator, func)) - types |= set(pep0484.find_return_types(self._evaluator, func)) - - for r in returns: - check = flow_analysis.break_check(self._evaluator, self, r) - if check is flow_analysis.UNREACHABLE: - debug.dbg('Return unreachable: %s', r) - else: - if check_yields: - types |= iterable.unite(self._eval_yield(r)) - else: - types |= self._evaluator.eval_element(r.children[1]) - if check is flow_analysis.REACHABLE: - debug.dbg('Return reachable: %s', r) - break - return types - - def _eval_yield(self, yield_expr): - element = yield_expr.children[1] - if element.type == 'yield_arg': - # It must be a yield from. - yield_from_types = self._evaluator.eval_element(element.children[1]) - for result in iterable.py__iter__(self._evaluator, yield_from_types, element): - yield result - else: - yield self._evaluator.eval_element(element) - - @recursion.execution_recursion_decorator - def get_yield_types(self): - yields = self.yields - stopAt = tree.ForStmt, tree.WhileStmt, FunctionExecution, tree.IfStmt - for_parents = [(x, x.get_parent_until((stopAt))) for x in yields] - - # Calculate if the yields are placed within the same for loop. - yields_order = [] - last_for_stmt = None - for yield_, for_stmt in for_parents: - # For really simple for loops we can predict the order. Otherwise - # we just ignore it. - parent = for_stmt.parent - if parent.type == 'suite': - parent = parent.parent - if for_stmt.type == 'for_stmt' and parent == self \ - and for_stmt.defines_one_name(): # Simplicity for now. - if for_stmt == last_for_stmt: - yields_order[-1][1].append(yield_) - else: - yields_order.append((for_stmt, [yield_])) - elif for_stmt == self: - yields_order.append((None, [yield_])) - else: - yield self.get_return_types(check_yields=True) - return - last_for_stmt = for_stmt - - evaluator = self._evaluator - for for_stmt, yields in yields_order: - if for_stmt is None: - # No for_stmt, just normal yields. - for yield_ in yields: - for result in self._eval_yield(yield_): - yield result - else: - input_node = for_stmt.get_input_node() - for_types = evaluator.eval_element(input_node) - ordered = iterable.py__iter__(evaluator, for_types, input_node) - for index_types in ordered: - dct = {str(for_stmt.children[1]): index_types} - evaluator.predefined_if_name_dict_dict[for_stmt] = dct - for yield_in_same_for_stmt in yields: - for result in self._eval_yield(yield_in_same_for_stmt): - yield result - del evaluator.predefined_if_name_dict_dict[for_stmt] - - def names_dicts(self, search_global): - yield self.names_dict - - @memoize_default(default=NO_DEFAULT) - def _get_params(self): - """ - This returns the params for an TODO and is injected as a - 'hack' into the tree.Function class. - This needs to be here, because Instance can have __init__ functions, - which act the same way as normal functions. 
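    On Python 3 the analogous runtime information is available through
    ``inspect``, e.g.::

        >>> import inspect
        >>> class A(object):
        ...     def __init__(self, x):
        ...         pass
        >>> list(inspect.signature(A.__init__).parameters)
        ['self', 'x']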
- """ - return param.get_params(self._evaluator, self.base, self.var_args) - - def param_by_name(self, name): - return [n for n in self._get_params() if str(n) == name][0] - - def name_for_position(self, position): - return tree.Function.name_for_position(self, position) - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'imports', 'name', 'type']: - raise AttributeError('Tried to access %s: %s. Why?' % (name, self)) - return getattr(self.base, name) - - @common.safe_property - @memoize_default() - def returns(self): - return tree.Scope._search_in_scope(self, tree.ReturnStmt) - - @common.safe_property - @memoize_default() - def yields(self): - return tree.Scope._search_in_scope(self, tree.YieldExpr) - - @common.safe_property - @memoize_default() - def statements(self): - return tree.Scope._search_in_scope(self, tree.ExprStmt) - - @common.safe_property - @memoize_default() - def subscopes(self): - return tree.Scope._search_in_scope(self, tree.Scope) - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.base) - - -class GlobalName(helpers.FakeName): - def __init__(self, name): - """ - We need to mark global names somehow. Otherwise they are just normal - names that are not definitions. - """ - super(GlobalName, self).__init__(name.value, name.parent, - name.start_pos, is_definition=True) - - -class ModuleWrapper(use_metaclass(CachedMetaClass, tree.Module, Wrapper)): - def __init__(self, evaluator, module, parent_module=None): - self._evaluator = evaluator - self.base = self._module = module - self._parent_module = parent_module - - def names_dicts(self, search_global): - yield self.base.names_dict - yield self._module_attributes_dict() - - for star_module in self.star_imports(): - yield star_module.names_dict - - yield dict((str(n), [GlobalName(n)]) for n in self.base.global_names) - yield self._sub_modules_dict() - - # I'm not sure if the star import cache is really that effective anymore - # with all the other really fast import caches. Recheck. Also we would need - # to push the star imports into Evaluator.modules, if we reenable this. - #@cache_star_import - @memoize_default([]) - def star_imports(self): - modules = [] - for i in self.base.imports: - if i.is_star_import(): - name = i.star_import_name() - new = imports.ImportWrapper(self._evaluator, name).follow() - for module in new: - if isinstance(module, tree.Module): - modules += module.star_imports() - modules += new - return modules - - @memoize_default() - def _module_attributes_dict(self): - def parent_callback(): - # Create a string type object (without a defined string in it): - return list(self._evaluator.execute(compiled.create(self._evaluator, str)))[0] - - names = ['__file__', '__package__', '__doc__', '__name__'] - # All the additional module attributes are strings. - return dict((n, [helpers.LazyName(n, parent_callback, is_definition=True)]) - for n in names) - - @property - @memoize_default() - def name(self): - return helpers.FakeName(unicode(self.base.name), self, (1, 0)) - - def _get_init_directory(self): - """ - :return: The path to the directory of a package. None in case it's not - a package. - """ - for suffix, _, _ in imp.get_suffixes(): - ending = '__init__' + suffix - py__file__ = self.py__file__() - if py__file__ is not None and py__file__.endswith(ending): - # Remove the ending, including the separator. 
- return self.py__file__()[:-len(ending) - 1] - return None - - def py__name__(self): - for name, module in self._evaluator.modules.items(): - if module == self: - return name - - return '__main__' - - def py__file__(self): - """ - In contrast to Python's __file__ can be None. - """ - if self._module.path is None: - return None - - return os.path.abspath(self._module.path) - - def py__package__(self): - if self._get_init_directory() is None: - return re.sub(r'\.?[^\.]+$', '', self.py__name__()) - else: - return self.py__name__() - - def _py__path__(self): - if self._parent_module is None: - search_path = self._evaluator.sys_path - else: - search_path = self._parent_module.py__path__() - init_path = self.py__file__() - if os.path.basename(init_path) == '__init__.py': - with open(init_path, 'rb') as f: - content = common.source_to_unicode(f.read()) - # these are strings that need to be used for namespace packages, - # the first one is ``pkgutil``, the second ``pkg_resources``. - options = ('declare_namespace(__name__)', 'extend_path(__path__') - if options[0] in content or options[1] in content: - # It is a namespace, now try to find the rest of the - # modules on sys_path or whatever the search_path is. - paths = set() - for s in search_path: - other = os.path.join(s, unicode(self.name)) - if os.path.isdir(other): - paths.add(other) - return list(paths) - # Default to this. - return [self._get_init_directory()] - - @property - def py__path__(self): - """ - Not seen here, since it's a property. The callback actually uses a - variable, so use it like:: - - foo.py__path__(sys_path) - - In case of a package, this returns Python's __path__ attribute, which - is a list of paths (strings). - Raises an AttributeError if the module is not a package. - """ - path = self._get_init_directory() - - if path is None: - raise AttributeError('Only packages have __path__ attributes.') - else: - return self._py__path__ - - @memoize_default() - def _sub_modules_dict(self): - """ - Lists modules in the directory of this module (if this module is a - package). - """ - path = self._module.path - names = {} - if path is not None and path.endswith(os.path.sep + '__init__.py'): - mods = pkgutil.iter_modules([os.path.dirname(path)]) - for module_loader, name, is_pkg in mods: - fake_n = helpers.FakeName(name) - # It's obviously a relative import to the current module. - imp = helpers.FakeImport(fake_n, self, level=1) - fake_n.parent = imp - names[name] = [fake_n] - - # TODO add something like this in the future, its cleaner than the - # import hacks. - # ``os.path`` is a hardcoded exception, because it's a - # ``sys.modules`` modification. - #if str(self.name) == 'os': - # names.append(helpers.FakeName('path', parent=self)) - - return names - - def py__class__(self): - return compiled.get_special_object(self._evaluator, 'MODULE_CLASS') - - def __getattr__(self, name): - return getattr(self._module, name) - - def __repr__(self): - return "<%s: %s>" % (type(self).__name__, self._module) diff --git a/pythonFiles/preview/jedi/evaluate/stdlib.py b/pythonFiles/preview/jedi/evaluate/stdlib.py deleted file mode 100644 index f0c4346d6852..000000000000 --- a/pythonFiles/preview/jedi/evaluate/stdlib.py +++ /dev/null @@ -1,280 +0,0 @@ -""" -Implementations of standard library functions, because it's not possible to -understand them with Jedi. - -To add a new implementation, create a function and add it to the -``_implemented`` dict at the bottom of this module. 
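A hypothetical entry (names invented for illustration) would look
like::

    def builtins_len(evaluator, obj, arguments):
        return set()

    _implemented = {'builtins': {'len': builtins_len}}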
- -Note that this module exists only to implement very specific functionality in -the standard library. The usual way to understand the standard library is the -compiled module that returns the types for C-builtins. -""" -import collections -import re - -from jedi._compatibility import unicode -from jedi.common import unite -from jedi.evaluate import compiled -from jedi.evaluate import representation as er -from jedi.evaluate import iterable -from jedi.parser import ParserWithRecovery -from jedi.parser import tree -from jedi import debug -from jedi.evaluate import precedence -from jedi.evaluate import param -from jedi.evaluate import analysis - - -class NotInStdLib(LookupError): - pass - - -def execute(evaluator, obj, arguments): - try: - obj_name = str(obj.name) - except AttributeError: - pass - else: - if obj.parent == evaluator.BUILTINS: - module_name = 'builtins' - elif isinstance(obj.parent, tree.Module): - module_name = str(obj.parent.name) - else: - module_name = '' - - # for now we just support builtin functions. - try: - func = _implemented[module_name][obj_name] - except KeyError: - pass - else: - return func(evaluator, obj, arguments) - raise NotInStdLib() - - -def _follow_param(evaluator, arguments, index): - try: - key, values = list(arguments.unpack())[index] - except IndexError: - return set() - else: - return unite(evaluator.eval_element(v) for v in values) - - -def argument_clinic(string, want_obj=False, want_scope=False, want_arguments=False): - """ - Works like Argument Clinic (PEP 436), to validate function params. - """ - clinic_args = [] - allow_kwargs = False - optional = False - while string: - # Optional arguments have to begin with a bracket. And should always be - # at the end of the arguments. This is therefore not a proper argument - # clinic implementation. `range()` for exmple allows an optional start - # value at the beginning. - match = re.match('(?:(?:(\[),? ?|, ?|)(\w+)|, ?/)\]*', string) - string = string[len(match.group(0)):] - if not match.group(2): # A slash -> allow named arguments - allow_kwargs = True - continue - optional = optional or bool(match.group(1)) - word = match.group(2) - clinic_args.append((word, optional, allow_kwargs)) - - def f(func): - def wrapper(evaluator, obj, arguments): - debug.dbg('builtin start %s' % obj, color='MAGENTA') - try: - lst = list(arguments.eval_argument_clinic(clinic_args)) - except ValueError: - return set() - else: - kwargs = {} - if want_scope: - kwargs['scope'] = arguments.scope() - if want_obj: - kwargs['obj'] = obj - if want_arguments: - kwargs['arguments'] = arguments - return func(evaluator, *lst, **kwargs) - finally: - debug.dbg('builtin end', color='MAGENTA') - - return wrapper - return f - - -@argument_clinic('object, name[, default], /') -def builtins_getattr(evaluator, objects, names, defaults=None): - # follow the first param - for obj in objects: - if not isinstance(obj, (er.Instance, er.Class, tree.Module, compiled.CompiledObject)): - debug.warning('getattr called without instance') - continue - - for name in names: - if precedence.is_string(name): - return evaluator.find_types(obj, name.obj) - else: - debug.warning('getattr called without str') - continue - return set() - - -@argument_clinic('object[, bases, dict], /') -def builtins_type(evaluator, objects, bases, dicts): - if bases or dicts: - # It's a type creation... maybe someday... 
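# An illustrative aside: the branch skipped here corresponds to the
# three-argument form of type(), which creates a class at runtime:
#
#     C = type('C', (object,), {'x': 1})
#     print(C().x)    # 1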
- return set() - else: - return set([o.py__class__() for o in objects]) - - -class SuperInstance(er.Instance): - """To be used like the object ``super`` returns.""" - def __init__(self, evaluator, cls): - su = cls.py_mro()[1] - super().__init__(evaluator, su and su[0] or self) - - -@argument_clinic('[type[, obj]], /', want_scope=True) -def builtins_super(evaluator, types, objects, scope): - # TODO make this able to detect multiple inheritance super - accept = (tree.Function, er.FunctionExecution) - if scope.isinstance(*accept): - wanted = (tree.Class, er.Instance) - cls = scope.get_parent_until(accept + wanted, - include_current=False) - if isinstance(cls, wanted): - if isinstance(cls, tree.Class): - cls = er.Class(evaluator, cls) - elif isinstance(cls, er.Instance): - cls = cls.base - su = cls.py__bases__() - if su: - return evaluator.execute(su[0]) - return set() - - -@argument_clinic('sequence, /', want_obj=True, want_arguments=True) -def builtins_reversed(evaluator, sequences, obj, arguments): - # While we could do without this variable (just by using sequences), we - # want static analysis to work well. Therefore we need to generated the - # values again. - first_arg = next(arguments.as_tuple())[0] - ordered = list(iterable.py__iter__(evaluator, sequences, first_arg)) - - rev = [iterable.AlreadyEvaluated(o) for o in reversed(ordered)] - # Repack iterator values and then run it the normal way. This is - # necessary, because `reversed` is a function and autocompletion - # would fail in certain cases like `reversed(x).__iter__` if we - # just returned the result directly. - rev = iterable.AlreadyEvaluated( - [iterable.FakeSequence(evaluator, rev, 'list')] - ) - return set([er.Instance(evaluator, obj, param.Arguments(evaluator, [rev]))]) - - -@argument_clinic('obj, type, /', want_arguments=True) -def builtins_isinstance(evaluator, objects, types, arguments): - bool_results = set([]) - for o in objects: - try: - mro_func = o.py__class__().py__mro__ - except AttributeError: - # This is temporary. Everything should have a class attribute in - # Python?! Maybe we'll leave it here, because some numpy objects or - # whatever might not. - return set([compiled.create(True), compiled.create(False)]) - - mro = mro_func() - - for cls_or_tup in types: - if cls_or_tup.is_class(): - bool_results.add(cls_or_tup in mro) - elif str(cls_or_tup.name) == 'tuple' \ - and cls_or_tup.get_parent_scope() == evaluator.BUILTINS: - # Check for tuples. - classes = unite(cls_or_tup.py__iter__()) - bool_results.add(any(cls in mro for cls in classes)) - else: - _, nodes = list(arguments.unpack())[1] - for node in nodes: - message = 'TypeError: isinstance() arg 2 must be a ' \ - 'class, type, or tuple of classes and types, ' \ - 'not %s.' % cls_or_tup - analysis.add(evaluator, 'type-error-isinstance', node, message) - - return set(compiled.create(evaluator, x) for x in bool_results) - - -def collections_namedtuple(evaluator, obj, arguments): - """ - Implementation of the namedtuple function. - - This has to be done by processing the namedtuple class template and - evaluating the result. - - .. note:: |jedi| only supports namedtuples on Python >2.6. - - """ - # Namedtuples are not supported on Python 2.6 - if not hasattr(collections, '_class_template'): - return set() - - # Process arguments - # TODO here we only use one of the types, we should use all. 
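# An illustrative aside: what this function re-creates statically
# happens at runtime as:
#
#     from collections import namedtuple
#     Point = namedtuple('Point', 'x y')
#     print(Point(1, 2).x)    # 1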
- name = list(_follow_param(evaluator, arguments, 0))[0].obj - _fields = list(_follow_param(evaluator, arguments, 1))[0] - if isinstance(_fields, compiled.CompiledObject): - fields = _fields.obj.replace(',', ' ').split() - elif isinstance(_fields, iterable.Array): - try: - fields = [v.obj for v in unite(_fields.py__iter__())] - except AttributeError: - return set() - else: - return set() - - # Build source - source = collections._class_template.format( - typename=name, - field_names=fields, - num_fields=len(fields), - arg_list=', '.join(fields), - repr_fmt=', '.join(collections._repr_template.format(name=name) for name in fields), - field_defs='\n'.join(collections._field_template.format(index=index, name=name) - for index, name in enumerate(fields)) - ) - - # Parse source - generated_class = ParserWithRecovery(evaluator.grammar, unicode(source)).module.subscopes[0] - return set([er.Class(evaluator, generated_class)]) - - -@argument_clinic('first, /') -def _return_first_param(evaluator, firsts): - return firsts - - -_implemented = { - 'builtins': { - 'getattr': builtins_getattr, - 'type': builtins_type, - 'super': builtins_super, - 'reversed': builtins_reversed, - 'isinstance': builtins_isinstance, - }, - 'copy': { - 'copy': _return_first_param, - 'deepcopy': _return_first_param, - }, - 'json': { - 'load': lambda *args: set(), - 'loads': lambda *args: set(), - }, - 'collections': { - 'namedtuple': collections_namedtuple, - }, -} diff --git a/pythonFiles/preview/jedi/evaluate/sys_path.py b/pythonFiles/preview/jedi/evaluate/sys_path.py deleted file mode 100644 index 4fbc6acacdc5..000000000000 --- a/pythonFiles/preview/jedi/evaluate/sys_path.py +++ /dev/null @@ -1,283 +0,0 @@ -import glob -import os -import sys -from jedi.evaluate.site import addsitedir - -from jedi._compatibility import exec_function, unicode -from jedi.parser import tree -from jedi.parser import ParserWithRecovery -from jedi.evaluate.cache import memoize_default -from jedi import debug -from jedi import common -from jedi.parser.utils import load_parser, save_parser - - -def get_venv_path(venv): - """Get sys.path for specified virtual environment.""" - sys_path = _get_venv_path_dirs(venv) - with common.ignored(ValueError): - sys_path.remove('') - sys_path = _get_sys_path_with_egglinks(sys_path) - # As of now, get_venv_path_dirs does not scan built-in pythonpath and - # user-local site-packages, let's approximate them using path from Jedi - # interpreter. - return sys_path + sys.path - - -def _get_sys_path_with_egglinks(sys_path): - """Find all paths including those referenced by egg-links. - - Egg-link-referenced directories are inserted into path immediately before - the directory on which their links were found. Such directories are not - taken into consideration by normal import mechanism, but they are traversed - when doing pkg_resources.require. - """ - result = [] - for p in sys_path: - # pkg_resources does not define a specific order for egg-link files - # using os.listdir to enumerate them, we're sorting them to have - # reproducible tests. - for egg_link in sorted(glob.glob(os.path.join(p, '*.egg-link'))): - with open(egg_link) as fd: - for line in fd: - line = line.strip() - if line: - result.append(os.path.join(p, line)) - # pkg_resources package only interprets the first - # non-empty line in egg-link files. 
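# A minimal sketch of reading a single egg-link file, mirroring the loop
# above: pkg_resources honours only the first non-empty line, which names
# the directory to splice into sys.path just before the directory that
# held the link. `read_egg_link` is an illustrative helper.
import os

def read_egg_link(sitedir, egg_link_path):
    with open(egg_link_path) as fd:
        for line in fd:
            line = line.strip()
            if line:
                # Relative targets resolve against the site directory.
                return os.path.join(sitedir, line)
    return None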
- break - result.append(p) - return result - - -def _get_venv_path_dirs(venv): - """Get sys.path for venv without starting up the interpreter.""" - venv = os.path.abspath(venv) - sitedir = _get_venv_sitepackages(venv) - sys_path = [] - addsitedir(sys_path, sitedir) - return sys_path - - -def _get_venv_sitepackages(venv): - if os.name == 'nt': - p = os.path.join(venv, 'lib', 'site-packages') - else: - p = os.path.join(venv, 'lib', 'python%d.%d' % sys.version_info[:2], - 'site-packages') - return p - - -def _execute_code(module_path, code): - c = "import os; from os.path import *; result=%s" - variables = {'__file__': module_path} - try: - exec_function(c % code, variables) - except Exception: - debug.warning('sys.path manipulation detected, but failed to evaluate.') - else: - try: - res = variables['result'] - if isinstance(res, str): - return [os.path.abspath(res)] - except KeyError: - pass - return [] - - -def _paths_from_assignment(evaluator, expr_stmt): - """ - Extracts the assigned strings from an assignment that looks as follows:: - - >>> sys.path[0:0] = ['module/path', 'another/module/path'] - - This function is in general pretty tolerant (and therefore 'buggy'). - However, it's not a big issue usually to add more paths to Jedi's sys_path, - because it will only affect Jedi in very random situations and by adding - more paths than necessary, it usually benefits the general user. - """ - for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]): - try: - assert operator in ['=', '+='] - assert tree.is_node(assignee, 'power', 'atom_expr') and \ - len(assignee.children) > 1 - c = assignee.children - assert c[0].type == 'name' and c[0].value == 'sys' - trailer = c[1] - assert trailer.children[0] == '.' and trailer.children[1].value == 'path' - # TODO Essentially we're not checking details on sys.path - # manipulation. Both assigment of the sys.path and changing/adding - # parts of the sys.path are the same: They get added to the current - # sys.path. - """ - execution = c[2] - assert execution.children[0] == '[' - subscript = execution.children[1] - assert subscript.type == 'subscript' - assert ':' in subscript.children - """ - except AssertionError: - continue - - from jedi.evaluate.iterable import py__iter__ - from jedi.evaluate.precedence import is_string - types = evaluator.eval_element(expr_stmt) - for types in py__iter__(evaluator, types, expr_stmt): - for typ in types: - if is_string(typ): - yield typ.obj - - -def _paths_from_list_modifications(module_path, trailer1, trailer2): - """ extract the path from either "sys.path.append" or "sys.path.insert" """ - # Guarantee that both are trailers, the first one a name and the second one - # a function execution with at least one param. - if not (tree.is_node(trailer1, 'trailer') and trailer1.children[0] == '.' - and tree.is_node(trailer2, 'trailer') and trailer2.children[0] == '(' - and len(trailer2.children) == 3): - return [] - - name = trailer1.children[1].value - if name not in ['insert', 'append']: - return [] - arg = trailer2.children[1] - if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma. - arg = arg.children[2] - return _execute_code(module_path, arg.get_code()) - - -def _check_module(evaluator, module): - """ - Detect sys.path modifications within module. 
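# A standalone sketch of the _execute_code trick above: the textual
# argument of a detected sys.path.append(...)/insert(...) call is executed
# in a tiny namespace with __file__ bound, and a resulting string is taken
# as a path. Illustrative only; the real code wraps this in try/except.
import os

def execute_path_expr(module_path, code):
    variables = {'__file__': module_path}
    exec("import os; from os.path import *; result = %s" % code, variables)
    res = variables.get('result')
    return [os.path.abspath(res)] if isinstance(res, str) else []

# execute_path_expr('/tmp/mod.py', "join(dirname(__file__), 'libs')")
# -> ['/tmp/libs'] on a POSIX layout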
- """ - def get_sys_path_powers(names): - for name in names: - power = name.parent.parent - if tree.is_node(power, 'power', 'atom_expr'): - c = power.children - if isinstance(c[0], tree.Name) and c[0].value == 'sys' \ - and tree.is_node(c[1], 'trailer'): - n = c[1].children[1] - if isinstance(n, tree.Name) and n.value == 'path': - yield name, power - - sys_path = list(evaluator.sys_path) # copy - try: - possible_names = module.used_names['path'] - except KeyError: - # module.used_names is MergedNamesDict whose getitem never throws - # keyerror, this is superfluous. - pass - else: - for name, power in get_sys_path_powers(possible_names): - stmt = name.get_definition() - if len(power.children) >= 4: - sys_path.extend(_paths_from_list_modifications(module.path, *power.children[2:4])) - elif name.get_definition().type == 'expr_stmt': - sys_path.extend(_paths_from_assignment(evaluator, stmt)) - return sys_path - - -@memoize_default(evaluator_is_first_arg=True, default=[]) -def sys_path_with_modifications(evaluator, module): - if module.path is None: - # Support for modules without a path is bad, therefore return the - # normal path. - return list(evaluator.sys_path) - - curdir = os.path.abspath(os.curdir) - #TODO why do we need a chdir? - with common.ignored(OSError): - os.chdir(os.path.dirname(module.path)) - - buildout_script_paths = set() - - result = _check_module(evaluator, module) - result += _detect_django_path(module.path) - for buildout_script in _get_buildout_scripts(module.path): - for path in _get_paths_from_buildout_script(evaluator, buildout_script): - buildout_script_paths.add(path) - # cleanup, back to old directory - os.chdir(curdir) - return list(result) + list(buildout_script_paths) - - -def _get_paths_from_buildout_script(evaluator, buildout_script): - def load(buildout_script): - try: - with open(buildout_script, 'rb') as f: - source = common.source_to_unicode(f.read()) - except IOError: - debug.dbg('Error trying to read buildout_script: %s', buildout_script) - return - - p = ParserWithRecovery(evaluator.grammar, source, buildout_script) - save_parser(buildout_script, p) - return p.module - - cached = load_parser(buildout_script) - module = cached and cached.module or load(buildout_script) - if not module: - return - - for path in _check_module(evaluator, module): - yield path - - -def traverse_parents(path): - while True: - new = os.path.dirname(path) - if new == path: - return - path = new - yield path - - -def _get_parent_dir_with_file(path, filename): - for parent in traverse_parents(path): - if os.path.isfile(os.path.join(parent, filename)): - return parent - return None - - -def _detect_django_path(module_path): - """ Detects the path of the very well known Django library (if used) """ - result = [] - - for parent in traverse_parents(module_path): - with common.ignored(IOError): - with open(parent + os.path.sep + 'manage.py'): - debug.dbg('Found django path: %s', module_path) - result.append(parent) - return result - - -def _get_buildout_scripts(module_path): - """ - if there is a 'buildout.cfg' file in one of the parent directories of the - given module it will return a list of all files in the buildout bin - directory that look like python files. - - :param module_path: absolute path to the module. 
- :type module_path: str - """ - project_root = _get_parent_dir_with_file(module_path, 'buildout.cfg') - if not project_root: - return [] - bin_path = os.path.join(project_root, 'bin') - if not os.path.exists(bin_path): - return [] - extra_module_paths = [] - for filename in os.listdir(bin_path): - try: - filepath = os.path.join(bin_path, filename) - with open(filepath, 'r') as f: - firstline = f.readline() - if firstline.startswith('#!') and 'python' in firstline: - extra_module_paths.append(filepath) - except (UnicodeDecodeError, IOError) as e: - # Probably a binary file; permission error or race cond. because file got deleted - # ignore - debug.warning(unicode(e)) - continue - return extra_module_paths diff --git a/pythonFiles/preview/jedi/parser/__init__.py b/pythonFiles/preview/jedi/parser/__init__.py deleted file mode 100644 index 22f386050ef5..000000000000 --- a/pythonFiles/preview/jedi/parser/__init__.py +++ /dev/null @@ -1,409 +0,0 @@ -""" -The ``Parser`` tries to convert the available Python code in an easy to read -format, something like an abstract syntax tree. The classes who represent this -tree, are sitting in the :mod:`jedi.parser.tree` module. - -The Python module ``tokenize`` is a very important part in the ``Parser``, -because it splits the code into different words (tokens). Sometimes it looks a -bit messy. Sorry for that! You might ask now: "Why didn't you use the ``ast`` -module for this? Well, ``ast`` does a very good job understanding proper Python -code, but fails to work as soon as there's a single line of broken code. - -There's one important optimization that needs to be known: Statements are not -being parsed completely. ``Statement`` is just a representation of the tokens -within the statement. This lowers memory usage and cpu time and reduces the -complexity of the ``Parser`` (there's another parser sitting inside -``Statement``, which produces ``Array`` and ``Call``). -""" -import os -import re - -from jedi.parser import tree as pt -from jedi.parser import tokenize -from jedi.parser.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER, - STRING) -from jedi.parser.pgen2.pgen import generate_grammar -from jedi.parser.pgen2.parse import PgenParser - -OPERATOR_KEYWORDS = 'and', 'for', 'if', 'else', 'in', 'is', 'lambda', 'not', 'or' -# Not used yet. In the future I intend to add something like KeywordStatement -STATEMENT_KEYWORDS = 'assert', 'del', 'global', 'nonlocal', 'raise', \ - 'return', 'yield', 'pass', 'continue', 'break' - - -_loaded_grammars = {} - - -class ParseError(Exception): - """ - Signals you that the code you fed the Parser was not correct Python code. - """ - - -def load_grammar(version='3.4'): - # For now we only support two different Python syntax versions: The latest - # Python 3 and Python 2. This may change. 
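# The shebang probe above, isolated: a script in a buildout bin/ directory
# counts as Python when its first line is a '#!' shebang mentioning
# python. `looks_like_python_script` is an illustrative name.
def looks_like_python_script(filepath):
    with open(filepath, 'r') as f:
        firstline = f.readline()
    return firstline.startswith('#!') and 'python' in firstline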
- if version in ('3.2', '3.3'): - version = '3.4' - elif version == '2.6': - version = '2.7' - - file = 'grammar' + version + '.txt' - - global _loaded_grammars - path = os.path.join(os.path.dirname(__file__), file) - try: - return _loaded_grammars[path] - except KeyError: - return _loaded_grammars.setdefault(path, generate_grammar(path)) - - -class ParserSyntaxError(object): - def __init__(self, message, position): - self.message = message - self.position = position - - -class Parser(object): - AST_MAPPING = { - 'expr_stmt': pt.ExprStmt, - 'classdef': pt.Class, - 'funcdef': pt.Function, - 'file_input': pt.Module, - 'import_name': pt.ImportName, - 'import_from': pt.ImportFrom, - 'break_stmt': pt.KeywordStatement, - 'continue_stmt': pt.KeywordStatement, - 'return_stmt': pt.ReturnStmt, - 'raise_stmt': pt.KeywordStatement, - 'yield_expr': pt.YieldExpr, - 'del_stmt': pt.KeywordStatement, - 'pass_stmt': pt.KeywordStatement, - 'global_stmt': pt.GlobalStmt, - 'nonlocal_stmt': pt.KeywordStatement, - 'print_stmt': pt.KeywordStatement, - 'assert_stmt': pt.AssertStmt, - 'if_stmt': pt.IfStmt, - 'with_stmt': pt.WithStmt, - 'for_stmt': pt.ForStmt, - 'while_stmt': pt.WhileStmt, - 'try_stmt': pt.TryStmt, - 'comp_for': pt.CompFor, - 'decorator': pt.Decorator, - 'lambdef': pt.Lambda, - 'old_lambdef': pt.Lambda, - 'lambdef_nocond': pt.Lambda, - } - - def __init__(self, grammar, source, start_symbol='file_input', - tokenizer=None, start_parsing=True): - # Todo Remove start_parsing (with False) - - self._used_names = {} - self._scope_names_stack = [{}] - self._last_failed_start_pos = (0, 0) - self._global_names = [] - - # For the fast parser. - self.position_modifier = pt.PositionModifier() - - self._added_newline = False - # The Python grammar needs a newline at the end of each statement. - if not source.endswith('\n') and start_symbol == 'file_input': - source += '\n' - self._added_newline = True - - self.source = source - self._start_symbol = start_symbol - self._grammar = grammar - - self._parsed = None - - if start_parsing: - if tokenizer is None: - tokenizer = tokenize.source_tokens(source, use_exact_op_types=True) - self.parse(tokenizer) - - def parse(self, tokenizer): - if self._parsed is not None: - return self._parsed - - start_number = self._grammar.symbol2number[self._start_symbol] - pgen_parser = PgenParser( - self._grammar, self.convert_node, self.convert_leaf, - self.error_recovery, start_number - ) - - try: - self._parsed = pgen_parser.parse(tokenizer) - finally: - self.stack = pgen_parser.stack - - if self._start_symbol == 'file_input' != self._parsed.type: - # If there's only one statement, we get back a non-module. That's - # not what we want, we want a module, so we add it here: - self._parsed = self.convert_node(self._grammar, - self._grammar.symbol2number['file_input'], - [self._parsed]) - - if self._added_newline: - self.remove_last_newline() - - def get_parsed_node(self): - # TODO rename to get_root_node - return self._parsed - - def error_recovery(self, grammar, stack, arcs, typ, value, start_pos, prefix, - add_token_callback): - raise ParseError - - def convert_node(self, grammar, type, children): - """ - Convert raw node information to a Node instance. - - This is passed to the parser driver which calls it whenever a reduction of a - grammar rule produces a new complete node, so that the tree is build - strictly bottom-up. 
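# The caching idiom load_grammar uses above, isolated: grammars are looked
# up by file path and built at most once per miss via dict.setdefault.
# `loader` stands in for generate_grammar and is illustrative.
_cache = {}

def load_cached(path, loader):
    try:
        return _cache[path]
    except KeyError:
        return _cache.setdefault(path, loader(path))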
- """ - symbol = grammar.number2symbol[type] - try: - new_node = Parser.AST_MAPPING[symbol](children) - except KeyError: - new_node = pt.Node(symbol, children) - - # We need to check raw_node always, because the same node can be - # returned by convert multiple times. - if symbol == 'global_stmt': - self._global_names += new_node.get_global_names() - elif isinstance(new_node, pt.Lambda): - new_node.names_dict = self._scope_names_stack.pop() - elif isinstance(new_node, (pt.ClassOrFunc, pt.Module)) \ - and symbol in ('funcdef', 'classdef', 'file_input'): - # scope_name_stack handling - scope_names = self._scope_names_stack.pop() - if isinstance(new_node, pt.ClassOrFunc): - n = new_node.name - scope_names[n.value].remove(n) - # Set the func name of the current node - arr = self._scope_names_stack[-1].setdefault(n.value, []) - arr.append(n) - new_node.names_dict = scope_names - elif isinstance(new_node, pt.CompFor): - # The name definitions of comprehenions shouldn't be part of the - # current scope. They are part of the comprehension scope. - for n in new_node.get_defined_names(): - self._scope_names_stack[-1][n.value].remove(n) - return new_node - - def convert_leaf(self, grammar, type, value, prefix, start_pos): - # print('leaf', repr(value), token.tok_name[type]) - if type == tokenize.NAME: - if value in grammar.keywords: - if value in ('def', 'class', 'lambda'): - self._scope_names_stack.append({}) - - return pt.Keyword(self.position_modifier, value, start_pos, prefix) - else: - name = pt.Name(self.position_modifier, value, start_pos, prefix) - # Keep a listing of all used names - arr = self._used_names.setdefault(name.value, []) - arr.append(name) - arr = self._scope_names_stack[-1].setdefault(name.value, []) - arr.append(name) - return name - elif type == STRING: - return pt.String(self.position_modifier, value, start_pos, prefix) - elif type == NUMBER: - return pt.Number(self.position_modifier, value, start_pos, prefix) - elif type == NEWLINE: - return pt.Newline(self.position_modifier, value, start_pos, prefix) - elif type == INDENT: - return pt.Indent(self.position_modifier, value, start_pos, prefix) - elif type == DEDENT: - return pt.Dedent(self.position_modifier, value, start_pos, prefix) - elif type == ENDMARKER: - return pt.EndMarker(self.position_modifier, value, start_pos, prefix) - else: - return pt.Operator(self.position_modifier, value, start_pos, prefix) - - def remove_last_newline(self): - """ - In all of this we need to work with _start_pos, because if we worked - with start_pos, we would need to check the position_modifier as well - (which is accounted for in the start_pos property). - """ - endmarker = self._parsed.children[-1] - # The newline is either in the endmarker as a prefix or the previous - # leaf as a newline token. - prefix = endmarker.prefix - if prefix.endswith('\n'): - endmarker.prefix = prefix = prefix[:-1] - last_end = 0 - if '\n' not in prefix: - # Basically if the last line doesn't end with a newline. we - # have to add the previous line's end_position. - try: - last_end = endmarker.get_previous_leaf().end_pos[1] - except IndexError: - pass - last_line = re.sub('.*\n', '', prefix) - endmarker._start_pos = endmarker._start_pos[0] - 1, last_end + len(last_line) - else: - try: - newline = endmarker.get_previous_leaf() - except IndexError: - return # This means that the parser is empty. - while True: - if newline.value == '': - # Must be a DEDENT, just continue. 
- try: - newline = newline.get_previous_leaf() - except IndexError: - # If there's a statement that fails to be parsed, there - # will be no previous leaf. So just ignore it. - break - elif newline.value != '\n': - # TODO REMOVE, error recovery was simplified. - # This may happen if error correction strikes and removes - # a whole statement including '\n'. - break - else: - newline.value = '' - if self._last_failed_start_pos > newline._start_pos: - # It may be the case that there was a syntax error in a - # function. In that case error correction removes the - # right newline. So we use the previously assigned - # _last_failed_start_pos variable to account for that. - endmarker._start_pos = self._last_failed_start_pos - else: - endmarker._start_pos = newline._start_pos - break - - -class ParserWithRecovery(Parser): - """ - This class is used to parse a Python file, it then divides them into a - class structure of different scopes. - - :param grammar: The grammar object of pgen2. Loaded by load_grammar. - :param source: The codebase for the parser. Must be unicode. - :param module_path: The path of the module in the file system, may be None. - :type module_path: str - """ - def __init__(self, grammar, source, module_path=None, tokenizer=None, - start_parsing=True): - self.syntax_errors = [] - - self._omit_dedent_list = [] - self._indent_counter = 0 - - # TODO do print absolute import detection here. - # try: - # del python_grammar_no_print_statement.keywords["print"] - # except KeyError: - # pass # Doesn't exist in the Python 3 grammar. - - # if self.options["print_function"]: - # python_grammar = pygram.python_grammar_no_print_statement - # else: - super(ParserWithRecovery, self).__init__( - grammar, source, - tokenizer=tokenizer, - start_parsing=start_parsing - ) - if start_parsing: - self.module = self._parsed - self.module.used_names = self._used_names - self.module.path = module_path - self.module.global_names = self._global_names - - def parse(self, tokenizer): - return super(ParserWithRecovery, self).parse(self._tokenize(self._tokenize(tokenizer))) - - def error_recovery(self, grammar, stack, arcs, typ, value, start_pos, prefix, - add_token_callback): - """ - This parser is written in a dynamic way, meaning that this parser - allows using different grammars (even non-Python). However, error - recovery is purely written for Python. - """ - def current_suite(stack): - # For now just discard everything that is not a suite or - # file_input, if we detect an error. - for index, (dfa, state, (type_, nodes)) in reversed(list(enumerate(stack))): - # `suite` can sometimes be only simple_stmt, not stmt. - symbol = grammar.number2symbol[type_] - if symbol == 'file_input': - break - elif symbol == 'suite' and len(nodes) > 1: - # suites without an indent in them get discarded. - break - elif symbol == 'simple_stmt' and len(nodes) > 1: - # simple_stmt can just be turned into a Node, if there are - # enough statements. Ignore the rest after that. 
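# The recovery search above, as a toy scan over (symbol, nodes) pairs: on
# a parse error, walk the pgen stack from the top down to the innermost
# node that can absorb the damage (file_input, or a suite/simple_stmt that
# already has real children) and park the bad tokens there as error nodes.
def find_recovery_index(stack_symbols):
    for index in range(len(stack_symbols) - 1, -1, -1):
        symbol, nodes = stack_symbols[index]
        if symbol == 'file_input':
            return index
        if symbol in ('suite', 'simple_stmt') and len(nodes) > 1:
            return index
    return 0

# find_recovery_index([('file_input', []), ('suite', ['pass', '\n'])]) -> 1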
- break - return index, symbol, nodes - - index, symbol, nodes = current_suite(stack) - if symbol == 'simple_stmt': - index -= 2 - (_, _, (type_, suite_nodes)) = stack[index] - symbol = grammar.number2symbol[type_] - suite_nodes.append(pt.Node(symbol, list(nodes))) - # Remove - nodes[:] = [] - nodes = suite_nodes - stack[index] - - # print('err', token.tok_name[typ], repr(value), start_pos, len(stack), index) - if self._stack_removal(grammar, stack, arcs, index + 1, value, start_pos): - add_token_callback(typ, value, start_pos, prefix) - else: - if typ == INDENT: - # For every deleted INDENT we have to delete a DEDENT as well. - # Otherwise the parser will get into trouble and DEDENT too early. - self._omit_dedent_list.append(self._indent_counter) - else: - error_leaf = pt.ErrorLeaf(self.position_modifier, typ, value, start_pos, prefix) - stack[-1][2][1].append(error_leaf) - - def _stack_removal(self, grammar, stack, arcs, start_index, value, start_pos): - failed_stack = [] - found = False - all_nodes = [] - for dfa, state, (typ, nodes) in stack[start_index:]: - if nodes: - found = True - if found: - symbol = grammar.number2symbol[typ] - failed_stack.append((symbol, nodes)) - all_nodes += nodes - if nodes and nodes[0] in ('def', 'class', 'lambda'): - self._scope_names_stack.pop() - if failed_stack: - stack[start_index - 1][2][1].append(pt.ErrorNode(all_nodes)) - - self._last_failed_start_pos = start_pos - - stack[start_index:] = [] - return failed_stack - - def _tokenize(self, tokenizer): - for typ, value, start_pos, prefix in tokenizer: - # print(tokenize.tok_name[typ], repr(value), start_pos, repr(prefix)) - if typ == DEDENT: - # We need to count indents, because if we just omit any DEDENT, - # we might omit them in the wrong place. - o = self._omit_dedent_list - if o and o[-1] == self._indent_counter: - o.pop() - continue - - self._indent_counter -= 1 - elif typ == INDENT: - self._indent_counter += 1 - - yield typ, value, start_pos, prefix - - def __repr__(self): - return "<%s: %s>" % (type(self).__name__, self.module) diff --git a/pythonFiles/preview/jedi/parser/fast.py b/pythonFiles/preview/jedi/parser/fast.py deleted file mode 100644 index 9c471fd44336..000000000000 --- a/pythonFiles/preview/jedi/parser/fast.py +++ /dev/null @@ -1,621 +0,0 @@ -""" -Basically a parser that is faster, because it tries to parse only parts and if -anything changes, it only reparses the changed parts. But because it's not -finished (and still not working as I want), I won't document it any further. -""" -import re -from itertools import chain - -from jedi._compatibility import use_metaclass -from jedi import settings -from jedi.parser import ParserWithRecovery -from jedi.parser import tree -from jedi.parser.utils import underscore_memoization, parser_cache -from jedi import debug -from jedi.parser.tokenize import (source_tokens, NEWLINE, - ENDMARKER, INDENT, DEDENT) - -FLOWS = 'if', 'else', 'elif', 'while', 'with', 'try', 'except', 'finally', 'for' - - -class FastModule(tree.Module): - type = 'file_input' - - def __init__(self, module_path): - super(FastModule, self).__init__([]) - self.modules = [] - self.reset_caches() - self.names_dict = {} - self.path = module_path - - def reset_caches(self): - self.modules = [] - try: - del self._used_names # Remove the used names cache. - except AttributeError: - pass # It was never used. 
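# An isolated model of the DEDENT filtering in _tokenize above: INDENTs
# that error recovery deletes are remembered by indent level, and the
# matching DEDENT at that level is swallowed later so the parser sees
# balanced indentation. The token stream is reduced to plain strings here.
def filter_dedents(tokens, omit_levels):
    indent = 0
    for tok in tokens:
        if tok == 'INDENT':
            indent += 1
        elif tok == 'DEDENT':
            if omit_levels and omit_levels[-1] == indent:
                omit_levels.pop()
                continue                       # swallow this DEDENT
            indent -= 1
        yield tok

# list(filter_dedents(['INDENT', 'DEDENT', 'DEDENT'], [1]))
# -> ['INDENT', 'DEDENT']   (one DEDENT is dropped to match the deleted INDENT)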
- - @property - @underscore_memoization - def used_names(self): - return MergedNamesDict([m.used_names for m in self.modules]) - - @property - def global_names(self): - return [name for m in self.modules for name in m.global_names] - - @property - def error_statements(self): - return [e for m in self.modules for e in m.error_statements] - - def __repr__(self): - return "" % (type(self).__name__, self.name, - self.start_pos[0], self.end_pos[0]) - - # To avoid issues with with the `parser.ParserWithRecovery`, we need - # setters that do nothing, because if pickle comes along and sets those - # values. - @global_names.setter - def global_names(self, value): - pass - - @error_statements.setter - def error_statements(self, value): - pass - - @used_names.setter - def used_names(self, value): - pass - - -class MergedNamesDict(object): - def __init__(self, dicts): - self.dicts = dicts - - def __iter__(self): - return iter(set(key for dct in self.dicts for key in dct)) - - def __getitem__(self, value): - return list(chain.from_iterable(dct.get(value, []) for dct in self.dicts)) - - def items(self): - dct = {} - for d in self.dicts: - for key, values in d.items(): - try: - dct_values = dct[key] - dct_values += values - except KeyError: - dct[key] = list(values) - return dct.items() - - def values(self): - lst = [] - for dct in self.dicts: - lst += dct.values() - return lst - - -class CachedFastParser(type): - """ This is a metaclass for caching `FastParser`. """ - def __call__(self, grammar, source, module_path=None): - if not settings.fast_parser: - return ParserWithRecovery(grammar, source, module_path) - - pi = parser_cache.get(module_path, None) - if pi is None or isinstance(pi.parser, ParserWithRecovery): - p = super(CachedFastParser, self).__call__(grammar, source, module_path) - else: - p = pi.parser # pi is a `cache.ParserCacheItem` - p.update(source) - return p - - -class ParserNode(object): - def __init__(self, fast_module, parser, source): - self._fast_module = fast_module - self.parent = None - self._node_children = [] - - self.source = source - self.hash = hash(source) - self.parser = parser - if source: - self._end_pos = parser.module.end_pos - else: - self._end_pos = 1, 0 - - try: - # With fast_parser we have either 1 subscope or only statements. - self._content_scope = parser.module.subscopes[0] - # A parsed node's content will be in the first indent, because - # everything that's parsed is within this subscope. - self._is_class_or_def = True - except IndexError: - self._content_scope = parser.module - self._is_class_or_def = False - else: - self._rewrite_last_newline() - - # We need to be able to reset the original children of a parser. - self._old_children = list(self._content_scope.children) - - def is_root_node(self): - return self.parent is None - - def _rewrite_last_newline(self): - """ - The ENDMARKER can contain a newline in the prefix. However this prefix - really belongs to the function - respectively to the next function or - parser node. If we don't rewrite that newline, we end up with a newline - in the wrong position, i.d. at the end of the file instead of in the - middle. - """ - c = self._content_scope.children - if tree.is_node(c[-1], 'suite'): # In a simple_stmt there's no DEDENT. - end_marker = self.parser.module.children[-1] - # Set the DEDENT prefix instead of the ENDMARKER. 
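# MergedNamesDict above, demonstrated standalone: a lookup concatenates
# the per-chunk dicts lazily instead of copying them, which is what lets a
# closed parser node expose one logical names_dict over many sub-parsers.
from itertools import chain

dicts = [{'x': [1]}, {'x': [2], 'y': [3]}]
merged_x = list(chain.from_iterable(d.get('x', []) for d in dicts))
# merged_x == [1, 2]; iterating the keys would yield {'x', 'y'}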
- c[-1].children[-1].prefix = end_marker.prefix - end_marker.prefix = '' - - def __repr__(self): - module = self.parser.module - try: - return '<%s: %s-%s>' % (type(self).__name__, module.start_pos, module.end_pos) - except IndexError: - # There's no module yet. - return '<%s: empty>' % type(self).__name__ - - @property - def end_pos(self): - return self._end_pos[0] + self.parser.position_modifier.line, self._end_pos[1] - - def reset_node(self): - """ - Removes changes that were applied in this class. - """ - self._node_children = [] - scope = self._content_scope - scope.children = list(self._old_children) - try: - # This works if it's a MergedNamesDict. - # We are correcting it, because the MergedNamesDicts are artificial - # and can change after closing a node. - scope.names_dict = scope.names_dict.dicts[0] - except AttributeError: - pass - - def close(self): - """ - Closes the current parser node. This means that after this no further - nodes should be added anymore. - """ - # We only need to replace the dict if multiple dictionaries are used: - if self._node_children: - dcts = [n.parser.module.names_dict for n in self._node_children] - # Need to insert the own node as well. - dcts.insert(0, self._content_scope.names_dict) - self._content_scope.names_dict = MergedNamesDict(dcts) - endmarker = self.parser.get_parsed_node().children[-1] - assert endmarker.type == 'endmarker' - last_parser = self._node_children[-1].parser - endmarker.start_pos = last_parser.get_parsed_node().end_pos - - @property - def _indent(self): - if self.is_root_node(): - return 0 - - return self.parser.module.children[0].start_pos[1] - - def add_node(self, node, start_line, indent): - """ - Adding a node means adding a node that was either just parsed or one - that can be reused. - """ - # Content that is not a subscope can never be part of the current node, - # because it's basically a sister node, that sits next to it and not - # within it. - if (self._indent >= indent or not self._is_class_or_def) and \ - not self.is_root_node(): - self.close() - return self.parent.add_node(node, start_line, indent) - - # Changing the line offsets is very important, because if they don't - # fit, all the start_pos values will be wrong. - m = node.parser.module - node.parser.position_modifier.line = start_line - 1 - self._fast_module.modules.append(m) - node.parent = self - - self._node_children.append(node) - - # Insert parser objects into current structure. We only need to set the - # parents and children in a good way. - scope = self._content_scope - for child in m.children: - child.parent = scope - scope.children.append(child) - - return node - - def all_sub_nodes(self): - """ - Returns all nodes including nested ones. - """ - for n in self._node_children: - yield n - for y in n.all_sub_nodes(): - yield y - - @underscore_memoization # Should only happen once! - def remove_last_newline(self): - self.parser.remove_last_newline() - - -class FastParser(use_metaclass(CachedFastParser)): - _FLOWS_NEED_SPACE = 'if', 'elif', 'while', 'with', 'except', 'for' - _FLOWS_NEED_COLON = 'else', 'try', 'except', 'finally' - _keyword_re = re.compile('^[ \t]*(def |class |@|(?:%s)|(?:%s)\s*:)' - % ('|'.join(_FLOWS_NEED_SPACE), - '|'.join(_FLOWS_NEED_COLON))) - - def __init__(self, grammar, source, module_path=None): - # set values like `tree.Module`. 
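# The cheap re-positioning that add_node above relies on: every cached
# sub-parser carries a position_modifier whose `line` is added to stored
# positions on access, so moving a reused chunk to a new line is one
# assignment instead of a rewrite of every node. A minimal model:
class PositionModifier:
    def __init__(self):
        self.line = 0

class Leaf:
    def __init__(self, pm, line):
        self._pm, self._line = pm, line

    @property
    def start_line(self):
        return self._line + self._pm.line

pm = PositionModifier()
leaf = Leaf(pm, 3)
pm.line = 9          # the whole chunk shifts together
assert leaf.start_line == 12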
- self._grammar = grammar - self.module_path = module_path - self._reset_caches() - self.update(source) - - def _reset_caches(self): - self.module = FastModule(self.module_path) - self.root_node = self.current_node = ParserNode(self.module, self, '') - - def get_parsed_node(self): - return self.module - - def update(self, source): - # Variables for testing purposes: It is important that the number of - # parsers used can be minimized. With these variables we can test - # against that. - self.number_parsers_used = 0 - self.number_of_splits = 0 - self.number_of_misses = 0 - self.module.reset_caches() - self.source = source - try: - self._parse(source) - except: - # FastParser is cached, be careful with exceptions. - self._reset_caches() - raise - - def _split_parts(self, source): - """ - Split the source code into different parts. This makes it possible to - parse each part seperately and therefore cache parts of the file and - not everything. - """ - def gen_part(): - text = ''.join(current_lines) - del current_lines[:] - self.number_of_splits += 1 - return text - - def just_newlines(current_lines): - for line in current_lines: - line = line.lstrip('\t \n\r') - if line and line[0] != '#': - return False - return True - - # Split only new lines. Distinction between \r\n is the tokenizer's - # job. - # It seems like there's no problem with form feed characters here, - # because we're not counting lines. - self._lines = source.splitlines(True) - current_lines = [] - is_decorator = False - # Use -1, because that indent is always smaller than any other. - indent_list = [-1, 0] - new_indent = False - parentheses_level = 0 - flow_indent = None - previous_line = None - # All things within flows are simply being ignored. - for i, l in enumerate(self._lines): - # Handle backslash newline escaping. - if l.endswith('\\\n') or l.endswith('\\\r\n'): - if previous_line is not None: - previous_line += l - else: - previous_line = l - continue - if previous_line is not None: - l = previous_line + l - previous_line = None - - # check for dedents - s = l.lstrip('\t \n\r') - indent = len(l) - len(s) - if not s or s[0] == '#': - current_lines.append(l) # Just ignore comments and blank lines - continue - - if new_indent and not parentheses_level: - if indent > indent_list[-2]: - # Set the actual indent, not just the random old indent + 1. - indent_list[-1] = indent - new_indent = False - - while indent < indent_list[-1]: # -> dedent - indent_list.pop() - # This automatically resets the flow_indent if there was a - # dedent or a flow just on one line (with one simple_stmt). - new_indent = False - if flow_indent is None and current_lines and not parentheses_level: - yield gen_part() - flow_indent = None - - # Check lines for functions/classes and split the code there. - if flow_indent is None: - m = self._keyword_re.match(l) - if m: - # Strip whitespace and colon from flows as a check. 
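# The split heuristic above, exercised directly: _keyword_re fires on
# lines that open a def/class/decorator or a flow statement, and those
# hits are where _split_parts cuts the source into independently cacheable
# chunks. (The tokenizer later redoes this check precisely; the regex is
# only a fast pre-filter.)
import re

FLOWS_NEED_SPACE = 'if', 'elif', 'while', 'with', 'except', 'for'
FLOWS_NEED_COLON = 'else', 'try', 'except', 'finally'
keyword_re = re.compile(r'^[ \t]*(def |class |@|(?:%s)|(?:%s)\s*:)'
                        % ('|'.join(FLOWS_NEED_SPACE),
                           '|'.join(FLOWS_NEED_COLON)))

# keyword_re.match('    def foo():') hits; keyword_re.match('x = 1') does
# not, so plain statements stay inside the current chunk.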
- if m.group(1).strip(' \t\r\n:') in FLOWS: - if not parentheses_level: - flow_indent = indent - else: - if not is_decorator and not just_newlines(current_lines): - yield gen_part() - is_decorator = '@' == m.group(1) - if not is_decorator: - parentheses_level = 0 - # The new indent needs to be higher - indent_list.append(indent + 1) - new_indent = True - elif is_decorator: - is_decorator = False - - parentheses_level = \ - max(0, (l.count('(') + l.count('[') + l.count('{') - - l.count(')') - l.count(']') - l.count('}'))) - - current_lines.append(l) - - if previous_line is not None: - current_lines.append(previous_line) - if current_lines: - yield gen_part() - - def _parse(self, source): - """ :type source: str """ - added_newline = False - if not source or source[-1] != '\n': - # To be compatible with Pythons grammar, we need a newline at the - # end. The parser would handle it, but since the fast parser abuses - # the normal parser in various ways, we need to care for this - # ourselves. - source += '\n' - added_newline = True - - next_code_part_end_line = code_part_end_line = 1 - start = 0 - nodes = list(self.root_node.all_sub_nodes()) - # Now we can reset the node, because we have all the old nodes. - self.root_node.reset_node() - self.current_node = self.root_node - last_end_line = 1 - - for code_part in self._split_parts(source): - next_code_part_end_line += code_part.count('\n') - # If the last code part parsed isn't equal to the current end_pos, - # we know that the parser went further (`def` start in a - # docstring). So just parse the next part. - if code_part_end_line == last_end_line: - self._parse_part(code_part, source[start:], code_part_end_line, nodes) - else: - self.number_of_misses += 1 - # Means that some lines where not fully parsed. Parse it now. - # This is a very rare case. Should only happens with very - # strange code bits. - while last_end_line < next_code_part_end_line: - code_part_end_line = last_end_line - # We could calculate the src in a more complicated way to - # make caching here possible as well. However, this is - # complicated and error-prone. Since this is not very often - # called - just ignore it. - src = ''.join(self._lines[code_part_end_line - 1:]) - self._parse_part(code_part, src, code_part_end_line, nodes) - last_end_line = self.current_node.end_pos[0] - debug.dbg("While parsing %s, starting with line %s wasn't included in split.", - self.module_path, code_part_end_line) - #assert code_part_end_line > last_end_line - # This means that the parser parsed faster than the last given - # `code_part`. - debug.dbg('While parsing %s, line %s slowed down the fast parser.', - self.module_path, code_part_end_line) - - code_part_end_line = next_code_part_end_line - start += len(code_part) - - last_end_line = self.current_node.end_pos[0] - - if added_newline: - self.current_node.remove_last_newline() - - # Now that the for loop is finished, we still want to close all nodes. - node = self.current_node - while node is not None: - node.close() - node = node.parent - - debug.dbg('Parsed %s, with %s parsers in %s splits.' - % (self.module_path, self.number_parsers_used, - self.number_of_splits)) - - def _parse_part(self, source, parser_code, code_part_end_line, nodes): - """ - Side effect: Alters the list of nodes. 
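# The core of the incremental reuse in _parse/_parse_part: each source
# chunk is looked up by hash (plus a full-text compare to rule out
# collisions) among the nodes of the previous run; a hit is re-attached, a
# miss is re-parsed. Toy version over (hash, source, payload) tuples.
def reuse_or_parse(source, old_nodes, parse):
    h = hash(source)
    for node in old_nodes:
        if node[0] == h and node[1] == source:
            old_nodes.remove(node)     # an old node is reusable only once
            return node[2]
    return parse(source)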
- """ - h = hash(source) - for index, node in enumerate(nodes): - if node.hash == h and node.source == source: - node.reset_node() - nodes.remove(node) - parser_code = source - break - else: - tokenizer = FastTokenizer(parser_code) - self.number_parsers_used += 1 - p = ParserWithRecovery(self._grammar, parser_code, self.module_path, tokenizer=tokenizer) - - end = code_part_end_line - 1 + p.module.end_pos[0] - used_lines = self._lines[code_part_end_line - 1:end - 1] - code_part_actually_used = ''.join(used_lines) - - node = ParserNode(self.module, p, code_part_actually_used) - - indent = len(parser_code) - len(parser_code.lstrip('\t ')) - - self.current_node.add_node(node, code_part_end_line, indent) - self.current_node = node - - -class FastTokenizer(object): - """ - Breaks when certain conditions are met, i.e. a new function or class opens. - """ - def __init__(self, source): - self.source = source - self._gen = source_tokens(source, use_exact_op_types=True) - self._closed = False - - # fast parser options - self.current = self.previous = NEWLINE, '', (0, 0) - self._in_flow = False - self._is_decorator = False - self._first_stmt = True - self._parentheses_level = 0 - self._indent_counter = 0 - self._flow_indent_counter = 0 - self._returned_endmarker = False - self._expect_indent = False - - def __iter__(self): - return self - - def next(self): - """ Python 2 Compatibility """ - return self.__next__() - - def __next__(self): - if self._closed: - return self._finish_dedents() - - typ, value, start_pos, prefix = current = next(self._gen) - if typ == ENDMARKER: - self._closed = True - self._returned_endmarker = True - return current - - self.previous = self.current - self.current = current - - if typ == INDENT: - self._indent_counter += 1 - if not self._expect_indent and not self._first_stmt and not self._in_flow: - # This does not mean that there is an actual flow, it means - # that the INDENT is syntactically wrong. - self._flow_indent_counter = self._indent_counter - 1 - self._in_flow = True - self._expect_indent = False - elif typ == DEDENT: - self._indent_counter -= 1 - if self._in_flow: - if self._indent_counter == self._flow_indent_counter: - self._in_flow = False - else: - self._closed = True - return current - - previous_type = self.previous[0] - if value in ('def', 'class') and self._parentheses_level: - # Account for the fact that an open parentheses before a function - # will reset the parentheses counter, but new lines before will - # still be ignored. So check the prefix. - - # TODO what about flow parentheses counter resets in the tokenizer? - self._parentheses_level = 0 - # We need to simulate a newline before the indent, because the - # open parentheses ignored them. - if re.search('\n\s*', prefix): - previous_type = NEWLINE - - # Parentheses ignore the indentation rules. The other three stand for - # new lines. - if previous_type in (NEWLINE, INDENT, DEDENT) \ - and not self._parentheses_level and typ not in (INDENT, DEDENT): - if not self._in_flow: - if value in FLOWS: - self._flow_indent_counter = self._indent_counter - self._first_stmt = False - elif value in ('def', 'class', '@'): - # The values here are exactly the same check as in - # _split_parts, but this time with tokenize and therefore - # precise. 
- if not self._first_stmt and not self._is_decorator: - return self._close() - - self._is_decorator = '@' == value - if not self._is_decorator: - self._first_stmt = False - self._expect_indent = True - elif self._expect_indent: - return self._close() - else: - self._first_stmt = False - - if value in '([{' and value: - self._parentheses_level += 1 - elif value in ')]}' and value: - # Ignore closing parentheses, because they are all - # irrelevant for the indentation. - self._parentheses_level = max(self._parentheses_level - 1, 0) - return current - - def _close(self): - if self._first_stmt: - # Continue like nothing has happened, because we want to enter - # the first class/function. - if self.current[1] != '@': - self._first_stmt = False - return self.current - else: - self._closed = True - return self._finish_dedents() - - def _finish_dedents(self): - if self._indent_counter: - self._indent_counter -= 1 - return DEDENT, '', self.current[2], '' - elif not self._returned_endmarker: - self._returned_endmarker = True - return ENDMARKER, '', self.current[2], self._get_prefix() - else: - raise StopIteration - - def _get_prefix(self): - """ - We're using the current prefix for the endmarker to not loose any - information. However we care about "lost" lines. The prefix of the - current line (indent) will always be included in the current line. - """ - cur = self.current - while cur[0] == DEDENT: - cur = next(self._gen) - prefix = cur[3] - - # \Z for the end of the string. $ is bugged, because it has the - # same behavior with or without re.MULTILINE. - return re.sub(r'[^\n]+\Z', '', prefix) diff --git a/pythonFiles/preview/jedi/parser/pgen2/pgen.py b/pythonFiles/preview/jedi/parser/pgen2/pgen.py deleted file mode 100644 index fa2742dd5dc4..000000000000 --- a/pythonFiles/preview/jedi/parser/pgen2/pgen.py +++ /dev/null @@ -1,394 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -# Modifications: -# Copyright 2014 David Halter. Integration into Jedi. -# Modifications are dual-licensed: MIT and PSF. - -# Pgen imports -from . 
import grammar -from jedi.parser import token -from jedi.parser import tokenize - - -class ParserGenerator(object): - def __init__(self, filename, stream=None): - close_stream = None - if stream is None: - stream = open(filename) - close_stream = stream.close - self.filename = filename - self.stream = stream - self.generator = tokenize.generate_tokens(stream.readline) - self.gettoken() # Initialize lookahead - self.dfas, self.startsymbol = self.parse() - if close_stream is not None: - close_stream() - self.first = {} # map from symbol name to set of tokens - self.addfirstsets() - - def make_grammar(self): - c = grammar.Grammar() - names = list(self.dfas.keys()) - names.sort() - names.remove(self.startsymbol) - names.insert(0, self.startsymbol) - for name in names: - i = 256 + len(c.symbol2number) - c.symbol2number[name] = i - c.number2symbol[i] = name - for name in names: - dfa = self.dfas[name] - states = [] - for state in dfa: - arcs = [] - for label, next in state.arcs.items(): - arcs.append((self.make_label(c, label), dfa.index(next))) - if state.isfinal: - arcs.append((0, dfa.index(state))) - states.append(arcs) - c.states.append(states) - c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name)) - c.start = c.symbol2number[self.startsymbol] - return c - - def make_first(self, c, name): - rawfirst = self.first[name] - first = {} - for label in rawfirst: - ilabel = self.make_label(c, label) - ##assert ilabel not in first # XXX failed on <> ... != - first[ilabel] = 1 - return first - - def make_label(self, c, label): - # XXX Maybe this should be a method on a subclass of converter? - ilabel = len(c.labels) - if label[0].isalpha(): - # Either a symbol name or a named token - if label in c.symbol2number: - # A symbol name (a non-terminal) - if label in c.symbol2label: - return c.symbol2label[label] - else: - c.labels.append((c.symbol2number[label], None)) - c.symbol2label[label] = ilabel - return ilabel - else: - # A named token (NAME, NUMBER, STRING) - itoken = getattr(token, label, None) - assert isinstance(itoken, int), label - assert itoken in token.tok_name, label - if itoken in c.tokens: - return c.tokens[itoken] - else: - c.labels.append((itoken, None)) - c.tokens[itoken] = ilabel - return ilabel - else: - # Either a keyword or an operator - assert label[0] in ('"', "'"), label - value = eval(label) - if value[0].isalpha(): - # A keyword - if value in c.keywords: - return c.keywords[value] - else: - c.labels.append((token.NAME, value)) - c.keywords[value] = ilabel - return ilabel - else: - # An operator (any non-numeric token) - itoken = token.opmap[value] # Fails if unknown token - if itoken in c.tokens: - return c.tokens[itoken] - else: - c.labels.append((itoken, None)) - c.tokens[itoken] = ilabel - return ilabel - - def addfirstsets(self): - names = list(self.dfas.keys()) - names.sort() - for name in names: - if name not in self.first: - self.calcfirst(name) - #print name, self.first[name].keys() - - def calcfirst(self, name): - dfa = self.dfas[name] - self.first[name] = None # dummy to detect left recursion - state = dfa[0] - totalset = {} - overlapcheck = {} - for label, next in state.arcs.items(): - if label in self.dfas: - if label in self.first: - fset = self.first[label] - if fset is None: - raise ValueError("recursion for rule %r" % name) - else: - self.calcfirst(label) - fset = self.first[label] - totalset.update(fset) - overlapcheck[label] = fset - else: - totalset[label] = 1 - overlapcheck[label] = {label: 1} - inverse = {} - for label, itsfirst in 
overlapcheck.items(): - for symbol in itsfirst: - if symbol in inverse: - raise ValueError("rule %s is ambiguous; %s is in the" - " first sets of %s as well as %s" % - (name, symbol, label, inverse[symbol])) - inverse[symbol] = label - self.first[name] = totalset - - def parse(self): - dfas = {} - startsymbol = None - # MSTART: (NEWLINE | RULE)* ENDMARKER - while self.type != token.ENDMARKER: - while self.type == token.NEWLINE: - self.gettoken() - # RULE: NAME ':' RHS NEWLINE - name = self.expect(token.NAME) - self.expect(token.OP, ":") - a, z = self.parse_rhs() - self.expect(token.NEWLINE) - #self.dump_nfa(name, a, z) - dfa = self.make_dfa(a, z) - #self.dump_dfa(name, dfa) - # oldlen = len(dfa) - self.simplify_dfa(dfa) - # newlen = len(dfa) - dfas[name] = dfa - #print name, oldlen, newlen - if startsymbol is None: - startsymbol = name - return dfas, startsymbol - - def make_dfa(self, start, finish): - # To turn an NFA into a DFA, we define the states of the DFA - # to correspond to *sets* of states of the NFA. Then do some - # state reduction. Let's represent sets as dicts with 1 for - # values. - assert isinstance(start, NFAState) - assert isinstance(finish, NFAState) - - def closure(state): - base = {} - addclosure(state, base) - return base - - def addclosure(state, base): - assert isinstance(state, NFAState) - if state in base: - return - base[state] = 1 - for label, next in state.arcs: - if label is None: - addclosure(next, base) - - states = [DFAState(closure(start), finish)] - for state in states: # NB states grows while we're iterating - arcs = {} - for nfastate in state.nfaset: - for label, next in nfastate.arcs: - if label is not None: - addclosure(next, arcs.setdefault(label, {})) - for label, nfaset in arcs.items(): - for st in states: - if st.nfaset == nfaset: - break - else: - st = DFAState(nfaset, finish) - states.append(st) - state.addarc(st, label) - return states # List of DFAState instances; first one is start - - def dump_nfa(self, name, start, finish): - print("Dump of NFA for", name) - todo = [start] - for i, state in enumerate(todo): - print(" State", i, state is finish and "(final)" or "") - for label, next in state.arcs: - if next in todo: - j = todo.index(next) - else: - j = len(todo) - todo.append(next) - if label is None: - print(" -> %d" % j) - else: - print(" %s -> %d" % (label, j)) - - def dump_dfa(self, name, dfa): - print("Dump of DFA for", name) - for i, state in enumerate(dfa): - print(" State", i, state.isfinal and "(final)" or "") - for label, next in state.arcs.items(): - print(" %s -> %d" % (label, dfa.index(next))) - - def simplify_dfa(self, dfa): - # This is not theoretically optimal, but works well enough. - # Algorithm: repeatedly look for two states that have the same - # set of arcs (same labels pointing to the same nodes) and - # unify them, until things stop changing. 
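# make_dfa above is the textbook subset construction over epsilon arcs;
# its key helper in isolation: the closure of an NFA state is everything
# reachable through None-labelled arcs. Sketch over a plain adjacency dict
# of the form {state: [(label, next_state), ...]}.
def closure(state, arcs, base=None):
    base = set() if base is None else base
    if state in base:
        return base
    base.add(state)
    for label, nxt in arcs.get(state, []):
        if label is None:
            closure(nxt, arcs, base)
    return base

# closure('a', {'a': [(None, 'b'), ('x', 'c')], 'b': []}) -> {'a', 'b'}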
- - # dfa is a list of DFAState instances - changes = True - while changes: - changes = False - for i, state_i in enumerate(dfa): - for j in range(i + 1, len(dfa)): - state_j = dfa[j] - if state_i == state_j: - #print " unify", i, j - del dfa[j] - for state in dfa: - state.unifystate(state_j, state_i) - changes = True - break - - def parse_rhs(self): - # RHS: ALT ('|' ALT)* - a, z = self.parse_alt() - if self.value != "|": - return a, z - else: - aa = NFAState() - zz = NFAState() - aa.addarc(a) - z.addarc(zz) - while self.value == "|": - self.gettoken() - a, z = self.parse_alt() - aa.addarc(a) - z.addarc(zz) - return aa, zz - - def parse_alt(self): - # ALT: ITEM+ - a, b = self.parse_item() - while (self.value in ("(", "[") or - self.type in (token.NAME, token.STRING)): - c, d = self.parse_item() - b.addarc(c) - b = d - return a, b - - def parse_item(self): - # ITEM: '[' RHS ']' | ATOM ['+' | '*'] - if self.value == "[": - self.gettoken() - a, z = self.parse_rhs() - self.expect(token.OP, "]") - a.addarc(z) - return a, z - else: - a, z = self.parse_atom() - value = self.value - if value not in ("+", "*"): - return a, z - self.gettoken() - z.addarc(a) - if value == "+": - return a, z - else: - return a, a - - def parse_atom(self): - # ATOM: '(' RHS ')' | NAME | STRING - if self.value == "(": - self.gettoken() - a, z = self.parse_rhs() - self.expect(token.OP, ")") - return a, z - elif self.type in (token.NAME, token.STRING): - a = NFAState() - z = NFAState() - a.addarc(z, self.value) - self.gettoken() - return a, z - else: - self.raise_error("expected (...) or NAME or STRING, got %s/%s", - self.type, self.value) - - def expect(self, type, value=None): - if self.type != type or (value is not None and self.value != value): - self.raise_error("expected %s/%s, got %s/%s", - type, value, self.type, self.value) - value = self.value - self.gettoken() - return value - - def gettoken(self): - tup = next(self.generator) - while tup[0] in (token.COMMENT, token.NL): - tup = next(self.generator) - self.type, self.value, self.begin, prefix = tup - #print tokenize.tok_name[self.type], repr(self.value) - - def raise_error(self, msg, *args): - if args: - try: - msg = msg % args - except: - msg = " ".join([msg] + list(map(str, args))) - line = open(self.filename).readlines()[self.begin[0]] - raise SyntaxError(msg, (self.filename, self.begin[0], - self.begin[1], line)) - - -class NFAState(object): - def __init__(self): - self.arcs = [] # list of (label, NFAState) pairs - - def addarc(self, next, label=None): - assert label is None or isinstance(label, str) - assert isinstance(next, NFAState) - self.arcs.append((label, next)) - - -class DFAState(object): - def __init__(self, nfaset, final): - assert isinstance(nfaset, dict) - assert isinstance(next(iter(nfaset)), NFAState) - assert isinstance(final, NFAState) - self.nfaset = nfaset - self.isfinal = final in nfaset - self.arcs = {} # map from label to DFAState - - def addarc(self, next, label): - assert isinstance(label, str) - assert label not in self.arcs - assert isinstance(next, DFAState) - self.arcs[label] = next - - def unifystate(self, old, new): - for label, next in self.arcs.items(): - if next is old: - self.arcs[label] = new - - def __eq__(self, other): - # Equality test -- ignore the nfaset instance variable - assert isinstance(other, DFAState) - if self.isfinal != other.isfinal: - return False - # Can't just return self.arcs == other.arcs, because that - # would invoke this method recursively, with cycles... 
- if len(self.arcs) != len(other.arcs): - return False - for label, next in self.arcs.items(): - if next is not other.arcs.get(label): - return False - return True - - __hash__ = None # For Py3 compatibility. - - -def generate_grammar(filename="Grammar.txt"): - p = ParserGenerator(filename) - return p.make_grammar() diff --git a/pythonFiles/preview/jedi/parser/tokenize.py b/pythonFiles/preview/jedi/parser/tokenize.py deleted file mode 100644 index 233c6d877398..000000000000 --- a/pythonFiles/preview/jedi/parser/tokenize.py +++ /dev/null @@ -1,329 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This tokenizer has been copied from the ``tokenize.py`` standard library -tokenizer. The reason was simple: The standard library tokenizer fails -if the indentation is not right. The fast parser of jedi however requires -"wrong" indentation. - -Basically this is a stripped down version of the standard library module, so -you can read the documentation there. Additionally we included some speed and -memory optimizations here. -""" -from __future__ import absolute_import - -import string -import re -from collections import namedtuple -from io import StringIO - -from jedi.parser.token import (tok_name, N_TOKENS, ENDMARKER, STRING, NUMBER, opmap, - NAME, OP, ERRORTOKEN, NEWLINE, INDENT, DEDENT) -from jedi._compatibility import is_py3 - - -cookie_re = re.compile("coding[:=]\s*([-\w.]+)") - - -if is_py3: - # Python 3 has str.isidentifier() to check if a char is a valid identifier - is_identifier = str.isidentifier -else: - namechars = string.ascii_letters + '_' - is_identifier = lambda s: s in namechars - - -COMMENT = N_TOKENS -tok_name[COMMENT] = 'COMMENT' - - -def group(*choices): - return '(' + '|'.join(choices) + ')' - - -def maybe(*choices): - return group(*choices) + '?' - - -# Note: we use unicode matching for names ("\w") but ascii matching for -# number literals. -whitespace = r'[ \f\t]*' -comment = r'#[^\r\n]*' -name = r'\w+' - -hex_number = r'0[xX][0-9a-fA-F]+' -bin_number = r'0[bB][01]+' -if is_py3: - oct_number = r'0[oO][0-7]+' -else: - oct_number = '0[0-7]+' -dec_number = r'(?:0+|[1-9][0-9]*)' -int_number = group(hex_number, bin_number, oct_number, dec_number) -exponent = r'[eE][-+]?[0-9]+' -point_float = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(exponent) -Expfloat = r'[0-9]+' + exponent -float_number = group(point_float, Expfloat) -imag_number = group(r'[0-9]+[jJ]', float_number + r'[jJ]') -number = group(imag_number, float_number, int_number) - -# Tail end of ' string. -single = r"[^'\\]*(?:\\.[^'\\]*)*'" -# Tail end of " string. -double = r'[^"\\]*(?:\\.[^"\\]*)*"' -# Tail end of ''' string. -single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" -# Tail end of """ string. -double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' -triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""') -# Single-line ' or " string. - -# Because of leftmost-then-longest match semantics, be sure to put the -# longest operators first (e.g., if = came before ==, == would get -# recognized as two instances of =). -operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", - r"//=?", r"->", - r"[+\-*@/%&|^=<>]=?", - r"~") - -bracket = '[][(){}]' -special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]') -funny = group(operator, bracket, special) - -# First (or only) line of ' or " string. 
-cont_str = group(r"[bBuU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + - group("'", r'\\\r?\n'), - r'[bBuU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + - group('"', r'\\\r?\n')) -pseudo_extras = group(r'\\\r?\n', comment, triple) -pseudo_token = group(whitespace) + \ - group(pseudo_extras, number, funny, cont_str, name) - - -def _compile(expr): - return re.compile(expr, re.UNICODE) - - -pseudoprog, single3prog, double3prog = map( - _compile, (pseudo_token, single3, double3)) - -endprogs = {"'": _compile(single), '"': _compile(double), - "'''": single3prog, '"""': double3prog, - "r'''": single3prog, 'r"""': double3prog, - "b'''": single3prog, 'b"""': double3prog, - "u'''": single3prog, 'u"""': double3prog, - "R'''": single3prog, 'R"""': double3prog, - "B'''": single3prog, 'B"""': double3prog, - "U'''": single3prog, 'U"""': double3prog, - "br'''": single3prog, 'br"""': double3prog, - "bR'''": single3prog, 'bR"""': double3prog, - "Br'''": single3prog, 'Br"""': double3prog, - "BR'''": single3prog, 'BR"""': double3prog, - "ur'''": single3prog, 'ur"""': double3prog, - "uR'''": single3prog, 'uR"""': double3prog, - "Ur'''": single3prog, 'Ur"""': double3prog, - "UR'''": single3prog, 'UR"""': double3prog, - 'r': None, 'R': None, 'b': None, 'B': None} - -triple_quoted = {} -for t in ("'''", '"""', - "r'''", 'r"""', "R'''", 'R"""', - "b'''", 'b"""', "B'''", 'B"""', - "u'''", 'u"""', "U'''", 'U"""', - "br'''", 'br"""', "Br'''", 'Br"""', - "bR'''", 'bR"""', "BR'''", 'BR"""', - "ur'''", 'ur"""', "Ur'''", 'Ur"""', - "uR'''", 'uR"""', "UR'''", 'UR"""'): - triple_quoted[t] = t -single_quoted = {} -for t in ("'", '"', - "r'", 'r"', "R'", 'R"', - "b'", 'b"', "B'", 'B"', - "u'", 'u"', "U'", 'U"', - "br'", 'br"', "Br'", 'Br"', - "bR'", 'bR"', "BR'", 'BR"', - "ur'", 'ur"', "Ur'", 'Ur"', - "uR'", 'uR"', "UR'", 'UR"'): - single_quoted[t] = t - -del _compile - -tabsize = 8 - -# TODO add with? -ALWAYS_BREAK_TOKENS = (';', 'import', 'class', 'def', 'try', 'except', - 'finally', 'while', 'return') - - -class TokenInfo(namedtuple('Token', ['type', 'string', 'start_pos', 'prefix'])): - def __repr__(self): - annotated_type = tok_name[self.type] - return ('TokenInfo(type=%s, string=%r, start=%r, prefix=%r)' % - self._replace(type=annotated_type)) - - @property - def exact_type(self): - if self.type == OP and self.string in opmap: - return opmap[self.string] - else: - return self.type - - -def source_tokens(source, use_exact_op_types=False): - """Generate tokens from a the source code (string).""" - source = source - readline = StringIO(source).readline - return generate_tokens(readline, use_exact_op_types) - - -def generate_tokens(readline, use_exact_op_types=False): - """ - A heavily modified Python standard library tokenizer. - - Additionally to the default information, yields also the prefix of each - token. This idea comes from lib2to3. The prefix contains all information - that is irrelevant for the parser like newlines in parentheses or comments. - """ - paren_level = 0 # count parentheses - indents = [0] - lnum = 0 - max = 0 - numchars = '0123456789' - contstr = '' - contline = None - # We start with a newline. This makes indent at the first position - # possible. It's not valid Python, but still better than an INDENT in the - # second line (and not in the first). This makes quite a few things in - # Jedi's fast parser possible. 
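The triple_quoted and single_quoted tables above turn string-opener classification into plain lookups instead of more regex work; the tokenizer probes the whole token and then its first two or three characters. A self-contained sketch with tiny versions of the tables (sets here, which is all the membership tests need; the full tables also cover uppercase prefixes):

triple_quoted = {p + q for p in ('', 'r', 'b', 'u', 'br')
                 for q in ("'''", '"""')}
single_quoted = {p + q for p in ('', 'r', 'b', 'u', 'br')
                 for q in ("'", '"')}

def classify_opener(token):
    # Mirrors the checks in generate_tokens: the full token for triple
    # quotes, then the first character or the prefix-plus-quote slices.
    initial = token[0]
    if token in triple_quoted:
        return 'triple'
    if (initial in single_quoted or token[:2] in single_quoted
            or token[:3] in single_quoted):
        return 'single'
    return 'other'

assert classify_opener("'''") == 'triple'
assert classify_opener('br"x"') == 'single'   # matched via token[:3]
assert classify_opener('42') == 'other'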
- new_line = True - prefix = '' # Should never be required, but here for safety - additional_prefix = '' - while True: # loop over lines in stream - line = readline() # readline returns empty when finished. See StringIO - if not line: - if contstr: - yield TokenInfo(ERRORTOKEN, contstr, contstr_start, prefix) - break - - lnum += 1 - pos, max = 0, len(line) - - if contstr: # continued string - endmatch = endprog.match(line) - if endmatch: - pos = endmatch.end(0) - yield TokenInfo(STRING, contstr + line[:pos], contstr_start, prefix) - contstr = '' - contline = None - else: - contstr = contstr + line - contline = contline + line - continue - - while pos < max: - pseudomatch = pseudoprog.match(line, pos) - if not pseudomatch: # scan for tokens - txt = line[pos] - if line[pos] in '"\'': - # If a literal starts but doesn't end the whole rest of the - # line is an error token. - txt = line[pos:] - yield TokenInfo(ERRORTOKEN, txt, (lnum, pos), prefix) - pos += 1 - continue - - prefix = additional_prefix + pseudomatch.group(1) - additional_prefix = '' - start, pos = pseudomatch.span(2) - spos = (lnum, start) - token, initial = line[start:pos], line[start] - - if new_line and initial not in '\r\n#': - new_line = False - if paren_level == 0: - i = 0 - while line[i] == '\f': - i += 1 - start -= 1 - if start > indents[-1]: - yield TokenInfo(INDENT, '', spos, '') - indents.append(start) - while start < indents[-1]: - yield TokenInfo(DEDENT, '', spos, '') - indents.pop() - - if (initial in numchars or # ordinary number - (initial == '.' and token != '.' and token != '...')): - yield TokenInfo(NUMBER, token, spos, prefix) - elif initial in '\r\n': - if not new_line and paren_level == 0: - yield TokenInfo(NEWLINE, token, spos, prefix) - else: - additional_prefix = prefix + token - new_line = True - elif initial == '#': # Comments - assert not token.endswith("\n") - additional_prefix = prefix + token - elif token in triple_quoted: - endprog = endprogs[token] - endmatch = endprog.match(line, pos) - if endmatch: # all on one line - pos = endmatch.end(0) - token = line[start:pos] - yield TokenInfo(STRING, token, spos, prefix) - else: - contstr_start = (lnum, start) # multiple lines - contstr = line[start:] - contline = line - break - elif initial in single_quoted or \ - token[:2] in single_quoted or \ - token[:3] in single_quoted: - if token[-1] == '\n': # continued string - contstr_start = lnum, start - endprog = (endprogs.get(initial) or endprogs.get(token[1]) - or endprogs.get(token[2])) - contstr = line[start:] - contline = line - break - else: # ordinary string - yield TokenInfo(STRING, token, spos, prefix) - elif is_identifier(initial): # ordinary name - if token in ALWAYS_BREAK_TOKENS: - paren_level = 0 - while True: - indent = indents.pop() - if indent > start: - yield TokenInfo(DEDENT, '', spos, '') - else: - indents.append(indent) - break - yield TokenInfo(NAME, token, spos, prefix) - elif initial == '\\' and line[start:] in ('\\\n', '\\\r\n'): # continued stmt - additional_prefix += prefix + line[start:] - break - else: - if token in '([{': - paren_level += 1 - elif token in ')]}': - paren_level -= 1 - - try: - # This check is needed in any case to check if it's a valid - # operator or just some random unicode character. 
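The INDENT/DEDENT handling in the loop above is a textbook indent stack: push and emit INDENT when a logical line starts deeper than the top of the stack, pop and emit DEDENT while it starts shallower. Just that bookkeeping, in isolation (no prefixes, error tokens, or paren tracking):

def indent_events(columns):
    # columns: indentation width of each logical line.
    indents = [0]
    for col in columns:
        if col > indents[-1]:
            indents.append(col)
            yield 'INDENT'
        while col < indents[-1]:
            indents.pop()
            yield 'DEDENT'

assert list(indent_events([0, 4, 8, 4, 0])) == \
    ['INDENT', 'INDENT', 'DEDENT', 'DEDENT']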
- exact_type = opmap[token] - except KeyError: - exact_type = typ = ERRORTOKEN - if use_exact_op_types: - typ = exact_type - else: - typ = OP - yield TokenInfo(typ, token, spos, prefix) - - if new_line or additional_prefix[-1:] == '\n': - end_pos = lnum + 1, 0 - else: - end_pos = lnum, max - # As the last position we just take the maximally possible position. We - # remove -1 for the last new line. - for indent in indents[1:]: - yield TokenInfo(DEDENT, '', end_pos, '') - yield TokenInfo(ENDMARKER, '', end_pos, additional_prefix) diff --git a/pythonFiles/preview/jedi/parser/tree.py b/pythonFiles/preview/jedi/parser/tree.py deleted file mode 100644 index 0033a49cbfde..000000000000 --- a/pythonFiles/preview/jedi/parser/tree.py +++ /dev/null @@ -1,1663 +0,0 @@ -""" -If you know what an abstract syntax tree (AST) is, you'll see that this module -is pretty much that. The classes represent syntax elements like functions and -imports. - -This is the "business logic" part of the parser. There's a lot of logic here -that makes it easier for Jedi (and other libraries to deal with a Python syntax -tree. - -By using `get_code` on a module, you can get back the 1-to-1 representation of -the input given to the parser. This is important if you are using refactoring. - -The easiest way to play with this module is to use :class:`parsing.Parser`. -:attr:`parsing.Parser.module` holds an instance of :class:`Module`: - ->>> from jedi._compatibility import u ->>> from jedi.parser import ParserWithRecovery, load_grammar ->>> parser = ParserWithRecovery(load_grammar(), u('import os'), 'example.py') ->>> submodule = parser.module ->>> submodule - - -Any subclasses of :class:`Scope`, including :class:`Module` has an attribute -:attr:`imports `: - ->>> submodule.imports -[] - -See also :attr:`Scope.subscopes` and :attr:`Scope.statements`. - -For static analysis purposes there exists a method called -``nodes_to_execute`` on all nodes and leaves. It's documented in the static -anaylsis documentation. -""" -import os -import re -from inspect import cleandoc -from itertools import chain -import textwrap -import abc - -from jedi._compatibility import (Python3Method, encoding, is_py3, utf8_repr, - literal_eval, use_metaclass, unicode) -from jedi.parser import token -from jedi.parser.utils import underscore_memoization - - -def is_node(node, *symbol_names): - try: - type = node.type - except AttributeError: - return False - else: - return type in symbol_names - - -class PositionModifier(object): - """A start_pos modifier for the fast parser.""" - def __init__(self): - self.line = 0 - - -zero_position_modifier = PositionModifier() - - -class DocstringMixin(object): - __slots__ = () - - @property - def raw_doc(self): - """ Returns a cleaned version of the docstring token. """ - if isinstance(self, Module): - node = self.children[0] - elif isinstance(self, ClassOrFunc): - node = self.children[self.children.index(':') + 1] - if is_node(node, 'suite'): # Normally a suite - node = node.children[2] # -> NEWLINE INDENT stmt - else: # ExprStmt - simple_stmt = self.parent - c = simple_stmt.parent.children - index = c.index(simple_stmt) - if not index: - return '' - node = c[index - 1] - - if is_node(node, 'simple_stmt'): - node = node.children[0] - - if node.type == 'string': - # TODO We have to check next leaves until there are no new - # leaves anymore that might be part of the docstring. A - # docstring can also look like this: ``'foo' 'bar' - # Returns a literal cleaned version of the ``Token``. 
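The docstring cleanup that raw_doc performs (continuing just below) leans on two standard-library helpers: ast.literal_eval to turn the raw STRING token back into a value, and inspect.cleandoc to normalize its indentation. The core transformation is easy to check in isolation:

from ast import literal_eval
from inspect import cleandoc

# The source text of a docstring token, exactly as the tokenizer sees it.
token_value = '"""\n    First line.\n        Indented detail.\n    """'
cleaned = cleandoc(literal_eval(token_value))
assert cleaned == 'First line.\n    Indented detail.'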
- cleaned = cleandoc(literal_eval(node.value)) - # Since we want the docstr output to be always unicode, just - # force it. - if is_py3 or isinstance(cleaned, unicode): - return cleaned - else: - return unicode(cleaned, 'UTF-8', 'replace') - return '' - - -class Base(object): - """ - This is just here to have an isinstance check, which is also used on - evaluate classes. But since they have sometimes a special type of - delegation, it is important for those classes to override this method. - - I know that there is a chance to do such things with __instancecheck__, but - since Python 2.5 doesn't support it, I decided to do it this way. - """ - __slots__ = () - - def isinstance(self, *cls): - return isinstance(self, cls) - - @Python3Method - def get_parent_until(self, classes=(), reverse=False, - include_current=True): - """ - Searches the parent "chain" until the object is an instance of - classes. If classes is empty return the last parent in the chain - (is without a parent). - """ - if type(classes) not in (tuple, list): - classes = (classes,) - scope = self if include_current else self.parent - while scope.parent is not None: - # TODO why if classes? - if classes and reverse != scope.isinstance(*classes): - break - scope = scope.parent - return scope - - def get_parent_scope(self, include_flows=False): - """ - Returns the underlying scope. - """ - scope = self.parent - while scope is not None: - if include_flows and isinstance(scope, Flow): - return scope - if scope.is_scope(): - break - scope = scope.parent - return scope - - def get_definition(self): - if self.type in ('newline', 'dedent', 'indent', 'endmarker'): - raise ValueError('Cannot get the indentation of whitespace or indentation.') - scope = self - while scope.parent is not None: - parent = scope.parent - if scope.isinstance(Node, Leaf) and parent.type != 'simple_stmt': - if scope.type == 'testlist_comp': - try: - if isinstance(scope.children[1], CompFor): - return scope.children[1] - except IndexError: - pass - scope = parent - else: - break - return scope - - def assignment_indexes(self): - """ - Returns an array of tuple(int, node) of the indexes that are used in - tuple assignments. - - For example if the name is ``y`` in the following code:: - - x, (y, z) = 2, '' - - would result in ``[(1, xyz_node), (0, yz_node)]``. - """ - indexes = [] - node = self.parent - compare = self - while node is not None: - if is_node(node, 'testlist_comp', 'testlist_star_expr', 'exprlist'): - for i, child in enumerate(node.children): - if child == compare: - indexes.insert(0, (int(i / 2), node)) - break - else: - raise LookupError("Couldn't find the assignment.") - elif isinstance(node, (ExprStmt, CompFor)): - break - - compare = node - node = node.parent - return indexes - - def is_scope(self): - # Default is not being a scope. Just inherit from Scope. - return False - - @abc.abstractmethod - def nodes_to_execute(self, last_added=False): - raise NotImplementedError() - - def get_next_sibling(self): - """ - The node immediately following the invocant in their parent's children - list. If the invocant does not have a next sibling, it is None - """ - # Can't use index(); we need to test by identity - for i, child in enumerate(self.parent.children): - if child is self: - try: - return self.parent.children[i + 1] - except IndexError: - return None - - def get_previous_sibling(self): - """ - The node/leaf immediately preceding the invocant in their parent's - children list. If the invocant does not have a previous sibling, it is - None. 
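get_parent_scope above is a plain walk up the parent chain until something scope-like turns up. The same traversal over throwaway stand-in objects (hypothetical classes, only to show the loop; flow handling is omitted):

class FakeNode(object):
    def __init__(self, parent=None, scope=False):
        self.parent = parent
        self._scope = scope

    def is_scope(self):
        return self._scope

def get_parent_scope(node):
    # Same loop shape as Base.get_parent_scope.
    scope = node.parent
    while scope is not None:
        if scope.is_scope():
            break
        scope = scope.parent
    return scope

module = FakeNode(scope=True)      # e.g. a Module
stmt = FakeNode(parent=module)     # a statement inside it
leaf = FakeNode(parent=stmt)       # a leaf inside the statement
assert get_parent_scope(leaf) is module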
- """ - # Can't use index(); we need to test by identity - for i, child in enumerate(self.parent.children): - if child is self: - if i == 0: - return None - return self.parent.children[i - 1] - - def get_previous_leaf(self): - """ - Returns the previous leaf in the parser tree. - Raises an IndexError if it's the first element. - """ - node = self - while True: - c = node.parent.children - i = c.index(node) - if i == 0: - node = node.parent - if node.parent is None: - raise IndexError('Cannot access the previous element of the first one.') - else: - node = c[i - 1] - break - - while True: - try: - node = node.children[-1] - except AttributeError: # A Leaf doesn't have children. - return node - - def get_next_leaf(self): - """ - Returns the previous leaf in the parser tree. - Raises an IndexError if it's the last element. - """ - node = self - while True: - c = node.parent.children - i = c.index(node) - if i == len(c) - 1: - node = node.parent - if node.parent is None: - raise IndexError('Cannot access the next element of the last one.') - else: - node = c[i + 1] - break - - while True: - try: - node = node.children[0] - except AttributeError: # A Leaf doesn't have children. - return node - - -class Leaf(Base): - __slots__ = ('position_modifier', 'value', 'parent', '_start_pos', 'prefix') - - def __init__(self, position_modifier, value, start_pos, prefix=''): - self.position_modifier = position_modifier - self.value = value - self._start_pos = start_pos - self.prefix = prefix - self.parent = None - - @property - def start_pos(self): - return self._start_pos[0] + self.position_modifier.line, self._start_pos[1] - - @start_pos.setter - def start_pos(self, value): - self._start_pos = value[0] - self.position_modifier.line, value[1] - - def get_start_pos_of_prefix(self): - try: - previous_leaf = self - while True: - previous_leaf = previous_leaf.get_previous_leaf() - if previous_leaf.type not in ('indent', 'dedent'): - return previous_leaf.end_pos - except IndexError: - return 1, 0 # It's the first leaf. - - @property - def end_pos(self): - return (self._start_pos[0] + self.position_modifier.line, - self._start_pos[1] + len(self.value)) - - def move(self, line_offset, column_offset): - self._start_pos = (self._start_pos[0] + line_offset, - self._start_pos[1] + column_offset) - - def first_leaf(self): - return self - - def get_code(self, normalized=False, include_prefix=True): - if normalized: - return self.value - if include_prefix: - return self.prefix + self.value - else: - return self.value - - def nodes_to_execute(self, last_added=False): - return [] - - @utf8_repr - def __repr__(self): - return "<%s: %s>" % (type(self).__name__, self.value) - - -class LeafWithNewLines(Leaf): - __slots__ = () - - @property - def end_pos(self): - """ - Literals and whitespace end_pos are more complicated than normal - end_pos, because the containing newlines may change the indexes. 
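get_previous_leaf above climbs until it can step one sibling to the left and then descends along last children; get_next_leaf mirrors it on the right. The same climb-then-descend walk over a toy tree (hypothetical classes, IndexError handling omitted):

class TLeaf:
    def __init__(self, value):
        self.value, self.parent = value, None

class TNode:
    def __init__(self, children):
        self.children, self.parent = children, None
        for c in children:
            c.parent = self

def previous_leaf(node):
    # Climb while we are the first child, then step left and descend.
    while True:
        siblings = node.parent.children
        i = siblings.index(node)
        if i == 0:
            node = node.parent
        else:
            node = siblings[i - 1]
            break
    while hasattr(node, 'children'):
        node = node.children[-1]
    return node

tree = TNode([TNode([TLeaf('a'), TLeaf('b')]), TNode([TLeaf('c')])])
assert previous_leaf(tree.children[1]).value == 'b'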
- """ - end_pos_line, end_pos_col = self.start_pos - lines = self.value.split('\n') - end_pos_line += len(lines) - 1 - # Check for multiline token - if self.start_pos[0] == end_pos_line: - end_pos_col += len(lines[-1]) - else: - end_pos_col = len(lines[-1]) - return end_pos_line, end_pos_col - - @utf8_repr - def __repr__(self): - return "<%s: %r>" % (type(self).__name__, self.value) - - -class EndMarker(Leaf): - __slots__ = () - type = 'endmarker' - - -class Newline(LeafWithNewLines): - """Contains NEWLINE and ENDMARKER tokens.""" - __slots__ = () - type = 'newline' - - @utf8_repr - def __repr__(self): - return "<%s: %s>" % (type(self).__name__, repr(self.value)) - - -class Name(Leaf): - """ - A string. Sometimes it is important to know if the string belongs to a name - or not. - """ - type = 'name' - __slots__ = () - - def __str__(self): - return self.value - - def __unicode__(self): - return self.value - - def __repr__(self): - return "<%s: %s@%s,%s>" % (type(self).__name__, self.value, - self.start_pos[0], self.start_pos[1]) - - def is_definition(self): - if self.parent.type in ('power', 'atom_expr'): - # In `self.x = 3` self is not a definition, but x is. - return False - - stmt = self.get_definition() - if stmt.type in ('funcdef', 'classdef', 'file_input', 'param'): - return self == stmt.name - elif stmt.type == 'for_stmt': - return self.start_pos < stmt.children[2].start_pos - elif stmt.type == 'try_stmt': - return self.get_previous_sibling() == 'as' - else: - return stmt.type in ('expr_stmt', 'import_name', 'import_from', - 'comp_for', 'with_stmt') \ - and self in stmt.get_defined_names() - - def nodes_to_execute(self, last_added=False): - if last_added is False: - yield self - - -class Literal(LeafWithNewLines): - __slots__ = () - - def eval(self): - return literal_eval(self.value) - - -class Number(Literal): - type = 'number' - __slots__ = () - - -class String(Literal): - type = 'string' - __slots__ = () - - -class Indent(Leaf): - type = 'indent' - __slots__ = () - - -class Dedent(Leaf): - type = 'dedent' - __slots__ = () - - -class Operator(Leaf): - type = 'operator' - __slots__ = () - - def __str__(self): - return self.value - - def __eq__(self, other): - """ - Make comparisons with strings easy. - Improves the readability of the parser. - """ - if isinstance(other, Operator): - return self is other - else: - return self.value == other - - def __ne__(self, other): - """Python 2 compatibility.""" - return self.value != other - - def __hash__(self): - return hash(self.value) - - -class Keyword(Leaf): - type = 'keyword' - __slots__ = () - - def __eq__(self, other): - """ - Make comparisons with strings easy. - Improves the readability of the parser. - """ - if isinstance(other, Keyword): - return self is other - return self.value == other - - def __ne__(self, other): - """Python 2 compatibility.""" - return not self.__eq__(other) - - def __hash__(self): - return hash(self.value) - - -class BaseNode(Base): - """ - The super class for Scope, Import, Name and Statement. Every object in - the parser tree inherits from this class. - """ - __slots__ = ('children', 'parent') - type = None - - def __init__(self, children): - """ - Initialize :class:`BaseNode`. - - :param children: The module in which this Python object locates. - """ - for c in children: - c.parent = self - self.children = children - self.parent = None - - def move(self, line_offset, column_offset): - """ - Move the Node's start_pos. 
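The end_pos property above has to special-case values that span lines, because the column counter restarts after every newline. The same arithmetic as a free function, with a couple of checks:

def end_pos(start_pos, value):
    line, col = start_pos
    lines = value.split('\n')
    if len(lines) == 1:
        return line, col + len(lines[-1])             # still on the start line
    return line + len(lines) - 1, len(lines[-1])      # column restarts at 0

assert end_pos((3, 4), 'abc') == (3, 7)
assert end_pos((3, 4), '"""x\ny"""') == (4, 4)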
- """ - for c in self.children: - c.move(line_offset, column_offset) - - @property - def start_pos(self): - return self.children[0].start_pos - - def get_start_pos_of_prefix(self): - return self.children[0].get_start_pos_of_prefix() - - @property - def end_pos(self): - return self.children[-1].end_pos - - def get_code(self, normalized=False, include_prefix=True): - # TODO implement normalized (depending on context). - if include_prefix: - return "".join(c.get_code(normalized) for c in self.children) - else: - first = self.children[0].get_code(include_prefix=False) - return first + "".join(c.get_code(normalized) for c in self.children[1:]) - - @Python3Method - def name_for_position(self, position): - for c in self.children: - if isinstance(c, Leaf): - if isinstance(c, Name) and c.start_pos <= position <= c.end_pos: - return c - else: - result = c.name_for_position(position) - if result is not None: - return result - return None - - def get_leaf_for_position(self, position, include_prefixes=False): - for c in self.children: - if include_prefixes: - start_pos = c.get_start_pos_of_prefix() - else: - start_pos = c.start_pos - - if start_pos <= position <= c.end_pos: - try: - return c.get_leaf_for_position(position, include_prefixes) - except AttributeError: - while c.type in ('indent', 'dedent'): - # We'd rather not have indents and dedents as a leaf, - # because they don't contain indentation information. - c = c.get_next_leaf() - return c - - return None - - @Python3Method - def get_statement_for_position(self, pos): - for c in self.children: - if c.start_pos <= pos <= c.end_pos: - if c.type not in ('decorated', 'simple_stmt', 'suite') \ - and not isinstance(c, (Flow, ClassOrFunc)): - return c - else: - try: - return c.get_statement_for_position(pos) - except AttributeError: - pass # Must be a non-scope - return None - - def first_leaf(self): - try: - return self.children[0].first_leaf() - except AttributeError: - return self.children[0] - - def get_next_leaf(self): - """ - Raises an IndexError if it's the last node. (Would be the module) - """ - c = self.parent.children - index = c.index(self) - if index == len(c) - 1: - # TODO WTF? recursion? 
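get_leaf_for_position above descends into whichever child covers the requested position until it bottoms out at a leaf. The same idea over a toy tree (illustrative classes, without the prefix and indent/dedent handling of the real method):

class TinyLeaf:
    def __init__(self, start, end):
        self.start_pos, self.end_pos = start, end

class TinyNode:
    def __init__(self, children):
        self.children = children
        self.start_pos = children[0].start_pos
        self.end_pos = children[-1].end_pos

def leaf_for_position(node, pos):
    for c in node.children:
        if c.start_pos <= pos <= c.end_pos:
            if hasattr(c, 'children'):
                return leaf_for_position(c, pos)
            return c
    return None

tree = TinyNode([TinyLeaf((1, 0), (1, 3)),
                 TinyNode([TinyLeaf((1, 4), (1, 9))])])
assert leaf_for_position(tree, (1, 5)).start_pos == (1, 4)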
- return self.get_next_leaf() - else: - return c[index + 1] - - def last_leaf(self): - try: - return self.children[-1].last_leaf() - except AttributeError: - return self.children[-1] - - def get_following_comment_same_line(self): - """ - returns (as string) any comment that appears on the same line, - after the node, including the # - """ - try: - if self.isinstance(ForStmt): - whitespace = self.children[5].first_leaf().prefix - elif self.isinstance(WithStmt): - whitespace = self.children[3].first_leaf().prefix - else: - whitespace = self.last_leaf().get_next_leaf().prefix - except AttributeError: - return None - except ValueError: - # TODO in some particular cases, the tree doesn't seem to be linked - # correctly - return None - if "#" not in whitespace: - return None - comment = whitespace[whitespace.index("#"):] - if "\r" in comment: - comment = comment[:comment.index("\r")] - if "\n" in comment: - comment = comment[:comment.index("\n")] - return comment - - @utf8_repr - def __repr__(self): - code = self.get_code().replace('\n', ' ').strip() - if not is_py3: - code = code.encode(encoding, 'replace') - return "<%s: %s@%s,%s>" % \ - (type(self).__name__, code, self.start_pos[0], self.start_pos[1]) - - -class Node(BaseNode): - """Concrete implementation for interior nodes.""" - __slots__ = ('type',) - - _IGNORE_EXECUTE_NODES = set([ - 'suite', 'subscriptlist', 'subscript', 'simple_stmt', 'sliceop', - 'testlist_comp', 'dictorsetmaker', 'trailer', 'decorators', - 'decorated', 'arglist', 'argument', 'exprlist', 'testlist', - 'testlist_safe', 'testlist1' - ]) - - def __init__(self, type, children): - """ - Initializer. - - Takes a type constant (a symbol number >= 256), a sequence of - child nodes, and an optional context keyword argument. - - As a side effect, the parent pointers of the children are updated. - """ - super(Node, self).__init__(children) - self.type = type - - def nodes_to_execute(self, last_added=False): - """ - For static analysis. - """ - result = [] - if self.type not in Node._IGNORE_EXECUTE_NODES and not last_added: - result.append(self) - last_added = True - - for child in self.children: - result += child.nodes_to_execute(last_added) - return result - - def __repr__(self): - return "%s(%s, %r)" % (self.__class__.__name__, self.type, self.children) - - -class ErrorNode(BaseNode): - """ - TODO doc - """ - __slots__ = () - type = 'error_node' - - def nodes_to_execute(self, last_added=False): - return [] - - -class ErrorLeaf(LeafWithNewLines): - """ - TODO doc - """ - __slots__ = ('original_type') - type = 'error_leaf' - - def __init__(self, position_modifier, original_type, value, start_pos, prefix=''): - super(ErrorLeaf, self).__init__(position_modifier, value, start_pos, prefix) - self.original_type = original_type - - def __repr__(self): - token_type = token.tok_name[self.original_type] - return "<%s: %s, %s)>" % (type(self).__name__, token_type, self.start_pos) - - -class IsScopeMeta(type): - def __instancecheck__(self, other): - return other.is_scope() - - -class IsScope(use_metaclass(IsScopeMeta)): - pass - - -class Scope(BaseNode, DocstringMixin): - """ - Super class for the parser tree, which represents the state of a python - text file. - A Scope manages and owns its subscopes, which are classes and functions, as - well as variables and imports. It is used to access the structure of python - files. - - :param start_pos: The position (line and column) of the scope. 
- :type start_pos: tuple(int, int) - """ - __slots__ = ('names_dict',) - - def __init__(self, children): - super(Scope, self).__init__(children) - - @property - def returns(self): - # Needed here for fast_parser, because the fast_parser splits and - # returns will be in "normal" modules. - return self._search_in_scope(ReturnStmt) - - @property - def subscopes(self): - return self._search_in_scope(Scope) - - @property - def flows(self): - return self._search_in_scope(Flow) - - @property - def imports(self): - return self._search_in_scope(Import) - - @Python3Method - def _search_in_scope(self, typ): - def scan(children): - elements = [] - for element in children: - if isinstance(element, typ): - elements.append(element) - if is_node(element, 'suite', 'simple_stmt', 'decorated') \ - or isinstance(element, Flow): - elements += scan(element.children) - return elements - - return scan(self.children) - - @property - def statements(self): - return self._search_in_scope((ExprStmt, KeywordStatement)) - - def is_scope(self): - return True - - def __repr__(self): - try: - name = self.path - except AttributeError: - try: - name = self.name - except AttributeError: - name = self.command - - return "<%s: %s@%s-%s>" % (type(self).__name__, name, - self.start_pos[0], self.end_pos[0]) - - def walk(self): - yield self - for s in self.subscopes: - for scope in s.walk(): - yield scope - - for r in self.statements: - while isinstance(r, Flow): - for scope in r.walk(): - yield scope - r = r.next - - -class Module(Scope): - """ - The top scope, which is always a module. - Depending on the underlying parser this may be a full module or just a part - of a module. - """ - __slots__ = ('path', 'global_names', 'used_names', '_name') - type = 'file_input' - - def __init__(self, children): - """ - Initialize :class:`Module`. - - :type path: str - :arg path: File path to this module. - - .. todo:: Document `top_module`. - """ - super(Module, self).__init__(children) - self.path = None # Set later. - - @property - @underscore_memoization - def name(self): - """ This is used for the goto functions. """ - if self.path is None: - string = '' # no path -> empty name - else: - sep = (re.escape(os.path.sep),) * 2 - r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self.path) - # Remove PEP 3149 names - string = re.sub('\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1)) - # Positions are not real, but a module starts at (1, 0) - p = (1, 0) - name = Name(zero_position_modifier, string, p) - name.parent = self - return name - - @property - def has_explicit_absolute_import(self): - """ - Checks if imports in this module are explicitly absolute, i.e. there - is a ``__future__`` import. - """ - # TODO this is a strange scan and not fully correct. I think Python's - # parser does it in a different way and scans for the first - # statement/import with a tokenizer (to check for syntax changes like - # the future print statement). - for imp in self.imports: - if imp.type == 'import_from' and imp.level == 0: - for path in imp.paths(): - if [str(name) for name in path] == ['__future__', 'absolute_import']: - return True - return False - - def nodes_to_execute(self, last_added=False): - # Yield itself, class needs to be executed for decorator checks. 
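Module.name above recovers a module name from its file path with two regexes: the first peels off a trailing __init__ and the extension, the second strips PEP 3149 ABI tags such as .cpython-36m. The same two steps in isolation (a Unix-style separator is hard-coded for the demo):

import re

def module_name_from_path(path, sep='/'):
    two = (re.escape(sep),) * 2
    m = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % two, path)
    # Strip PEP 3149 tags such as ".cpython-36m".
    return re.sub(r'\.[a-z]+-\d{2}[mud]{0,3}$', '', m.group(1))

assert module_name_from_path('/lib/foo/__init__.py') == 'foo'
assert module_name_from_path('/lib/bar.cpython-36m.so') == 'bar'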
- result = [] - for child in self.children: - result += child.nodes_to_execute() - return result - - -class Decorator(BaseNode): - type = 'decorator' - __slots__ = () - - def nodes_to_execute(self, last_added=False): - if self.children[-2] == ')': - node = self.children[-3] - if node != '(': - return node.nodes_to_execute() - return [] - - -class ClassOrFunc(Scope): - __slots__ = () - - @property - def name(self): - return self.children[1] - - def get_decorators(self): - decorated = self.parent - if is_node(decorated, 'decorated'): - if is_node(decorated.children[0], 'decorators'): - return decorated.children[0].children - else: - return decorated.children[:1] - else: - return [] - - -class Class(ClassOrFunc): - """ - Used to store the parsed contents of a python class. - - :param name: The Class name. - :type name: str - :param supers: The super classes of a Class. - :type supers: list - :param start_pos: The start position (line, column) of the class. - :type start_pos: tuple(int, int) - """ - type = 'classdef' - __slots__ = () - - def __init__(self, children): - super(Class, self).__init__(children) - - def get_super_arglist(self): - if self.children[2] != '(': # Has no parentheses - return None - else: - if self.children[3] == ')': # Empty parentheses - return None - else: - return self.children[3] - - @property - def doc(self): - """ - Return a document string including call signature of __init__. - """ - docstr = self.raw_doc - for sub in self.subscopes: - if str(sub.name) == '__init__': - return '%s\n\n%s' % ( - sub.get_call_signature(func_name=self.name), docstr) - return docstr - - def nodes_to_execute(self, last_added=False): - # Yield itself, class needs to be executed for decorator checks. - yield self - # Super arguments. - arglist = self.get_super_arglist() - try: - children = arglist.children - except AttributeError: - if arglist is not None: - for node_to_execute in arglist.nodes_to_execute(): - yield node_to_execute - else: - for argument in children: - if argument.type == 'argument': - # metaclass= or list comprehension or */** - raise NotImplementedError('Metaclasses not implemented') - else: - for node_to_execute in argument.nodes_to_execute(): - yield node_to_execute - - # care for the class suite: - for node in self.children[self.children.index(':'):]: - # This could be easier without the fast parser. But we need to find - # the position of the colon, because everything after it can be a - # part of the class, not just its suite. - for node_to_execute in node.nodes_to_execute(): - yield node_to_execute - - -def _create_params(parent, argslist_list): - """ - `argslist_list` is a list that can contain an argslist as a first item, but - most not. It's basically the items between the parameter brackets (which is - at most one item). - This function modifies the parser structure. It generates `Param` objects - from the normal ast. Those param objects do not exist in a normal ast, but - make the evaluation of the ast tree so much easier. - You could also say that this function replaces the argslist node with a - list of Param objects. - """ - def check_python2_nested_param(node): - """ - Python 2 allows params to look like ``def x(a, (b, c))``, which is - basically a way of unpacking tuples in params. Python 3 has ditched - this behavior. Jedi currently just ignores those constructs. 
- """ - return node.type == 'tfpdef' and node.children[0] == '(' - - try: - first = argslist_list[0] - except IndexError: - return [] - - if first.type in ('name', 'tfpdef'): - if check_python2_nested_param(first): - return [first] - else: - return [Param([first], parent)] - elif first == '*': - return [first] - else: # argslist is a `typedargslist` or a `varargslist`. - children = first.children - new_children = [] - start = 0 - # Start with offset 1, because the end is higher. - for end, child in enumerate(children + [None], 1): - if child is None or child == ',': - param_children = children[start:end] - if param_children: # Could as well be comma and then end. - if check_python2_nested_param(param_children[0]): - new_children += param_children - elif param_children[0] == '*' and param_children[1] == ',': - new_children += param_children - else: - new_children.append(Param(param_children, parent)) - start = end - return new_children - - -class Function(ClassOrFunc): - """ - Used to store the parsed contents of a python function. - - Children: - 0) - 1) - 2) parameter list (including open-paren and close-paren s) - 3) - 4) Node() representing function body - 5) ?? - 6) annotation (if present) - """ - __slots__ = ('listeners',) - type = 'funcdef' - - def __init__(self, children): - super(Function, self).__init__(children) - self.listeners = set() # not used here, but in evaluation. - parameters = self.children[2] # After `def foo` - parameters.children[1:-1] = _create_params(parameters, parameters.children[1:-1]) - - @property - def params(self): - return [p for p in self.children[2].children if p.type == 'param'] - - @property - def name(self): - return self.children[1] # First token after `def` - - @property - def yields(self): - # TODO This is incorrect, yields are also possible in a statement. - return self._search_in_scope(YieldExpr) - - def is_generator(self): - return bool(self.yields) - - def annotation(self): - try: - if self.children[3] == "->": - return self.children[4] - assert self.children[3] == ":" - return None - except IndexError: - return None - - def get_call_signature(self, width=72, func_name=None): - """ - Generate call signature of this function. - - :param width: Fold lines if a line is longer than this value. - :type width: int - :arg func_name: Override function name when given. - :type func_name: str - - :rtype: str - """ - func_name = func_name or self.name - code = unicode(func_name) + self._get_paramlist_code() - return '\n'.join(textwrap.wrap(code, width)) - - def _get_paramlist_code(self): - return self.children[2].get_code() - - @property - def doc(self): - """ Return a document string including call signature. """ - docstr = self.raw_doc - return '%s\n\n%s' % (self.get_call_signature(), docstr) - - def nodes_to_execute(self, last_added=False): - # Yield itself, functions needs to be executed for decorator checks. - yield self - for param in self.params: - if param.default is not None: - yield param.default - # care for the function suite: - for node in self.children[4:]: - # This could be easier without the fast parser. The fast parser - # allows that the 4th position is empty or that there's even a - # fifth element (another function/class). So just scan everything - # after colon. - for node_to_execute in node.nodes_to_execute(): - yield node_to_execute - - -class Lambda(Function): - """ - Lambdas are basically trimmed functions, so give it the same interface. 
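_create_params above walks the flat argslist children and cuts a new Param at every comma, iterating one past the end so the final group is flushed too. The grouping logic on its own, over plain strings standing in for child nodes (hypothetical data, same loop shape, special cases dropped):

def split_params(children):
    groups, start = [], 0
    # Enumerate one past the end so the final group is flushed as well.
    for end, child in enumerate(children + [None], 1):
        if child is None or child == ',':
            group = [c for c in children[start:end] if c != ',']
            if group:
                groups.append(group)
            start = end
    return groups

children = ['a', ',', 'b', '=', '1', ',', '*', 'args']
assert split_params(children) == [['a'], ['b', '=', '1'], ['*', 'args']]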
- - Children: - 0) - *) for each argument x - -2) - -1) Node() representing body - """ - type = 'lambda' - __slots__ = () - - def __init__(self, children): - # We don't want to call the Function constructor, call its parent. - super(Function, self).__init__(children) - self.listeners = set() # not used here, but in evaluation. - lst = self.children[1:-2] # Everything between `lambda` and the `:` operator is a parameter. - self.children[1:-2] = _create_params(self, lst) - - @property - def name(self): - # Borrow the position of the AST node. - return Name(self.children[0].position_modifier, '', self.children[0].start_pos) - - def _get_paramlist_code(self): - return '(' + ''.join(param.get_code() for param in self.params).strip() + ')' - - @property - def params(self): - return self.children[1:-2] - - def is_generator(self): - return False - - def annotation(self): - # lambda functions do not support annotations - return None - - @property - def yields(self): - return [] - - def nodes_to_execute(self, last_added=False): - for param in self.params: - if param.default is not None: - yield param.default - # Care for the lambda test (last child): - for node_to_execute in self.children[-1].nodes_to_execute(): - yield node_to_execute - - def __repr__(self): - return "<%s@%s>" % (self.__class__.__name__, self.start_pos) - - -class Flow(BaseNode): - __slots__ = () - - def nodes_to_execute(self, last_added=False): - for child in self.children: - for node_to_execute in child.nodes_to_execute(): - yield node_to_execute - - -class IfStmt(Flow): - type = 'if_stmt' - __slots__ = () - - def check_nodes(self): - """ - Returns all the `test` nodes that are defined as x, here: - - if x: - pass - elif x: - pass - """ - for i, c in enumerate(self.children): - if c in ('elif', 'if'): - yield self.children[i + 1] - - def node_in_which_check_node(self, node): - """ - Returns the check node (see function above) that a node is contained - in. However if it the node is in the check node itself and not in the - suite return None. - """ - start_pos = node.start_pos - for check_node in reversed(list(self.check_nodes())): - if check_node.start_pos < start_pos: - if start_pos < check_node.end_pos: - return None - # In this case the node is within the check_node itself, - # not in the suite - else: - return check_node - - def node_after_else(self, node): - """ - Checks if a node is defined after `else`. - """ - for c in self.children: - if c == 'else': - if node.start_pos > c.start_pos: - return True - else: - return False - - -class WhileStmt(Flow): - type = 'while_stmt' - __slots__ = () - - -class ForStmt(Flow): - type = 'for_stmt' - __slots__ = () - - def get_input_node(self): - """ - Returns the input node ``y`` from: ``for x in y:``. - """ - return self.children[3] - - def defines_one_name(self): - """ - Returns True if only one name is returned: ``for x in y``. - Returns False if the for loop is more complicated: ``for x, z in y``. - - :returns: bool - """ - return self.children[1].type == 'name' - - -class TryStmt(Flow): - type = 'try_stmt' - __slots__ = () - - def except_clauses(self): - """ - Returns the ``test`` nodes found in ``except_clause`` nodes. - Returns ``[None]`` for except clauses without an exception given. 
- """ - for node in self.children: - if node.type == 'except_clause': - yield node.children[1] - elif node == 'except': - yield None - - def nodes_to_execute(self, last_added=False): - result = [] - for child in self.children[2::3]: - result += child.nodes_to_execute() - for child in self.children[0::3]: - if child.type == 'except_clause': - # Add the test node and ignore the `as NAME` definition. - result += child.children[1].nodes_to_execute() - return result - - -class WithStmt(Flow): - type = 'with_stmt' - __slots__ = () - - def get_defined_names(self): - names = [] - for with_item in self.children[1:-2:2]: - # Check with items for 'as' names. - if is_node(with_item, 'with_item'): - names += _defined_names(with_item.children[2]) - return names - - def node_from_name(self, name): - node = name - while True: - node = node.parent - if is_node(node, 'with_item'): - return node.children[0] - - def nodes_to_execute(self, last_added=False): - result = [] - for child in self.children[1::2]: - if child.type == 'with_item': - # Just ignore the `as EXPR` part - at least for now, because - # most times it's just a name. - child = child.children[0] - result += child.nodes_to_execute() - return result - - -class Import(BaseNode): - __slots__ = () - - def path_for_name(self, name): - try: - # The name may be an alias. If it is, just map it back to the name. - name = self.aliases()[name] - except KeyError: - pass - - for path in self.paths(): - if name in path: - return path[:path.index(name) + 1] - raise ValueError('Name should be defined in the import itself') - - def is_nested(self): - return False # By default, sub classes may overwrite this behavior - - def is_star_import(self): - return self.children[-1] == '*' - - def nodes_to_execute(self, last_added=False): - """ - `nodes_to_execute` works a bit different for imports, because the names - itself cannot directly get resolved (except on itself). - """ - # TODO couldn't we return the names? Would be nicer. - return [self] - - -class ImportFrom(Import): - type = 'import_from' - __slots__ = () - - def get_defined_names(self): - return [alias or name for name, alias in self._as_name_tuples()] - - def aliases(self): - """Mapping from alias to its corresponding name.""" - return dict((alias, name) for name, alias in self._as_name_tuples() - if alias is not None) - - def get_from_names(self): - for n in self.children[1:]: - if n not in ('.', '...'): - break - if is_node(n, 'dotted_name'): # from x.y import - return n.children[::2] - elif n == 'import': # from . import - return [] - else: # from x import - return [n] - - @property - def level(self): - """The level parameter of ``__import__``.""" - level = 0 - for n in self.children[1:]: - if n in ('.', '...'): - level += len(n.value) - else: - break - return level - - def _as_name_tuples(self): - last = self.children[-1] - if last == ')': - last = self.children[-2] - elif last == '*': - return # No names defined directly. - - if is_node(last, 'import_as_names'): - as_names = last.children[::2] - else: - as_names = [last] - for as_name in as_names: - if as_name.type == 'name': - yield as_name, None - else: - yield as_name.children[::2] # yields x, y -> ``x as y`` - - def star_import_name(self): - """ - The last name defined in a star import. - """ - return self.paths()[-1][-1] - - def paths(self): - """ - The import paths defined in an import statement. Typically an array - like this: ``[, ]``. 
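ImportFrom.level above counts leading dots, and it adds len(n.value) rather than 1 per child because the tokenizer folds an ellipsis into a single '...' leaf. A standalone version over plain token strings (illustrative only):

def import_level(children_after_from):
    # e.g. ['...', '.', 'pkg'] for "from ....pkg import x"
    level = 0
    for tok in children_after_from:
        if tok in ('.', '...'):
            level += len(tok)
        else:
            break
    return level

assert import_level(['pkg']) == 0               # from pkg import x
assert import_level(['.', 'pkg']) == 1          # from .pkg import x
assert import_level(['...', '.', 'pkg']) == 4   # from ....pkg import x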
- """ - dotted = self.get_from_names() - - if self.children[-1] == '*': - return [dotted] - return [dotted + [name] for name, alias in self._as_name_tuples()] - - -class ImportName(Import): - """For ``import_name`` nodes. Covers normal imports without ``from``.""" - type = 'import_name' - __slots__ = () - - def get_defined_names(self): - return [alias or path[0] for path, alias in self._dotted_as_names()] - - @property - def level(self): - """The level parameter of ``__import__``.""" - return 0 # Obviously 0 for imports without from. - - def paths(self): - return [path for path, alias in self._dotted_as_names()] - - def _dotted_as_names(self): - """Generator of (list(path), alias) where alias may be None.""" - dotted_as_names = self.children[1] - if is_node(dotted_as_names, 'dotted_as_names'): - as_names = dotted_as_names.children[::2] - else: - as_names = [dotted_as_names] - - for as_name in as_names: - if is_node(as_name, 'dotted_as_name'): - alias = as_name.children[2] - as_name = as_name.children[0] - else: - alias = None - if as_name.type == 'name': - yield [as_name], alias - else: - # dotted_names - yield as_name.children[::2], alias - - def is_nested(self): - """ - This checks for the special case of nested imports, without aliases and - from statement:: - - import foo.bar - """ - return [1 for path, alias in self._dotted_as_names() - if alias is None and len(path) > 1] - - def aliases(self): - return dict((alias, path[-1]) for path, alias in self._dotted_as_names() - if alias is not None) - - -class KeywordStatement(BaseNode): - """ - For the following statements: `assert`, `del`, `global`, `nonlocal`, - `raise`, `return`, `yield`, `return`, `yield`. - - `pass`, `continue` and `break` are not in there, because they are just - simple keywords and the parser reduces it to a keyword. - """ - __slots__ = () - - @property - def type(self): - """ - Keyword statements start with the keyword and end with `_stmt`. You can - crosscheck this with the Python grammar. - """ - return '%s_stmt' % self.keyword - - @property - def keyword(self): - return self.children[0].value - - def nodes_to_execute(self, last_added=False): - result = [] - for child in self.children: - result += child.nodes_to_execute() - return result - - -class AssertStmt(KeywordStatement): - __slots__ = () - - def assertion(self): - return self.children[1] - - -class GlobalStmt(KeywordStatement): - __slots__ = () - - def get_defined_names(self): - return [] - - def get_global_names(self): - return self.children[1::2] - - def nodes_to_execute(self, last_added=False): - """ - The global keyword allows to define any name. Even if it doesn't - exist. - """ - return [] - - -class ReturnStmt(KeywordStatement): - __slots__ = () - - -class YieldExpr(BaseNode): - __slots__ = () - - @property - def type(self): - return 'yield_expr' - - def nodes_to_execute(self, last_added=False): - if len(self.children) > 1: - return self.children[1].nodes_to_execute() - else: - return [] - - -def _defined_names(current): - """ - A helper function to find the defined names in statements, for loops and - list comprehensions. 
- """ - names = [] - if is_node(current, 'testlist_star_expr', 'testlist_comp', 'exprlist'): - for child in current.children[::2]: - names += _defined_names(child) - elif is_node(current, 'atom', 'star_expr'): - names += _defined_names(current.children[1]) - elif is_node(current, 'power', 'atom_expr'): - if current.children[-2] != '**': # Just if there's no operation - trailer = current.children[-1] - if trailer.children[0] == '.': - names.append(trailer.children[1]) - else: - names.append(current) - return names - - -class ExprStmt(BaseNode, DocstringMixin): - type = 'expr_stmt' - __slots__ = () - - def get_defined_names(self): - names = [] - if self.children[1].type == 'annassign': - names = _defined_names(self.children[0]) - return list(chain.from_iterable( - _defined_names(self.children[i]) - for i in range(0, len(self.children) - 2, 2) - if '=' in self.children[i + 1].value) - ) + names - - def get_rhs(self): - """Returns the right-hand-side of the equals.""" - return self.children[-1] - - def first_operation(self): - """ - Returns `+=`, `=`, etc or None if there is no operation. - """ - try: - return self.children[1] - except IndexError: - return None - - def nodes_to_execute(self, last_added=False): - # I think evaluating the statement (and possibly returned arrays), - # should be enough for static analysis. - result = [self] - for child in self.children: - result += child.nodes_to_execute(last_added=True) - return result - - -class Param(BaseNode): - """ - It's a helper class that makes business logic with params much easier. The - Python grammar defines no ``param`` node. It defines it in a different way - that is not really suited to working with parameters. - """ - type = 'param' - - def __init__(self, children, parent): - super(Param, self).__init__(children) - self.parent = parent - for child in children: - child.parent = self - - @property - def stars(self): - first = self.children[0] - if first in ('*', '**'): - return len(first.value) - return 0 - - @property - def default(self): - try: - return self.children[int(self.children[0] in ('*', '**')) + 2] - except IndexError: - return None - - def annotation(self): - tfpdef = self._tfpdef() - if is_node(tfpdef, 'tfpdef'): - assert tfpdef.children[1] == ":" - assert len(tfpdef.children) == 3 - annotation = tfpdef.children[2] - return annotation - else: - return None - - def _tfpdef(self): - """ - tfpdef: see grammar.txt. 
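Param.default above uses a small index trick: the name sits at offset 0 or 1 depending on a leading * or **, and the default, if present, sits two slots past that, after the '='. The same arithmetic over plain lists standing in for child nodes:

def param_default(children):
    # children like ['x'], ['x', '=', '3'], or ['*', 'args'].
    offset = int(children[0] in ('*', '**'))
    try:
        return children[offset + 2]
    except IndexError:
        return None

assert param_default(['x', '=', '3']) == '3'
assert param_default(['x']) is None
assert param_default(['**', 'kwargs']) is None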
- """ - offset = int(self.children[0] in ('*', '**')) - return self.children[offset] - - @property - def name(self): - if is_node(self._tfpdef(), 'tfpdef'): - return self._tfpdef().children[0] - else: - return self._tfpdef() - - @property - def position_nr(self): - return self.parent.children.index(self) - 1 - - @property - def parent_function(self): - return self.get_parent_until(IsScope) - - def __repr__(self): - default = '' if self.default is None else '=%s' % self.default - return '<%s: %s>' % (type(self).__name__, str(self._tfpdef()) + default) - - -class CompFor(BaseNode): - type = 'comp_for' - __slots__ = () - - def get_comp_fors(self): - yield self - last = self.children[-1] - while True: - if isinstance(last, CompFor): - yield last - elif not is_node(last, 'comp_if'): - break - last = last.children[-1] - - def is_scope(self): - return True - - @property - def names_dict(self): - dct = {} - for name in self.get_defined_names(): - arr = dct.setdefault(name.value, []) - arr.append(name) - return dct - - def names_dicts(self, search_global): - yield self.names_dict - - def get_defined_names(self): - return _defined_names(self.children[1]) - - def nodes_to_execute(self, last_added=False): - last = self.children[-1] - if last.type == 'comp_if': - for node in last.children[-1].nodes_to_execute(): - yield node - last = self.children[-2] - elif last.type == 'comp_for': - for node in last.nodes_to_execute(): - yield node - last = self.children[-2] - for node in last.nodes_to_execute(): - yield node diff --git a/pythonFiles/preview/jedi/parser/utils.py b/pythonFiles/preview/jedi/parser/utils.py deleted file mode 100644 index 9098b095e6d6..000000000000 --- a/pythonFiles/preview/jedi/parser/utils.py +++ /dev/null @@ -1,198 +0,0 @@ -import inspect -import time -import os -import sys -import json -import hashlib -import gc -import shutil -import pickle - -from jedi import settings -from jedi import debug - - -def underscore_memoization(func): - """ - Decorator for methods:: - - class A(object): - def x(self): - if self._x: - self._x = 10 - return self._x - - Becomes:: - - class A(object): - @underscore_memoization - def x(self): - return 10 - - A now has an attribute ``_x`` written by this decorator. - """ - name = '_' + func.__name__ - - def wrapper(self): - try: - return getattr(self, name) - except AttributeError: - result = func(self) - if inspect.isgenerator(result): - result = list(result) - setattr(self, name, result) - return result - - return wrapper - - -# for fast_parser, should not be deleted -parser_cache = {} - - -class ParserCacheItem(object): - def __init__(self, parser, change_time=None): - self.parser = parser - if change_time is None: - change_time = time.time() - self.change_time = change_time - - -def load_parser(path): - """ - Returns the module or None, if it fails. 
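underscore_memoization above caches a method's result in a leading-underscore attribute and materializes generators into lists first. Its docstring already sketches the usage; here is that sketch made runnable (with a local copy of the decorator so the snippet is self-contained):

import inspect

def underscore_memoization(func):   # same shape as the deleted helper
    name = '_' + func.__name__
    def wrapper(self):
        try:
            return getattr(self, name)
        except AttributeError:
            result = func(self)
            if inspect.isgenerator(result):
                result = list(result)
            setattr(self, name, result)
            return result
    return wrapper

class A(object):
    calls = 0

    @underscore_memoization
    def x(self):
        A.calls += 1
        return 10

a = A()
assert (a.x(), a.x()) == (10, 10) and A.calls == 1   # second call is cached
assert a._x == 10   # the attribute written by the decorator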
- """ - p_time = os.path.getmtime(path) if path else None - try: - parser_cache_item = parser_cache[path] - if not path or p_time <= parser_cache_item.change_time: - return parser_cache_item.parser - except KeyError: - if settings.use_filesystem_cache: - return ParserPickling.load_parser(path, p_time) - - -def save_parser(path, parser, pickling=True): - try: - p_time = None if path is None else os.path.getmtime(path) - except OSError: - p_time = None - pickling = False - - item = ParserCacheItem(parser, p_time) - parser_cache[path] = item - if settings.use_filesystem_cache and pickling: - ParserPickling.save_parser(path, item) - - -class ParserPickling(object): - - version = 26 - """ - Version number (integer) for file system cache. - - Increment this number when there are any incompatible changes in - parser representation classes. For example, the following changes - are regarded as incompatible. - - - Class name is changed. - - Class is moved to another module. - - Defined slot of the class is changed. - """ - - def __init__(self): - self.__index = None - self.py_tag = 'cpython-%s%s' % sys.version_info[:2] - """ - Short name for distinguish Python implementations and versions. - - It's like `sys.implementation.cache_tag` but for Python < 3.3 - we generate something similar. See: - http://docs.python.org/3/library/sys.html#sys.implementation - - .. todo:: Detect interpreter (e.g., PyPy). - """ - - def load_parser(self, path, original_changed_time): - try: - pickle_changed_time = self._index[path] - except KeyError: - return None - if original_changed_time is not None \ - and pickle_changed_time < original_changed_time: - # the pickle file is outdated - return None - - with open(self._get_hashed_path(path), 'rb') as f: - try: - gc.disable() - parser_cache_item = pickle.load(f) - finally: - gc.enable() - - debug.dbg('pickle loaded: %s', path) - parser_cache[path] = parser_cache_item - return parser_cache_item.parser - - def save_parser(self, path, parser_cache_item): - self.__index = None - try: - files = self._index - except KeyError: - files = {} - self._index = files - - with open(self._get_hashed_path(path), 'wb') as f: - pickle.dump(parser_cache_item, f, pickle.HIGHEST_PROTOCOL) - files[path] = parser_cache_item.change_time - - self._flush_index() - - @property - def _index(self): - if self.__index is None: - try: - with open(self._get_path('index.json')) as f: - data = json.load(f) - except (IOError, ValueError): - self.__index = {} - else: - # 0 means version is not defined (= always delete cache): - if data.get('version', 0) != self.version: - self.clear_cache() - else: - self.__index = data['index'] - return self.__index - - def _remove_old_modules(self): - # TODO use - change = False - if change: - self._flush_index(self) - self._index # reload index - - def _flush_index(self): - data = {'version': self.version, 'index': self._index} - with open(self._get_path('index.json'), 'w') as f: - json.dump(data, f) - self.__index = None - - def clear_cache(self): - shutil.rmtree(self._cache_directory()) - self.__index = {} - - def _get_hashed_path(self, path): - return self._get_path('%s.pkl' % hashlib.md5(path.encode("utf-8")).hexdigest()) - - def _get_path(self, file): - dir = self._cache_directory() - if not os.path.exists(dir): - os.makedirs(dir) - return os.path.join(dir, file) - - def _cache_directory(self): - return os.path.join(settings.cache_directory, self.py_tag) - - -# is a singleton -ParserPickling = ParserPickling() diff --git a/pythonFiles/preview/jedi/refactoring.py 
b/pythonFiles/preview/jedi/refactoring.py deleted file mode 100644 index a342f08e2b8f..000000000000 --- a/pythonFiles/preview/jedi/refactoring.py +++ /dev/null @@ -1,202 +0,0 @@ -""" -Introduce some basic refactoring functions to |jedi|. This module is still in a -very early development stage and needs much testing and improvement. - -.. warning:: I won't do too much here, but if anyone wants to step in, please - do. Refactoring is none of my priorities - -It uses the |jedi| `API `_ and supports currently the -following functions (sometimes bug-prone): - -- rename -- extract variable -- inline variable -""" -import difflib - -from jedi import common -from jedi.evaluate import helpers -from jedi.parser import tree as pt - - -class Refactoring(object): - def __init__(self, change_dct): - """ - :param change_dct: dict(old_path=(new_path, old_lines, new_lines)) - """ - self.change_dct = change_dct - - def old_files(self): - dct = {} - for old_path, (new_path, old_l, new_l) in self.change_dct.items(): - dct[new_path] = '\n'.join(new_l) - return dct - - def new_files(self): - dct = {} - for old_path, (new_path, old_l, new_l) in self.change_dct.items(): - dct[new_path] = '\n'.join(new_l) - return dct - - def diff(self): - texts = [] - for old_path, (new_path, old_l, new_l) in self.change_dct.items(): - if old_path: - udiff = difflib.unified_diff(old_l, new_l) - else: - udiff = difflib.unified_diff(old_l, new_l, old_path, new_path) - texts.append('\n'.join(udiff)) - return '\n'.join(texts) - - -def rename(script, new_name): - """ The `args` / `kwargs` params are the same as in `api.Script`. - :param operation: The refactoring operation to execute. - :type operation: str - :type source: str - :return: list of changed lines/changed files - """ - return Refactoring(_rename(script.usages(), new_name)) - - -def _rename(names, replace_str): - """ For both rename and inline. """ - order = sorted(names, key=lambda x: (x.module_path, x.line, x.column), - reverse=True) - - def process(path, old_lines, new_lines): - if new_lines is not None: # goto next file, save last - dct[path] = path, old_lines, new_lines - - dct = {} - current_path = object() - new_lines = old_lines = None - for name in order: - if name.in_builtin_module(): - continue - if current_path != name.module_path: - current_path = name.module_path - - process(current_path, old_lines, new_lines) - if current_path is not None: - # None means take the source that is a normal param. - with open(current_path) as f: - source = f.read() - - new_lines = common.splitlines(common.source_to_unicode(source)) - old_lines = new_lines[:] - - nr, indent = name.line, name.column - line = new_lines[nr - 1] - new_lines[nr - 1] = line[:indent] + replace_str + \ - line[indent + len(name.name):] - process(current_path, old_lines, new_lines) - return dct - - -def extract(script, new_name): - """ The `args` / `kwargs` params are the same as in `api.Script`. - :param operation: The refactoring operation to execute. 
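The actual text edit inside _rename above is a single line splice: keep everything before the name's column, insert the replacement, and skip exactly len(name) characters of the old text. In isolation (columns are 0-based, as in the deleted code):

def splice_rename(line, column, old_name, new_name):
    return line[:column] + new_name + line[column + len(old_name):]

line = 'result = old_name + old_name'
# Rename only the occurrence at column 9; the real code processes usages
# one position at a time, right-to-left, so later columns stay valid.
assert splice_rename(line, 9, 'old_name', 'better') == \
    'result = better + old_name'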
- :type operation: str - :type source: str - :return: list of changed lines/changed files - """ - new_lines = common.splitlines(common.source_to_unicode(script.source)) - old_lines = new_lines[:] - - user_stmt = script._parser.user_stmt() - - # TODO care for multiline extracts - dct = {} - if user_stmt: - pos = script._pos - line_index = pos[0] - 1 - arr, index = helpers.array_for_pos(user_stmt, pos) - if arr is not None: - start_pos = arr[index].start_pos - end_pos = arr[index].end_pos - - # take full line if the start line is different from end line - e = end_pos[1] if end_pos[0] == start_pos[0] else None - start_line = new_lines[start_pos[0] - 1] - text = start_line[start_pos[1]:e] - for l in range(start_pos[0], end_pos[0] - 1): - text += '\n' + l - if e is None: - end_line = new_lines[end_pos[0] - 1] - text += '\n' + end_line[:end_pos[1]] - - # remove code from new lines - t = text.lstrip() - del_start = start_pos[1] + len(text) - len(t) - - text = t.rstrip() - del_end = len(t) - len(text) - if e is None: - new_lines[end_pos[0] - 1] = end_line[end_pos[1] - del_end:] - e = len(start_line) - else: - e = e - del_end - start_line = start_line[:del_start] + new_name + start_line[e:] - new_lines[start_pos[0] - 1] = start_line - new_lines[start_pos[0]:end_pos[0] - 1] = [] - - # add parentheses in multiline case - open_brackets = ['(', '[', '{'] - close_brackets = [')', ']', '}'] - if '\n' in text and not (text[0] in open_brackets and text[-1] == - close_brackets[open_brackets.index(text[0])]): - text = '(%s)' % text - - # add new line before statement - indent = user_stmt.start_pos[1] - new = "%s%s = %s" % (' ' * indent, new_name, text) - new_lines.insert(line_index, new) - dct[script.path] = script.path, old_lines, new_lines - return Refactoring(dct) - - -def inline(script): - """ - :type script: api.Script - """ - new_lines = common.splitlines(common.source_to_unicode(script.source)) - - dct = {} - - definitions = script.goto_assignments() - with common.ignored(AssertionError): - assert len(definitions) == 1 - stmt = definitions[0]._definition - usages = script.usages() - inlines = [r for r in usages - if not stmt.start_pos <= (r.line, r.column) <= stmt.end_pos] - inlines = sorted(inlines, key=lambda x: (x.module_path, x.line, x.column), - reverse=True) - expression_list = stmt.expression_list() - # don't allow multiline refactorings for now. - assert stmt.start_pos[0] == stmt.end_pos[0] - index = stmt.start_pos[0] - 1 - - line = new_lines[index] - replace_str = line[expression_list[0].start_pos[1]:stmt.end_pos[1] + 1] - replace_str = replace_str.strip() - # tuples need parentheses - if expression_list and isinstance(expression_list[0], pr.Array): - arr = expression_list[0] - if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1: - replace_str = '(%s)' % replace_str - - # if it's the only assignment, remove the statement - if len(stmt.get_defined_names()) == 1: - line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:] - - dct = _rename(inlines, replace_str) - # remove the empty line - new_lines = dct[script.path][2] - if line.strip(): - new_lines[index] = line - else: - new_lines.pop(index) - - return Refactoring(dct) diff --git a/pythonFiles/preview/jedi/settings.py b/pythonFiles/preview/jedi/settings.py deleted file mode 100644 index 17cf9a0237b6..000000000000 --- a/pythonFiles/preview/jedi/settings.py +++ /dev/null @@ -1,233 +0,0 @@ -""" -This module contains variables with global |jedi| settings. 
To change the -behavior of |jedi|, change the variables defined in :mod:`jedi.settings`. - -Plugins should expose an interface so that the user can adjust the -configuration. - - -Example usage:: - - from jedi import settings - settings.case_insensitive_completion = True - - -Completion output -~~~~~~~~~~~~~~~~~ - -.. autodata:: case_insensitive_completion -.. autodata:: add_dot_after_module -.. autodata:: add_bracket_after_function -.. autodata:: no_completion_duplicates - - -Filesystem cache -~~~~~~~~~~~~~~~~ - -.. autodata:: cache_directory -.. autodata:: use_filesystem_cache - - -Parser -~~~~~~ - -.. autodata:: fast_parser - - -Dynamic stuff -~~~~~~~~~~~~~ - -.. autodata:: dynamic_array_additions -.. autodata:: dynamic_params -.. autodata:: dynamic_params_for_other_modules -.. autodata:: additional_dynamic_modules -.. autodata:: auto_import_modules - - -.. _settings-recursion: - -Recursions -~~~~~~~~~~ - -Recursion settings are important if you don't want extremly -recursive python code to go absolutely crazy. First of there is a -global limit :data:`max_executions`. This limit is important, to set -a maximum amount of time, the completion may use. - -The default values are based on experiments while completing the |jedi| library -itself (inception!). But I don't think there's any other Python library that -uses recursion in a similarly extreme way. These settings make the completion -definitely worse in some cases. But a completion should also be fast. - -.. autodata:: max_until_execution_unique -.. autodata:: max_function_recursion_level -.. autodata:: max_executions_without_builtins -.. autodata:: max_executions -.. autodata:: max_dynamic_params_depth -.. autodata:: scale_call_signatures - - -Caching -~~~~~~~ - -.. autodata:: star_import_cache_validity -.. autodata:: call_signatures_validity - - -""" -import os -import platform - -# ---------------- -# completion output settings -# ---------------- - -case_insensitive_completion = True -""" -The completion is by default case insensitive. -""" - -add_bracket_after_function = False -""" -Adds an opening bracket after a function, because that's normal behaviour. -Removed it again, because in VIM that is not very practical. -""" - -no_completion_duplicates = True -""" -If set, completions with the same name don't appear in the output anymore, -but are in the `same_name_completions` attribute. -""" - -# ---------------- -# Filesystem cache -# ---------------- - -use_filesystem_cache = True -""" -Use filesystem cache to save once parsed files with pickle. -""" - -if platform.system().lower() == 'windows': - _cache_directory = os.path.join(os.getenv('LOCALAPPDATA') or '~', 'Jedi', - 'Jedi') -elif platform.system().lower() == 'darwin': - _cache_directory = os.path.join('~', 'Library', 'Caches', 'Jedi') -else: - _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache', - 'jedi') -cache_directory = os.path.expanduser(_cache_directory) -""" -The path where all the caches can be found. - -On Linux, this defaults to ``~/.cache/jedi/``, on OS X to -``~/Library/Caches/Jedi/`` and on Windows to ``%APPDATA%\\Jedi\\Jedi\\``. -On Linux, if environment variable ``$XDG_CACHE_HOME`` is set, -``$XDG_CACHE_HOME/jedi`` is used instead of the default one. -""" - -# ---------------- -# parser -# ---------------- - -fast_parser = True -""" -Use the fast parser. This means that reparsing is only being done if -something has been changed e.g. to a function. If this happens, only the -function is being reparsed. 
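Since all of these settings are plain module attributes, an embedding application configures jedi by simple assignment before it asks for completions. A small usage sketch; the paths and the `scaled_limits` helper are illustrative only and not part of the public API (jedi applies `scale_call_signatures` internally):

import os
from jedi import settings

settings.case_insensitive_completion = False                           # exact-case matching
settings.cache_directory = os.path.expanduser('~/.myide/jedi-cache')   # illustrative path
settings.use_filesystem_cache = False                                  # or skip pickling entirely

def scaled_limits():
    # Call signatures run on every keystroke, so their recursion budgets are
    # the normal limits scaled down by scale_call_signatures (0.1 by default,
    # see the definitions below).
    f = settings.scale_call_signatures
    return int(settings.max_executions * f), int(settings.max_until_execution_unique * f)

print(scaled_limits())  # with the defaults: (25, 5)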
-""" - -# ---------------- -# dynamic stuff -# ---------------- - -dynamic_array_additions = True -""" -check for `append`, etc. on arrays: [], {}, () as well as list/set calls. -""" - -dynamic_params = True -""" -A dynamic param completion, finds the callees of the function, which define -the params of a function. -""" - -dynamic_params_for_other_modules = True -""" -Do the same for other modules. -""" - -additional_dynamic_modules = [] -""" -Additional modules in which |jedi| checks if statements are to be found. This -is practical for IDEs, that want to administrate their modules themselves. -""" - -dynamic_flow_information = True -""" -Check for `isinstance` and other information to infer a type. -""" - -auto_import_modules = [ - 'hashlib', # setattr -] -""" -Modules that are not analyzed but imported, although they contain Python code. -This improves autocompletion for libraries that use ``setattr`` or -``globals()`` modifications a lot. -""" - -# ---------------- -# recursions -# ---------------- - -max_until_execution_unique = 50 -""" -This limit is probably the most important one, because if this limit is -exceeded, functions can only be one time executed. So new functions will be -executed, complex recursions with the same functions again and again, are -ignored. -""" - -max_function_recursion_level = 5 -""" -`max_function_recursion_level` is more about whether the recursions are -stopped in deepth or in width. The ratio beetween this and -`max_until_execution_unique` is important here. It stops a recursion (after -the number of function calls in the recursion), if it was already used -earlier. -""" - -max_executions_without_builtins = 200 -""" -.. todo:: Document this. -""" - -max_executions = 250 -""" -A maximum amount of time, the completion may use. -""" - -scale_call_signatures = 0.1 -""" -Because call_signatures is normally used on every single key hit, it has -to be faster than a normal completion. This is the factor that is used to -scale `max_executions` and `max_until_execution_unique`: -""" - -# ---------------- -# caching validity (time) -# ---------------- - -star_import_cache_validity = 60.0 -""" -In huge packages like numpy, checking all star imports on every completion -might be slow, therefore we do a star import caching, that lasts a certain -time span (in seconds). -""" - -call_signatures_validity = 3.0 -""" -Finding function calls might be slow (0.1-0.5s). This is not acceptible for -normal writing. Therefore cache it for a short time. -""" diff --git a/pythonFiles/preview/jedi/utils.py b/pythonFiles/preview/jedi/utils.py deleted file mode 100644 index 44f7c0d20e03..000000000000 --- a/pythonFiles/preview/jedi/utils.py +++ /dev/null @@ -1,131 +0,0 @@ -""" -Utilities for end-users. -""" - -from __future__ import absolute_import -import __main__ -from collections import namedtuple -import logging -import traceback -import re -import os -import sys - -from jedi import Interpreter -from jedi.api.helpers import get_on_completion_name -from jedi import common - - -READLINE_DEBUG = False - - -def setup_readline(namespace_module=__main__): - """ - Install Jedi completer to :mod:`readline`. - - This function setups :mod:`readline` to use Jedi in Python interactive - shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically - ``$HOME/.pythonrc.py``), you can add this piece of code:: - - try: - from jedi.utils import setup_readline - setup_readline() - except ImportError: - # Fallback to the stdlib readline completer if it is installed. 
- # Taken from http://docs.python.org/2/library/rlcompleter.html - print("Jedi is not installed, falling back to readline") - try: - import readline - import rlcompleter - readline.parse_and_bind("tab: complete") - except ImportError: - print("Readline is not installed either. No tab completion is enabled.") - - This will fallback to the readline completer if Jedi is not installed. - The readline completer will only complete names in the global namespace, - so for example:: - - ran - - will complete to ``range`` - - with both Jedi and readline, but:: - - range(10).cou - - will show complete to ``range(10).count`` only with Jedi. - - You'll also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to - your shell profile (usually ``.bash_profile`` or ``.profile`` if you use - bash). - - """ - if READLINE_DEBUG: - logging.basicConfig( - filename='/tmp/jedi.log', - filemode='a', - level=logging.DEBUG - ) - - class JediRL(object): - def complete(self, text, state): - """ - This complete stuff is pretty weird, a generator would make - a lot more sense, but probably due to backwards compatibility - this is still the way how it works. - - The only important part is stuff in the ``state == 0`` flow, - everything else has been copied from the ``rlcompleter`` std. - library module. - """ - if state == 0: - sys.path.insert(0, os.getcwd()) - # Calling python doesn't have a path, so add to sys.path. - try: - logging.debug("Start REPL completion: " + repr(text)) - interpreter = Interpreter(text, [namespace_module.__dict__]) - - lines = common.splitlines(text) - position = (len(lines), len(lines[-1])) - name = get_on_completion_name(interpreter._get_module(), lines, position) - before = text[:len(text) - len(name)] - completions = interpreter.completions() - except: - logging.error("REPL Completion error:\n" + traceback.format_exc()) - raise - finally: - sys.path.pop(0) - - self.matches = [before + c.name_with_symbols for c in completions] - try: - return self.matches[state] - except IndexError: - return None - - try: - import readline - except ImportError: - print("Jedi: Module readline not available.") - else: - readline.set_completer(JediRL().complete) - readline.parse_and_bind("tab: complete") - # jedi itself does the case matching - readline.parse_and_bind("set completion-ignore-case on") - # because it's easier to hit the tab just once - readline.parse_and_bind("set show-all-if-unmodified") - readline.parse_and_bind("set show-all-if-ambiguous on") - # don't repeat all the things written in the readline all the time - readline.parse_and_bind("set completion-prefix-display-length 2") - # No delimiters, Jedi handles that. - readline.set_completer_delims('') - - -def version_info(): - """ - Returns a namedtuple of Jedi's version, similar to Python's - ``sys.version_info``. - """ - Version = namedtuple('Version', 'major, minor, micro') - from jedi import __version__ - tupl = re.findall('[a-z]+|\d+', __version__) - return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)]) diff --git a/pythonFiles/release/jedi/__main__.py b/pythonFiles/release/jedi/__main__.py deleted file mode 100755 index f2ee0477695b..000000000000 --- a/pythonFiles/release/jedi/__main__.py +++ /dev/null @@ -1,48 +0,0 @@ -import sys -from os.path import join, dirname, abspath, isdir - - -def _start_linter(): - """ - This is a pre-alpha API. You're not supposed to use it at all, except for - testing. It will very likely change. 
- """ - import jedi - - if '--debug' in sys.argv: - jedi.set_debug_function() - - for path in sys.argv[2:]: - if path.startswith('--'): - continue - if isdir(path): - import fnmatch - import os - - paths = [] - for root, dirnames, filenames in os.walk(path): - for filename in fnmatch.filter(filenames, '*.py'): - paths.append(os.path.join(root, filename)) - else: - paths = [path] - - try: - for path in paths: - for error in jedi.Script(path=path)._analysis(): - print(error) - except Exception: - if '--pdb' in sys.argv: - import traceback - traceback.print_exc() - import pdb - pdb.post_mortem() - else: - raise - - -if len(sys.argv) == 2 and sys.argv[1] == 'repl': - # don't want to use __main__ only for repl yet, maybe we want to use it for - # something else. So just use the keyword ``repl`` for now. - print(join(dirname(abspath(__file__)), 'api', 'replstartup.py')) -elif len(sys.argv) > 1 and sys.argv[1] == 'linter': - _start_linter() diff --git a/pythonFiles/release/jedi/api/replstartup.py b/pythonFiles/release/jedi/api/replstartup.py deleted file mode 100755 index 5bfcc8ce889e..000000000000 --- a/pythonFiles/release/jedi/api/replstartup.py +++ /dev/null @@ -1,27 +0,0 @@ -""" -To use Jedi completion in Python interpreter, add the following in your shell -setup (e.g., ``.bashrc``):: - - export PYTHONSTARTUP="$(python -m jedi repl)" - -Then you will be able to use Jedi completer in your Python interpreter:: - - $ python - Python 2.7.2+ (default, Jul 20 2012, 22:15:08) - [GCC 4.6.1] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import os - >>> os.path.join().split().in # doctest: +SKIP - os.path.join().split().index os.path.join().split().insert - -""" -import jedi.utils -from jedi import __version__ as __jedi_version__ - -print('REPL completion using Jedi %s' % __jedi_version__) -jedi.utils.setup_readline() - -del jedi - -# Note: try not to do many things here, as it will contaminate global -# namespace of the interpreter. 
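For context on the `JediRL.complete` method in utils.py above: readline drives any completer through one protocol, calling `complete(text, state)` with state = 0, 1, 2, ... and collecting each returned string until the completer returns None. A stand-alone sketch of just that contract, assuming the stdlib readline module is available and using a fixed word list instead of jedi:

import readline

class StaticCompleter(object):
    """Completes from a fixed word list via readline's (text, state) protocol."""
    words = ['range', 'raise', 'random']  # illustrative

    def complete(self, text, state):
        if state == 0:
            # First call for this text: compute and cache all matches.
            self.matches = [w for w in self.words if w.startswith(text)]
        try:
            return self.matches[state]  # readline asks for match 0, 1, 2, ...
        except IndexError:
            return None                 # None tells readline to stop asking

readline.set_completer(StaticCompleter().complete)
readline.parse_and_bind('tab: complete')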
diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/_functools.pym b/pythonFiles/release/jedi/evaluate/compiled/fake/_functools.pym deleted file mode 100755 index 909ef03fc3dd..000000000000 --- a/pythonFiles/release/jedi/evaluate/compiled/fake/_functools.pym +++ /dev/null @@ -1,9 +0,0 @@ -class partial(): - def __init__(self, func, *args, **keywords): - self.__func = func - self.__args = args - self.__keywords = keywords - - def __call__(self, *args, **kwargs): - # TODO should be **dict(self.__keywords, **kwargs) - return self.__func(*(self.__args + args), **self.__keywords) diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/_sqlite3.pym b/pythonFiles/release/jedi/evaluate/compiled/fake/_sqlite3.pym deleted file mode 100755 index 2151e652b401..000000000000 --- a/pythonFiles/release/jedi/evaluate/compiled/fake/_sqlite3.pym +++ /dev/null @@ -1,26 +0,0 @@ -def connect(database, timeout=None, isolation_level=None, detect_types=None, factory=None): - return Connection() - - -class Connection(): - def cursor(self): - return Cursor() - - -class Cursor(): - def cursor(self): - return Cursor() - - def fetchone(self): - return Row() - - def fetchmany(self, size=cursor.arraysize): - return [self.fetchone()] - - def fetchall(self): - return [self.fetchone()] - - -class Row(): - def keys(self): - return [''] diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/_sre.pym b/pythonFiles/release/jedi/evaluate/compiled/fake/_sre.pym deleted file mode 100755 index 217be5633982..000000000000 --- a/pythonFiles/release/jedi/evaluate/compiled/fake/_sre.pym +++ /dev/null @@ -1,99 +0,0 @@ -def compile(): - class SRE_Match(): - endpos = int() - lastgroup = int() - lastindex = int() - pos = int() - string = str() - regs = ((int(), int()),) - - def __init__(self, pattern): - self.re = pattern - - def start(self): - return int() - - def end(self): - return int() - - def span(self): - return int(), int() - - def expand(self): - return str() - - def group(self, nr): - return str() - - def groupdict(self): - return {str(): str()} - - def groups(self): - return (str(),) - - class SRE_Pattern(): - flags = int() - groupindex = {} - groups = int() - pattern = str() - - def findall(self, string, pos=None, endpos=None): - """ - findall(string[, pos[, endpos]]) --> list. - Return a list of all non-overlapping matches of pattern in string. - """ - return [str()] - - def finditer(self, string, pos=None, endpos=None): - """ - finditer(string[, pos[, endpos]]) --> iterator. - Return an iterator over all non-overlapping matches for the - RE pattern in string. For each match, the iterator returns a - match object. - """ - yield SRE_Match(self) - - def match(self, string, pos=None, endpos=None): - """ - match(string[, pos[, endpos]]) --> match object or None. - Matches zero or more characters at the beginning of the string - pattern - """ - return SRE_Match(self) - - def scanner(self, string, pos=None, endpos=None): - pass - - def search(self, string, pos=None, endpos=None): - """ - search(string[, pos[, endpos]]) --> match object or None. - Scan through string looking for a match, and return a corresponding - MatchObject instance. Return None if no position in the string matches. - """ - return SRE_Match(self) - - def split(self, string, maxsplit=0]): - """ - split(string[, maxsplit = 0]) --> list. - Split string by the occurrences of pattern. 
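The fake `partial` above ships with a TODO because keyword arguments supplied at call time are dropped: the stored `self.__keywords` dict is forwarded unmerged. A corrected sketch of what that TODO asks for (the shipped fake only needs to be shape-correct for inference, so the bug is harmless there):

class partial(object):
    def __init__(self, func, *args, **keywords):
        self.__func = func
        self.__args = args
        self.__keywords = keywords

    def __call__(self, *args, **kwargs):
        # Merge stored keywords with call-time ones; call-time values win,
        # which is exactly what the TODO in the fake module suggests.
        merged = dict(self.__keywords, **kwargs)
        return self.__func(*(self.__args + args), **merged)

add = partial(lambda a, b, c=0: a + b + c, 1, c=10)
print(add(2))         # 13
print(add(2, c=100))  # 103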
- """ - return [str()] - - def sub(self, repl, string, count=0): - """ - sub(repl, string[, count = 0]) --> newstring - Return the string obtained by replacing the leftmost non-overlapping - occurrences of pattern in string by the replacement repl. - """ - return str() - - def subn(self, repl, string, count=0): - """ - subn(repl, string[, count = 0]) --> (newstring, number of subs) - Return the tuple (new_string, number_of_subs_made) found by replacing - the leftmost non-overlapping occurrences of pattern with the - replacement repl. - """ - return (str(), int()) - - return SRE_Pattern() diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/datetime.pym b/pythonFiles/release/jedi/evaluate/compiled/fake/datetime.pym deleted file mode 100755 index 823ac5b7fd56..000000000000 --- a/pythonFiles/release/jedi/evaluate/compiled/fake/datetime.pym +++ /dev/null @@ -1,4 +0,0 @@ -class datetime(): - @staticmethod - def now(): - return datetime() diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/posix.pym b/pythonFiles/release/jedi/evaluate/compiled/fake/posix.pym deleted file mode 100755 index 4417f7cb0427..000000000000 --- a/pythonFiles/release/jedi/evaluate/compiled/fake/posix.pym +++ /dev/null @@ -1,5 +0,0 @@ -def getcwd(): - return '' - -def getcwdu(): - return '' diff --git a/pythonFiles/release/jedi/evaluate/jedi_typing.py b/pythonFiles/release/jedi/evaluate/jedi_typing.py deleted file mode 100644 index f48a567327eb..000000000000 --- a/pythonFiles/release/jedi/evaluate/jedi_typing.py +++ /dev/null @@ -1,100 +0,0 @@ -""" -This module is not intended to be used in jedi, rather it will be fed to the -jedi-parser to replace classes in the typing module -""" - -try: - from collections import abc -except ImportError: - # python 2 - import collections as abc - - -def factory(typing_name, indextypes): - class Iterable(abc.Iterable): - def __iter__(self): - while True: - yield indextypes[0]() - - class Iterator(Iterable, abc.Iterator): - def next(self): - """ needed for python 2 """ - return self.__next__() - - def __next__(self): - return indextypes[0]() - - class Sequence(abc.Sequence): - def __getitem__(self, index): - return indextypes[0]() - - class MutableSequence(Sequence, abc.MutableSequence): - pass - - class List(MutableSequence, list): - pass - - class Tuple(Sequence, tuple): - def __getitem__(self, index): - if indextypes[1] == Ellipsis: - # https://www.python.org/dev/peps/pep-0484/#the-typing-module - # Tuple[int, ...] 
means a tuple of ints of indetermined length - return indextypes[0]() - else: - return indextypes[index]() - - class AbstractSet(Iterable, abc.Set): - pass - - class MutableSet(AbstractSet, abc.MutableSet): - pass - - class KeysView(Iterable, abc.KeysView): - pass - - class ValuesView(abc.ValuesView): - def __iter__(self): - while True: - yield indextypes[1]() - - class ItemsView(abc.ItemsView): - def __iter__(self): - while True: - yield (indextypes[0](), indextypes[1]()) - - class Mapping(Iterable, abc.Mapping): - def __getitem__(self, item): - return indextypes[1]() - - def keys(self): - return KeysView() - - def values(self): - return ValuesView() - - def items(self): - return ItemsView() - - class MutableMapping(Mapping, abc.MutableMapping): - pass - - class Dict(MutableMapping, dict): - pass - - dct = { - "Sequence": Sequence, - "MutableSequence": MutableSequence, - "List": List, - "Iterable": Iterable, - "Iterator": Iterator, - "AbstractSet": AbstractSet, - "MutableSet": MutableSet, - "Mapping": Mapping, - "MutableMapping": MutableMapping, - "Tuple": Tuple, - "KeysView": KeysView, - "ItemsView": ItemsView, - "ValuesView": ValuesView, - "Dict": Dict, - } - return dct[typing_name] diff --git a/pythonFiles/release/jedi/evaluate/site.py b/pythonFiles/release/jedi/evaluate/site.py deleted file mode 100644 index bf884faefaaf..000000000000 --- a/pythonFiles/release/jedi/evaluate/site.py +++ /dev/null @@ -1,110 +0,0 @@ -"""An adapted copy of relevant site-packages functionality from Python stdlib. - -This file contains some functions related to handling site-packages in Python -with jedi-specific modifications: - -- the functions operate on sys_path argument rather than global sys.path - -- in .pth files "import ..." lines that allow execution of arbitrary code are - skipped to prevent code injection into jedi interpreter - -""" - -# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved - -from __future__ import print_function - -import sys -import os - - -def makepath(*paths): - dir = os.path.join(*paths) - try: - dir = os.path.abspath(dir) - except OSError: - pass - return dir, os.path.normcase(dir) - - -def _init_pathinfo(sys_path): - """Return a set containing all existing directory entries from sys_path""" - d = set() - for dir in sys_path: - try: - if os.path.isdir(dir): - dir, dircase = makepath(dir) - d.add(dircase) - except TypeError: - continue - return d - - -def addpackage(sys_path, sitedir, name, known_paths): - """Process a .pth file within the site-packages directory: - For each line in the file, either combine it with sitedir to a path - and add that to known_paths, or execute it if it starts with 'import '. - """ - if known_paths is None: - known_paths = _init_pathinfo(sys_path) - reset = 1 - else: - reset = 0 - fullname = os.path.join(sitedir, name) - try: - f = open(fullname, "r") - except OSError: - return - with f: - for n, line in enumerate(f): - if line.startswith("#"): - continue - try: - if line.startswith(("import ", "import\t")): - # Change by immerrr: don't evaluate import lines to prevent - # code injection into jedi through pth files. 
- # - # exec(line) - continue - line = line.rstrip() - dir, dircase = makepath(sitedir, line) - if not dircase in known_paths and os.path.exists(dir): - sys_path.append(dir) - known_paths.add(dircase) - except Exception: - print("Error processing line {:d} of {}:\n".format(n+1, fullname), - file=sys.stderr) - import traceback - for record in traceback.format_exception(*sys.exc_info()): - for line in record.splitlines(): - print(' '+line, file=sys.stderr) - print("\nRemainder of file ignored", file=sys.stderr) - break - if reset: - known_paths = None - return known_paths - - -def addsitedir(sys_path, sitedir, known_paths=None): - """Add 'sitedir' argument to sys_path if missing and handle .pth files in - 'sitedir'""" - if known_paths is None: - known_paths = _init_pathinfo(sys_path) - reset = 1 - else: - reset = 0 - sitedir, sitedircase = makepath(sitedir) - if not sitedircase in known_paths: - sys_path.append(sitedir) # Add path component - known_paths.add(sitedircase) - try: - names = os.listdir(sitedir) - except OSError: - return - names = [name for name in names if name.endswith(".pth")] - for name in sorted(names): - addpackage(sys_path, sitedir, name, known_paths) - if reset: - known_paths = None - return known_paths diff --git a/pythonFiles/release/jedi/parser/__init__.py b/pythonFiles/release/jedi/parser/__init__.py deleted file mode 100755 index da4e25580968..000000000000 --- a/pythonFiles/release/jedi/parser/__init__.py +++ /dev/null @@ -1,395 +0,0 @@ -""" -The ``Parser`` tries to convert the available Python code in an easy to read -format, something like an abstract syntax tree. The classes who represent this -tree, are sitting in the :mod:`jedi.parser.tree` module. - -The Python module ``tokenize`` is a very important part in the ``Parser``, -because it splits the code into different words (tokens). Sometimes it looks a -bit messy. Sorry for that! You might ask now: "Why didn't you use the ``ast`` -module for this? Well, ``ast`` does a very good job understanding proper Python -code, but fails to work as soon as there's a single line of broken code. - -There's one important optimization that needs to be known: Statements are not -being parsed completely. ``Statement`` is just a representation of the tokens -within the statement. This lowers memory usage and cpu time and reduces the -complexity of the ``Parser`` (there's another parser sitting inside -``Statement``, which produces ``Array`` and ``Call``). -""" -import os -import re - -from jedi.parser import tree as pt -from jedi.parser import tokenize -from jedi.parser import token -from jedi.parser.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER, - STRING, OP, ERRORTOKEN) -from jedi.parser.pgen2.pgen import generate_grammar -from jedi.parser.pgen2.parse import PgenParser - -OPERATOR_KEYWORDS = 'and', 'for', 'if', 'else', 'in', 'is', 'lambda', 'not', 'or' -# Not used yet. In the future I intend to add something like KeywordStatement -STATEMENT_KEYWORDS = 'assert', 'del', 'global', 'nonlocal', 'raise', \ - 'return', 'yield', 'pass', 'continue', 'break' - - -_loaded_grammars = {} - - -def load_grammar(file='grammar3.4'): - # For now we only support two different Python syntax versions: The latest - # Python 3 and Python 2. This may change. 
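A usage sketch for `addsitedir` above, showing both jedi-specific behaviours at once: it mutates an explicit `sys_path` list instead of the global `sys.path`, and through `addpackage` it appends path lines from `.pth` files while refusing to exec `import ...` lines. This assumes `addsitedir` from the module above is in scope; the directory layout is illustrative:

import os
import tempfile

sys_path = []                    # jedi passes an explicit list around
sitedir = tempfile.mkdtemp()
os.mkdir(os.path.join(sitedir, 'extra'))

with open(os.path.join(sitedir, 'demo.pth'), 'w') as f:
    f.write('extra\n')                          # relative entry -> appended
    f.write('import os; os.system("boom")\n')   # skipped, never executed

addsitedir(sys_path, sitedir)
print(sys_path)   # [sitedir, sitedir/extra], and nothing was executed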
- if file.startswith('grammar3'): - file = 'grammar3.4' - else: - file = 'grammar2.7' - - global _loaded_grammars - path = os.path.join(os.path.dirname(__file__), file) + '.txt' - try: - return _loaded_grammars[path] - except KeyError: - return _loaded_grammars.setdefault(path, generate_grammar(path)) - - -class ErrorStatement(object): - def __init__(self, stack, next_token, position_modifier, next_start_pos): - self.stack = stack - self._position_modifier = position_modifier - self.next_token = next_token - self._next_start_pos = next_start_pos - - @property - def next_start_pos(self): - s = self._next_start_pos - return s[0] + self._position_modifier.line, s[1] - - @property - def first_pos(self): - first_type, nodes = self.stack[0] - return nodes[0].start_pos - - @property - def first_type(self): - first_type, nodes = self.stack[0] - return first_type - - -class ParserSyntaxError(object): - def __init__(self, message, position): - self.message = message - self.position = position - - -class Parser(object): - """ - This class is used to parse a Python file, it then divides them into a - class structure of different scopes. - - :param grammar: The grammar object of pgen2. Loaded by load_grammar. - :param source: The codebase for the parser. Must be unicode. - :param module_path: The path of the module in the file system, may be None. - :type module_path: str - :param top_module: Use this module as a parent instead of `self.module`. - """ - def __init__(self, grammar, source, module_path=None, tokenizer=None): - self._ast_mapping = { - 'expr_stmt': pt.ExprStmt, - 'classdef': pt.Class, - 'funcdef': pt.Function, - 'file_input': pt.Module, - 'import_name': pt.ImportName, - 'import_from': pt.ImportFrom, - 'break_stmt': pt.KeywordStatement, - 'continue_stmt': pt.KeywordStatement, - 'return_stmt': pt.ReturnStmt, - 'raise_stmt': pt.KeywordStatement, - 'yield_expr': pt.YieldExpr, - 'del_stmt': pt.KeywordStatement, - 'pass_stmt': pt.KeywordStatement, - 'global_stmt': pt.GlobalStmt, - 'nonlocal_stmt': pt.KeywordStatement, - 'assert_stmt': pt.AssertStmt, - 'if_stmt': pt.IfStmt, - 'with_stmt': pt.WithStmt, - 'for_stmt': pt.ForStmt, - 'while_stmt': pt.WhileStmt, - 'try_stmt': pt.TryStmt, - 'comp_for': pt.CompFor, - 'decorator': pt.Decorator, - 'lambdef': pt.Lambda, - 'old_lambdef': pt.Lambda, - 'lambdef_nocond': pt.Lambda, - } - - self.syntax_errors = [] - - self._global_names = [] - self._omit_dedent_list = [] - self._indent_counter = 0 - self._last_failed_start_pos = (0, 0) - - # TODO do print absolute import detection here. - #try: - # del python_grammar_no_print_statement.keywords["print"] - #except KeyError: - # pass # Doesn't exist in the Python 3 grammar. - - #if self.options["print_function"]: - # python_grammar = pygram.python_grammar_no_print_statement - #else: - self._used_names = {} - self._scope_names_stack = [{}] - self._error_statement_stacks = [] - - added_newline = False - # The Python grammar needs a newline at the end of each statement. - if not source.endswith('\n'): - source += '\n' - added_newline = True - - # For the fast parser. - self.position_modifier = pt.PositionModifier() - p = PgenParser(grammar, self.convert_node, self.convert_leaf, - self.error_recovery) - tokenizer = tokenizer or tokenize.source_tokens(source) - self.module = p.parse(self._tokenize(tokenizer)) - if self.module.type != 'file_input': - # If there's only one statement, we get back a non-module. 
That's - # not what we want, we want a module, so we add it here: - self.module = self.convert_node(grammar, - grammar.symbol2number['file_input'], - [self.module]) - - if added_newline: - self.remove_last_newline() - self.module.used_names = self._used_names - self.module.path = module_path - self.module.global_names = self._global_names - self.module.error_statement_stacks = self._error_statement_stacks - - def convert_node(self, grammar, type, children): - """ - Convert raw node information to a Node instance. - - This is passed to the parser driver which calls it whenever a reduction of a - grammar rule produces a new complete node, so that the tree is build - strictly bottom-up. - """ - symbol = grammar.number2symbol[type] - try: - new_node = self._ast_mapping[symbol](children) - except KeyError: - new_node = pt.Node(symbol, children) - - # We need to check raw_node always, because the same node can be - # returned by convert multiple times. - if symbol == 'global_stmt': - self._global_names += new_node.get_global_names() - elif isinstance(new_node, pt.Lambda): - new_node.names_dict = self._scope_names_stack.pop() - elif isinstance(new_node, (pt.ClassOrFunc, pt.Module)) \ - and symbol in ('funcdef', 'classdef', 'file_input'): - # scope_name_stack handling - scope_names = self._scope_names_stack.pop() - if isinstance(new_node, pt.ClassOrFunc): - n = new_node.name - scope_names[n.value].remove(n) - # Set the func name of the current node - arr = self._scope_names_stack[-1].setdefault(n.value, []) - arr.append(n) - new_node.names_dict = scope_names - elif isinstance(new_node, pt.CompFor): - # The name definitions of comprehenions shouldn't be part of the - # current scope. They are part of the comprehension scope. - for n in new_node.get_defined_names(): - self._scope_names_stack[-1][n.value].remove(n) - return new_node - - def convert_leaf(self, grammar, type, value, prefix, start_pos): - #print('leaf', value, pytree.type_repr(type)) - if type == tokenize.NAME: - if value in grammar.keywords: - if value in ('def', 'class', 'lambda'): - self._scope_names_stack.append({}) - - return pt.Keyword(self.position_modifier, value, start_pos, prefix) - else: - name = pt.Name(self.position_modifier, value, start_pos, prefix) - # Keep a listing of all used names - arr = self._used_names.setdefault(name.value, []) - arr.append(name) - arr = self._scope_names_stack[-1].setdefault(name.value, []) - arr.append(name) - return name - elif type == STRING: - return pt.String(self.position_modifier, value, start_pos, prefix) - elif type == NUMBER: - return pt.Number(self.position_modifier, value, start_pos, prefix) - elif type in (NEWLINE, ENDMARKER): - return pt.Whitespace(self.position_modifier, value, start_pos, prefix) - else: - return pt.Operator(self.position_modifier, value, start_pos, prefix) - - def error_recovery(self, grammar, stack, typ, value, start_pos, prefix, - add_token_callback): - """ - This parser is written in a dynamic way, meaning that this parser - allows using different grammars (even non-Python). However, error - recovery is purely written for Python. - """ - def current_suite(stack): - # For now just discard everything that is not a suite or - # file_input, if we detect an error. - for index, (dfa, state, (typ, nodes)) in reversed(list(enumerate(stack))): - # `suite` can sometimes be only simple_stmt, not stmt. - symbol = grammar.number2symbol[typ] - if symbol == 'file_input': - break - elif symbol == 'suite' and len(nodes) > 1: - # suites without an indent in them get discarded. 
- break - elif symbol == 'simple_stmt' and len(nodes) > 1: - # simple_stmt can just be turned into a Node, if there are - # enough statements. Ignore the rest after that. - break - return index, symbol, nodes - - index, symbol, nodes = current_suite(stack) - if symbol == 'simple_stmt': - index -= 2 - (_, _, (typ, suite_nodes)) = stack[index] - symbol = grammar.number2symbol[typ] - suite_nodes.append(pt.Node(symbol, list(nodes))) - # Remove - nodes[:] = [] - nodes = suite_nodes - stack[index] - - #print('err', token.tok_name[typ], repr(value), start_pos, len(stack), index) - self._stack_removal(grammar, stack, index + 1, value, start_pos) - if typ == INDENT: - # For every deleted INDENT we have to delete a DEDENT as well. - # Otherwise the parser will get into trouble and DEDENT too early. - self._omit_dedent_list.append(self._indent_counter) - - if value in ('import', 'from', 'class', 'def', 'try', 'while', 'return'): - # Those can always be new statements. - add_token_callback(typ, value, prefix, start_pos) - elif typ == DEDENT and symbol == 'suite': - # Close the current suite, with DEDENT. - # Note that this may cause some suites to not contain any - # statements at all. This is contrary to valid Python syntax. We - # keep incomplete suites in Jedi to be able to complete param names - # or `with ... as foo` names. If we want to use this parser for - # syntax checks, we have to check in a separate turn if suites - # contain statements or not. However, a second check is necessary - # anyway (compile.c does that for Python), because Python's grammar - # doesn't stop you from defining `continue` in a module, etc. - add_token_callback(typ, value, prefix, start_pos) - - def _stack_removal(self, grammar, stack, start_index, value, start_pos): - def clear_names(children): - for c in children: - try: - clear_names(c.children) - except AttributeError: - if isinstance(c, pt.Name): - try: - self._scope_names_stack[-1][c.value].remove(c) - self._used_names[c.value].remove(c) - except ValueError: - pass # This may happen with CompFor. - except KeyError: - pass # this seems to happen for large files - - for dfa, state, node in stack[start_index:]: - clear_names(children=node[1]) - - failed_stack = [] - found = False - for dfa, state, (typ, nodes) in stack[start_index:]: - if nodes: - found = True - if found: - symbol = grammar.number2symbol[typ] - failed_stack.append((symbol, nodes)) - if nodes and nodes[0] in ('def', 'class', 'lambda'): - self._scope_names_stack.pop() - if failed_stack: - err = ErrorStatement(failed_stack, value, self.position_modifier, start_pos) - self._error_statement_stacks.append(err) - - self._last_failed_start_pos = start_pos - - stack[start_index:] = [] - - def _tokenize(self, tokenizer): - for typ, value, start_pos, prefix in tokenizer: - #print(tokenize.tok_name[typ], repr(value), start_pos, repr(prefix)) - if typ == DEDENT: - # We need to count indents, because if we just omit any DEDENT, - # we might omit them in the wrong place. 
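As the comment above says, a DEDENT cannot simply be swallowed: every INDENT deleted during error recovery is recorded in `_omit_dedent_list`, and exactly one matching DEDENT at the same indentation level is skipped later. A stand-alone sketch of that pairing, with tokens simplified to (type, value) tuples:

def drop_flagged_dedents(tokens, omit_list):
    # omit_list holds the indent level at which each INDENT was deleted,
    # mirroring _omit_dedent_list in the method that follows.
    indent_counter = 0
    for typ, value in tokens:
        if typ == 'INDENT':
            indent_counter += 1
        elif typ == 'DEDENT':
            if omit_list and omit_list[-1] == indent_counter:
                omit_list.pop()      # this DEDENT pairs with a deleted INDENT
                continue             # so it must not reach the parser
            indent_counter -= 1
        yield typ, value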
- o = self._omit_dedent_list - if o and o[-1] == self._indent_counter: - o.pop() - continue - - self._indent_counter -= 1 - elif typ == INDENT: - self._indent_counter += 1 - elif typ == ERRORTOKEN: - self._add_syntax_error('Strange token', start_pos) - continue - - if typ == OP: - typ = token.opmap[value] - yield typ, value, prefix, start_pos - - def _add_syntax_error(self, message, position): - self.syntax_errors.append(ParserSyntaxError(message, position)) - - def __repr__(self): - return "<%s: %s>" % (type(self).__name__, self.module) - - def remove_last_newline(self): - """ - In all of this we need to work with _start_pos, because if we worked - with start_pos, we would need to check the position_modifier as well - (which is accounted for in the start_pos property). - """ - endmarker = self.module.children[-1] - # The newline is either in the endmarker as a prefix or the previous - # leaf as a newline token. - if endmarker.prefix.endswith('\n'): - endmarker.prefix = endmarker.prefix[:-1] - last_line = re.sub('.*\n', '', endmarker.prefix) - endmarker._start_pos = endmarker._start_pos[0] - 1, len(last_line) - else: - try: - newline = endmarker.get_previous() - except IndexError: - return # This means that the parser is empty. - while True: - if newline.value == '': - # Must be a DEDENT, just continue. - try: - newline = newline.get_previous() - except IndexError: - # If there's a statement that fails to be parsed, there - # will be no previous leaf. So just ignore it. - break - elif newline.value != '\n': - # This may happen if error correction strikes and removes - # a whole statement including '\n'. - break - else: - newline.value = '' - if self._last_failed_start_pos > newline._start_pos: - # It may be the case that there was a syntax error in a - # function. In that case error correction removes the - # right newline. So we use the previously assigned - # _last_failed_start_pos variable to account for that. - endmarker._start_pos = self._last_failed_start_pos - else: - endmarker._start_pos = newline._start_pos - break diff --git a/pythonFiles/release/jedi/parser/fast.py b/pythonFiles/release/jedi/parser/fast.py deleted file mode 100755 index 35bb85556b22..000000000000 --- a/pythonFiles/release/jedi/parser/fast.py +++ /dev/null @@ -1,580 +0,0 @@ -""" -Basically a parser that is faster, because it tries to parse only parts and if -anything changes, it only reparses the changed parts. But because it's not -finished (and still not working as I want), I won't document it any further. -""" -import re -from itertools import chain - -from jedi._compatibility import use_metaclass -from jedi import settings -from jedi.parser import Parser -from jedi.parser import tree -from jedi import cache -from jedi import debug -from jedi.parser.tokenize import (source_tokens, NEWLINE, - ENDMARKER, INDENT, DEDENT) - -FLOWS = 'if', 'else', 'elif', 'while', 'with', 'try', 'except', 'finally', 'for' - - -class FastModule(tree.Module): - type = 'file_input' - - def __init__(self, module_path): - super(FastModule, self).__init__([]) - self.modules = [] - self.reset_caches() - self.names_dict = {} - self.path = module_path - - def reset_caches(self): - self.modules = [] - try: - del self._used_names # Remove the used names cache. - except AttributeError: - pass # It was never used. 
- - @property - @cache.underscore_memoization - def used_names(self): - return MergedNamesDict([m.used_names for m in self.modules]) - - @property - def global_names(self): - return [name for m in self.modules for name in m.global_names] - - @property - def error_statement_stacks(self): - return [e for m in self.modules for e in m.error_statement_stacks] - - def __repr__(self): - return "<fast.%s: %s@%s-%s>" % (type(self).__name__, self.name, - self.start_pos[0], self.end_pos[0]) - - # To avoid issues with with the `parser.Parser`, we need setters that do - # nothing, because if pickle comes along and sets those values. - @global_names.setter - def global_names(self, value): - pass - - @error_statement_stacks.setter - def error_statement_stacks(self, value): - pass - - @used_names.setter - def used_names(self, value): - pass - - -class MergedNamesDict(object): - def __init__(self, dicts): - self.dicts = dicts - - def __iter__(self): - return iter(set(key for dct in self.dicts for key in dct)) - - def __getitem__(self, value): - return list(chain.from_iterable(dct.get(value, []) for dct in self.dicts)) - - def items(self): - dct = {} - for d in self.dicts: - for key, values in d.items(): - try: - dct_values = dct[key] - dct_values += values - except KeyError: - dct[key] = list(values) - return dct.items() - - def values(self): - lst = [] - for dct in self.dicts: - lst += dct.values() - return lst - - -class CachedFastParser(type): - """ This is a metaclass for caching `FastParser`. """ - def __call__(self, grammar, source, module_path=None): - if not settings.fast_parser: - return Parser(grammar, source, module_path) - - pi = cache.parser_cache.get(module_path, None) - if pi is None or isinstance(pi.parser, Parser): - p = super(CachedFastParser, self).__call__(grammar, source, module_path) - else: - p = pi.parser # pi is a `cache.ParserCacheItem` - p.update(source) - return p - - -class ParserNode(object): - def __init__(self, fast_module, parser, source): - self._fast_module = fast_module - self.parent = None - self._node_children = [] - - self.source = source - self.hash = hash(source) - self.parser = parser - - try: - # With fast_parser we have either 1 subscope or only statements. - self._content_scope = parser.module.subscopes[0] - except IndexError: - self._content_scope = parser.module - else: - self._rewrite_last_newline() - - # We need to be able to reset the original children of a parser. - self._old_children = list(self._content_scope.children) - - def _rewrite_last_newline(self): - """ - The ENDMARKER can contain a newline in the prefix. However this prefix - really belongs to the function - respectively to the next function or - parser node. If we don't rewrite that newline, we end up with a newline - in the wrong position, i.d. at the end of the file instead of in the - middle. - """ - c = self._content_scope.children - if tree.is_node(c[-1], 'suite'): # In a simple_stmt there's no DEDENT. - end_marker = self.parser.module.children[-1] - # Set the DEDENT prefix instead of the ENDMARKER. - c[-1].children[-1].prefix = end_marker.prefix - end_marker.prefix = '' - - def __repr__(self): - module = self.parser.module - try: - return '<%s: %s-%s>' % (type(self).__name__, module.start_pos, module.end_pos) - except IndexError: - # There's no module yet. - return '<%s: empty>' % type(self).__name__ - - def reset_node(self): - """ - Removes changes that were applied in this class.
- """ - self._node_children = [] - scope = self._content_scope - scope.children = list(self._old_children) - try: - # This works if it's a MergedNamesDict. - # We are correcting it, because the MergedNamesDicts are artificial - # and can change after closing a node. - scope.names_dict = scope.names_dict.dicts[0] - except AttributeError: - pass - - def close(self): - """ - Closes the current parser node. This means that after this no further - nodes should be added anymore. - """ - # We only need to replace the dict if multiple dictionaries are used: - if self._node_children: - dcts = [n.parser.module.names_dict for n in self._node_children] - # Need to insert the own node as well. - dcts.insert(0, self._content_scope.names_dict) - self._content_scope.names_dict = MergedNamesDict(dcts) - - def parent_until_indent(self, indent=None): - if (indent is None or self._indent >= indent) and self.parent is not None: - self.close() - return self.parent.parent_until_indent(indent) - return self - - @property - def _indent(self): - if not self.parent: - return 0 - - return self.parser.module.children[0].start_pos[1] - - def add_node(self, node, line_offset): - """Adding a node means adding a node that was already added earlier""" - # Changing the line offsets is very important, because if they don't - # fit, all the start_pos values will be wrong. - m = node.parser.module - node.parser.position_modifier.line = line_offset - self._fast_module.modules.append(m) - node.parent = self - - self._node_children.append(node) - - # Insert parser objects into current structure. We only need to set the - # parents and children in a good way. - scope = self._content_scope - for child in m.children: - child.parent = scope - scope.children.append(child) - - return node - - def all_sub_nodes(self): - """ - Returns all nodes including nested ones. - """ - for n in self._node_children: - yield n - for y in n.all_sub_nodes(): - yield y - - @cache.underscore_memoization # Should only happen once! - def remove_last_newline(self): - self.parser.remove_last_newline() - - -class FastParser(use_metaclass(CachedFastParser)): - _FLOWS_NEED_SPACE = 'if', 'elif', 'while', 'with', 'except', 'for' - _FLOWS_NEED_COLON = 'else', 'try', 'except', 'finally' - _keyword_re = re.compile('^[ \t]*(def |class |@|(?:%s)|(?:%s)\s*:)' - % ('|'.join(_FLOWS_NEED_SPACE), - '|'.join(_FLOWS_NEED_COLON))) - - def __init__(self, grammar, source, module_path=None): - # set values like `tree.Module`. - self._grammar = grammar - self.module_path = module_path - self._reset_caches() - self.update(source) - - def _reset_caches(self): - self.module = FastModule(self.module_path) - self.current_node = ParserNode(self.module, self, '') - - def update(self, source): - # For testing purposes: It is important that the number of parsers used - # can be minimized. With these variables we can test against that. - self.number_parsers_used = 0 - self.number_of_splits = 0 - self.number_of_misses = 0 - self.module.reset_caches() - try: - self._parse(source) - except: - # FastParser is cached, be careful with exceptions. - self._reset_caches() - raise - - def _split_parts(self, source): - """ - Split the source code into different parts. This makes it possible to - parse each part seperately and therefore cache parts of the file and - not everything. 
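The important trick in `add_node` above is that cached subtrees are never rewritten when they move: every leaf stores a line number relative to its own part and shares one position modifier, so relocating the whole part is a single assignment. A minimal sketch of that idea (the class names are illustrative stand-ins for jedi's tree objects):

class PositionModifier(object):
    def __init__(self):
        self.line = 0          # offset shared by every leaf of one part

class Leaf(object):
    def __init__(self, modifier, relative_line):
        self._modifier = modifier
        self._relative_line = relative_line   # line inside the parsed part

    @property
    def line(self):
        return self._relative_line + self._modifier.line

mod = PositionModifier()
leaf = Leaf(mod, 3)
mod.line = 40      # the cached part now starts at line 40 of the file
print(leaf.line)   # 43: the whole subtree moved without touching any node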
- """ - def gen_part(): - text = ''.join(current_lines) - del current_lines[:] - self.number_of_splits += 1 - return text - - def just_newlines(current_lines): - for line in current_lines: - line = line.lstrip('\t \n\r') - if line and line[0] != '#': - return False - return True - - # Split only new lines. Distinction between \r\n is the tokenizer's - # job. - # It seems like there's no problem with form feed characters here, - # because we're not counting lines. - self._lines = source.splitlines(True) - current_lines = [] - is_decorator = False - # Use -1, because that indent is always smaller than any other. - indent_list = [-1, 0] - new_indent = False - parentheses_level = 0 - flow_indent = None - previous_line = None - # All things within flows are simply being ignored. - for i, l in enumerate(self._lines): - # Handle backslash newline escaping. - if l.endswith('\\\n') or l.endswith('\\\r\n'): - if previous_line is not None: - previous_line += l - else: - previous_line = l - continue - if previous_line is not None: - l = previous_line + l - previous_line = None - - # check for dedents - s = l.lstrip('\t \n\r') - indent = len(l) - len(s) - if not s or s[0] == '#': - current_lines.append(l) # Just ignore comments and blank lines - continue - - if new_indent: - if indent > indent_list[-2]: - # Set the actual indent, not just the random old indent + 1. - indent_list[-1] = indent - new_indent = False - - while indent <= indent_list[-2]: # -> dedent - indent_list.pop() - # This automatically resets the flow_indent if there was a - # dedent or a flow just on one line (with one simple_stmt). - new_indent = False - if flow_indent is None and current_lines and not parentheses_level: - yield gen_part() - flow_indent = None - - # Check lines for functions/classes and split the code there. - if flow_indent is None: - m = self._keyword_re.match(l) - if m: - # Strip whitespace and colon from flows as a check. - if m.group(1).strip(' \t\r\n:') in FLOWS: - if not parentheses_level: - flow_indent = indent - else: - if not is_decorator and not just_newlines(current_lines): - yield gen_part() - is_decorator = '@' == m.group(1) - if not is_decorator: - parentheses_level = 0 - # The new indent needs to be higher - indent_list.append(indent + 1) - new_indent = True - elif is_decorator: - is_decorator = False - - parentheses_level = \ - max(0, (l.count('(') + l.count('[') + l.count('{') - - l.count(')') - l.count(']') - l.count('}'))) - - current_lines.append(l) - if current_lines: - yield gen_part() - - def _parse(self, source): - """ :type source: str """ - added_newline = False - if not source or source[-1] != '\n': - # To be compatible with Pythons grammar, we need a newline at the - # end. The parser would handle it, but since the fast parser abuses - # the normal parser in various ways, we need to care for this - # ourselves. - source += '\n' - added_newline = True - - next_line_offset = line_offset = 0 - start = 0 - nodes = list(self.current_node.all_sub_nodes()) - # Now we can reset the node, because we have all the old nodes. - self.current_node.reset_node() - last_end_line = 1 - - for code_part in self._split_parts(source): - next_line_offset += code_part.count('\n') - # If the last code part parsed isn't equal to the current end_pos, - # we know that the parser went further (`def` start in a - # docstring). So just parse the next part. 
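A stripped-down sketch of what `_split_parts` produces: the source is cut ahead of each top-level `def`/`class`/decorator line so that each chunk can be parsed and cached independently. The real method additionally tracks indentation, flow keywords, parentheses levels and backslash continuations, all of which are ignored here:

import re

KEYWORD = re.compile(r'^(def |class |@)')   # simplified stand-in for _keyword_re

def split_parts(source):
    # Yield cacheable chunks, starting a new one at every top-level definition.
    current = []
    for line in source.splitlines(True):
        if KEYWORD.match(line) and current:
            yield ''.join(current)
            current = []
        current.append(line)
    if current:
        yield ''.join(current)

src = "import os\n\ndef a():\n    pass\n\ndef b():\n    pass\n"
print(list(split_parts(src)))
# -> ['import os\n\n', 'def a():\n    pass\n\n', 'def b():\n    pass\n']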
- if line_offset + 1 == last_end_line: - self.current_node = self._get_node(code_part, source[start:], - line_offset, nodes) - else: - # Means that some lines where not fully parsed. Parse it now. - # This is a very rare case. Should only happens with very - # strange code bits. - self.number_of_misses += 1 - while last_end_line < next_line_offset + 1: - line_offset = last_end_line - 1 - # We could calculate the src in a more complicated way to - # make caching here possible as well. However, this is - # complicated and error-prone. Since this is not very often - # called - just ignore it. - src = ''.join(self._lines[line_offset:]) - self.current_node = self._get_node(code_part, src, - line_offset, nodes) - last_end_line = self.current_node.parser.module.end_pos[0] - - debug.dbg('While parsing %s, line %s slowed down the fast parser.', - self.module_path, line_offset + 1) - - line_offset = next_line_offset - start += len(code_part) - - last_end_line = self.current_node.parser.module.end_pos[0] - - if added_newline: - self.current_node.remove_last_newline() - - # Now that the for loop is finished, we still want to close all nodes. - self.current_node = self.current_node.parent_until_indent() - self.current_node.close() - - debug.dbg('Parsed %s, with %s parsers in %s splits.' - % (self.module_path, self.number_parsers_used, - self.number_of_splits)) - - def _get_node(self, source, parser_code, line_offset, nodes): - """ - Side effect: Alters the list of nodes. - """ - indent = len(source) - len(source.lstrip('\t ')) - self.current_node = self.current_node.parent_until_indent(indent) - - h = hash(source) - for index, node in enumerate(nodes): - if node.hash == h and node.source == source: - node.reset_node() - nodes.remove(node) - break - else: - tokenizer = FastTokenizer(parser_code) - self.number_parsers_used += 1 - p = Parser(self._grammar, parser_code, self.module_path, tokenizer=tokenizer) - - end = line_offset + p.module.end_pos[0] - used_lines = self._lines[line_offset:end - 1] - code_part_actually_used = ''.join(used_lines) - - node = ParserNode(self.module, p, code_part_actually_used) - - self.current_node.add_node(node, line_offset) - return node - - -class FastTokenizer(object): - """ - Breaks when certain conditions are met, i.e. a new function or class opens. - """ - def __init__(self, source): - self.source = source - self._gen = source_tokens(source) - self._closed = False - - # fast parser options - self.current = self.previous = NEWLINE, '', (0, 0) - self._in_flow = False - self._is_decorator = False - self._first_stmt = True - self._parentheses_level = 0 - self._indent_counter = 0 - self._flow_indent_counter = 0 - self._returned_endmarker = False - self._expect_indent = False - - def __iter__(self): - return self - - def next(self): - """ Python 2 Compatibility """ - return self.__next__() - - def __next__(self): - if self._closed: - return self._finish_dedents() - - typ, value, start_pos, prefix = current = next(self._gen) - if typ == ENDMARKER: - self._closed = True - self._returned_endmarker = True - return current - - self.previous = self.current - self.current = current - - if typ == INDENT: - self._indent_counter += 1 - if not self._expect_indent and not self._first_stmt and not self._in_flow: - # This does not mean that there is an actual flow, it means - # that the INDENT is syntactically wrong. 
- self._flow_indent_counter = self._indent_counter - 1 - self._in_flow = True - self._expect_indent = False - elif typ == DEDENT: - self._indent_counter -= 1 - if self._in_flow: - if self._indent_counter == self._flow_indent_counter: - self._in_flow = False - else: - self._closed = True - return current - - if value in ('def', 'class') and self._parentheses_level \ - and re.search(r'\n[ \t]*\Z', prefix): - # Account for the fact that an open parentheses before a function - # will reset the parentheses counter, but new lines before will - # still be ignored. So check the prefix. - - # TODO what about flow parentheses counter resets in the tokenizer? - self._parentheses_level = 0 - return self._close() - - # Parentheses ignore the indentation rules. The other three stand for - # new lines. - if self.previous[0] in (NEWLINE, INDENT, DEDENT) \ - and not self._parentheses_level and typ not in (INDENT, DEDENT): - if not self._in_flow: - if value in FLOWS: - self._flow_indent_counter = self._indent_counter - self._first_stmt = False - elif value in ('def', 'class', '@'): - # The values here are exactly the same check as in - # _split_parts, but this time with tokenize and therefore - # precise. - if not self._first_stmt and not self._is_decorator: - return self._close() - - self._is_decorator = '@' == value - if not self._is_decorator: - self._first_stmt = False - self._expect_indent = True - elif self._expect_indent: - return self._close() - else: - self._first_stmt = False - - if value in '([{' and value: - self._parentheses_level += 1 - elif value in ')]}' and value: - # Ignore closing parentheses, because they are all - # irrelevant for the indentation. - self._parentheses_level = max(self._parentheses_level - 1, 0) - return current - - def _close(self): - if self._first_stmt: - # Continue like nothing has happened, because we want to enter - # the first class/function. - if self.current[1] != '@': - self._first_stmt = False - return self.current - else: - self._closed = True - return self._finish_dedents() - - def _finish_dedents(self): - if self._indent_counter: - self._indent_counter -= 1 - return DEDENT, '', self.current[2], '' - elif not self._returned_endmarker: - self._returned_endmarker = True - return ENDMARKER, '', self.current[2], self._get_prefix() - else: - raise StopIteration - - def _get_prefix(self): - """ - We're using the current prefix for the endmarker to not loose any - information. However we care about "lost" lines. The prefix of the - current line (indent) will always be included in the current line. - """ - cur = self.current - while cur[0] == DEDENT: - cur = next(self._gen) - prefix = cur[3] - - # \Z for the end of the string. $ is bugged, because it has the - # same behavior with or without re.MULTILINE. - return re.sub(r'[^\n]+\Z', '', prefix) diff --git a/pythonFiles/release/jedi/parser/pgen2/__init__.py b/pythonFiles/release/jedi/parser/pgen2/__init__.py deleted file mode 100755 index 1ddae5fea9f7..000000000000 --- a/pythonFiles/release/jedi/parser/pgen2/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -# Modifications: -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. -# Copyright 2014 David Halter. Integration into Jedi. -# Modifications are dual-licensed: MIT and PSF. 
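The heart of `FastTokenizer` above is ending the token stream as soon as a new top-level `def` or `class` begins, so that each cached part parses as an independent module. A simplified Python 3 version built on the stdlib tokenizer; it ignores the decorator, flow and parentheses-level bookkeeping the real class does:

import io
import tokenize

def tokens_until_next_def(source):
    # Yield tokens, stopping before a second top-level 'def'/'class'.
    depth = 0
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.INDENT:
            depth += 1
        elif tok.type == tokenize.DEDENT:
            depth -= 1
        elif (tok.type == tokenize.NAME and tok.string in ('def', 'class')
                and depth == 0 and tok.start != (1, 0)):
            return                   # a later top-level definition: stop here
        yield tok

src = "def a():\n    pass\ndef b():\n    pass\n"
print([t.string for t in tokens_until_next_def(src) if t.type == tokenize.NAME])
# -> ['def', 'a', 'pass']; 'b' belongs to the next part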
diff --git a/pythonFiles/release/jedi/parser/pgen2/grammar.py b/pythonFiles/release/jedi/parser/pgen2/grammar.py deleted file mode 100755 index 414c0dbe9f01..000000000000 --- a/pythonFiles/release/jedi/parser/pgen2/grammar.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -# Modifications: -# Copyright 2014 David Halter. Integration into Jedi. -# Modifications are dual-licensed: MIT and PSF. - -"""This module defines the data structures used to represent a grammar. - -These are a bit arcane because they are derived from the data -structures used by Python's 'pgen' parser generator. - -There's also a table here mapping operators to their names in the -token module; the Python tokenize module reports all operators as the -fallback token code OP, but the parser needs the actual token code. - -""" - -# Python imports -import pickle - - -class Grammar(object): - """Pgen parsing tables conversion class. - - Once initialized, this class supplies the grammar tables for the - parsing engine implemented by parse.py. The parsing engine - accesses the instance variables directly. The class here does not - provide initialization of the tables; several subclasses exist to - do this (see the conv and pgen modules). - - The load() method reads the tables from a pickle file, which is - much faster than the other ways offered by subclasses. The pickle - file is written by calling dump() (after loading the grammar - tables using a subclass). The report() method prints a readable - representation of the tables to stdout, for debugging. - - The instance variables are as follows: - - symbol2number -- a dict mapping symbol names to numbers. Symbol - numbers are always 256 or higher, to distinguish - them from token numbers, which are between 0 and - 255 (inclusive). - - number2symbol -- a dict mapping numbers to symbol names; - these two are each other's inverse. - - states -- a list of DFAs, where each DFA is a list of - states, each state is a list of arcs, and each - arc is a (i, j) pair where i is a label and j is - a state number. The DFA number is the index into - this list. (This name is slightly confusing.) - Final states are represented by a special arc of - the form (0, j) where j is its own state number. - - dfas -- a dict mapping symbol numbers to (DFA, first) - pairs, where DFA is an item from the states list - above, and first is a set of tokens that can - begin this grammar rule (represented by a dict - whose values are always 1). - - labels -- a list of (x, y) pairs where x is either a token - number or a symbol number, and y is either None - or a string; the strings are keywords. The label - number is the index in this list; label numbers - are used to mark state transitions (arcs) in the - DFAs. - - start -- the number of the grammar's start symbol. - - keywords -- a dict mapping keyword strings to arc labels. - - tokens -- a dict mapping token numbers to arc labels. 
- - """ - - def __init__(self): - self.symbol2number = {} - self.number2symbol = {} - self.states = [] - self.dfas = {} - self.labels = [(0, "EMPTY")] - self.keywords = {} - self.tokens = {} - self.symbol2label = {} - self.start = 256 - - def dump(self, filename): - """Dump the grammar tables to a pickle file.""" - with open(filename, "wb") as f: - pickle.dump(self.__dict__, f, 2) - - def load(self, filename): - """Load the grammar tables from a pickle file.""" - with open(filename, "rb") as f: - d = pickle.load(f) - self.__dict__.update(d) - - def copy(self): - """ - Copy the grammar. - """ - new = self.__class__() - for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords", - "tokens", "symbol2label"): - setattr(new, dict_attr, getattr(self, dict_attr).copy()) - new.labels = self.labels[:] - new.states = self.states[:] - new.start = self.start - return new - - def report(self): - """Dump the grammar tables to standard output, for debugging.""" - from pprint import pprint - print("s2n") - pprint(self.symbol2number) - print("n2s") - pprint(self.number2symbol) - print("states") - pprint(self.states) - print("dfas") - pprint(self.dfas) - print("labels") - pprint(self.labels) - print("start", self.start) diff --git a/pythonFiles/release/jedi/parser/pgen2/parse.py b/pythonFiles/release/jedi/parser/pgen2/parse.py deleted file mode 100755 index c8ba70d356be..000000000000 --- a/pythonFiles/release/jedi/parser/pgen2/parse.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -# Modifications: -# Copyright 2014 David Halter. Integration into Jedi. -# Modifications are dual-licensed: MIT and PSF. - -""" -Parser engine for the grammar tables generated by pgen. - -The grammar table must be loaded first. - -See Parser/parser.c in the Python distribution for additional info on -how this parsing engine works. -""" - -# Local imports -from jedi.parser import tokenize - - -class ParseError(Exception): - """Exception to signal the parser is stuck.""" - - def __init__(self, msg, type, value, start_pos): - Exception.__init__(self, "%s: type=%r, value=%r, start_pos=%r" % - (msg, tokenize.tok_name[type], value, start_pos)) - self.msg = msg - self.type = type - self.value = value - self.start_pos = start_pos - - -class PgenParser(object): - """Parser engine. - - The proper usage sequence is: - - p = Parser(grammar, [converter]) # create instance - p.setup([start]) # prepare for parsing - : - if p.addtoken(...): # parse a token; may raise ParseError - break - root = p.rootnode # root of abstract syntax tree - - A Parser instance may be reused by calling setup() repeatedly. - - A Parser instance contains state pertaining to the current token - sequence, and should not be used concurrently by different threads - to parse separate token sequences. - - See driver.py for how to get input tokens by tokenizing a file or - string. - - Parsing is complete when addtoken() returns True; the root of the - abstract syntax tree can then be retrieved from the rootnode - instance variable. When a syntax error occurs, addtoken() raises - the ParseError exception. There is no error recovery; the parser - cannot be used after a syntax error was reported (but it can be - reinitialized by calling setup()). - - """ - - def __init__(self, grammar, convert_node, convert_leaf, error_recovery): - """Constructor. - - The grammar argument is a grammar.Grammar instance; see the - grammar module for more information. 
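
The reason the Grammar tables above are plain dicts, lists and tuples is spelled out in the docstring: generating them with pgen is slow, so they are pickled once and reloaded from disk afterwards. A rough sketch of that dump/load round trip, using a toy table rather than jedi's real format:

import pickle
import tempfile

# Toy stand-in for pgen2-style tables: only built-in containers,
# which is what keeps the pickle round trip cheap and dependency-free.
tables = {
    'symbol2number': {'file_input': 256, 'funcdef': 257},
    'labels': [(0, 'EMPTY'), (1, 'def')],
    'start': 256,
}

with tempfile.NamedTemporaryFile(suffix='.pickle', delete=False) as f:
    pickle.dump(tables, f, 2)  # protocol 2, as in Grammar.dump()
    path = f.name

with open(path, 'rb') as f:
    loaded = pickle.load(f)

assert loaded == tables  # the tables survive the round trip unchanged
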
-
-        The parser is not ready yet for parsing; you must call the
-        setup() method to get it started.
-
-        The optional convert argument is a function mapping concrete
-        syntax tree nodes to abstract syntax tree nodes.  If not
-        given, no conversion is done and the syntax tree produced is
-        the concrete syntax tree.  If given, it must be a function of
-        two arguments, the first being the grammar (a grammar.Grammar
-        instance), and the second being the concrete syntax tree node
-        to be converted.  The syntax tree is converted from the bottom
-        up.
-
-        A concrete syntax tree node is a (type, nodes) tuple, where
-        type is the node type (a token or symbol number) and nodes
-        is a list of children for symbols, and None for tokens.
-
-        An abstract syntax tree node may be anything; this is entirely
-        up to the converter function.
-
-        """
-        self.grammar = grammar
-        self.convert_node = convert_node
-        self.convert_leaf = convert_leaf
-
-        # Prepare for parsing.
-        start = self.grammar.start
-        # Each stack entry is a tuple: (dfa, state, node).
-        # A node is a tuple: (type, children),
-        # where children is a list of nodes or None
-        newnode = (start, [])
-        stackentry = (self.grammar.dfas[start], 0, newnode)
-        self.stack = [stackentry]
-        self.rootnode = None
-        self.error_recovery = error_recovery
-
-    def parse(self, tokenizer):
-        for type, value, prefix, start_pos in tokenizer:
-            if self.addtoken(type, value, prefix, start_pos):
-                break
-        else:
-            # We never broke out -- EOF is too soon -- Unfinished statement.
-            self.error_recovery(self.grammar, self.stack, type, value,
-                                start_pos, prefix, self.addtoken)
-            # Add the ENDMARKER again.
-            if not self.addtoken(type, value, prefix, start_pos):
-                raise ParseError("incomplete input", type, value, start_pos)
-        return self.rootnode
-
-    def addtoken(self, type, value, prefix, start_pos):
-        """Add a token; return True if this is the end of the program."""
-        # Map from token to label
-        if type == tokenize.NAME:
-            # Check for reserved words (keywords)
-            try:
-                ilabel = self.grammar.keywords[value]
-            except KeyError:
-                ilabel = self.grammar.tokens[type]
-        else:
-            ilabel = self.grammar.tokens[type]
-
-        # Loop until the token is shifted; may raise exceptions
-        while True:
-            dfa, state, node = self.stack[-1]
-            states, first = dfa
-            arcs = states[state]
-            # Look for a state with this label
-            for i, newstate in arcs:
-                t, v = self.grammar.labels[i]
-                if ilabel == i:
-                    # Look it up in the list of labels
-                    assert t < 256
-                    # Shift a token; we're done with it
-                    self.shift(type, value, newstate, prefix, start_pos)
-                    # Pop while we are in an accept-only state
-                    state = newstate
-                    while states[state] == [(0, state)]:
-                        self.pop()
-                        if not self.stack:
-                            # Done parsing!
-                            return True
-                        dfa, state, node = self.stack[-1]
-                        states, first = dfa
-                    # Done with this token
-                    return False
-                elif t >= 256:
-                    # See if it's a symbol and if we're in its first set
-                    itsdfa = self.grammar.dfas[t]
-                    itsstates, itsfirst = itsdfa
-                    if ilabel in itsfirst:
-                        # Push a symbol
-                        self.push(t, itsdfa, newstate)
-                        break  # To continue the outer while loop
-            else:
-                if (0, state) in arcs:
-                    # An accepting state, pop it and try something else
-                    self.pop()
-                    if not self.stack:
-                        # Done parsing, but another token is input
-                        raise ParseError("too much input", type, value, start_pos)
-                else:
-                    self.error_recovery(self.grammar, self.stack, type,
-                                        value, start_pos, prefix, self.addtoken)
-                    break
-
-    def shift(self, type, value, newstate, prefix, start_pos):
-        """Shift a token. (Internal)"""
-        dfa, state, node = self.stack[-1]
-        newnode = self.convert_leaf(self.grammar, type, value, prefix, start_pos)
-        node[-1].append(newnode)
-        self.stack[-1] = (dfa, newstate, node)
-
-    def push(self, type, newdfa, newstate):
-        """Push a nonterminal. (Internal)"""
-        dfa, state, node = self.stack[-1]
-        newnode = (type, [])
-        self.stack[-1] = (dfa, newstate, node)
-        self.stack.append((newdfa, 0, newnode))
-
-    def pop(self):
-        """Pop a nonterminal. (Internal)"""
-        popdfa, popstate, (type, children) = self.stack.pop()
-        # If there's exactly one child, return that child instead of creating a
-        # new node. We still create expr_stmt and file_input though, because a
-        # lot of Jedi depends on its logic.
-        if len(children) == 1:
-            newnode = children[0]
-        else:
-            newnode = self.convert_node(self.grammar, type, children)
-
-        try:
-            # Equal to:
-            # dfa, state, node = self.stack[-1]
-            # symbol, children = node
-            self.stack[-1][2][1].append(newnode)
-        except IndexError:
-            # Stack is empty, set the rootnode.
-            self.rootnode = newnode
diff --git a/pythonFiles/release/jedi/parser/token.py b/pythonFiles/release/jedi/parser/token.py
deleted file mode 100755
index e9ab3a6272ff..000000000000
--- a/pythonFiles/release/jedi/parser/token.py
+++ /dev/null
@@ -1,84 +0,0 @@
-from __future__ import absolute_import
-
-from jedi._compatibility import is_py3
-from token import *
-
-
-COMMENT = N_TOKENS
-tok_name[COMMENT] = 'COMMENT'
-N_TOKENS += 1
-
-NL = N_TOKENS
-tok_name[NL] = 'NL'
-N_TOKENS += 1
-
-if is_py3:
-    BACKQUOTE = N_TOKENS
-    tok_name[BACKQUOTE] = 'BACKQUOTE'
-    N_TOKENS += 1
-else:
-    RARROW = N_TOKENS
-    tok_name[RARROW] = 'RARROW'
-    N_TOKENS += 1
-    ELLIPSIS = N_TOKENS
-    tok_name[ELLIPSIS] = 'ELLIPSIS'
-    N_TOKENS += 1
-
-
-
-# Map from operator to number (since tokenize doesn't do this)
-
-opmap_raw = """\
-( LPAR
-) RPAR
-[ LSQB
-] RSQB
-: COLON
-, COMMA
-; SEMI
-+ PLUS
-- MINUS
-* STAR
-/ SLASH
-| VBAR
-& AMPER
-< LESS
-> GREATER
-= EQUAL
-. DOT
-% PERCENT
-` BACKQUOTE
-{ LBRACE
-} RBRACE
-@ AT
-== EQEQUAL
-!= NOTEQUAL
-<> NOTEQUAL
-<= LESSEQUAL
->= GREATEREQUAL
-~ TILDE
-^ CIRCUMFLEX
-<< LEFTSHIFT
->> RIGHTSHIFT
-** DOUBLESTAR
-+= PLUSEQUAL
--= MINEQUAL
-*= STAREQUAL
-/= SLASHEQUAL
-%= PERCENTEQUAL
-&= AMPEREQUAL
-|= VBAREQUAL
-^= CIRCUMFLEXEQUAL
-<<= LEFTSHIFTEQUAL
->>= RIGHTSHIFTEQUAL
-**= DOUBLESTAREQUAL
-// DOUBLESLASH
-//= DOUBLESLASHEQUAL
--> RARROW
-... ELLIPSIS
-"""
-
-opmap = {}
-for line in opmap_raw.splitlines():
-    op, name = line.split()
-    opmap[op] = globals()[name]
diff --git a/pythonFiles/release/jedi/parser/tokenize.py b/pythonFiles/release/jedi/parser/tokenize.py
deleted file mode 100755
index b38490468349..000000000000
--- a/pythonFiles/release/jedi/parser/tokenize.py
+++ /dev/null
@@ -1,290 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This tokenizer has been copied from the ``tokenize.py`` standard library
-tokenizer. The reason was simple: The standard library tokenizer fails
-if the indentation is not right. The fast parser of jedi, however, requires
-"wrong" indentation.
-
-Basically this is a stripped down version of the standard library module, so
-you can read the documentation there. Additionally we included some speed and
-memory optimizations here.
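
PgenParser above is a table-driven shift/reduce machine: addtoken() shifts terminals into the node on top of the stack, pushes a fresh DFA entry when a nonterminal can start, and pops completed nonterminals into their parent. The toy below mirrors only that stack discipline, hand-rolled for the grammar S -> '(' S ')' | 'x'; it is nothing like real pgen tables.

def parse(tokens):
    # Build a nested (type, children) tree, PgenParser-style.
    root = ('S', [])
    stack = [root]                    # open nonterminals, innermost last
    for tok in tokens:
        if tok == '(':
            stack[-1][1].append(tok)  # shift the terminal
            stack.append(('S', []))   # push: a nested S begins
        elif tok == 'x':
            stack[-1][1].append(tok)  # shift
        elif tok == ')':
            done = stack.pop()        # pop: the nested S is complete
            stack[-1][1].append(done)
            stack[-1][1].append(tok)
        else:
            raise ValueError('unexpected token %r' % tok)
    if len(stack) != 1:
        raise ValueError('incomplete input')
    return root

print(parse('((x))'))  # ('S', ['(', ('S', ['(', ('S', ['x']), ')']), ')'])
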
-""" -from __future__ import absolute_import - -import string -import re -from io import StringIO -from jedi.parser.token import (tok_name, N_TOKENS, ENDMARKER, STRING, NUMBER, - NAME, OP, ERRORTOKEN, NEWLINE, INDENT, DEDENT) -from jedi._compatibility import is_py3 - - -cookie_re = re.compile("coding[:=]\s*([-\w.]+)") - - -if is_py3: - # Python 3 has str.isidentifier() to check if a char is a valid identifier - is_identifier = str.isidentifier -else: - namechars = string.ascii_letters + '_' - is_identifier = lambda s: s in namechars - - -COMMENT = N_TOKENS -tok_name[COMMENT] = 'COMMENT' - - -def group(*choices): - return '(' + '|'.join(choices) + ')' - - -def maybe(*choices): - return group(*choices) + '?' - - -# Note: we use unicode matching for names ("\w") but ascii matching for -# number literals. -whitespace = r'[ \f\t]*' -comment = r'#[^\r\n]*' -name = r'\w+' - -hex_number = r'0[xX][0-9a-fA-F]+' -bin_number = r'0[bB][01]+' -oct_number = r'0[oO][0-7]+' -dec_number = r'(?:0+|[1-9][0-9]*)' -int_number = group(hex_number, bin_number, oct_number, dec_number) -exponent = r'[eE][-+]?[0-9]+' -point_float = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(exponent) -Expfloat = r'[0-9]+' + exponent -float_number = group(point_float, Expfloat) -imag_number = group(r'[0-9]+[jJ]', float_number + r'[jJ]') -number = group(imag_number, float_number, int_number) - -# Tail end of ' string. -single = r"[^'\\]*(?:\\.[^'\\]*)*'" -# Tail end of " string. -double = r'[^"\\]*(?:\\.[^"\\]*)*"' -# Tail end of ''' string. -single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" -# Tail end of """ string. -double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' -triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""') -# Single-line ' or " string. - -# Because of leftmost-then-longest match semantics, be sure to put the -# longest operators first (e.g., if = came before ==, == would get -# recognized as two instances of =). -operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", - r"//=?", r"->", - r"[+\-*/%&|^=<>]=?", - r"~") - -bracket = '[][(){}]' -special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]') -funny = group(operator, bracket, special) - -# First (or only) line of ' or " string. 
-cont_str = group(r"[bBuU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
-                 group("'", r'\\\r?\n'),
-                 r'[bBuU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
-                 group('"', r'\\\r?\n'))
-pseudo_extras = group(r'\\\r?\n', comment, triple)
-pseudo_token = group(whitespace) + \
-    group(pseudo_extras, number, funny, cont_str, name)
-
-
-def _compile(expr):
-    return re.compile(expr, re.UNICODE)
-
-
-pseudoprog, single3prog, double3prog = map(
-    _compile, (pseudo_token, single3, double3))
-
-endprogs = {"'": _compile(single), '"': _compile(double),
-            "'''": single3prog, '"""': double3prog,
-            "r'''": single3prog, 'r"""': double3prog,
-            "b'''": single3prog, 'b"""': double3prog,
-            "u'''": single3prog, 'u"""': double3prog,
-            "R'''": single3prog, 'R"""': double3prog,
-            "B'''": single3prog, 'B"""': double3prog,
-            "U'''": single3prog, 'U"""': double3prog,
-            "br'''": single3prog, 'br"""': double3prog,
-            "bR'''": single3prog, 'bR"""': double3prog,
-            "Br'''": single3prog, 'Br"""': double3prog,
-            "BR'''": single3prog, 'BR"""': double3prog,
-            "ur'''": single3prog, 'ur"""': double3prog,
-            "uR'''": single3prog, 'uR"""': double3prog,
-            "Ur'''": single3prog, 'Ur"""': double3prog,
-            "UR'''": single3prog, 'UR"""': double3prog,
-            'r': None, 'R': None, 'b': None, 'B': None}
-
-triple_quoted = {}
-for t in ("'''", '"""',
-          "r'''", 'r"""', "R'''", 'R"""',
-          "b'''", 'b"""', "B'''", 'B"""',
-          "u'''", 'u"""', "U'''", 'U"""',
-          "br'''", 'br"""', "Br'''", 'Br"""',
-          "bR'''", 'bR"""', "BR'''", 'BR"""',
-          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
-          "uR'''", 'uR"""', "UR'''", 'UR"""'):
-    triple_quoted[t] = t
-single_quoted = {}
-for t in ("'", '"',
-          "r'", 'r"', "R'", 'R"',
-          "b'", 'b"', "B'", 'B"',
-          "u'", 'u"', "U'", 'U"',
-          "br'", 'br"', "Br'", 'Br"',
-          "bR'", 'bR"', "BR'", 'BR"',
-          "ur'", 'ur"', "Ur'", 'Ur"',
-          "uR'", 'uR"', "UR'", 'UR"'):
-    single_quoted[t] = t
-
-del _compile
-
-tabsize = 8
-
-ALWAYS_BREAK_TOKENS = (';', 'import', 'from', 'class', 'def', 'try', 'except',
-                       'finally', 'while', 'return')
-
-
-def source_tokens(source):
-    """Generate tokens from the source code (string)."""
-    source = source + '\n'  # end with \n, because the parser needs it
-    readline = StringIO(source).readline
-    return generate_tokens(readline)
-
-
-def generate_tokens(readline):
-    """
-    A heavily modified Python standard library tokenizer.
-
-    In addition to the default information, this also yields the prefix of each
-    token. This idea comes from lib2to3. The prefix contains all information
-    that is irrelevant for the parser like newlines in parentheses or comments.
-    """
-    paren_level = 0  # count parentheses
-    indents = [0]
-    lnum = 0
-    numchars = '0123456789'
-    contstr = ''
-    contline = None
-    # We start with a newline. This makes indent at the first position
-    # possible. It's not valid Python, but still better than an INDENT in the
-    # second line (and not in the first). This makes quite a few things in
-    # Jedi's fast parser possible.
-    new_line = True
-    prefix = ''  # Should never be required, but here for safety
-    additional_prefix = ''
-    while True:  # loop over lines in stream
-        line = readline()  # readline returns empty when finished. See StringIO
-        if not line:
-            if contstr:
-                yield ERRORTOKEN, contstr, contstr_start, prefix
-            break
-
-        lnum += 1
-        pos, max = 0, len(line)
-
-        if contstr:  # continued string
-            endmatch = endprog.match(line)
-            if endmatch:
-                pos = endmatch.end(0)
-                yield STRING, contstr + line[:pos], contstr_start, prefix
-                contstr = ''
-                contline = None
-            else:
-                contstr = contstr + line
-                contline = contline + line
-                continue
-
-        while pos < max:
-            pseudomatch = pseudoprog.match(line, pos)
-            if not pseudomatch:  # scan for tokens
-                txt = line[pos]
-                if line[pos] in '"\'':
-                    # If a literal starts but doesn't end the whole rest of the
-                    # line is an error token.
-                    txt = line[pos:]
-                yield ERRORTOKEN, txt, (lnum, pos), prefix
-                pos += 1
-                continue
-
-            prefix = additional_prefix + pseudomatch.group(1)
-            additional_prefix = ''
-            start, pos = pseudomatch.span(2)
-            spos = (lnum, start)
-            token, initial = line[start:pos], line[start]
-
-            if new_line and initial not in '\r\n#':
-                new_line = False
-                if paren_level == 0:
-                    if start > indents[-1]:
-                        yield INDENT, '', spos, ''
-                        indents.append(start)
-                    while start < indents[-1]:
-                        yield DEDENT, '', spos, ''
-                        indents.pop()
-
-            if (initial in numchars or  # ordinary number
-                    (initial == '.' and token != '.' and token != '...')):
-                yield NUMBER, token, spos, prefix
-            elif initial in '\r\n':
-                if not new_line and paren_level == 0:
-                    yield NEWLINE, token, spos, prefix
-                else:
-                    additional_prefix = prefix + token
-                new_line = True
-            elif initial == '#':  # Comments
-                assert not token.endswith("\n")
-                additional_prefix = prefix + token
-            elif token in triple_quoted:
-                endprog = endprogs[token]
-                endmatch = endprog.match(line, pos)
-                if endmatch:  # all on one line
-                    pos = endmatch.end(0)
-                    token = line[start:pos]
-                    yield STRING, token, spos, prefix
-                else:
-                    contstr_start = (lnum, start)  # multiple lines
-                    contstr = line[start:]
-                    contline = line
-                    break
-            elif initial in single_quoted or \
-                    token[:2] in single_quoted or \
-                    token[:3] in single_quoted:
-                if token[-1] == '\n':  # continued string
-                    contstr_start = lnum, start
-                    endprog = (endprogs.get(initial) or endprogs.get(token[1])
-                               or endprogs.get(token[2]))
-                    contstr = line[start:]
-                    contline = line
-                    break
-                else:  # ordinary string
-                    yield STRING, token, spos, prefix
-            elif is_identifier(initial):  # ordinary name
-                if token in ALWAYS_BREAK_TOKENS:
-                    paren_level = 0
-                    while True:
-                        indent = indents.pop()
-                        if indent > start:
-                            yield DEDENT, '', spos, ''
-                        else:
-                            indents.append(indent)
-                            break
-                yield NAME, token, spos, prefix
-            elif initial == '\\' and line[start:] in ('\\\n', '\\\r\n'):  # continued stmt
-                additional_prefix += prefix + line[start:]
-                break
-            else:
-                if token in '([{':
-                    paren_level += 1
-                elif token in ')]}':
-                    paren_level -= 1
-                yield OP, token, spos, prefix
-
-    end_pos = (lnum, max - 1)
-    # As the last position we just take the maximally possible position. We
-    # remove -1 for the last new line.
-    for indent in indents[1:]:
-        yield DEDENT, '', end_pos, ''
-    yield ENDMARKER, '', end_pos, prefix
diff --git a/pythonFiles/release/jedi/parser/tree.py b/pythonFiles/release/jedi/parser/tree.py
deleted file mode 100755
index 619067e1905f..000000000000
--- a/pythonFiles/release/jedi/parser/tree.py
+++ /dev/null
@@ -1,1222 +0,0 @@
-"""
-If you know what an abstract syntax tree (AST) is, you'll see that this module
-is pretty much that. The classes represent syntax elements like functions and
-imports.
-
-This is the "business logic" part of the parser. There's a lot of logic here
-that makes it easier for Jedi (and other libraries) to deal with a Python
-syntax tree.
-
-By using `get_code` on a module, you can get back the 1-to-1 representation of
-the input given to the parser. This is important if you are doing refactoring.
-
-The easiest way to play with this module is to use :class:`parsing.Parser`.
-:attr:`parsing.Parser.module` holds an instance of :class:`Module`:
-
->>> from jedi._compatibility import u
->>> from jedi.parser import Parser, load_grammar
->>> parser = Parser(load_grammar(), u('import os'), 'example.py')
->>> submodule = parser.module
->>> submodule
-<Module: example.py@1-1>
-
-Any subclass of :class:`Scope`, including :class:`Module`, has an attribute
-:attr:`imports <Scope.imports>`:
-
->>> submodule.imports
-[<ImportName: import os@1,0>]
-
-See also :attr:`Scope.subscopes` and :attr:`Scope.statements`.
-"""
-import os
-import re
-from inspect import cleandoc
-from itertools import chain
-import textwrap
-
-from jedi._compatibility import (Python3Method, encoding, is_py3, utf8_repr,
-                                 literal_eval, use_metaclass, unicode)
-from jedi import cache
-
-
-def is_node(node, *symbol_names):
-    try:
-        type = node.type
-    except AttributeError:
-        return False
-    else:
-        return type in symbol_names
-
-
-class PositionModifier(object):
-    """A start_pos modifier for the fast parser."""
-    def __init__(self):
-        self.line = 0
-
-
-zero_position_modifier = PositionModifier()
-
-
-class DocstringMixin(object):
-    __slots__ = ()
-
-    @property
-    def raw_doc(self):
-        """ Returns a cleaned version of the docstring token. """
-        if isinstance(self, Module):
-            node = self.children[0]
-        elif isinstance(self, ClassOrFunc):
-            node = self.children[self.children.index(':') + 1]
-            if is_node(node, 'suite'):  # Normally a suite
-                node = node.children[2]  # -> NEWLINE INDENT stmt
-        else:  # ExprStmt
-            simple_stmt = self.parent
-            c = simple_stmt.parent.children
-            index = c.index(simple_stmt)
-            if not index:
-                return ''
-            node = c[index - 1]
-
-        if is_node(node, 'simple_stmt'):
-            node = node.children[0]
-
-        if node.type == 'string':
-            # TODO We have to check next leaves until there are no new
-            # leaves anymore that might be part of the docstring. A
-            # docstring can also look like this: ``'foo' 'bar'``
-            # Returns a literal cleaned version of the ``Token``.
-            cleaned = cleandoc(literal_eval(node.value))
-            # Since we want the docstr output to be always unicode, just
-            # force it.
-            if is_py3 or isinstance(cleaned, unicode):
-                return cleaned
-            else:
-                return unicode(cleaned, 'UTF-8', 'replace')
-        return ''
-
-
-class Base(object):
-    """
-    This is just here to have an isinstance check, which is also used on
-    evaluate classes. But since they sometimes have a special type of
-    delegation, it is important for those classes to override this method.
-
-    I know that there is a chance to do such things with __instancecheck__, but
-    since Python 2.5 doesn't support it, I decided to do it this way.
-    """
-    __slots__ = ()
-
-    def isinstance(self, *cls):
-        return isinstance(self, cls)
-
-    @Python3Method
-    def get_parent_until(self, classes=(), reverse=False,
-                         include_current=True):
-        """
-        Searches the parent "chain" until the object is an instance of
-        classes. If classes is empty, return the last parent in the chain
-        (the one without a parent).
-        """
-        if type(classes) not in (tuple, list):
-            classes = (classes,)
-        scope = self if include_current else self.parent
-        while scope.parent is not None:
-            # TODO why if classes?
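
The tokenizer above and the tree classes below share one invariant: every token (and later every leaf) carries the "irrelevant" text before it as its prefix, so concatenating prefix + value over the whole stream reproduces the source byte for byte. That is what makes get_code() lossless. A self-contained illustration with a deliberately tiny regex tokenizer, illustrative only and nothing like jedi's real one:

import re

def lossless_tokens(source):
    # A token is (prefix, value): prefix holds whitespace and comments
    # that a parser ignores, value is the text it actually consumes.
    pattern = re.compile(r'(?P<prefix>(?:\s|#[^\n]*)*)(?P<value>\w+|[^\w\s]|$)')
    pos, tokens = 0, []
    while pos < len(source):
        m = pattern.match(source, pos)
        if m.end() == pos:
            break
        tokens.append((m.group('prefix'), m.group('value')))
        pos = m.end()
    return tokens

src = "x = 1  # set x\ny = x + 2\n"
toks = lossless_tokens(src)
assert ''.join(p + v for p, v in toks) == src  # the round-trip invariant
print(toks)
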
- if classes and reverse != scope.isinstance(*classes): - break - scope = scope.parent - return scope - - def get_parent_scope(self, include_flows=False): - """ - Returns the underlying scope. - """ - scope = self.parent - while scope is not None: - if include_flows and isinstance(scope, Flow): - return scope - if scope.is_scope(): - break - scope = scope.parent - return scope - - def is_scope(self): - # Default is not being a scope. Just inherit from Scope. - return False - - -class Leaf(Base): - __slots__ = ('position_modifier', 'value', 'parent', '_start_pos', 'prefix') - - def __init__(self, position_modifier, value, start_pos, prefix=''): - self.position_modifier = position_modifier - self.value = value - self._start_pos = start_pos - self.prefix = prefix - self.parent = None - - @property - def start_pos(self): - return self._start_pos[0] + self.position_modifier.line, self._start_pos[1] - - @start_pos.setter - def start_pos(self, value): - self._start_pos = value[0] - self.position_modifier.line, value[1] - - @property - def end_pos(self): - return (self._start_pos[0] + self.position_modifier.line, - self._start_pos[1] + len(self.value)) - - def move(self, line_offset, column_offset): - self._start_pos = (self._start_pos[0] + line_offset, - self._start_pos[1] + column_offset) - - def get_previous(self): - """ - Returns the previous leaf in the parser tree. - """ - node = self - while True: - c = node.parent.children - i = c.index(self) - if i == 0: - node = node.parent - if node.parent is None: - raise IndexError('Cannot access the previous element of the first one.') - else: - node = c[i - 1] - break - - while True: - try: - node = node.children[-1] - except AttributeError: # A Leaf doesn't have children. - return node - - def get_code(self): - return self.prefix + self.value - - def next_sibling(self): - """ - The node immediately following the invocant in their parent's children - list. If the invocant does not have a next sibling, it is None - """ - # Can't use index(); we need to test by identity - for i, child in enumerate(self.parent.children): - if child is self: - try: - return self.parent.children[i + 1] - except IndexError: - return None - - def prev_sibling(self): - """ - The node/leaf immediately preceding the invocant in their parent's - children list. If the invocant does not have a previous sibling, it is - None. - """ - # Can't use index(); we need to test by identity - for i, child in enumerate(self.parent.children): - if child is self: - if i == 0: - return None - return self.parent.children[i - 1] - - @utf8_repr - def __repr__(self): - return "<%s: %s>" % (type(self).__name__, self.value) - - -class LeafWithNewLines(Leaf): - __slots__ = () - - @property - def end_pos(self): - """ - Literals and whitespace end_pos are more complicated than normal - end_pos, because the containing newlines may change the indexes. - """ - end_pos_line, end_pos_col = self.start_pos - lines = self.value.split('\n') - end_pos_line += len(lines) - 1 - # Check for multiline token - if self.start_pos[0] == end_pos_line: - end_pos_col += len(lines[-1]) - else: - end_pos_col = len(lines[-1]) - return end_pos_line, end_pos_col - - -class Whitespace(LeafWithNewLines): - """Contains NEWLINE and ENDMARKER tokens.""" - __slots__ = () - type = 'whitespace' - - -class Name(Leaf): - """ - A string. Sometimes it is important to know if the string belongs to a name - or not. 
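
PositionModifier above exists so the fast parser can move a whole cached subtree by changing a single shared line offset instead of rewriting every leaf: Leaf.start_pos adds position_modifier.line on every read. Stripped to the bare mechanism (a hypothetical MiniLeaf, not the real class):

class PositionModifier(object):
    # One shared, mutable line offset per cached block.
    def __init__(self):
        self.line = 0

class MiniLeaf(object):
    def __init__(self, modifier, value, start_pos):
        self.position_modifier = modifier
        self.value = value
        self._start_pos = start_pos

    @property
    def start_pos(self):
        line, column = self._start_pos
        return line + self.position_modifier.line, column

mod = PositionModifier()
leaves = [MiniLeaf(mod, 'def', (1, 0)), MiniLeaf(mod, 'f', (1, 4))]
mod.line = 2  # two lines were inserted above the cached block
print([leaf.start_pos for leaf in leaves])  # [(3, 0), (3, 4)]
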
- """ - type = 'name' - __slots__ = () - - def __str__(self): - return self.value - - def __unicode__(self): - return self.value - - def __repr__(self): - return "<%s: %s@%s,%s>" % (type(self).__name__, self.value, - self.start_pos[0], self.start_pos[1]) - - def get_definition(self): - scope = self - while scope.parent is not None: - parent = scope.parent - if scope.isinstance(Node, Name) and parent.type != 'simple_stmt': - if scope.type == 'testlist_comp': - try: - if isinstance(scope.children[1], CompFor): - return scope.children[1] - except IndexError: - pass - scope = parent - else: - break - return scope - - def is_definition(self): - stmt = self.get_definition() - if stmt.type in ('funcdef', 'classdef', 'file_input', 'param'): - return self == stmt.name - elif stmt.type == 'for_stmt': - return self.start_pos < stmt.children[2].start_pos - elif stmt.type == 'try_stmt': - return self.prev_sibling() == 'as' - else: - return stmt.type in ('expr_stmt', 'import_name', 'import_from', - 'comp_for', 'with_stmt') \ - and self in stmt.get_defined_names() - - def assignment_indexes(self): - """ - Returns an array of ints of the indexes that are used in tuple - assignments. - - For example if the name is ``y`` in the following code:: - - x, (y, z) = 2, '' - - would result in ``[1, 0]``. - """ - indexes = [] - node = self.parent - compare = self - while node is not None: - if is_node(node, 'testlist_comp', 'testlist_star_expr', 'exprlist'): - for i, child in enumerate(node.children): - if child == compare: - indexes.insert(0, int(i / 2)) - break - else: - raise LookupError("Couldn't find the assignment.") - elif isinstance(node, (ExprStmt, CompFor)): - break - - compare = node - node = node.parent - return indexes - - -class Literal(LeafWithNewLines): - __slots__ = () - - def eval(self): - return literal_eval(self.value) - - -class Number(Literal): - type = 'number' - __slots__ = () - - -class String(Literal): - type = 'string' - __slots__ = () - - -class Operator(Leaf): - type = 'operator' - __slots__ = () - - def __str__(self): - return self.value - - def __eq__(self, other): - """ - Make comparisons with strings easy. - Improves the readability of the parser. - """ - if isinstance(other, Operator): - return self is other - else: - return self.value == other - - def __ne__(self, other): - """Python 2 compatibility.""" - return self.value != other - - def __hash__(self): - return hash(self.value) - - -class Keyword(Leaf): - type = 'keyword' - __slots__ = () - - def __eq__(self, other): - """ - Make comparisons with strings easy. - Improves the readability of the parser. - """ - if isinstance(other, Keyword): - return self is other - return self.value == other - - def __ne__(self, other): - """Python 2 compatibility.""" - return not self.__eq__(other) - - def __hash__(self): - return hash(self.value) - - -class BaseNode(Base): - """ - The super class for Scope, Import, Name and Statement. Every object in - the parser tree inherits from this class. - """ - __slots__ = ('children', 'parent') - type = None - - def __init__(self, children): - """ - Initialize :class:`BaseNode`. - - :param children: The module in which this Python object locates. - """ - for c in children: - c.parent = self - self.children = children - self.parent = None - - def move(self, line_offset, column_offset): - """ - Move the Node's start_pos. 
- """ - for c in self.children: - c.move(line_offset, column_offset) - - @property - def start_pos(self): - return self.children[0].start_pos - - @property - def end_pos(self): - return self.children[-1].end_pos - - def get_code(self): - return "".join(c.get_code() for c in self.children) - - @Python3Method - def name_for_position(self, position): - for c in self.children: - if isinstance(c, Leaf): - if isinstance(c, Name) and c.start_pos <= position <= c.end_pos: - return c - else: - result = c.name_for_position(position) - if result is not None: - return result - return None - - @Python3Method - def get_statement_for_position(self, pos): - for c in self.children: - if c.start_pos <= pos <= c.end_pos: - if c.type not in ('decorated', 'simple_stmt', 'suite') \ - and not isinstance(c, (Flow, ClassOrFunc)): - return c - else: - try: - return c.get_statement_for_position(pos) - except AttributeError: - pass # Must be a non-scope - return None - - def first_leaf(self): - try: - return self.children[0].first_leaf() - except AttributeError: - return self.children[0] - - @utf8_repr - def __repr__(self): - code = self.get_code().replace('\n', ' ') - if not is_py3: - code = code.encode(encoding, 'replace') - return "<%s: %s@%s,%s>" % \ - (type(self).__name__, code, self.start_pos[0], self.start_pos[1]) - - -class Node(BaseNode): - """Concrete implementation for interior nodes.""" - __slots__ = ('type',) - - def __init__(self, type, children): - """ - Initializer. - - Takes a type constant (a symbol number >= 256), a sequence of - child nodes, and an optional context keyword argument. - - As a side effect, the parent pointers of the children are updated. - """ - super(Node, self).__init__(children) - self.type = type - - def __repr__(self): - return "%s(%s, %r)" % (self.__class__.__name__, self.type, self.children) - - -class IsScopeMeta(type): - def __instancecheck__(self, other): - return other.is_scope() - - -class IsScope(use_metaclass(IsScopeMeta)): - pass - - -class Scope(BaseNode, DocstringMixin): - """ - Super class for the parser tree, which represents the state of a python - text file. - A Scope manages and owns its subscopes, which are classes and functions, as - well as variables and imports. It is used to access the structure of python - files. - - :param start_pos: The position (line and column) of the scope. - :type start_pos: tuple(int, int) - """ - __slots__ = ('names_dict',) - - def __init__(self, children): - super(Scope, self).__init__(children) - - @property - def returns(self): - # Needed here for fast_parser, because the fast_parser splits and - # returns will be in "normal" modules. 
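
name_for_position() and get_statement_for_position() above both rely on the same property: children are ordered and their position ranges nest, so a linear scan per level finds the covering child and recursion does the rest. The same lookup over a toy node shape (hypothetical classes, integer offsets instead of (line, column) pairs):

class ToyNode(object):
    def __init__(self, children):
        self.children = children  # ToyNode instances or (name, start, end) leaves

    def start(self):
        c = self.children[0]
        return c[1] if isinstance(c, tuple) else c.start()

    def end(self):
        c = self.children[-1]
        return c[2] if isinstance(c, tuple) else c.end()

    def name_at(self, pos):
        # Descend into whichever child's [start, end] range covers pos.
        for child in self.children:
            if isinstance(child, tuple):
                name, start, end = child
                if start <= pos <= end:
                    return name
            elif child.start() <= pos <= child.end():
                return child.name_at(pos)
        return None

tree = ToyNode([('def', 0, 2), ToyNode([('foo', 4, 6), ('bar', 8, 10)])])
print(tree.name_at(5))  # foo
print(tree.name_at(3))  # None: position falls between tokens
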
- return self._search_in_scope(ReturnStmt) - - @property - def subscopes(self): - return self._search_in_scope(Scope) - - @property - def flows(self): - return self._search_in_scope(Flow) - - @property - def imports(self): - return self._search_in_scope(Import) - - @Python3Method - def _search_in_scope(self, typ): - def scan(children): - elements = [] - for element in children: - if isinstance(element, typ): - elements.append(element) - if is_node(element, 'suite', 'simple_stmt', 'decorated') \ - or isinstance(element, Flow): - elements += scan(element.children) - return elements - - return scan(self.children) - - @property - def statements(self): - return self._search_in_scope((ExprStmt, KeywordStatement)) - - def is_scope(self): - return True - - def __repr__(self): - try: - name = self.path - except AttributeError: - try: - name = self.name - except AttributeError: - name = self.command - - return "<%s: %s@%s-%s>" % (type(self).__name__, name, - self.start_pos[0], self.end_pos[0]) - - def walk(self): - yield self - for s in self.subscopes: - for scope in s.walk(): - yield scope - - for r in self.statements: - while isinstance(r, Flow): - for scope in r.walk(): - yield scope - r = r.next - - -class Module(Scope): - """ - The top scope, which is always a module. - Depending on the underlying parser this may be a full module or just a part - of a module. - """ - __slots__ = ('path', 'global_names', 'used_names', '_name', - 'error_statement_stacks') - type = 'file_input' - - def __init__(self, children): - """ - Initialize :class:`Module`. - - :type path: str - :arg path: File path to this module. - - .. todo:: Document `top_module`. - """ - super(Module, self).__init__(children) - self.path = None # Set later. - - @property - @cache.underscore_memoization - def name(self): - """ This is used for the goto functions. """ - if self.path is None: - string = '' # no path -> empty name - else: - sep = (re.escape(os.path.sep),) * 2 - r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self.path) - # Remove PEP 3149 names - string = re.sub('\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1)) - # Positions are not real, but a module starts at (1, 0) - p = (1, 0) - name = Name(zero_position_modifier, string, p) - name.parent = self - return name - - @property - def has_explicit_absolute_import(self): - """ - Checks if imports in this module are explicitly absolute, i.e. there - is a ``__future__`` import. - """ - # TODO this is a strange scan and not fully correct. I think Python's - # parser does it in a different way and scans for the first - # statement/import with a tokenizer (to check for syntax changes like - # the future print statement). - for imp in self.imports: - if imp.type == 'import_from' and imp.level == 0: - for path in imp.paths(): - if [str(name) for name in path] == ['__future__', 'absolute_import']: - return True - return False - - -class Decorator(BaseNode): - type = 'decorator' - __slots__ = () - - -class ClassOrFunc(Scope): - __slots__ = () - - @property - def name(self): - return self.children[1] - - def get_decorators(self): - decorated = self.parent - if is_node(decorated, 'decorated'): - if is_node(decorated.children[0], 'decorators'): - return decorated.children[0].children - else: - return decorated.children[:1] - else: - return [] - - -class Class(ClassOrFunc): - """ - Used to store the parsed contents of a python class. - - :param name: The Class name. - :type name: str - :param supers: The super classes of a Class. 
-    :type supers: list
-    :param start_pos: The start position (line, column) of the class.
-    :type start_pos: tuple(int, int)
-    """
-    type = 'classdef'
-    __slots__ = ()
-
-    def __init__(self, children):
-        super(Class, self).__init__(children)
-
-    def get_super_arglist(self):
-        if self.children[2] != '(':  # Has no parentheses
-            return None
-        else:
-            if self.children[3] == ')':  # Empty parentheses
-                return None
-            else:
-                return self.children[3]
-
-    @property
-    def doc(self):
-        """
-        Return a document string including call signature of __init__.
-        """
-        docstr = self.raw_doc
-        for sub in self.subscopes:
-            if str(sub.name) == '__init__':
-                return '%s\n\n%s' % (
-                    sub.get_call_signature(func_name=self.name), docstr)
-        return docstr
-
-
-def _create_params(parent, argslist_list):
-    """
-    `argslist_list` is a list that can contain an argslist as a first item, but
-    usually does not. It's basically the items between the parameter brackets
-    (which is at most one item).
-    This function modifies the parser structure. It generates `Param` objects
-    from the normal ast. Those param objects do not exist in a normal ast, but
-    make the evaluation of the ast tree so much easier.
-    You could also say that this function replaces the argslist node with a
-    list of Param objects.
-    """
-    def check_python2_nested_param(node):
-        """
-        Python 2 allows params to look like ``def x(a, (b, c))``, which is
-        basically a way of unpacking tuples in params. Python 3 has ditched
-        this behavior. Jedi currently just ignores those constructs.
-        """
-        return node.type == 'tfpdef' and node.children[0] == '('
-
-    try:
-        first = argslist_list[0]
-    except IndexError:
-        return []
-
-    if first.type in ('name', 'tfpdef'):
-        if check_python2_nested_param(first):
-            return []
-        else:
-            return [Param([first], parent)]
-    else:  # argslist is a `typedargslist` or a `varargslist`.
-        children = first.children
-        params = []
-        start = 0
-        # Start with offset 1, because the end is higher.
-        for end, child in enumerate(children + [None], 1):
-            if child is None or child == ',':
-                new_children = children[start:end]
-                if new_children:  # Could as well be comma and then end.
-                    if check_python2_nested_param(new_children[0]):
-                        continue
-                    params.append(Param(new_children, parent))
-                start = end
-        return params
-
-
-class Function(ClassOrFunc):
-    """
-    Used to store the parsed contents of a python function.
-    """
-    __slots__ = ('listeners',)
-    type = 'funcdef'
-
-    def __init__(self, children):
-        super(Function, self).__init__(children)
-        self.listeners = set()  # not used here, but in evaluation.
-        parameters = self.children[2]  # After `def foo`
-        parameters.children[1:-1] = _create_params(parameters, parameters.children[1:-1])
-
-    @property
-    def params(self):
-        return self.children[2].children[1:-1]
-
-    @property
-    def name(self):
-        return self.children[1]  # First token after `def`
-
-    @property
-    def yields(self):
-        # TODO This is incorrect, yields are also possible in a statement.
-        return self._search_in_scope(YieldExpr)
-
-    def is_generator(self):
-        return bool(self.yields)
-
-    def annotation(self):
-        try:
-            return self.children[6]  # 6th element: def foo(...) -> bar
-        except IndexError:
-            return None
-
-    def get_call_signature(self, width=72, func_name=None):
-        """
-        Generate call signature of this function.
-
-        :param width: Fold lines if a line is longer than this value.
-        :type width: int
-        :arg func_name: Override function name when given.
- :type func_name: str - - :rtype: str - """ - func_name = func_name or self.children[1] - code = unicode(func_name) + self.children[2].get_code() - return '\n'.join(textwrap.wrap(code, width)) - - @property - def doc(self): - """ Return a document string including call signature. """ - docstr = self.raw_doc - return '%s\n\n%s' % (self.get_call_signature(), docstr) - - -class Lambda(Function): - """ - Lambdas are basically trimmed functions, so give it the same interface. - """ - type = 'lambda' - __slots__ = () - - def __init__(self, children): - # We don't want to call the Function constructor, call its parent. - super(Function, self).__init__(children) - self.listeners = set() # not used here, but in evaluation. - lst = self.children[1:-2] # After `def foo` - self.children[1:-2] = _create_params(self, lst) - - @property - def params(self): - return self.children[1:-2] - - def is_generator(self): - return False - - def yields(self): - return [] - - def __repr__(self): - return "<%s@%s>" % (self.__class__.__name__, self.start_pos) - - -class Flow(BaseNode): - __slots__ = () - - -class IfStmt(Flow): - type = 'if_stmt' - __slots__ = () - - def check_nodes(self): - """ - Returns all the `test` nodes that are defined as x, here: - - if x: - pass - elif x: - pass - """ - for i, c in enumerate(self.children): - if c in ('elif', 'if'): - yield self.children[i + 1] - - def node_in_which_check_node(self, node): - for check_node in reversed(list(self.check_nodes())): - if check_node.start_pos < node.start_pos: - return check_node - - def node_after_else(self, node): - """ - Checks if a node is defined after `else`. - """ - for c in self.children: - if c == 'else': - if node.start_pos > c.start_pos: - return True - else: - return False - - -class WhileStmt(Flow): - type = 'while_stmt' - __slots__ = () - - -class ForStmt(Flow): - type = 'for_stmt' - __slots__ = () - - -class TryStmt(Flow): - type = 'try_stmt' - __slots__ = () - - def except_clauses(self): - """ - Returns the ``test`` nodes found in ``except_clause`` nodes. - Returns ``[None]`` for except clauses without an exception given. - """ - for node in self.children: - if node.type == 'except_clause': - yield node.children[1] - elif node == 'except': - yield None - - -class WithStmt(Flow): - type = 'with_stmt' - __slots__ = () - - def get_defined_names(self): - names = [] - for with_item in self.children[1:-2:2]: - # Check with items for 'as' names. - if is_node(with_item, 'with_item'): - names += _defined_names(with_item.children[2]) - return names - - def node_from_name(self, name): - node = name - while True: - node = node.parent - if is_node(node, 'with_item'): - return node.children[0] - - -class Import(BaseNode): - __slots__ = () - - def path_for_name(self, name): - try: - # The name may be an alias. If it is, just map it back to the name. 
-            name = self.aliases()[name]
-        except KeyError:
-            pass
-
-        for path in self.paths():
-            if name in path:
-                return path[:path.index(name) + 1]
-        raise ValueError('Name should be defined in the import itself')
-
-    def is_nested(self):
-        return False  # By default, sub classes may overwrite this behavior
-
-    def is_star_import(self):
-        return self.children[-1] == '*'
-
-
-class ImportFrom(Import):
-    type = 'import_from'
-    __slots__ = ()
-
-    def get_defined_names(self):
-        return [alias or name for name, alias in self._as_name_tuples()]
-
-    def aliases(self):
-        """Mapping from alias to its corresponding name."""
-        return dict((alias, name) for name, alias in self._as_name_tuples()
-                    if alias is not None)
-
-    def get_from_names(self):
-        for n in self.children[1:]:
-            if n not in ('.', '...'):
-                break
-        if is_node(n, 'dotted_name'):  # from x.y import
-            return n.children[::2]
-        elif n == 'import':  # from . import
-            return []
-        else:  # from x import
-            return [n]
-
-    @property
-    def level(self):
-        """The level parameter of ``__import__``."""
-        level = 0
-        for n in self.children[1:]:
-            if n in ('.', '...'):
-                level += len(n.value)
-            else:
-                break
-        return level
-
-    def _as_name_tuples(self):
-        last = self.children[-1]
-        if last == ')':
-            last = self.children[-2]
-        elif last == '*':
-            return  # No names defined directly.
-
-        if is_node(last, 'import_as_names'):
-            as_names = last.children[::2]
-        else:
-            as_names = [last]
-        for as_name in as_names:
-            if as_name.type == 'name':
-                yield as_name, None
-            else:
-                yield as_name.children[::2]  # yields x, y -> ``x as y``
-
-    def star_import_name(self):
-        """
-        The last name defined in a star import.
-        """
-        return self.paths()[-1][-1]
-
-    def paths(self):
-        """
-        The import paths defined in an import statement. Typically an array
-        like this: ``[<Name: datetime>, <Name: date>]``.
-        """
-        dotted = self.get_from_names()
-
-        if self.children[-1] == '*':
-            return [dotted]
-        return [dotted + [name] for name, alias in self._as_name_tuples()]
-
-
-class ImportName(Import):
-    """For ``import_name`` nodes. Covers normal imports without ``from``."""
-    type = 'import_name'
-    __slots__ = ()
-
-    def get_defined_names(self):
-        return [alias or path[0] for path, alias in self._dotted_as_names()]
-
-    @property
-    def level(self):
-        """The level parameter of ``__import__``."""
-        return 0  # Obviously 0 for imports without from.
-
-    def paths(self):
-        return [path for path, alias in self._dotted_as_names()]
-
-    def _dotted_as_names(self):
-        """Generator of (list(path), alias) where alias may be None."""
-        dotted_as_names = self.children[1]
-        if is_node(dotted_as_names, 'dotted_as_names'):
-            as_names = dotted_as_names.children[::2]
-        else:
-            as_names = [dotted_as_names]
-
-        for as_name in as_names:
-            if is_node(as_name, 'dotted_as_name'):
-                alias = as_name.children[2]
-                as_name = as_name.children[0]
-            else:
-                alias = None
-            if as_name.type == 'name':
-                yield [as_name], alias
-            else:
-                # dotted_names
-                yield as_name.children[::2], alias
-
-    def is_nested(self):
-        """
-        This checks for the special case of nested imports, without aliases and
-        from statement::
-
-            import foo.bar
-        """
-        return [1 for path, alias in self._dotted_as_names()
-                if alias is None and len(path) > 1]
-
-    def aliases(self):
-        return dict((alias, path[-1]) for path, alias in self._dotted_as_names()
-                    if alias is not None)
-
-
-class KeywordStatement(BaseNode):
-    """
-    For the following statements: `assert`, `del`, `global`, `nonlocal`,
-    `raise`, `return`, `yield`, `pass`, `continue`, `break`.
- """ - __slots__ = () - - @property - def keyword(self): - return self.children[0].value - - -class AssertStmt(KeywordStatement): - type = 'assert_stmt' - __slots__ = () - - def assertion(self): - return self.children[1] - - -class GlobalStmt(KeywordStatement): - type = 'global_stmt' - __slots__ = () - - def get_defined_names(self): - return [] - - def get_global_names(self): - return self.children[1::2] - - -class ReturnStmt(KeywordStatement): - type = 'return_stmt' - __slots__ = () - - -class YieldExpr(BaseNode): - type = 'yield_expr' - __slots__ = () - - -def _defined_names(current): - """ - A helper function to find the defined names in statements, for loops and - list comprehensions. - """ - names = [] - if is_node(current, 'testlist_star_expr', 'testlist_comp', 'exprlist'): - for child in current.children[::2]: - names += _defined_names(child) - elif is_node(current, 'atom'): - names += _defined_names(current.children[1]) - elif is_node(current, 'power'): - if current.children[-2] != '**': # Just if there's no operation - trailer = current.children[-1] - if trailer.children[0] == '.': - names.append(trailer.children[1]) - else: - names.append(current) - return names - - -class ExprStmt(BaseNode, DocstringMixin): - type = 'expr_stmt' - __slots__ = () - - def get_defined_names(self): - return list(chain.from_iterable(_defined_names(self.children[i]) - for i in range(0, len(self.children) - 2, 2) - if '=' in self.children[i + 1].value)) - - def get_rhs(self): - """Returns the right-hand-side of the equals.""" - return self.children[-1] - - def first_operation(self): - """ - Returns `+=`, `=`, etc or None if there is no operation. - """ - try: - return self.children[1] - except IndexError: - return None - - -class Param(BaseNode): - """ - It's a helper class that makes business logic with params much easier. The - Python grammar defines no ``param`` node. It defines it in a different way - that is not really suited to working with parameters. - """ - type = 'param' - - def __init__(self, children, parent): - super(Param, self).__init__(children) - self.parent = parent - for child in children: - child.parent = self - - @property - def stars(self): - first = self.children[0] - if first in ('*', '**'): - return len(first.value) - return 0 - - @property - def default(self): - try: - return self.children[int(self.children[0] in ('*', '**')) + 2] - except IndexError: - return None - - def annotation(self): - # Generate from tfpdef. - raise NotImplementedError - - def _tfpdef(self): - """ - tfpdef: see grammar.txt. 
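
The Param class above turns the grammar's flat child list into an API by pure offset arithmetic: a leading '*' or '**' shifts where the name sits, and a default, when present, follows the name and an '=' sign. The same arithmetic in miniature (toy list-of-strings children, not real leaf nodes):

def describe_param(children):
    # children: e.g. ['x', '=', '3'], ['*', 'args'] or ['**', 'kwargs']
    stars = len(children[0]) if children[0] in ('*', '**') else 0
    offset = int(stars > 0)
    name = children[offset]
    # A default sits two slots after the name:  name '=' default
    default = children[offset + 2] if len(children) > offset + 2 else None
    return stars, name, default

print(describe_param(['x', '=', '3']))   # (0, 'x', '3')
print(describe_param(['*', 'args']))     # (1, 'args', None)
print(describe_param(['**', 'kwargs']))  # (2, 'kwargs', None)
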
- """ - offset = int(self.children[0] in ('*', '**')) - return self.children[offset] - - @property - def name(self): - if is_node(self._tfpdef(), 'tfpdef'): - return self._tfpdef().children[0] - else: - return self._tfpdef() - - @property - def position_nr(self): - return self.parent.children.index(self) - 1 - - @property - def parent_function(self): - return self.get_parent_until(IsScope) - - def __repr__(self): - default = '' if self.default is None else '=%s' % self.default - return '<%s: %s>' % (type(self).__name__, str(self._tfpdef()) + default) - - -class CompFor(BaseNode): - type = 'comp_for' - __slots__ = () - - def is_scope(self): - return True - - @property - def names_dict(self): - dct = {} - for name in self.get_defined_names(): - arr = dct.setdefault(name.value, []) - arr.append(name) - return dct - - def names_dicts(self, search_global): - yield self.names_dict - - def get_defined_names(self): - return _defined_names(self.children[1]) diff --git a/pythonFiles/release/jedi/parser/user_context.py b/pythonFiles/release/jedi/parser/user_context.py deleted file mode 100755 index 3cb24a7ceeb5..000000000000 --- a/pythonFiles/release/jedi/parser/user_context.py +++ /dev/null @@ -1,339 +0,0 @@ -import re -import os -import keyword - -from jedi import cache -from jedi import common -from jedi.parser import tokenize, Parser -from jedi._compatibility import u -from jedi.parser.fast import FastParser -from jedi.parser import tree -from jedi import debug -from jedi.common import PushBackIterator - - -REPLACE_STR = r"[bBuU]?[rR]?" + (r"(?:(')[^\n'\\]*(?:\\.[^\n'\\]*)*(?:'|$)" + - '|' + - r'(")[^\n"\\]*(?:\\.[^\n"\\]*)*(?:"|$))') -REPLACE_STR = re.compile(REPLACE_STR) - - -class UserContext(object): - """ - :param source: The source code of the file. - :param position: The position, the user is currently in. Only important \ - for the main file. - """ - def __init__(self, source, position): - self.source = source - self.position = position - self._line_cache = None - - self._relevant_temp = None - - @cache.underscore_memoization - def get_path_until_cursor(self): - """ Get the path under the cursor. """ - path, self._start_cursor_pos = self._calc_path_until_cursor(self.position) - return path - - def _backwards_line_generator(self, start_pos): - self._line_temp, self._column_temp = start_pos - first_line = self.get_line(start_pos[0])[:self._column_temp] - - self._line_length = self._column_temp - yield first_line[::-1] + '\n' - - while True: - self._line_temp -= 1 - line = self.get_line(self._line_temp) - self._line_length = len(line) - yield line[::-1] + '\n' - - def _get_backwards_tokenizer(self, start_pos, line_gen=None): - if line_gen is None: - line_gen = self._backwards_line_generator(start_pos) - token_gen = tokenize.generate_tokens(lambda: next(line_gen)) - for typ, tok_str, tok_start_pos, prefix in token_gen: - line = self.get_line(self._line_temp) - # Calculate the real start_pos of the token. - if tok_start_pos[0] == 1: - # We are in the first checked line - column = start_pos[1] - tok_start_pos[1] - else: - column = len(line) - tok_start_pos[1] - # Multi-line docstrings must be accounted for. - first_line = common.splitlines(tok_str)[0] - column -= len(first_line) - # Reverse the token again, so that it is in normal order again. - yield typ, tok_str[::-1], (self._line_temp, column), prefix[::-1] - - def _calc_path_until_cursor(self, start_pos): - """ - Something like a reverse tokenizer that tokenizes the reversed strings. 
- """ - open_brackets = ['(', '[', '{'] - close_brackets = [')', ']', '}'] - - start_cursor = start_pos - gen = PushBackIterator(self._get_backwards_tokenizer(start_pos)) - string = u('') - level = 0 - force_point = False - last_type = None - is_first = True - for tok_type, tok_str, tok_start_pos, prefix in gen: - if is_first: - if prefix: # whitespace is not a path - return u(''), start_cursor - is_first = False - - if last_type == tok_type == tokenize.NAME: - string = ' ' + string - - if level: - if tok_str in close_brackets: - level += 1 - elif tok_str in open_brackets: - level -= 1 - elif tok_str == '.': - force_point = False - elif force_point: - # Reversed tokenizing, therefore a number is recognized as a - # floating point number. - # The same is true for string prefixes -> represented as a - # combination of string and name. - if tok_type == tokenize.NUMBER and tok_str[-1] == '.' \ - or tok_type == tokenize.NAME and last_type == tokenize.STRING \ - and tok_str.lower() in ('b', 'u', 'r', 'br', 'ur'): - force_point = False - else: - break - elif tok_str in close_brackets: - level += 1 - elif tok_type in [tokenize.NAME, tokenize.STRING]: - if keyword.iskeyword(tok_str) and string: - # If there's already something in the string, a keyword - # never adds any meaning to the current statement. - break - force_point = True - elif tok_type == tokenize.NUMBER: - pass - else: - if tok_str == '-': - next_tok = next(gen) - if next_tok[1] == 'e': - gen.push_back(next_tok) - else: - break - else: - break - - start_cursor = tok_start_pos - string = tok_str + prefix + string - last_type = tok_type - - # Don't need whitespace around a statement. - return string.strip(), start_cursor - - def get_path_under_cursor(self): - """ - Return the path under the cursor. If there is a rest of the path left, - it will be added to the stuff before it. - """ - return self.get_path_until_cursor() + self.get_path_after_cursor() - - def get_path_after_cursor(self): - line = self.get_line(self.position[0]) - return re.search("[\w\d]*", line[self.position[1]:]).group(0) - - def get_operator_under_cursor(self): - line = self.get_line(self.position[0]) - after = re.match("[^\w\s]+", line[self.position[1]:]) - before = re.match("[^\w\s]+", line[:self.position[1]][::-1]) - return (before.group(0) if before is not None else '') \ - + (after.group(0) if after is not None else '') - - def call_signature(self): - """ - :return: Tuple of string of the call and the index of the cursor. - """ - def get_line(pos): - def simplify_str(match): - """ - To avoid having strings without end marks (error tokens) and - strings that just screw up all the call signatures, just - simplify everything. - """ - mark = match.group(1) or match.group(2) - return mark + ' ' * (len(match.group(0)) - 2) + mark - - line_gen = self._backwards_line_generator(pos) - for line in line_gen: - # We have to switch the already backwards lines twice, because - # we scan them from start. - line = line[::-1] - modified = re.sub(REPLACE_STR, simplify_str, line) - yield modified[::-1] - - index = 0 - level = 0 - next_must_be_name = False - next_is_key = False - key_name = None - generator = self._get_backwards_tokenizer(self.position, get_line(self.position)) - for tok_type, tok_str, start_pos, prefix in generator: - if tok_str in tokenize.ALWAYS_BREAK_TOKENS: - break - elif next_must_be_name: - if tok_type == tokenize.NUMBER: - # If there's a number at the end of the string, it will be - # tokenized as a number. So add it to the name. 
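
UserContext above recovers "the path until the cursor" without reparsing the whole file: it feeds lines to the tokenizer in reverse order (each line itself reversed) and re-reverses every token it gets back. A word-level miniature of that reversal trick for a single line, illustrative only and far simpler than the real token stream:

import re

def path_until_cursor(line, column):
    # Walk backwards from the cursor, collecting a dotted access path.
    reversed_prefix = line[:column][::-1]
    parts, pos = [], 0
    pattern = re.compile(r'(\w+)(\.?)')  # reversed identifier, optional dot
    while True:
        m = pattern.match(reversed_prefix, pos)
        if not m:
            break
        parts.append(m.group(1)[::-1])  # re-reverse each path element
        if not m.group(2):              # no dot in front: path is complete
            break
        pos = m.end()
    return '.'.join(reversed(parts))

print(path_until_cursor('result = os.path.join', 21))  # os.path.join
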
- tok_type, t, _, _ = next(generator) - if tok_type == tokenize.NAME: - end_pos = start_pos[0], start_pos[1] + len(tok_str) - call, start_pos = self._calc_path_until_cursor(start_pos=end_pos) - return call, index, key_name, start_pos - index = 0 - next_must_be_name = False - elif next_is_key: - if tok_type == tokenize.NAME: - key_name = tok_str - next_is_key = False - - if tok_str == '(': - level += 1 - if level == 1: - next_must_be_name = True - level = 0 - elif tok_str == ')': - level -= 1 - elif tok_str == ',': - index += 1 - elif tok_str == '=': - next_is_key = True - return None, 0, None, (0, 0) - - def get_context(self, yield_positions=False): - self.get_path_until_cursor() # In case _start_cursor_pos is undefined. - pos = self._start_cursor_pos - while True: - # remove non important white space - line = self.get_line(pos[0]) - while True: - if pos[1] == 0: - line = self.get_line(pos[0] - 1) - if line and line[-1] == '\\': - pos = pos[0] - 1, len(line) - 1 - continue - else: - break - - if line[pos[1] - 1].isspace(): - pos = pos[0], pos[1] - 1 - else: - break - - try: - result, pos = self._calc_path_until_cursor(start_pos=pos) - if yield_positions: - yield pos - else: - yield result - except StopIteration: - if yield_positions: - yield None - else: - yield '' - - def get_line(self, line_nr): - if not self._line_cache: - self._line_cache = common.splitlines(self.source) - - if line_nr == 0: - # This is a fix for the zeroth line. We need a newline there, for - # the backwards parser. - return u('') - if line_nr < 0: - raise StopIteration() - try: - return self._line_cache[line_nr - 1] - except IndexError: - raise StopIteration() - - def get_position_line(self): - return self.get_line(self.position[0])[:self.position[1]] - - -class UserContextParser(object): - def __init__(self, grammar, source, path, position, user_context, - parser_done_callback, use_fast_parser=True): - self._grammar = grammar - self._source = source - self._path = path and os.path.abspath(path) - self._position = position - self._user_context = user_context - self._use_fast_parser = use_fast_parser - self._parser_done_callback = parser_done_callback - - @cache.underscore_memoization - def _parser(self): - cache.invalidate_star_import_cache(self._path) - if self._use_fast_parser: - parser = FastParser(self._grammar, self._source, self._path) - # Don't pickle that module, because the main module is changing quickly - cache.save_parser(self._path, parser, pickling=False) - else: - parser = Parser(self._grammar, self._source, self._path) - self._parser_done_callback(parser) - return parser - - @cache.underscore_memoization - def user_stmt(self): - module = self.module() - debug.speed('parsed') - return module.get_statement_for_position(self._position) - - @cache.underscore_memoization - def user_stmt_with_whitespace(self): - """ - Returns the statement under the cursor even if the statement lies - before the cursor. - """ - user_stmt = self.user_stmt() - - if not user_stmt: - # for statements like `from x import ` (cursor not in statement) - # or `abs( ` where the cursor is out in the whitespace. - if self._user_context.get_path_under_cursor(): - # We really should have a user_stmt, but the parser couldn't - # process it - probably a Syntax Error (or in a comment). 
- debug.warning('No statement under the cursor.') - return - pos = next(self._user_context.get_context(yield_positions=True)) - user_stmt = self.module().get_statement_for_position(pos) - return user_stmt - - @cache.underscore_memoization - def user_scope(self): - """ - Returns the scope in which the user resides. This includes flows. - """ - user_stmt = self.user_stmt() - if user_stmt is None: - def scan(scope): - for s in scope.children: - if s.start_pos <= self._position <= s.end_pos: - if isinstance(s, (tree.Scope, tree.Flow)): - if isinstance(s, tree.Flow): - return s - return scan(s) or s - elif s.type in ('suite', 'decorated'): - return scan(s) - - return scan(self.module()) or self.module() - else: - return user_stmt.get_parent_scope(include_flows=True) - - def module(self): - return self._parser().module diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index 0a2160fc15c5..bf20fb3f44ba 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -59,7 +59,7 @@ export class Tokenizer implements ITokenizer { throw new Error('Invalid range length'); } - this.mode = mode ? mode : TokenizerMode.Full; + this.mode = mode !== undefined ? mode : TokenizerMode.Full; this.cs = new CharacterStream(text); this.cs.position = start; @@ -127,6 +127,10 @@ export class Tokenizer implements ITokenizer { case Char.Colon: this.tokens.push(new Token(TokenType.Colon, this.cs.position, 1)); break; + case Char.Period: + case Char.At: + this.tokens.push(new Token(TokenType.Unknown, this.cs.position, 1)); + break; default: if (this.isPossibleNumber()) { if (this.tryNumber()) { diff --git a/src/client/providers/completionProvider.ts b/src/client/providers/completionProvider.ts index ba18829db73d..fb0ae33bfb1a 100644 --- a/src/client/providers/completionProvider.ts +++ b/src/client/providers/completionProvider.ts @@ -33,8 +33,7 @@ export class PythonCompletionItemProvider implements vscode.CompletionItemProvid if (!item.documentation) { const itemInfos = await this.completionSource.getDocumentation(item, token); if (itemInfos && itemInfos.length > 0) { - item.detail = itemInfos[0].detail; - item.documentation = itemInfos[0].documentation; + item.documentation = itemInfos[0].tooltip; } } return item; diff --git a/src/client/providers/itemInfoSource.ts b/src/client/providers/itemInfoSource.ts index b78515c1822f..b851f61b533d 100644 --- a/src/client/providers/itemInfoSource.ts +++ b/src/client/providers/itemInfoSource.ts @@ -7,13 +7,12 @@ import * as vscode from 'vscode'; import { RestTextConverter } from '../common/markdown/restTextConverter'; import { JediFactory } from '../languageServices/jediProxyFactory'; import * as proxy from './jediProxy'; -import { IHoverItem } from './jediProxy'; export class LanguageItemInfo { constructor( public tooltip: vscode.MarkdownString, public detail: string, - public documentation: vscode.MarkdownString) { } + public signature: vscode.MarkdownString) { } } export class ItemInfoSource { @@ -83,14 +82,12 @@ export class ItemInfoSource { private getItemInfoFromHoverResult(data: proxy.IHoverResult, currentWord: string): LanguageItemInfo[] { const infos: LanguageItemInfo[] = []; - const capturedInfo: string[] = []; data.items.forEach(item => { const signature = this.getSignature(item, currentWord); let tooltip = new vscode.MarkdownString(); if (item.docstring) { let lines = item.docstring.split(/\r?\n/); - const dnd = this.getDetailAndDescription(item, lines); // If the docstring starts with the signature, then remove 
those lines from the docstring. if (lines.length > 0 && item.signature.indexOf(lines[0]) === 0) { @@ -100,11 +97,10 @@ export class ItemInfoSource { lines = lines.filter((line, index) => index > endIndex); } } - if (lines.length > 0 && item.signature.startsWith(currentWord) && lines[0].startsWith(currentWord) && lines[0].endsWith(')')) { + if (lines.length > 0 && currentWord.length > 0 && item.signature.startsWith(currentWord) && lines[0].startsWith(currentWord) && lines[0].endsWith(')')) { lines.shift(); } - // Tooltip is only used in hover if (signature.length > 0) { tooltip = tooltip.appendMarkdown(['```python', signature, '```', ''].join(EOL)); } @@ -112,16 +108,7 @@ export class ItemInfoSource { const description = this.textConverter.toMarkdown(lines.join(EOL)); tooltip = tooltip.appendMarkdown(description); - const documentation = this.textConverter.toMarkdown(dnd[1]); // Used only in completion list - infos.push(new LanguageItemInfo(tooltip, dnd[0], new vscode.MarkdownString(documentation))); - - const key = signature + lines.join(''); - // Sometimes we have duplicate documentation, one with a period at the end. - if (capturedInfo.indexOf(key) >= 0 || capturedInfo.indexOf(`${key}.`) >= 0) { - return; - } - capturedInfo.push(key); - capturedInfo.push(`${key}.`); + infos.push(new LanguageItemInfo(tooltip, item.description, new vscode.MarkdownString(signature))); return; } @@ -131,40 +118,21 @@ export class ItemInfoSource { } const description = this.textConverter.toMarkdown(item.description); tooltip.appendMarkdown(description); - - const lines = item.description.split(EOL); - const dd = this.getDetailAndDescription(item, lines); - const documentation = this.textConverter.escapeMarkdown(dd[1]); - infos.push(new LanguageItemInfo(tooltip, dd[0], new vscode.MarkdownString(documentation))); - - const key = signature + lines.join(''); - // Sometimes we have duplicate documentation, one with a period at the end. - if (capturedInfo.indexOf(key) >= 0 || capturedInfo.indexOf(`${key}.`) >= 0) { - return; - } - - capturedInfo.push(key); - capturedInfo.push(`${key}.`); + infos.push(new LanguageItemInfo(tooltip, item.description, new vscode.MarkdownString(signature))); return; } + + if (item.text) { // Most probably variable type + const code = currentWord && currentWord.length > 0 + ? `${currentWord}: ${item.text}` + : item.text; + tooltip.appendMarkdown(['```python', code, '```', ''].join(EOL)); + infos.push(new LanguageItemInfo(tooltip, '', new vscode.MarkdownString())); + } }); return infos; } - private getDetailAndDescription(item: IHoverItem, lines: string[]): [string, string] { - let detail: string; - let description: string; - - if (item.signature && item.signature.length > 0 && lines.length > 0 && lines[0].indexOf(item.signature) >= 0) { - detail = lines.length > 0 ? 
lines[0] : ''; - description = lines.filter((line, index) => index > 0).join(EOL).trim(); - } else { - detail = item.description; - description = lines.join(EOL).trim(); - } - return [detail, description]; - } - private getSignature(item: proxy.IHoverItem, currentWord: string): string { let { signature } = item; switch (item.kind) { diff --git a/src/client/providers/jediProxy.ts b/src/client/providers/jediProxy.ts index df2c537a53c0..4634b729f6ec 100644 --- a/src/client/providers/jediProxy.ts +++ b/src/client/providers/jediProxy.ts @@ -289,14 +289,8 @@ export class JediProxy implements vscode.Disposable { const pythonProcess = await this.serviceContainer.get(IPythonExecutionFactory).create(Uri.file(this.workspacePath)); const args = ['completion.py']; if (typeof this.pythonSettings.jediPath !== 'string' || this.pythonSettings.jediPath.length === 0) { - if (Array.isArray(this.pythonSettings.devOptions) && - this.pythonSettings.devOptions.some(item => item.toUpperCase().trim() === 'USERELEASEAUTOCOMP')) { - // Use standard version of jedi. - args.push('std'); - } else { - // Use preview version of jedi. - args.push('preview'); - } + // Use released version of jedi. + args.push('release'); } else { args.push('custom'); args.push(this.pythonSettings.jediPath); diff --git a/src/test/autocomplete/base.test.ts b/src/test/autocomplete/base.test.ts index 100e009e042f..bf07ac4fd783 100644 --- a/src/test/autocomplete/base.test.ts +++ b/src/test/autocomplete/base.test.ts @@ -217,6 +217,5 @@ function checkDocumentation(item: vscode.CompletionItem, expectedContains: strin assert.notEqual(documentation, null, 'Documentation is not MarkdownString'); const inDoc = documentation.value.indexOf(expectedContains) >= 0; - const inDetails = item.detail!.indexOf(expectedContains) >= 0; - assert.equal(inDoc !== inDetails, true, 'Documentation incorrect'); + assert.equal(inDoc, true, 'Documentation incorrect'); } diff --git a/src/test/definitions/hover.test.ts b/src/test/definitions/hover.test.ts index 0abda7ea51c8..ba194a902446 100644 --- a/src/test/definitions/hover.test.ts +++ b/src/test/definitions/hover.test.ts @@ -228,9 +228,6 @@ suite('Hover Definition', () => { assert.equal(normalizeMarkedString(def[0].contents[0]), '```python' + EOL + 'class Thread(group=None, target=None, name=None, args=(), kwargs=None, verbose=None)' + EOL + '```' + EOL + - 'Thread(self, group=None, target=None, name=None,' + EOL + - 'args=(), kwargs=None, verbose=None)' + EOL + - '' + EOL + 'A class that represents a thread of control.' 
+ EOL + '' + EOL + 'This class can be safely subclassed in a limited fashion.', 'Invalid content items'); @@ -254,11 +251,8 @@ suite('Hover Definition', () => { if (contents.indexOf('```python') === -1) { assert.fail(contents, '', 'First line is incorrect', 'compare'); } - if (contents.indexOf('Random number generator base class used by bound module functions.') === -1) { - assert.fail(contents, '', '\'Random number generator\' message missing', 'compare'); - } - if (contents.indexOf('Class Random can also be subclassed if you want to use a different basic') === -1) { - assert.fail(contents, '', '\'Class Random message\' missing', 'compare'); + if (contents.indexOf('rnd: Random') === -1) { + assert.fail(contents, '', 'Variable name or type are missing', 'compare'); } }).then(done, done); }); diff --git a/src/test/format/extension.format.test.ts b/src/test/format/extension.format.test.ts index 03a44962bb06..8a9d09dff517 100644 --- a/src/test/format/extension.format.test.ts +++ b/src/test/format/extension.format.test.ts @@ -1,4 +1,3 @@ -import * as assert from 'assert'; import * as fs from 'fs-extra'; import * as path from 'path'; import * as vscode from 'vscode'; @@ -8,6 +7,7 @@ import { AutoPep8Formatter } from '../../client/formatters/autoPep8Formatter'; import { YapfFormatter } from '../../client/formatters/yapfFormatter'; import { closeActiveWindows, initialize, initializeTest } from '../initialize'; import { MockProcessService } from '../mocks/proc'; +import { compareFiles } from '../textUtils'; import { UnitTestIocContainer } from '../unittests/serviceRegistry'; const ch = vscode.window.createOutputChannel('Tests'); @@ -92,7 +92,7 @@ suite('Formatting', () => { await textEditor.edit(editBuilder => { edits.forEach(edit => editBuilder.replace(edit.range, edit.newText)); }); - assert.equal(textEditor.document.getText(), formattedContents, 'Formatted text is not the same'); + compareFiles(formattedContents, textEditor.document.getText()); } test('AutoPep8', async () => await testFormatting(new AutoPep8Formatter(ioc.serviceContainer), formattedAutoPep8, autoPep8FileToFormat, 'autopep8.output')); diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts index 86deb9282249..77074be82258 100644 --- a/src/test/language/tokenizer.test.ts +++ b/src/test/language/tokenizer.test.ts @@ -78,9 +78,11 @@ suite('Language.Tokenizer', () => { }); test('Unknown token', async () => { const t = new Tokenizer(); - const tokens = t.tokenize('.'); - assert.equal(tokens.count, 1); + const tokens = t.tokenize('.@x'); + assert.equal(tokens.count, 3); assert.equal(tokens.getItemAt(0).type, TokenType.Unknown); + assert.equal(tokens.getItemAt(1).type, TokenType.Unknown); + assert.equal(tokens.getItemAt(2).type, TokenType.Identifier); }); }); diff --git a/src/test/markdown/restTextConverter.test.ts b/src/test/markdown/restTextConverter.test.ts index 9b43d4d57657..ee08a8a9f2d6 100644 --- a/src/test/markdown/restTextConverter.test.ts +++ b/src/test/markdown/restTextConverter.test.ts @@ -1,30 +1,13 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
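// The suite-local compareFiles helper below moves to src/test/textUtils.ts (see the
// textUtils.ts hunk further down), so the markdown and formatting suites share one
// line-by-line comparison instead of keeping private copies.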
-import { expect } from 'chai'; import * as fs from 'fs-extra'; import * as path from 'path'; import { RestTextConverter } from '../../client/common/markdown/restTextConverter'; +import { compareFiles } from '../textUtils'; const srcPythoFilesPath = path.join(__dirname, '..', '..', '..', 'src', 'test', 'pythonFiles', 'markdown'); -function compareFiles(expectedContent: string, actualContent: string) { - const expectedLines = expectedContent.split(/\r?\n/); - const actualLines = actualContent.split(/\r?\n/); - - for (let i = 0; i < Math.min(expectedLines.length, actualLines.length); i += 1) { - const e = expectedLines[i]; - const a = actualLines[i]; - expect(e, `Difference at line ${i}`).to.be.equal(a); - } - - expect(actualLines.length, - expectedLines.length > actualLines.length - ? 'Actual contains more lines than expected' - : 'Expected contains more lines than the actual' - ).to.be.equal(expectedLines.length); -} - async function testConversion(fileName: string): Promise { const cvt = new RestTextConverter(); const file = path.join(srcPythoFilesPath, fileName); diff --git a/src/test/pythonFiles/autocomp/hoverTest.py b/src/test/pythonFiles/autocomp/hoverTest.py index cd16059499a5..d3c243991390 100644 --- a/src/test/pythonFiles/autocomp/hoverTest.py +++ b/src/test/pythonFiles/autocomp/hoverTest.py @@ -1,11 +1,11 @@ import random import math -for x in range(0,10): +for x in range(0, 10): print(x) rnd = random.Random() -print(rnd.randint(0,5)) +print(rnd.randint(0, 5)) print(math.acos(90)) import misc @@ -13,4 +13,4 @@ rnd2.randint() t = misc.Thread() -t.__init__() \ No newline at end of file +t.__init__() diff --git a/src/test/textUtils.ts b/src/test/textUtils.ts index b5d31b6e43f1..3805ab911dfd 100644 --- a/src/test/textUtils.ts +++ b/src/test/textUtils.ts @@ -1,5 +1,26 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +import { expect } from 'chai'; import { MarkedString } from 'vscode'; export function normalizeMarkedString(content: MarkedString): string { return typeof content === 'string' ? content : content.value; } + +export function compareFiles(expectedContent: string, actualContent: string) { + const expectedLines = expectedContent.split(/\r?\n/); + const actualLines = actualContent.split(/\r?\n/); + + for (let i = 0; i < Math.min(expectedLines.length, actualLines.length); i += 1) { + const e = expectedLines[i]; + const a = actualLines[i]; + expect(e, `Difference at line ${i}`).to.be.equal(a); + } + + expect(actualLines.length, + expectedLines.length > actualLines.length + ? 
'Actual contains more lines than expected' + : 'Expected contains more lines than the actual' + ).to.be.equal(expectedLines.length); +} From f8eaa93d18ef5789b1d0c6face447b45eecbbd0a Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 13 Feb 2018 14:47:16 -0800 Subject: [PATCH 055/103] Undo changes --- .vscode/settings.json | 11 +++-------- pythonFiles/completion.py | 2 +- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 33a5c99438be..be66f967c5c4 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -15,13 +15,8 @@ }, "typescript.tsdk": "./node_modules/typescript/lib", // we want to use the TS server from our node_modules folder to control its version "tslint.enable": true, - "python.linting.enabled": true, + "python.linting.enabled": false, "python.unitTest.promptToConfigure": false, "python.workspaceSymbols.enabled": false, - "python.formatting.provider": "none", - "files.insertFinalNewline": true, - "python.linting.pep8Enabled": false, - "python.linting.prospectorEnabled": false, - "python.linting.pydocstyleEnabled": false, - "python.linting.pylintEnabled": true -} + "python.formatting.provider": "none" +} \ No newline at end of file diff --git a/pythonFiles/completion.py b/pythonFiles/completion.py index 7a740f70a635..6669324a7cb1 100644 --- a/pythonFiles/completion.py +++ b/pythonFiles/completion.py @@ -88,7 +88,7 @@ def _generate_signature(self, completion): return '' return '%s(%s)' % ( completion.name, - ', '.join(p.description[6:] for p in completion.params if p)) + ', '.join(self._get_param_name(p.description) for p in completion.params if p)) def _get_call_signatures(self, script): """Extract call signatures from jedi.api.Script object in failsafe way. From e4372c60f9b7aeae49e3a7c9e86e00bfca5658bb Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 13 Feb 2018 15:13:00 -0800 Subject: [PATCH 056/103] Undo changes --- src/test/pythonFiles/autocomp/hoverTest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/pythonFiles/autocomp/hoverTest.py b/src/test/pythonFiles/autocomp/hoverTest.py index d3c243991390..0ff88d80dffc 100644 --- a/src/test/pythonFiles/autocomp/hoverTest.py +++ b/src/test/pythonFiles/autocomp/hoverTest.py @@ -13,4 +13,4 @@ rnd2.randint() t = misc.Thread() -t.__init__() +t.__init__() \ No newline at end of file From 469c8a70b54e20aa428cbfe33bc2bd7f42cb50a3 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 13 Feb 2018 15:44:21 -0800 Subject: [PATCH 057/103] Test fixes --- pythonFiles/completion.py | 8 ++++---- src/client/providers/jediProxy.ts | 5 +---- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/pythonFiles/completion.py b/pythonFiles/completion.py index 6669324a7cb1..e530be32b367 100644 --- a/pythonFiles/completion.py +++ b/pythonFiles/completion.py @@ -88,7 +88,7 @@ def _generate_signature(self, completion): return '' return '%s(%s)' % ( completion.name, - ', '.join(self._get_param_name(p.description) for p in completion.params if p)) + ', '.join(p.description[6:] for p in completion.params if p)) def _get_call_signatures(self, script): """Extract call signatures from jedi.api.Script object in failsafe way. 
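# The argv handling changed in the hunk below gives completion.py this command-line
# contract (illustrative invocations, not part of the patch):
#   python completion.py custom /path/to/jedi   -> use the custom jedi at argv[2] (preview mode)
#   python completion.py os,sys                 -> bundled release jedi; argv[1] lists modules to preload
#   python completion.py                        -> bundled release jedi, nothing preloaded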
@@ -639,7 +639,7 @@ def watch(self): if __name__ == '__main__': cachePrefix = 'v' modulesToLoad = '' - if len(sys.argv) > 0 and sys.argv[1] == 'custom': + if len(sys.argv) > 2 and sys.argv[1] == 'custom': jediPath = sys.argv[2] jediPreview = True cachePrefix = 'custom_v' @@ -648,8 +648,8 @@ def watch(self): else: #release jediPath = os.path.dirname(__file__) - if len(sys.argv) > 2: - modulesToLoad = sys.argv[2] + if len(sys.argv) > 1: + modulesToLoad = sys.argv[1] sys.path.insert(0, jediPath) import jedi diff --git a/src/client/providers/jediProxy.ts b/src/client/providers/jediProxy.ts index 4634b729f6ec..194134b949d8 100644 --- a/src/client/providers/jediProxy.ts +++ b/src/client/providers/jediProxy.ts @@ -288,10 +288,7 @@ export class JediProxy implements vscode.Disposable { this.languageServerStarted = createDeferred(); const pythonProcess = await this.serviceContainer.get(IPythonExecutionFactory).create(Uri.file(this.workspacePath)); const args = ['completion.py']; - if (typeof this.pythonSettings.jediPath !== 'string' || this.pythonSettings.jediPath.length === 0) { - // Use released version of jedi. - args.push('release'); - } else { + if (typeof this.pythonSettings.jediPath === 'string' && this.pythonSettings.jediPath.length > 0) { args.push('custom'); args.push(this.pythonSettings.jediPath); } From 609bbdd61e1af26a5d7fc2699a7f02075566d1f3 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 14 Feb 2018 16:18:33 -0800 Subject: [PATCH 058/103] More tests --- src/test/language/tokenizer.test.ts | 75 ++++++++++++++++++++++++++++- 1 file changed, 74 insertions(+), 1 deletion(-) diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts index 77074be82258..1799990caf69 100644 --- a/src/test/language/tokenizer.test.ts +++ b/src/test/language/tokenizer.test.ts @@ -76,7 +76,7 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(i).type, TokenType.Comment); } }); - test('Unknown token', async () => { + test('Period/At to unknown token', async () => { const t = new Tokenizer(); const tokens = t.tokenize('.@x'); assert.equal(tokens.count, 3); @@ -85,4 +85,77 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(1).type, TokenType.Unknown); assert.equal(tokens.getItemAt(2).type, TokenType.Identifier); }); + test('Unknown token', async () => { + const t = new Tokenizer(); + const tokens = t.tokenize('~$'); + assert.equal(tokens.count, 1); + + assert.equal(tokens.getItemAt(0).type, TokenType.Unknown); + }); + test('Hex number', async () => { + const t = new Tokenizer(); + const tokens = t.tokenize('1 0X2 0x3 0x'); + assert.equal(tokens.count, 4); + + assert.equal(tokens.getItemAt(0).type, TokenType.Number); + assert.equal(tokens.getItemAt(0).length, 1); + + assert.equal(tokens.getItemAt(1).type, TokenType.Number); + assert.equal(tokens.getItemAt(1).length, 3); + + assert.equal(tokens.getItemAt(2).type, TokenType.Number); + assert.equal(tokens.getItemAt(2).length, 3); + + assert.equal(tokens.getItemAt(3).type, TokenType.Unknown); + assert.equal(tokens.getItemAt(3).length, 2); + }); + test('Binary number', async () => { + const t = new Tokenizer(); + const tokens = t.tokenize('1 0B1 0b010 0b3 0b'); + assert.equal(tokens.count, 6); + + assert.equal(tokens.getItemAt(0).type, TokenType.Number); + assert.equal(tokens.getItemAt(0).length, 1); + + assert.equal(tokens.getItemAt(1).type, TokenType.Number); + assert.equal(tokens.getItemAt(1).length, 3); + + assert.equal(tokens.getItemAt(2).type, TokenType.Number); + 
assert.equal(tokens.getItemAt(2).length, 5); + + assert.equal(tokens.getItemAt(3).type, TokenType.Unknown); + assert.equal(tokens.getItemAt(3).length, 3); + + assert.equal(tokens.getItemAt(4).type, TokenType.Unknown); + assert.equal(tokens.getItemAt(4).length, 1); + + assert.equal(tokens.getItemAt(5).type, TokenType.Unknown); + assert.equal(tokens.getItemAt(5).length, 1); + }); + test('Octal number', async () => { + const t = new Tokenizer(); + const tokens = t.tokenize('1 0o4 0o077 0o9 0oO'); + assert.equal(tokens.count, 7); + + assert.equal(tokens.getItemAt(0).type, TokenType.Number); + assert.equal(tokens.getItemAt(0).length, 1); + + assert.equal(tokens.getItemAt(1).type, TokenType.Number); + assert.equal(tokens.getItemAt(1).length, 3); + + assert.equal(tokens.getItemAt(2).type, TokenType.Number); + assert.equal(tokens.getItemAt(2).length, 5); + + assert.equal(tokens.getItemAt(3).type, TokenType.Unknown); + assert.equal(tokens.getItemAt(3).length, 3); + + assert.equal(tokens.getItemAt(4).type, TokenType.Unknown); + assert.equal(tokens.getItemAt(4).length, 1); + + assert.equal(tokens.getItemAt(5).type, TokenType.Unknown); + assert.equal(tokens.getItemAt(5).length, 1); + + assert.equal(tokens.getItemAt(6).type, TokenType.Unknown); + assert.equal(tokens.getItemAt(6).length, 1); + }); }); From 7ea6fdabbd8f64d8895c4dce6e7a2d901df7aa75 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 14 Feb 2018 16:43:17 -0800 Subject: [PATCH 059/103] Tests --- src/test/language/tokenizer.test.ts | 47 ++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 14 deletions(-) diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts index 1799990caf69..c397ffec95e5 100644 --- a/src/test/language/tokenizer.test.ts +++ b/src/test/language/tokenizer.test.ts @@ -123,19 +123,19 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(2).type, TokenType.Number); assert.equal(tokens.getItemAt(2).length, 5); - assert.equal(tokens.getItemAt(3).type, TokenType.Unknown); - assert.equal(tokens.getItemAt(3).length, 3); + assert.equal(tokens.getItemAt(3).type, TokenType.Number); + assert.equal(tokens.getItemAt(3).length, 1); - assert.equal(tokens.getItemAt(4).type, TokenType.Unknown); - assert.equal(tokens.getItemAt(4).length, 1); + assert.equal(tokens.getItemAt(4).type, TokenType.Identifier); + assert.equal(tokens.getItemAt(4).length, 2); assert.equal(tokens.getItemAt(5).type, TokenType.Unknown); - assert.equal(tokens.getItemAt(5).length, 1); + assert.equal(tokens.getItemAt(5).length, 2); }); test('Octal number', async () => { const t = new Tokenizer(); const tokens = t.tokenize('1 0o4 0o077 0o9 0oO'); - assert.equal(tokens.count, 7); + assert.equal(tokens.count, 6); assert.equal(tokens.getItemAt(0).type, TokenType.Number); assert.equal(tokens.getItemAt(0).length, 1); @@ -146,16 +146,35 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(2).type, TokenType.Number); assert.equal(tokens.getItemAt(2).length, 5); - assert.equal(tokens.getItemAt(3).type, TokenType.Unknown); - assert.equal(tokens.getItemAt(3).length, 3); + assert.equal(tokens.getItemAt(3).type, TokenType.Number); + assert.equal(tokens.getItemAt(3).length, 1); - assert.equal(tokens.getItemAt(4).type, TokenType.Unknown); - assert.equal(tokens.getItemAt(4).length, 1); + assert.equal(tokens.getItemAt(4).type, TokenType.Identifier); + assert.equal(tokens.getItemAt(4).length, 2); assert.equal(tokens.getItemAt(5).type, TokenType.Unknown); - assert.equal(tokens.getItemAt(5).length, 1); - - 
assert.equal(tokens.getItemAt(6).type, TokenType.Unknown); - assert.equal(tokens.getItemAt(6).length, 1); + assert.equal(tokens.getItemAt(5).length, 3); + }); + test('Operators', async () => { + const text = '< <> << <<= ' + + '== != > >> >>= ' + + '+ -' + + '* ** / /= //=' + + '*= += -= **= ' + + '& &= | |= ^ ^='; + const tokens = new Tokenizer().tokenize(text); + const lengths = [ + 1, 2, 2, 3, + 2, 2, 1, 2, 3, + 1, 1, + 1, 2, 1, 2, 3, + 2, 2, 2, 3, + 1, 2, 1, 2, 1, 2]; + assert.equal(tokens.count, lengths.length); + for (let i = 0; i < tokens.count; i += 1) { + const t = tokens.getItemAt(i); + assert.equal(t.type, TokenType.Operator, `${t.type} at ${i} is not an operator`); + assert.equal(t.length, lengths[i], `Length ${t.length} at ${i} (text ${text.substr(t.start, t.length)}), expected ${lengths[i]}`); + } }); }); From 654689286183a40ec7d78bdb5e303f97af23d5c3 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Fri, 16 Feb 2018 10:50:36 -0800 Subject: [PATCH 060/103] Fix pylint search --- src/client/linters/pylint.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/linters/pylint.ts b/src/client/linters/pylint.ts index c650714e45dd..6b7fb6dfc826 100644 --- a/src/client/linters/pylint.ts +++ b/src/client/linters/pylint.ts @@ -30,7 +30,7 @@ export class Pylint extends BaseLinter { const settings = this.configService.getSettings(uri); if (settings.linting.pylintUseMinimalCheckers && this.info.linterArgs(uri).length === 0 - && !await Pylint.hasConfigurationFile(this.fileSystem, uri.fsPath, this.platformService)) { + && !await Pylint.hasConfigurationFile(this.fileSystem, this.getWorkspaceRootPath(document), this.platformService)) { minArgs = [ '--disable=all', '--enable=F,E,unreachable,duplicate-key,unnecessary-semicolon,global-variable-not-assigned,unused-variable,unused-wildcard-import,binary-op-exception,bad-format-string,anomalous-backslash-in-string,bad-open-mode' From 76af122d7ede6cf8947df0bd99b1466d96c0a10d Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Fri, 16 Feb 2018 12:41:31 -0800 Subject: [PATCH 061/103] Handle quote escapes in strings --- src/client/language/tokenizer.ts | 5 ++++- src/test/language/tokenizer.test.ts | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index bf20fb3f44ba..07c6717cbe10 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -359,7 +359,10 @@ export class Tokenizer implements ITokenizer { } private skipToSingleEndQuote(quote: number): void { - while (!this.cs.isEndOfStream() && this.cs.currentChar !== quote) { + while (!this.cs.isEndOfStream()) { + if (this.cs.currentChar === quote && this.cs.prevChar !== Char.Backslash) { + break; + } this.cs.moveNext(); } this.cs.moveNext(); diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts index c397ffec95e5..e11df6a147b0 100644 --- a/src/test/language/tokenizer.test.ts +++ b/src/test/language/tokenizer.test.ts @@ -64,6 +64,21 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(i).type, TokenType.String); } }); + test('Strings: single quote escape', async () => { + const t = new Tokenizer(); + // tslint:disable-next-line:quotemark + const tokens = t.tokenize("'\\'quoted\\''"); + assert.equal(tokens.count, 1); + assert.equal(tokens.getItemAt(0).type, TokenType.String); + assert.equal(tokens.getItemAt(0).length, 12); + }); + test('Strings: double quote escape', async () => { + const t = new 
Tokenizer(); + const tokens = t.tokenize('"\\"quoted\\""'); + assert.equal(tokens.count, 1); + assert.equal(tokens.getItemAt(0).type, TokenType.String); + assert.equal(tokens.getItemAt(0).length, 12); + }); test('Comments', async () => { const t = new Tokenizer(); const tokens = t.tokenize(' #co"""mment1\n\t\n#comm\'ent2 '); From 5d4d022825f4a3f4217ea73b49d033b3ee42f174 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Fri, 16 Feb 2018 12:52:05 -0800 Subject: [PATCH 062/103] Escapes in strings --- src/client/language/tokenizer.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index 07c6717cbe10..1a4d800fed0c 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -360,7 +360,11 @@ export class Tokenizer implements ITokenizer { private skipToSingleEndQuote(quote: number): void { while (!this.cs.isEndOfStream()) { - if (this.cs.currentChar === quote && this.cs.prevChar !== Char.Backslash) { + if (this.cs.currentChar === Char.Backslash && this.cs.nextChar === quote) { + this.cs.advance(2); + continue; + } + if (this.cs.currentChar === quote) { break; } this.cs.moveNext(); From 29edac2d9063d6ee04ba16f4418a5ae8b79ce6c4 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Fri, 16 Feb 2018 14:02:20 -0800 Subject: [PATCH 063/103] CR feedback --- src/client/linters/pylint.ts | 9 ++++++--- src/test/linters/pylint.test.ts | 15 +++++++-------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/src/client/linters/pylint.ts b/src/client/linters/pylint.ts index 6b7fb6dfc826..c6f6e466ba10 100644 --- a/src/client/linters/pylint.ts +++ b/src/client/linters/pylint.ts @@ -25,11 +25,14 @@ export class Pylint extends BaseLinter { let minArgs: string[] = []; // Only use minimal checkers if // a) there are no custom arguments and - // b) there is no pylintrc file + // b) there is no pylintrc file next to the file or at the workspace root const uri = document.uri; const settings = this.configService.getSettings(uri); if (settings.linting.pylintUseMinimalCheckers && this.info.linterArgs(uri).length === 0 + // Check pylintrc next to the file + && !await Pylint.hasConfigurationFile(this.fileSystem, path.dirname(uri.fsPath), this.platformService) + // Checn for pylintrc at the root (function will strip the file name) && !await Pylint.hasConfigurationFile(this.fileSystem, this.getWorkspaceRootPath(document), this.platformService)) { minArgs = [ '--disable=all', @@ -51,7 +54,7 @@ export class Pylint extends BaseLinter { } // tslint:disable-next-line:member-ordering - public static async hasConfigurationFile(fs: IFileSystem, filePath: string, platformService: IPlatformService): Promise { + public static async hasConfigurationFile(fs: IFileSystem, folder: string, platformService: IPlatformService): Promise { // https://pylint.readthedocs.io/en/latest/user_guide/run.html // https://github.com/PyCQA/pylint/blob/975e08148c0faa79958b459303c47be1a2e1500a/pylint/config.py // 1. 
pylintrc in the current working directory @@ -69,7 +72,7 @@ export class Pylint extends BaseLinter { return true; } - let dir = path.dirname(filePath); + let dir = folder; const pylintrc = 'pylintrc'; const dotPylintrc = '.pylintrc'; if (await fs.fileExistsAsync(path.join(dir, pylintrc)) || await fs.fileExistsAsync(path.join(dir, dotPylintrc))) { diff --git a/src/test/linters/pylint.test.ts b/src/test/linters/pylint.test.ts index cc527e320478..5ed565b2d44f 100644 --- a/src/test/linters/pylint.test.ts +++ b/src/test/linters/pylint.test.ts @@ -9,7 +9,6 @@ import { Pylint } from '../../client/linters/pylint'; suite('Linting - Pylintrc search', () => { const basePath = '/user/a/b/c/d'; - const file = path.join(basePath, 'file.py'); const pylintrc = 'pylintrc'; const dotPylintrc = '.pylintrc'; @@ -23,11 +22,11 @@ suite('Linting - Pylintrc search', () => { test('pylintrc in the file folder', async () => { fileSystem.setup(x => x.fileExistsAsync(path.join(basePath, pylintrc))).returns(() => Promise.resolve(true)); - let result = await Pylint.hasConfigurationFile(fileSystem.object, file, platformService.object); + let result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${pylintrc}' not detected in the file folder.`); fileSystem.setup(x => x.fileExistsAsync(path.join(basePath, dotPylintrc))).returns(() => Promise.resolve(true)); - result = await Pylint.hasConfigurationFile(fileSystem.object, file, platformService.object); + result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${dotPylintrc}' not detected in the file folder.`); }); test('pylintrc up the module tree', async () => { @@ -41,7 +40,7 @@ suite('Linting - Pylintrc search', () => { fileSystem.setup(x => x.fileExistsAsync(module3)).returns(() => Promise.resolve(true)); fileSystem.setup(x => x.fileExistsAsync(rc)).returns(() => Promise.resolve(true)); - const result = await Pylint.hasConfigurationFile(fileSystem.object, file, platformService.object); + const result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${pylintrc}' not detected in the module tree.`); }); test('.pylintrc up the module tree', async () => { @@ -56,7 +55,7 @@ suite('Linting - Pylintrc search', () => { fileSystem.setup(x => x.fileExistsAsync(module3)).returns(() => Promise.resolve(true)); fileSystem.setup(x => x.fileExistsAsync(rc)).returns(() => Promise.resolve(true)); - const result = await Pylint.hasConfigurationFile(fileSystem.object, file, platformService.object); + const result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${dotPylintrc}' not detected in the module tree.`); }); test('.pylintrc up the ~ folder', async () => { @@ -64,7 +63,7 @@ suite('Linting - Pylintrc search', () => { const rc = path.join(home, dotPylintrc); fileSystem.setup(x => x.fileExistsAsync(rc)).returns(() => Promise.resolve(true)); - const result = await Pylint.hasConfigurationFile(fileSystem.object, file, platformService.object); + const result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${dotPylintrc}' not detected in the ~ folder.`); }); test('pylintrc up the ~/.config folder', async () => { @@ -72,7 +71,7 @@ suite('Linting - Pylintrc search', () => { const rc = path.join(home, '.config', pylintrc); 
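// i.e. ~/.config/pylintrc; per the search order above, this location takes the
// dot-less file name, while the home directory itself takes .pylintrc.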
fileSystem.setup(x => x.fileExistsAsync(rc)).returns(() => Promise.resolve(true)); - const result = await Pylint.hasConfigurationFile(fileSystem.object, file, platformService.object); + const result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${pylintrc}' not detected in the ~/.config folder.`); }); test('pylintrc in the /etc folder', async () => { @@ -80,7 +79,7 @@ suite('Linting - Pylintrc search', () => { const rc = path.join('/etc', pylintrc); fileSystem.setup(x => x.fileExistsAsync(rc)).returns(() => Promise.resolve(true)); - const result = await Pylint.hasConfigurationFile(fileSystem.object, file, platformService.object); + const result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${pylintrc}' not detected in the /etc folder.`); }); }); From 1ee0be28809f52ad3e41c8a9f1e8b0c5e7700b49 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Sun, 18 Feb 2018 22:51:04 -0800 Subject: [PATCH 064/103] Discover pylintrc better + tests --- src/client/linters/baseLinter.ts | 18 ++--- src/client/linters/pylint.ts | 29 ++++++-- src/test/linters/pylint.test.ts | 118 ++++++++++++++++++++++++++++++- src/test/mockClasses.ts | 37 ++++++++++ 4 files changed, 187 insertions(+), 15 deletions(-) diff --git a/src/client/linters/baseLinter.ts b/src/client/linters/baseLinter.ts index 2c90e5c42731..742008ea7346 100644 --- a/src/client/linters/baseLinter.ts +++ b/src/client/linters/baseLinter.ts @@ -1,10 +1,10 @@ import * as path from 'path'; import * as vscode from 'vscode'; -import { CancellationToken, OutputChannel, TextDocument, Uri } from 'vscode'; +import { IWorkspaceService } from '../common/application/types'; import '../common/extensions'; import { IPythonToolExecutionService } from '../common/process/types'; -import { ExecutionInfo, ILogger, Product } from '../common/types'; import { IConfigurationService, IPythonSettings } from '../common/types'; +import { ExecutionInfo, ILogger, Product } from '../common/types'; import { IServiceContainer } from '../ioc/types'; import { ErrorHandler } from './errorHandlers/errorHandler'; import { ILinter, ILinterInfo, ILinterManager, ILintMessage, LintMessageSeverity } from './types'; @@ -38,25 +38,27 @@ export abstract class BaseLinter implements ILinter { private errorHandler: ErrorHandler; private _pythonSettings: IPythonSettings; private _info: ILinterInfo; + private workspace: IWorkspaceService; protected get pythonSettings(): IPythonSettings { return this._pythonSettings; } constructor(product: Product, - protected readonly outputChannel: OutputChannel, + protected readonly outputChannel: vscode.OutputChannel, protected readonly serviceContainer: IServiceContainer, protected readonly columnOffset = 0) { this._info = serviceContainer.get(ILinterManager).getLinterInfo(product); this.errorHandler = new ErrorHandler(this.info.product, outputChannel, serviceContainer); this.configService = serviceContainer.get(IConfigurationService); + this.workspace = serviceContainer.get(IWorkspaceService); } public get info(): ILinterInfo { return this._info; } - public isLinterExecutableSpecified(resource: Uri) { + public isLinterExecutableSpecified(resource: vscode.Uri) { const executablePath = this.info.pathName(resource); return path.basename(executablePath).length > 0 && path.basename(executablePath) !== executablePath; } @@ -66,7 +68,7 @@ export abstract class BaseLinter implements ILinter { } protected 
getWorkspaceRootPath(document: vscode.TextDocument): string { - const workspaceFolder = vscode.workspace.getWorkspaceFolder(document.uri); + const workspaceFolder = this.workspace.getWorkspaceFolder(document.uri); const workspaceRootPath = (workspaceFolder && typeof workspaceFolder.uri.fsPath === 'string') ? workspaceFolder.uri.fsPath : undefined; return typeof workspaceRootPath === 'string' ? workspaceRootPath : __dirname; } @@ -107,7 +109,7 @@ export abstract class BaseLinter implements ILinter { const cwd = this.getWorkspaceRootPath(document); const pythonToolsExecutionService = this.serviceContainer.get(IPythonToolExecutionService); try { - const result = await pythonToolsExecutionService.exec(executionInfo, {cwd, token: cancellation, mergeStdOutErr: true}, document.uri); + const result = await pythonToolsExecutionService.exec(executionInfo, { cwd, token: cancellation, mergeStdOutErr: true }, document.uri); this.displayLinterResultHeader(result.stdout); return await this.parseMessages(result.stdout, document, cancellation, regEx); } catch (error) { @@ -116,12 +118,12 @@ export abstract class BaseLinter implements ILinter { } } - protected async parseMessages(output: string, document: TextDocument, token: CancellationToken, regEx: string) { + protected async parseMessages(output: string, document: vscode.TextDocument, token: vscode.CancellationToken, regEx: string) { const outputLines = output.splitLines({ removeEmptyEntries: false, trim: false }); return this.parseLines(outputLines, regEx); } - protected handleError(error: Error, resource: Uri, execInfo: ExecutionInfo) { + protected handleError(error: Error, resource: vscode.Uri, execInfo: ExecutionInfo) { this.errorHandler.handleError(error, resource, execInfo) .catch(this.logger.logError.bind(this, 'Error in errorHandler.handleError')); } diff --git a/src/client/linters/pylint.ts b/src/client/linters/pylint.ts index c6f6e466ba10..53caae8a8669 100644 --- a/src/client/linters/pylint.ts +++ b/src/client/linters/pylint.ts @@ -11,6 +11,9 @@ import { IServiceContainer } from '../ioc/types'; import { BaseLinter } from './baseLinter'; import { ILintMessage } from './types'; +const pylintrc = 'pylintrc'; +const dotPylintrc = '.pylintrc'; + export class Pylint extends BaseLinter { private fileSystem: IFileSystem; private platformService: IPlatformService; @@ -27,12 +30,13 @@ export class Pylint extends BaseLinter { // a) there are no custom arguments and // b) there is no pylintrc file next to the file or at the workspace root const uri = document.uri; + const workspaceRoot = this.getWorkspaceRootPath(document); const settings = this.configService.getSettings(uri); if (settings.linting.pylintUseMinimalCheckers && this.info.linterArgs(uri).length === 0 - // Check pylintrc next to the file - && !await Pylint.hasConfigurationFile(this.fileSystem, path.dirname(uri.fsPath), this.platformService) - // Checn for pylintrc at the root (function will strip the file name) + // Check pylintrc next to the file or above up to and including the workspace root + && !await Pylint.hasConfigrationFileInWorkspace(this.fileSystem, path.dirname(uri.fsPath), workspaceRoot) + // Check for pylintrc at the root and above && !await Pylint.hasConfigurationFile(this.fileSystem, this.getWorkspaceRootPath(document), this.platformService)) { minArgs = [ '--disable=all', @@ -73,8 +77,6 @@ export class Pylint extends BaseLinter { } let dir = folder; - const pylintrc = 'pylintrc'; - const dotPylintrc = '.pylintrc'; if (await fs.fileExistsAsync(path.join(dir, pylintrc)) || await 
fs.fileExistsAsync(path.join(dir, dotPylintrc))) { return true; } @@ -90,7 +92,7 @@ export class Pylint extends BaseLinter { } current = above; above = path.dirname(above); - } while (current !== above); + } while (!fs.arePathsSame(current, above)); dir = path.resolve('~'); if (await fs.fileExistsAsync(path.join(dir, dotPylintrc))) { @@ -107,4 +109,19 @@ export class Pylint extends BaseLinter { } return false; } + + // tslint:disable-next-line:member-ordering + public static async hasConfigrationFileInWorkspace(fs: IFileSystem, folder: string, root: string): Promise { + // Search up from file location to the workspace root + let current = folder; + let above = path.dirname(current); + do { + if (await fs.fileExistsAsync(path.join(current, pylintrc)) || await fs.fileExistsAsync(path.join(current, dotPylintrc))) { + return true; + } + current = above; + above = path.dirname(above); + } while (!fs.arePathsSame(current, root) && !fs.arePathsSame(current, above)); + return false; + } } diff --git a/src/test/linters/pylint.test.ts b/src/test/linters/pylint.test.ts index 5ed565b2d44f..75cd78f148f3 100644 --- a/src/test/linters/pylint.test.ts +++ b/src/test/linters/pylint.test.ts @@ -2,11 +2,22 @@ // Licensed under the MIT License. import { expect } from 'chai'; +import { Container } from 'inversify'; import * as path from 'path'; import * as TypeMoq from 'typemoq'; +import { CancellationTokenSource, OutputChannel, TextDocument, Uri, WorkspaceFolder } from 'vscode'; +import { IWorkspaceService } from '../../client/common/application/types'; import { IFileSystem, IPlatformService } from '../../client/common/platform/types'; +import { IPythonToolExecutionService } from '../../client/common/process/types'; +import { ExecutionInfo, IConfigurationService, IInstaller, ILogger, IPythonSettings } from '../../client/common/types'; +import { ServiceContainer } from '../../client/ioc/container'; +import { ServiceManager } from '../../client/ioc/serviceManager'; +import { LinterManager } from '../../client/linters/linterManager'; import { Pylint } from '../../client/linters/pylint'; +import { ILinterManager } from '../../client/linters/types'; +import { MockLintingSettings } from '../mockClasses'; +// tslint:disable-next-line:max-func-body-length suite('Linting - Pylintrc search', () => { const basePath = '/user/a/b/c/d'; const pylintrc = 'pylintrc'; @@ -14,10 +25,40 @@ suite('Linting - Pylintrc search', () => { let fileSystem: TypeMoq.IMock; let platformService: TypeMoq.IMock; + let workspace: TypeMoq.IMock; + let execService: TypeMoq.IMock; + let config: TypeMoq.IMock; + let serviceContainer: ServiceContainer; setup(() => { fileSystem = TypeMoq.Mock.ofType(); + fileSystem + .setup(x => x.arePathsSame(TypeMoq.It.isAnyString(), TypeMoq.It.isAnyString())) + .returns((a, b) => a === b); + platformService = TypeMoq.Mock.ofType(); + platformService.setup(x => x.isWindows).returns(() => false); + + workspace = TypeMoq.Mock.ofType(); + execService = TypeMoq.Mock.ofType(); + + const cont = new Container(); + const serviceManager = new ServiceManager(cont); + serviceContainer = new ServiceContainer(cont); + + serviceManager.addSingletonInstance(IFileSystem, fileSystem.object); + serviceManager.addSingletonInstance(IWorkspaceService, workspace.object); + serviceManager.addSingletonInstance(IPythonToolExecutionService, execService.object); + serviceManager.addSingletonInstance(IPlatformService, platformService.object); + + config = TypeMoq.Mock.ofType(); + serviceManager.addSingletonInstance(IConfigurationService, 
config.object); + const linterManager = new LinterManager(serviceContainer); + serviceManager.addSingletonInstance(ILinterManager, linterManager); + const logger = TypeMoq.Mock.ofType(); + serviceManager.addSingletonInstance(ILogger, logger.object); + const installer = TypeMoq.Mock.ofType(); + serviceManager.addSingletonInstance(IInstaller, installer.object); }); test('pylintrc in the file folder', async () => { @@ -75,11 +116,86 @@ suite('Linting - Pylintrc search', () => { expect(result).to.be.equal(true, `'${pylintrc}' not detected in the ~/.config folder.`); }); test('pylintrc in the /etc folder', async () => { - platformService.setup(x => x.isWindows).returns(() => false); const rc = path.join('/etc', pylintrc); fileSystem.setup(x => x.fileExistsAsync(rc)).returns(() => Promise.resolve(true)); const result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${pylintrc}' not detected in the /etc folder.`); }); + test('pylintrc between file and workspace root', async () => { + const root = '/user/a'; + const midFolder = '/user/a/b'; + fileSystem + .setup(x => x.fileExistsAsync(path.join(midFolder, pylintrc))) + .returns(() => Promise.resolve(true)); + + const result = await Pylint.hasConfigrationFileInWorkspace(fileSystem.object, basePath, root); + expect(result).to.be.equal(true, `'${pylintrc}' not detected in the workspace tree.`); + }); + + test('minArgs - pylintrc between the file and the workspace root', async () => { + fileSystem + .setup(x => x.fileExistsAsync(path.join('/user/a/b', pylintrc))) + .returns(() => Promise.resolve(true)); + + await testPylintArguments('/user/a/b/c', '/user/a', false); + }); + + test('minArgs - no pylintrc between the file and the workspace root', async () => { + await testPylintArguments('/user/a/b/c', '/user/a', true); + }); + + test('minArgs - pylintrc next to the file', async () => { + const fileFolder = '/user/a/b/c'; + fileSystem + .setup(x => x.fileExistsAsync(path.join(fileFolder, pylintrc))) + .returns(() => Promise.resolve(true)); + + await testPylintArguments(fileFolder, '/user/a', false); + }); + + test('minArgs - pylintrc at the workspace root', async () => { + const root = '/user/a'; + fileSystem + .setup(x => x.fileExistsAsync(path.join(root, pylintrc))) + .returns(() => Promise.resolve(true)); + + await testPylintArguments('/user/a/b/c', root, false); + }); + + async function testPylintArguments(fileFolder: string, wsRoot: string, expectedMinArgs: boolean): Promise { + const outputChannel = TypeMoq.Mock.ofType(); + const pylinter = new Pylint(outputChannel.object, serviceContainer); + + const document = TypeMoq.Mock.ofType(); + document.setup(x => x.uri).returns(() => Uri.file(path.join(fileFolder, 'test.py'))); + + const wsf = TypeMoq.Mock.ofType(); + wsf.setup(x => x.uri).returns(() => Uri.file(wsRoot)); + + workspace.setup(x => x.getWorkspaceFolder(TypeMoq.It.isAny())).returns(() => wsf.object); + + let execInfo: ExecutionInfo | undefined; + execService + .setup(x => x.exec(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())) + .callback((e: ExecutionInfo, b, c) => { + execInfo = e; + }) + .returns(() => Promise.resolve({ stdout: '', stderr: '' })); + + const lintSettings = new MockLintingSettings(); + lintSettings.pylintUseMinimalCheckers = true; + // tslint:disable-next-line:no-string-literal + lintSettings['pylintPath'] = 'pyLint'; + // tslint:disable-next-line:no-string-literal + lintSettings['pylintEnabled'] = true; + + const settings = 
TypeMoq.Mock.ofType(); + settings.setup(x => x.linting).returns(() => lintSettings); + config.setup(x => x.getSettings(TypeMoq.It.isAny())).returns(() => settings.object); + + await pylinter.lint(document.object, new CancellationTokenSource().token); + expect(execInfo!.args.findIndex(x => x.indexOf('--disable=all') >= 0), + 'Minimal args passed to pylint while pylintrc exists.').to.be.eq(expectedMinArgs ? 0 : -1); + } }); diff --git a/src/test/mockClasses.ts b/src/test/mockClasses.ts index 3cbae4ddfc55..7849e2a2137a 100644 --- a/src/test/mockClasses.ts +++ b/src/test/mockClasses.ts @@ -1,4 +1,8 @@ import * as vscode from 'vscode'; +import { + Flake8CategorySeverity, ILintingSettings, IMypyCategorySeverity, + IPep8CategorySeverity, IPylintCategorySeverity +} from '../client/common/types'; export class MockOutputChannel implements vscode.OutputChannel { public name: string; @@ -44,3 +48,36 @@ export class MockStatusBarItem implements vscode.StatusBarItem { public dispose(): void { } } + +export class MockLintingSettings implements ILintingSettings { + public enabled: boolean; + public ignorePatterns: string[]; + public prospectorEnabled: boolean; + public prospectorArgs: string[]; + public pylintEnabled: boolean; + public pylintArgs: string[]; + public pep8Enabled: boolean; + public pep8Args: string[]; + public pylamaEnabled: boolean; + public pylamaArgs: string[]; + public flake8Enabled: boolean; + public flake8Args: string[]; + public pydocstyleEnabled: boolean; + public pydocstyleArgs: string[]; + public lintOnSave: boolean; + public maxNumberOfProblems: number; + public pylintCategorySeverity: IPylintCategorySeverity; + public pep8CategorySeverity: IPep8CategorySeverity; + public flake8CategorySeverity: Flake8CategorySeverity; + public mypyCategorySeverity: IMypyCategorySeverity; + public prospectorPath: string; + public pylintPath: string; + public pep8Path: string; + public pylamaPath: string; + public flake8Path: string; + public pydocstylePath: string; + public mypyEnabled: boolean; + public mypyArgs: string[]; + public mypyPath: string; + public pylintUseMinimalCheckers: boolean; +} From 33efd6e4599783f603c20fea76393eecf32bd429 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 27 Feb 2018 15:40:17 -0800 Subject: [PATCH 065/103] Fix .pyenv/versions search --- .../interpreter/locators/services/globalVirtualEnvService.ts | 4 +++- src/test/interpreters/venv.test.ts | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/client/interpreter/locators/services/globalVirtualEnvService.ts b/src/client/interpreter/locators/services/globalVirtualEnvService.ts index 8f3160386dc8..12a03d006fac 100644 --- a/src/client/interpreter/locators/services/globalVirtualEnvService.ts +++ b/src/client/interpreter/locators/services/globalVirtualEnvService.ts @@ -42,9 +42,11 @@ export class GlobalVirtualEnvironmentsSearchPathProvider implements IVirtualEnvi folders.push(pyenvRoot); folders.push(path.join(pyenvRoot, 'versions')); } else { + // Check if .pyenv/versions is in the list const pyenvVersions = path.join('.pyenv', 'versions'); if (venvFolders.indexOf('.pyenv') >= 0 && venvFolders.indexOf(pyenvVersions) < 0) { - folders.push(pyenvVersions); + // if .pyenv is in the list, but .pyenv/versions is not, add it. 
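+ // e.g. with a hypothetical homedir of '/home/user', this contributes
+ // '/home/user/.pyenv/versions' instead of the bare relative '.pyenv/versions'.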
+ folders.push(path.join(homedir, pyenvVersions)); } } return folders; diff --git a/src/test/interpreters/venv.test.ts b/src/test/interpreters/venv.test.ts index 7c26035e1096..13f064bcbc4a 100644 --- a/src/test/interpreters/venv.test.ts +++ b/src/test/interpreters/venv.test.ts @@ -49,7 +49,7 @@ suite('Virtual environments', () => { let paths = pathProvider.getSearchPaths(); let expected = folders.map(item => path.join(homedir, item)); - expected.push(path.join('.pyenv', 'versions')); + expected.push(path.join(homedir, '.pyenv', 'versions')); expect(paths).to.deep.equal(expected, 'Global search folder list is incorrect.'); From 713983ec326bc0a76517002298300a0a2d76a943 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 27 Feb 2018 16:05:33 -0800 Subject: [PATCH 066/103] Fix multiple linters output --- src/client/linters/lintingEngine.ts | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/client/linters/lintingEngine.ts b/src/client/linters/lintingEngine.ts index b14d33454d04..cc71da741c9a 100644 --- a/src/client/linters/lintingEngine.ts +++ b/src/client/linters/lintingEngine.ts @@ -114,7 +114,6 @@ export class LintingEngine implements ILintingEngine { break; } - diagnostics = []; if (this.isDocumentOpen(document.uri)) { // Build the message and suffix the message with the name of the linter used. for (const m of msgs) { @@ -131,9 +130,9 @@ export class LintingEngine implements ILintingEngine { // Limit the number of messages to the max value. diagnostics = diagnostics.filter((value, index) => index <= settings.linting.maxNumberOfProblems); } - // Set all diagnostics found in this pass, as this method always clears existing diagnostics. - this.diagnosticCollection.set(document.uri, diagnostics); } + // Set all diagnostics found in this pass, as this method always clears existing diagnostics. 
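+ // Publishing once, outside the isDocumentOpen branch, keeps the messages gathered
+ // from every linter in this pass and still clears stale entries for closed documents.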
+ this.diagnosticCollection.set(document.uri, diagnostics); } // tslint:disable-next-line:no-any From e64b371992d3eb11837c3a6f44d651c48be40b74 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 27 Feb 2018 16:30:45 -0800 Subject: [PATCH 067/103] Better handle markdown underscore --- src/client/common/markdown/restTextConverter.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/client/common/markdown/restTextConverter.ts b/src/client/common/markdown/restTextConverter.ts index e606fd46bfbc..2119775173fb 100644 --- a/src/client/common/markdown/restTextConverter.ts +++ b/src/client/common/markdown/restTextConverter.ts @@ -36,7 +36,8 @@ export class RestTextConverter { return text .replace(/\#/g, '\\#') .replace(/\*/g, '\\*') - .replace(/\_/g, '\\_'); + .replace(/\ _/g, ' \\_') + .replace(/^_/, '\\_'); } private transformLines(docstring: string): string { From 73c9617e42c69deaf75dcac95f67e056cf9497f0 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 28 Feb 2018 11:26:20 -0800 Subject: [PATCH 068/103] Test --- src/client/linters/linterCommands.ts | 4 ++-- src/client/linters/lintingEngine.ts | 21 ++++++++++----------- src/client/linters/types.ts | 4 ++-- src/test/linters/lint.test.ts | 27 ++++++++++++++++++++++++++- src/test/pythonFiles/linting/print.py | 1 + 5 files changed, 41 insertions(+), 16 deletions(-) create mode 100644 src/test/pythonFiles/linting/print.py diff --git a/src/client/linters/linterCommands.ts b/src/client/linters/linterCommands.ts index 5ecde143c1cf..9ab7a673467a 100644 --- a/src/client/linters/linterCommands.ts +++ b/src/client/linters/linterCommands.ts @@ -79,9 +79,9 @@ export class LinterCommands implements vscode.Disposable { } } - public runLinting(): void { + public runLinting(): Promise { const engine = this.serviceContainer.get(ILintingEngine); - engine.lintOpenPythonFiles(); + return engine.lintOpenPythonFiles(); } private get settingsUri(): vscode.Uri | undefined { diff --git a/src/client/linters/lintingEngine.ts b/src/client/linters/lintingEngine.ts index cc71da741c9a..fed2b844c978 100644 --- a/src/client/linters/lintingEngine.ts +++ b/src/client/linters/lintingEngine.ts @@ -51,32 +51,31 @@ export class LintingEngine implements ILintingEngine { this.diagnosticCollection = vscode.languages.createDiagnosticCollection('python'); } - public lintOpenPythonFiles(): void { - this.documents.textDocuments.forEach(async document => { + public async lintOpenPythonFiles(): Promise { + this.documents.textDocuments.forEach(document => { if (document.languageId === PythonLanguage.language) { - await this.lintDocument(document, 'auto'); + this.lintDocument(document, 'auto').ignoreErrors(); } }); + return this.diagnosticCollection; } - public async lintDocument(document: vscode.TextDocument, trigger: LinterTrigger): Promise { + public async lintDocument(document: vscode.TextDocument, trigger: LinterTrigger): Promise { // Check if we need to lint this document const workspaceFolder = this.workspace.getWorkspaceFolder(document.uri); const workspaceRootPath = (workspaceFolder && typeof workspaceFolder.uri.fsPath === 'string') ? workspaceFolder.uri.fsPath : undefined; const relativeFileName = typeof workspaceRootPath === 'string' ? 
path.relative(workspaceRootPath, document.fileName) : document.fileName; const settings = this.configurationService.getSettings(document.uri); if (document.languageId !== PythonLanguage.language) { - return; + return this.diagnosticCollection; } if (!this.linterManager.isLintingEnabled(document.uri)) { this.diagnosticCollection.set(document.uri, []); } - const ignoreMinmatches = settings.linting.ignorePatterns.map(pattern => { - return new Minimatch(pattern); - }); + const ignoreMinmatches = settings.linting.ignorePatterns.map(pattern => new Minimatch(pattern)); if (ignoreMinmatches.some(matcher => matcher.match(document.fileName) || matcher.match(relativeFileName))) { - return; + return this.diagnosticCollection; } if (this.pendingLintings.has(document.uri.fsPath)) { @@ -122,17 +121,17 @@ export class LintingEngine implements ILintingEngine { (m.code === LinterErrors.pylint.InvalidSyntax || m.code === LinterErrors.prospector.InvalidSyntax || m.code === LinterErrors.flake8.InvalidSyntax)) { - return; + continue; } diagnostics.push(this.createDiagnostics(m, document)); } - // Limit the number of messages to the max value. diagnostics = diagnostics.filter((value, index) => index <= settings.linting.maxNumberOfProblems); } } // Set all diagnostics found in this pass, as this method always clears existing diagnostics. this.diagnosticCollection.set(document.uri, diagnostics); + return this.diagnosticCollection; } // tslint:disable-next-line:no-any diff --git a/src/client/linters/types.ts b/src/client/linters/types.ts index 21a63419ab39..407635b55174 100644 --- a/src/client/linters/types.ts +++ b/src/client/linters/types.ts @@ -61,8 +61,8 @@ export enum LintMessageSeverity { export const ILintingEngine = Symbol('ILintingEngine'); export interface ILintingEngine { - lintOpenPythonFiles(): void; - lintDocument(document: vscode.TextDocument, trigger: LinterTrigger): Promise; + lintOpenPythonFiles(): Promise; + lintDocument(document: vscode.TextDocument, trigger: LinterTrigger): Promise; // tslint:disable-next-line:no-any linkJupiterExtension(jupiter: vscode.Extension | undefined): Promise; } diff --git a/src/test/linters/lint.test.ts b/src/test/linters/lint.test.ts index 906bdb2eef96..567ac29f0293 100644 --- a/src/test/linters/lint.test.ts +++ b/src/test/linters/lint.test.ts @@ -8,7 +8,7 @@ import { Product } from '../../client/common/installer/productInstaller'; import { IConfigurationService, IOutputChannel } from '../../client/common/types'; import { LinterManager } from '../../client/linters/linterManager'; import { ILinterManager, ILintMessage, LintMessageSeverity } from '../../client/linters/types'; -import { deleteFile, PythonSettingKeys, rootWorkspaceUri } from '../common'; +import { deleteFile, PythonSettingKeys, rootWorkspaceUri, sleep } from '../common'; import { closeActiveWindows, initialize, initializeTest, IS_MULTI_ROOT_TEST } from '../initialize'; import { MockOutputChannel } from '../mockClasses'; import { UnitTestIocContainer } from '../unittests/serviceRegistry'; @@ -250,4 +250,29 @@ suite('Linting', () => { await configService.updateSettingAsync('linting.pylintUseMinimalCheckers', false, workspaceUri); await testEnablingDisablingOfLinter(Product.pylint, true, file); }); + test('Multiple linters', async () => { + await linterManager.setActiveLintersAsync([Product.pylint, Product.flake8]); + + const document = await vscode.workspace.openTextDocument(path.join(pythoFilesPath, 'print.py')); + const collection = await vscode.commands.executeCommand('python.runLinting') as 
vscode.DiagnosticCollection; + assert.notEqual(collection, undefined, 'python.runLinting did not return valid diagnostics collection.'); + + const ready = await waitForCondition(() => collection!.has(document.uri) && collection!.get(document.uri)!.length >= 3); + assert.equal(ready, true, 'Timeout expired but linting results are not available still.'); + + const messages = collection!.get(document.uri); + assert.notEqual(messages!.filter(x => x.source === 'pylint').length, 0, 'No pylint message.'); + assert.notEqual(messages!.filter(x => x.source === 'flake8').length, 0, 'No flake8 message.'); + }); + + async function waitForCondition(predicate: () => boolean, interval = 1000, maxAttempts = 10): Promise { + return new Promise(async (resolve) => { + let retries = 0; + while (!predicate() && retries < maxAttempts) { + await sleep(1000); + retries += 1; + } + resolve(retries < maxAttempts); + }); + } }); diff --git a/src/test/pythonFiles/linting/print.py b/src/test/pythonFiles/linting/print.py new file mode 100644 index 000000000000..fca61311fc84 --- /dev/null +++ b/src/test/pythonFiles/linting/print.py @@ -0,0 +1 @@ +print x \ No newline at end of file From eea64def9c0e02528e70778d7cc53b1af244b29a Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 28 Feb 2018 15:45:24 -0800 Subject: [PATCH 069/103] Fix 916: PyLint checks wrong files --- src/client/linters/lintingEngine.ts | 43 ++++++++---- src/client/providers/linterProvider.ts | 26 +++----- src/test/linters/lint.test.ts | 4 +- src/test/linters/lintengine.test.ts | 90 +++++++++++++++++++++----- 4 files changed, 113 insertions(+), 50 deletions(-) diff --git a/src/client/linters/lintingEngine.ts b/src/client/linters/lintingEngine.ts index fed2b844c978..fad029d366ba 100644 --- a/src/client/linters/lintingEngine.ts +++ b/src/client/linters/lintingEngine.ts @@ -7,6 +7,7 @@ import * as path from 'path'; import * as vscode from 'vscode'; import { IDocumentManager, IWorkspaceService } from '../common/application/types'; import { LinterErrors, PythonLanguage, STANDARD_OUTPUT_CHANNEL } from '../common/constants'; +import { IFileSystem } from '../common/platform/types'; import { IConfigurationService, IOutputChannel } from '../common/types'; import { IServiceContainer } from '../ioc/types'; import { JupyterProvider } from '../jupyter/provider'; @@ -40,6 +41,7 @@ export class LintingEngine implements ILintingEngine { private diagnosticCollection: vscode.DiagnosticCollection; private pendingLintings = new Map(); private outputChannel: vscode.OutputChannel; + private fileSystem: IFileSystem; constructor(@inject(IServiceContainer) private serviceContainer: IServiceContainer) { this.documentHasJupyterCodeCells = (a, b) => Promise.resolve(false); @@ -48,6 +50,7 @@ export class LintingEngine implements ILintingEngine { this.configurationService = serviceContainer.get(IConfigurationService); this.outputChannel = serviceContainer.get(IOutputChannel, STANDARD_OUTPUT_CHANNEL); this.linterManager = serviceContainer.get(ILinterManager); + this.fileSystem = serviceContainer.get(IFileSystem); this.diagnosticCollection = vscode.languages.createDiagnosticCollection('python'); } @@ -62,19 +65,7 @@ export class LintingEngine implements ILintingEngine { public async lintDocument(document: vscode.TextDocument, trigger: LinterTrigger): Promise { // Check if we need to lint this document - const workspaceFolder = this.workspace.getWorkspaceFolder(document.uri); - const workspaceRootPath = (workspaceFolder && typeof workspaceFolder.uri.fsPath === 'string') ? 
workspaceFolder.uri.fsPath : undefined; - const relativeFileName = typeof workspaceRootPath === 'string' ? path.relative(workspaceRootPath, document.fileName) : document.fileName; - const settings = this.configurationService.getSettings(document.uri); - if (document.languageId !== PythonLanguage.language) { - return this.diagnosticCollection; - } - if (!this.linterManager.isLintingEnabled(document.uri)) { - this.diagnosticCollection.set(document.uri, []); - } - - const ignoreMinmatches = settings.linting.ignorePatterns.map(pattern => new Minimatch(pattern)); - if (ignoreMinmatches.some(matcher => matcher.match(document.fileName) || matcher.match(relativeFileName))) { + if (!await this.shouldLintDocument(document)) { return this.diagnosticCollection; } @@ -106,6 +97,7 @@ export class LintingEngine implements ILintingEngine { // linters will resolve asynchronously - keep a track of all // diagnostics reported as them come in. let diagnostics: vscode.Diagnostic[] = []; + const settings = this.configurationService.getSettings(document.uri); for (const p of promises) { const msgs = await p; @@ -173,4 +165,29 @@ export class LintingEngine implements ILintingEngine { diagnostic.source = message.provider; return diagnostic; } + + private async shouldLintDocument(document: vscode.TextDocument): Promise { + if (!this.linterManager.isLintingEnabled(document.uri)) { + this.diagnosticCollection.set(document.uri, []); + return false; + } + + if (document.languageId !== PYTHON.language) { + return false; + } + + const workspaceFolder = this.workspace.getWorkspaceFolder(document.uri); + const workspaceRootPath = (workspaceFolder && typeof workspaceFolder.uri.fsPath === 'string') ? workspaceFolder.uri.fsPath : undefined; + const relativeFileName = typeof workspaceRootPath === 'string' ? path.relative(workspaceRootPath, document.fileName) : document.fileName; + + const settings = this.configurationService.getSettings(document.uri); + const ignoreMinmatches = settings.linting.ignorePatterns.map(pattern => new Minimatch(pattern)); + if (ignoreMinmatches.some(matcher => matcher.match(document.fileName) || matcher.match(relativeFileName))) { + return false; + } + if (document.uri.scheme !== 'file' || !document.uri.fsPath) { + return false; + } + return await this.fileSystem.fileExistsAsync(document.uri.fsPath); + } } diff --git a/src/client/providers/linterProvider.ts b/src/client/providers/linterProvider.ts index fd44b9b457c8..45356c70f8df 100644 --- a/src/client/providers/linterProvider.ts +++ b/src/client/providers/linterProvider.ts @@ -12,8 +12,6 @@ import { IInterpreterService } from '../interpreter/contracts'; import { IServiceContainer } from '../ioc/types'; import { ILinterManager, ILintingEngine } from '../linters/types'; -const uriSchemesToIgnore = ['git', 'showModifications', 'svn']; - export class LinterProvider implements vscode.Disposable { private diagnosticCollection: vscode.DiagnosticCollection; private context: vscode.ExtensionContext; @@ -46,6 +44,10 @@ export class LinterProvider implements vscode.Disposable { this.configMonitor = new ConfigSettingMonitor('linting'); this.configMonitor.on('change', this.lintSettingsChangedHandler.bind(this)); + + // On workspace reopen we don't get `onDocumentOpened` since it is first opened + // and then the extension is activated. So schedule linting pass now. 
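+ // The 2 second delay is a rough heuristic: it gives the editor time to finish restoring documents before the linting pass runs.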
+ setTimeout(() => this.engine.lintOpenPythonFiles().ignoreErrors(), 2000); } public get diagnostics(): vscode.DiagnosticCollection { @@ -63,35 +65,23 @@ export class LinterProvider implements vscode.Disposable { private lintSettingsChangedHandler(configTarget: ConfigurationTarget, wkspaceOrFolder: Uri) { if (configTarget === ConfigurationTarget.Workspace) { - this.engine.lintOpenPythonFiles(); + this.engine.lintOpenPythonFiles().ignoreErrors(); return; } // Look for python files that belong to the specified workspace folder. workspace.textDocuments.forEach(async document => { const wkspaceFolder = workspace.getWorkspaceFolder(document.uri); if (wkspaceFolder && wkspaceFolder.uri.fsPath === wkspaceOrFolder.fsPath) { - await this.engine.lintDocument(document, 'auto'); + this.engine.lintDocument(document, 'auto').ignoreErrors(); } }); } - private async onDocumentOpened(document: vscode.TextDocument): Promise { - const settings = this.configuration.getSettings(document.uri); - if (document.languageId !== 'python' || !settings.linting.enabled) { - return; - } - // Exclude files opened by vscode when showing a diff view. - if (uriSchemesToIgnore.indexOf(document.uri.scheme) >= 0) { - return; - } - if (!document.uri.path || - (path.basename(document.uri.path) === document.uri.path && !await this.fs.fileExistsAsync(document.uri.path))) { - return; - } + private onDocumentOpened(document: vscode.TextDocument): void { this.engine.lintDocument(document, 'auto').ignoreErrors(); } - private onDocumentSaved(document: vscode.TextDocument) { + private onDocumentSaved(document: vscode.TextDocument): void { const settings = this.configuration.getSettings(document.uri); if (document.languageId === 'python' && settings.linting.enabled && settings.linting.lintOnSave) { this.engine.lintDocument(document, 'save').ignoreErrors(); diff --git a/src/test/linters/lint.test.ts b/src/test/linters/lint.test.ts index 567ac29f0293..306cd86d48fe 100644 --- a/src/test/linters/lint.test.ts +++ b/src/test/linters/lint.test.ts @@ -261,8 +261,8 @@ suite('Linting', () => { assert.equal(ready, true, 'Timeout expired but linting results are not available still.'); const messages = collection!.get(document.uri); - assert.notEqual(messages!.filter(x => x.source === 'pylint').length, 0, 'No pylint message.'); - assert.notEqual(messages!.filter(x => x.source === 'flake8').length, 0, 'No flake8 message.'); + assert.notEqual(messages!.filter(x => x.source === 'pylint').length, 0, 'No pylint messages.'); + assert.notEqual(messages!.filter(x => x.source === 'flake8').length, 0, 'No flake8 messages.'); }); async function waitForCondition(predicate: () => boolean, interval = 1000, maxAttempts = 10): Promise { diff --git a/src/test/linters/lintengine.test.ts b/src/test/linters/lintengine.test.ts index 50b24f07700f..6534b4c40235 100644 --- a/src/test/linters/lintengine.test.ts +++ b/src/test/linters/lintengine.test.ts @@ -6,6 +6,7 @@ import { OutputChannel, TextDocument, Uri } from 'vscode'; import { IDocumentManager, IWorkspaceService } from '../../client/common/application/types'; import { PythonLanguage, STANDARD_OUTPUT_CHANNEL } from '../../client/common/constants'; import '../../client/common/extensions'; +import { IFileSystem } from '../../client/common/platform/types'; import { IConfigurationService, ILintingSettings, IOutputChannel, IPythonSettings } from '../../client/common/types'; import { IServiceContainer } from '../../client/ioc/types'; import { LintingEngine } from '../../client/linters/lintingEngine'; @@ -14,12 +15,16 @@ 
import { initialize } from '../initialize'; // tslint:disable-next-line:max-func-body-length suite('Linting - LintingEngine', () => { - let lintingEnging: ILintingEngine; - let document: TextDocument; + let serviceContainer: TypeMoq.IMock; let lintManager: TypeMoq.IMock; + let settings: TypeMoq.IMock; + let lintSettings: TypeMoq.IMock; + let fileSystem: TypeMoq.IMock; + let lintingEngine: ILintingEngine; + suiteSetup(initialize); setup(async () => { - const serviceContainer = TypeMoq.Mock.ofType(); + serviceContainer = TypeMoq.Mock.ofType(); const docManager = TypeMoq.Mock.ofType(); serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IDocumentManager), TypeMoq.It.isAny())).returns(() => docManager.object); @@ -27,10 +32,12 @@ suite('Linting - LintingEngine', () => { const workspaceService = TypeMoq.Mock.ofType(); serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IWorkspaceService), TypeMoq.It.isAny())).returns(() => workspaceService.object); - const lintSettings = TypeMoq.Mock.ofType(); - lintSettings.setup(l => l.ignorePatterns).returns(() => []); - const settings = TypeMoq.Mock.ofType(); - settings.setup(x => x.linting).returns(() => lintSettings.object); + fileSystem = TypeMoq.Mock.ofType(); + serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IFileSystem), TypeMoq.It.isAny())).returns(() => fileSystem.object); + + lintSettings = TypeMoq.Mock.ofType(); + settings = TypeMoq.Mock.ofType(); + const configService = TypeMoq.Mock.ofType(); configService.setup(x => x.getSettings(TypeMoq.It.isAny())).returns(() => settings.object); serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IConfigurationService), TypeMoq.It.isAny())).returns(() => configService.object); @@ -39,28 +46,77 @@ suite('Linting - LintingEngine', () => { serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IOutputChannel), TypeMoq.It.isValue(STANDARD_OUTPUT_CHANNEL))).returns(() => outputChannel.object); lintManager = TypeMoq.Mock.ofType(); + lintManager.setup(x => x.isLintingEnabled(TypeMoq.It.isAny())).returns(() => true); serviceContainer.setup(c => c.get(TypeMoq.It.isValue(ILinterManager), TypeMoq.It.isAny())).returns(() => lintManager.object); - const mockDocument = TypeMoq.Mock.ofType(); - mockDocument.setup(d => d.uri).returns(() => Uri.file('a.py')); - mockDocument.setup(d => d.languageId).returns(() => PythonLanguage.language); - document = mockDocument.object; - - lintingEnging = new LintingEngine(serviceContainer.object); + lintingEngine = new LintingEngine(serviceContainer.object); }); test('Ensure document.uri is passed into isLintingEnabled', () => { + const doc = mockTextDocument('a.py', PythonLanguage.language, true); try { - lintingEnging.lintDocument(document, 'auto').ignoreErrors(); + lintingEngine.lintDocument(doc, 'auto').ignoreErrors(); } catch { - lintManager.verify(l => l.isLintingEnabled(TypeMoq.It.isValue(document.uri)), TypeMoq.Times.once()); + lintManager.verify(l => l.isLintingEnabled(TypeMoq.It.isValue(doc.uri)), TypeMoq.Times.once()); } }); test('Ensure document.uri is passed into createLinter', () => { + const doc = mockTextDocument('a.py', PythonLanguage.language, true); try { - lintingEnging.lintDocument(document, 'auto').ignoreErrors(); + lintingEngine.lintDocument(doc, 'auto').ignoreErrors(); } catch { - lintManager.verify(l => l.createLinter(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isValue(document.uri)), TypeMoq.Times.atLeastOnce()); + lintManager.verify(l => l.createLinter(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny(), 
TypeMoq.It.isValue(doc.uri)), TypeMoq.Times.atLeastOnce()); } }); + + test('Verify files that match ignore pattern are not linted', async () => { + const doc = mockTextDocument('a1.py', PythonLanguage.language, true, ['a*.py']); + await lintingEngine.lintDocument(doc, 'auto'); + lintManager.verify(l => l.createLinter(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny()), TypeMoq.Times.never()); + }); + + test('Ensure non-Python files are not linted', async () => { + const doc = mockTextDocument('a.ts', 'typescript', true); + await lintingEngine.lintDocument(doc, 'auto'); + lintManager.verify(l => l.createLinter(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny()), TypeMoq.Times.never()); + }); + + test('Ensure files with git scheme are not linted', async () => { + const doc = mockTextDocument('a1.py', PythonLanguage.language, false, [], 'git'); + await lintingEngine.lintDocument(doc, 'auto'); + lintManager.verify(l => l.createLinter(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny()), TypeMoq.Times.never()); + }); + test('Ensure files with showModifications scheme are not linted', async () => { + const doc = mockTextDocument('a1.py', PythonLanguage.language, false, [], 'showModifications'); + await lintingEngine.lintDocument(doc, 'auto'); + lintManager.verify(l => l.createLinter(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny()), TypeMoq.Times.never()); + }); + test('Ensure files with svn scheme are not linted', async () => { + const doc = mockTextDocument('a1.py', PythonLanguage.language, false, [], 'svn'); + await lintingEngine.lintDocument(doc, 'auto'); + lintManager.verify(l => l.createLinter(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny()), TypeMoq.Times.never()); + }); + + test('Ensure non-existing files are not linted', async () => { + const doc = mockTextDocument('file.py', PythonLanguage.language, false, []); + await lintingEngine.lintDocument(doc, 'auto'); + lintManager.verify(l => l.createLinter(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny()), TypeMoq.Times.never()); + }); + + function mockTextDocument(fileName: string, language: string, exists: boolean, ignorePattern: string[] = [], scheme?: string): TextDocument { + fileSystem.setup(x => x.fileExistsAsync(TypeMoq.It.isAnyString())).returns(() => Promise.resolve(exists)); + + lintSettings.setup(l => l.ignorePatterns).returns(() => ignorePattern); + settings.setup(x => x.linting).returns(() => lintSettings.object); + + const doc = TypeMoq.Mock.ofType(); + if (scheme) { + doc.setup(d => d.uri).returns(() => Uri.parse(`${scheme}:${fileName}`)); + } else { + doc.setup(d => d.uri).returns(() => Uri.file(fileName)); + } + doc.setup(d => d.fileName).returns(() => fileName); + doc.setup(d => d.languageId).returns(() => language); + return doc.object; + } }); From 5a0a553309a505bab7cad98ec7224621c1e6d587 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 28 Feb 2018 16:07:17 -0800 Subject: [PATCH 070/103] Test stability --- src/client/providers/linterProvider.ts | 5 ++++- src/test/linters/lintengine.test.ts | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/client/providers/linterProvider.ts b/src/client/providers/linterProvider.ts index 45356c70f8df..89a3222ac3e5 100644 --- a/src/client/providers/linterProvider.ts +++ b/src/client/providers/linterProvider.ts @@ -6,6 +6,7 @@ import * as vscode from 'vscode'; import { ConfigurationTarget, 
Uri, workspace } from 'vscode'; import { IDocumentManager } from '../common/application/types'; import { ConfigSettingMonitor } from '../common/configSettingMonitor'; +import { isTestExecution } from '../common/constants'; import { IFileSystem } from '../common/platform/types'; import { IConfigurationService } from '../common/types'; import { IInterpreterService } from '../interpreter/contracts'; @@ -47,7 +48,9 @@ export class LinterProvider implements vscode.Disposable { // On workspace reopen we don't get `onDocumentOpened` since it is first opened // and then the extension is activated. So schedule linting pass now. - setTimeout(() => this.engine.lintOpenPythonFiles().ignoreErrors(), 2000); + if (!isTestExecution) { + setTimeout(() => this.engine.lintOpenPythonFiles().ignoreErrors(), 2000); + } } public get diagnostics(): vscode.DiagnosticCollection { diff --git a/src/test/linters/lintengine.test.ts b/src/test/linters/lintengine.test.ts index 6534b4c40235..44520df46cdc 100644 --- a/src/test/linters/lintengine.test.ts +++ b/src/test/linters/lintengine.test.ts @@ -40,6 +40,7 @@ suite('Linting - LintingEngine', () => { const configService = TypeMoq.Mock.ofType(); configService.setup(x => x.getSettings(TypeMoq.It.isAny())).returns(() => settings.object); + configService.setup(x => x.isTestExecution()).returns(() => true); serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IConfigurationService), TypeMoq.It.isAny())).returns(() => configService.object); const outputChannel = TypeMoq.Mock.ofType(); @@ -50,6 +51,7 @@ suite('Linting - LintingEngine', () => { serviceContainer.setup(c => c.get(TypeMoq.It.isValue(ILinterManager), TypeMoq.It.isAny())).returns(() => lintManager.object); lintingEngine = new LintingEngine(serviceContainer.object); + serviceContainer.setup(c => c.get(TypeMoq.It.isValue(ILintingEngine), TypeMoq.It.isAny())).returns(() => lintingEngine); }); test('Ensure document.uri is passed into isLintingEnabled', () => { From 2c635ba20c3be2c798999c66f61dab853b073ce6 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Wed, 28 Feb 2018 22:19:39 -0800 Subject: [PATCH 071/103] Try increase timeout --- src/test/linters/lint.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/linters/lint.test.ts b/src/test/linters/lint.test.ts index 306cd86d48fe..595f5acb7775 100644 --- a/src/test/linters/lint.test.ts +++ b/src/test/linters/lint.test.ts @@ -265,7 +265,7 @@ suite('Linting', () => { assert.notEqual(messages!.filter(x => x.source === 'flake8').length, 0, 'No flake8 messages.'); }); - async function waitForCondition(predicate: () => boolean, interval = 1000, maxAttempts = 10): Promise { + async function waitForCondition(predicate: () => boolean, interval = 1000, maxAttempts = 30): Promise { return new Promise(async (resolve) => { let retries = 0; while (!predicate() && retries < maxAttempts) { From f37c27c5f6037cd719831d07247a58d50df38257 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 1 Mar 2018 09:41:34 -0800 Subject: [PATCH 072/103] Make sure linting is enabled in tests --- src/test/linters/lint.test.ts | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/test/linters/lint.test.ts b/src/test/linters/lint.test.ts index 595f5acb7775..7ae5c7e019bc 100644 --- a/src/test/linters/lint.test.ts +++ b/src/test/linters/lint.test.ts @@ -99,7 +99,7 @@ suite('Linting', () => { suiteSetup(initialize); setup(async () => { - initializeDI(); + await initializeDI(); await initializeTest(); await resetSettings(); }); @@ -112,7 +112,7 @@ 
suite('Linting', () => { await deleteFile(path.join(workspaceUri.fsPath, '.pydocstyle')); }); - function initializeDI() { + async function initializeDI() { ioc = new UnitTestIocContainer(); ioc.registerCommonTypes(false); ioc.registerProcessTypes(); @@ -122,6 +122,7 @@ suite('Linting', () => { linterManager = new LinterManager(ioc.serviceContainer); configService = ioc.serviceContainer.get(IConfigurationService); + await linterManager.enableLintingAsync(true); } async function resetSettings() { @@ -265,7 +266,7 @@ suite('Linting', () => { assert.notEqual(messages!.filter(x => x.source === 'flake8').length, 0, 'No flake8 messages.'); }); - async function waitForCondition(predicate: () => boolean, interval = 1000, maxAttempts = 30): Promise { + async function waitForCondition(predicate: () => boolean, interval = 1000, maxAttempts = 15): Promise { return new Promise(async (resolve) => { let retries = 0; while (!predicate() && retries < maxAttempts) { From 8fd2d14b39b7b922865832761a4c57e6372e639f Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 1 Mar 2018 10:29:59 -0800 Subject: [PATCH 073/103] Try another way of waiting --- src/test/linters/lint.test.ts | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/test/linters/lint.test.ts b/src/test/linters/lint.test.ts index 7ae5c7e019bc..b899fd038785 100644 --- a/src/test/linters/lint.test.ts +++ b/src/test/linters/lint.test.ts @@ -99,7 +99,7 @@ suite('Linting', () => { suiteSetup(initialize); setup(async () => { - await initializeDI(); + initializeDI(); await initializeTest(); await resetSettings(); }); @@ -112,7 +112,7 @@ suite('Linting', () => { await deleteFile(path.join(workspaceUri.fsPath, '.pydocstyle')); }); - async function initializeDI() { + function initializeDI() { ioc = new UnitTestIocContainer(); ioc.registerCommonTypes(false); ioc.registerProcessTypes(); @@ -122,7 +122,6 @@ suite('Linting', () => { linterManager = new LinterManager(ioc.serviceContainer); configService = ioc.serviceContainer.get(IConfigurationService); - await linterManager.enableLintingAsync(true); } async function resetSettings() { @@ -253,20 +252,21 @@ suite('Linting', () => { }); test('Multiple linters', async () => { await linterManager.setActiveLintersAsync([Product.pylint, Product.flake8]); + await linterManager.enableLintingAsync(true); const document = await vscode.workspace.openTextDocument(path.join(pythoFilesPath, 'print.py')); const collection = await vscode.commands.executeCommand('python.runLinting') as vscode.DiagnosticCollection; assert.notEqual(collection, undefined, 'python.runLinting did not return valid diagnostics collection.'); - const ready = await waitForCondition(() => collection!.has(document.uri) && collection!.get(document.uri)!.length >= 3); - assert.equal(ready, true, 'Timeout expired but linting results are not available still.'); + await waitForCondition(() => collection!.has(document.uri) && collection!.get(document.uri)!.length >= 3); const messages = collection!.get(document.uri); + assert.notEqual(messages!.length, 0, 'No diagnostic messages.'); assert.notEqual(messages!.filter(x => x.source === 'pylint').length, 0, 'No pylint messages.'); assert.notEqual(messages!.filter(x => x.source === 'flake8').length, 0, 'No flake8 messages.'); }); - async function waitForCondition(predicate: () => boolean, interval = 1000, maxAttempts = 15): Promise { + async function waitForCondition(predicate: () => boolean, interval = 1000, maxAttempts = 30): Promise { return new Promise(async (resolve) => { let 
retries = 0; while (!predicate() && retries < maxAttempts) { From 6ac00d8636f784c163358aec253fb739661f2eab Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 1 Mar 2018 12:46:10 -0800 Subject: [PATCH 074/103] Simplify --- src/client/linters/lintingEngine.ts | 17 ++++++++--------- src/client/linters/types.ts | 2 +- src/test/linters/lint.test.ts | 16 +--------------- 3 files changed, 10 insertions(+), 25 deletions(-) diff --git a/src/client/linters/lintingEngine.ts b/src/client/linters/lintingEngine.ts index fad029d366ba..e5c59eaa693a 100644 --- a/src/client/linters/lintingEngine.ts +++ b/src/client/linters/lintingEngine.ts @@ -6,7 +6,7 @@ import { Minimatch } from 'minimatch'; import * as path from 'path'; import * as vscode from 'vscode'; import { IDocumentManager, IWorkspaceService } from '../common/application/types'; -import { LinterErrors, PythonLanguage, STANDARD_OUTPUT_CHANNEL } from '../common/constants'; +import { LinterErrors, STANDARD_OUTPUT_CHANNEL } from '../common/constants'; import { IFileSystem } from '../common/platform/types'; import { IConfigurationService, IOutputChannel } from '../common/types'; import { IServiceContainer } from '../ioc/types'; @@ -55,18 +55,18 @@ export class LintingEngine implements ILintingEngine { } public async lintOpenPythonFiles(): Promise<vscode.DiagnosticCollection> { - this.documents.textDocuments.forEach(document => { - if (document.languageId === PythonLanguage.language) { - this.lintDocument(document, 'auto').ignoreErrors(); - } - }); + this.diagnosticCollection.clear(); + const promises = this.documents.textDocuments.map(async document => await this.lintDocument(document, 'auto')); + await Promise.all(promises); return this.diagnosticCollection; } - public async lintDocument(document: vscode.TextDocument, trigger: LinterTrigger): Promise<vscode.DiagnosticCollection> { + public async lintDocument(document: vscode.TextDocument, trigger: LinterTrigger): Promise<void> { + this.diagnosticCollection.set(document.uri, []); + // Check if we need to lint this document if (!await this.shouldLintDocument(document)) { - return this.diagnosticCollection; + return; } if (this.pendingLintings.has(document.uri.fsPath)) { @@ -123,7 +123,6 @@ export class LintingEngine implements ILintingEngine { } // Set all diagnostics found in this pass, as this method always clears existing diagnostics.
this.diagnosticCollection.set(document.uri, diagnostics); - return this.diagnosticCollection; } // tslint:disable-next-line:no-any diff --git a/src/client/linters/types.ts b/src/client/linters/types.ts index 407635b55174..6c0ca9a44b6e 100644 --- a/src/client/linters/types.ts +++ b/src/client/linters/types.ts @@ -62,7 +62,7 @@ export enum LintMessageSeverity { export const ILintingEngine = Symbol('ILintingEngine'); export interface ILintingEngine { lintOpenPythonFiles(): Promise<vscode.DiagnosticCollection>; - lintDocument(document: vscode.TextDocument, trigger: LinterTrigger): Promise<vscode.DiagnosticCollection>; + lintDocument(document: vscode.TextDocument, trigger: LinterTrigger): Promise<void>; // tslint:disable-next-line:no-any linkJupiterExtension(jupiter: vscode.Extension | undefined): Promise; } diff --git a/src/test/linters/lint.test.ts b/src/test/linters/lint.test.ts index b899fd038785..d27be93cdbcc 100644 --- a/src/test/linters/lint.test.ts +++ b/src/test/linters/lint.test.ts @@ -8,7 +8,7 @@ import { Product } from '../../client/common/installer/productInstaller'; import { IConfigurationService, IOutputChannel } from '../../client/common/types'; import { LinterManager } from '../../client/linters/linterManager'; import { ILinterManager, ILintMessage, LintMessageSeverity } from '../../client/linters/types'; -import { deleteFile, PythonSettingKeys, rootWorkspaceUri, sleep } from '../common'; +import { deleteFile, PythonSettingKeys, rootWorkspaceUri } from '../common'; import { closeActiveWindows, initialize, initializeTest, IS_MULTI_ROOT_TEST } from '../initialize'; import { MockOutputChannel } from '../mockClasses'; import { UnitTestIocContainer } from '../unittests/serviceRegistry'; @@ -252,28 +252,14 @@ suite('Linting', () => { }); test('Multiple linters', async () => { await linterManager.setActiveLintersAsync([Product.pylint, Product.flake8]); - await linterManager.enableLintingAsync(true); const document = await vscode.workspace.openTextDocument(path.join(pythoFilesPath, 'print.py')); const collection = await vscode.commands.executeCommand('python.runLinting') as vscode.DiagnosticCollection; assert.notEqual(collection, undefined, 'python.runLinting did not return valid diagnostics collection.'); - await waitForCondition(() => collection!.has(document.uri) && collection!.get(document.uri)!.length >= 3); const messages = collection!.get(document.uri); + assert.notEqual(messages!.length, 0, 'No diagnostic messages.'); assert.notEqual(messages!.filter(x => x.source === 'pylint').length, 0, 'No pylint messages.'); assert.notEqual(messages!.filter(x => x.source === 'flake8').length, 0, 'No flake8 messages.'); }); - - async function waitForCondition(predicate: () => boolean, interval = 1000, maxAttempts = 30): Promise { - return new Promise(async (resolve) => { - let retries = 0; - while (!predicate() && retries < maxAttempts) { - await sleep(1000); - retries += 1; - } - resolve(retries < maxAttempts); - }); - } }); From a9c2708a736d4fc7141f09cf75bf2acc8adf7284 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 1 Mar 2018 13:46:32 -0800 Subject: [PATCH 075/103] Fix clear diags on close tests --- src/client/linters/lintingEngine.ts | 10 ++++++++++ src/client/linters/types.ts | 2 ++ src/client/providers/linterProvider.ts | 14 ++++---------- src/test/linters/lint.provider.test.ts | 10 +++------- src/test/linters/lint.test.ts | 6 ++++-- 5 files changed, 23 insertions(+), 19 deletions(-) diff --git a/src/client/linters/lintingEngine.ts b/src/client/linters/lintingEngine.ts index e5c59eaa693a..650df50fa913 100644 ---
a/src/client/linters/lintingEngine.ts +++ b/src/client/linters/lintingEngine.ts @@ -54,6 +54,16 @@ export class LintingEngine implements ILintingEngine { this.diagnosticCollection = vscode.languages.createDiagnosticCollection('python'); } + public get diagnostics(): vscode.DiagnosticCollection { + return this.diagnosticCollection; + } + + public clearDiagnostics(document: vscode.TextDocument): void { + if (this.diagnosticCollection.has(document.uri)) { + this.diagnosticCollection.delete(document.uri); + } + } + public async lintOpenPythonFiles(): Promise { this.diagnosticCollection.clear(); const promises = this.documents.textDocuments.map(async document => await this.lintDocument(document, 'auto')); diff --git a/src/client/linters/types.ts b/src/client/linters/types.ts index 6c0ca9a44b6e..bbd89558fa47 100644 --- a/src/client/linters/types.ts +++ b/src/client/linters/types.ts @@ -61,8 +61,10 @@ export enum LintMessageSeverity { export const ILintingEngine = Symbol('ILintingEngine'); export interface ILintingEngine { + readonly diagnostics: vscode.DiagnosticCollection; lintOpenPythonFiles(): Promise; lintDocument(document: vscode.TextDocument, trigger: LinterTrigger): Promise; // tslint:disable-next-line:no-any linkJupiterExtension(jupiter: vscode.Extension | undefined): Promise; + clearDiagnostics(document: vscode.TextDocument): void; } diff --git a/src/client/providers/linterProvider.ts b/src/client/providers/linterProvider.ts index 89a3222ac3e5..fb66aab3971b 100644 --- a/src/client/providers/linterProvider.ts +++ b/src/client/providers/linterProvider.ts @@ -14,7 +14,6 @@ import { IServiceContainer } from '../ioc/types'; import { ILinterManager, ILintingEngine } from '../linters/types'; export class LinterProvider implements vscode.Disposable { - private diagnosticCollection: vscode.DiagnosticCollection; private context: vscode.ExtensionContext; private disposables: vscode.Disposable[]; private configMonitor: ConfigSettingMonitor; @@ -36,7 +35,6 @@ export class LinterProvider implements vscode.Disposable { this.documents = serviceContainer.get(IDocumentManager); this.configuration = serviceContainer.get(IConfigurationService); - this.diagnosticCollection = vscode.languages.createDiagnosticCollection('python'); this.disposables.push(this.interpreterService.onDidChangeInterpreter(() => this.engine.lintOpenPythonFiles())); this.documents.onDidOpenTextDocument(e => this.onDocumentOpened(e), this.context.subscriptions); @@ -48,15 +46,11 @@ export class LinterProvider implements vscode.Disposable { // On workspace reopen we don't get `onDocumentOpened` since it is first opened // and then the extension is activated. So schedule linting pass now. - if (!isTestExecution) { - setTimeout(() => this.engine.lintOpenPythonFiles().ignoreErrors(), 2000); + if (!isTestExecution()) { + setTimeout(() => this.engine.lintOpenPythonFiles().ignoreErrors(), 1200); } } - public get diagnostics(): vscode.DiagnosticCollection { - return this.diagnosticCollection; - } - public dispose() { this.disposables.forEach(d => d.dispose()); this.configMonitor.dispose(); @@ -104,8 +98,8 @@ export class LinterProvider implements vscode.Disposable { return; } // Check if this document is still open as a duplicate editor. 
- if (!this.isDocumentOpen(document.uri) && this.diagnosticCollection.has(document.uri)) { - this.diagnosticCollection.set(document.uri, []); + if (!this.isDocumentOpen(document.uri)) { + this.engine.clearDiagnostics(document); } } } diff --git a/src/test/linters/lint.provider.test.ts b/src/test/linters/lint.provider.test.ts index 53b0b56cedfa..023ee86223be 100644 --- a/src/test/linters/lint.provider.test.ts +++ b/src/test/linters/lint.provider.test.ts @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -import { expect } from 'chai'; import { Container } from 'inversify'; import * as TypeMoq from 'typemoq'; import * as vscode from 'vscode'; @@ -150,14 +149,11 @@ suite('Linting - Provider', () => { document.setup(x => x.isClosed).returns(() => closed); docManager.setup(x => x.textDocuments).returns(() => closed ? [] : [document.object]); - + // tslint:disable-next-line:prefer-const no-unused-variable const provider = new LinterProvider(context.object, serviceContainer); - const diags: vscode.Diagnostic[] = []; - diags.push(new vscode.Diagnostic(new vscode.Range(new vscode.Position(0, 0), new vscode.Position(0, 1)), 'error')); - provider.diagnostics.set(uri, diags); emitter.fire(document.object); - const d = provider.diagnostics.get(uri); - expect(d).to.be.lengthOf(closed ? 0 : 1, 'Diagnostic collection not of expected length after file close.'); + const timesExpected = closed ? TypeMoq.Times.once() : TypeMoq.Times.never(); + engine.verify(x => x.clearDiagnostics(TypeMoq.It.isAny()), timesExpected); } }); diff --git a/src/test/linters/lint.test.ts b/src/test/linters/lint.test.ts index d27be93cdbcc..aa8bba673224 100644 --- a/src/test/linters/lint.test.ts +++ b/src/test/linters/lint.test.ts @@ -251,9 +251,11 @@ suite('Linting', () => { await testEnablingDisablingOfLinter(Product.pylint, true, file); }); test('Multiple linters', async () => { - await linterManager.setActiveLintersAsync([Product.pylint, Product.flake8]); - + await closeActiveWindows(); const document = await vscode.workspace.openTextDocument(path.join(pythoFilesPath, 'print.py')); + await vscode.window.showTextDocument(document); + + await linterManager.setActiveLintersAsync([Product.pylint, Product.flake8]); const collection = await vscode.commands.executeCommand('python.runLinting') as vscode.DiagnosticCollection; assert.notEqual(collection, undefined, 'python.runLinting did not return valid diagnostics collection.'); From 5fbc7035d60fa4f23e66d383a6ff9b621ea243ba Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 1 Mar 2018 15:04:14 -0800 Subject: [PATCH 076/103] Try writing settings directly --- src/test/linters/lint.test.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/test/linters/lint.test.ts b/src/test/linters/lint.test.ts index aa8bba673224..950ab8263e15 100644 --- a/src/test/linters/lint.test.ts +++ b/src/test/linters/lint.test.ts @@ -255,7 +255,11 @@ suite('Linting', () => { const document = await vscode.workspace.openTextDocument(path.join(pythoFilesPath, 'print.py')); await vscode.window.showTextDocument(document); - await linterManager.setActiveLintersAsync([Product.pylint, Product.flake8]); + await configService.updateSettingAsync('linting.enabled', true, workspaceUri); + await configService.updateSettingAsync('linting.pylintUseMinimalCheckers', false, workspaceUri); + await configService.updateSettingAsync('linting.pylintEnabled', true, workspaceUri); + await configService.updateSettingAsync('linting.flake8Enabled', 
true, workspaceUri); + const collection = await vscode.commands.executeCommand('python.runLinting') as vscode.DiagnosticCollection; assert.notEqual(collection, undefined, 'python.runLinting did not return valid diagnostics collection.'); From 46090c6fefc0d805d3ef2f6a73a3f9ed3d5ca5f3 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Thu, 1 Mar 2018 22:03:47 -0800 Subject: [PATCH 077/103] Increase timeout --- src/test/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/index.ts b/src/test/index.ts index 3717f37fd021..ff6b16c02900 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -21,7 +21,7 @@ const grep = IS_CI_SERVER && IS_CI_SERVER_TEST_DEBUGGER ? 'Debug' : undefined; const options: MochaSetupOptions & { retries: number } = { ui: 'tdd', useColors: true, - timeout: 25000, + timeout: 35000, retries: 3, grep }; From c54b0eef4f2ebf2f60d6a0faef3157aa639abed6 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 2 Mar 2018 09:31:50 -0800 Subject: [PATCH 078/103] Measure test time --- src/test/index.ts | 2 +- src/test/linters/lint.test.ts | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/test/index.ts b/src/test/index.ts index ff6b16c02900..3717f37fd021 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -21,7 +21,7 @@ const grep = IS_CI_SERVER && IS_CI_SERVER_TEST_DEBUGGER ? 'Debug' : undefined; const options: MochaSetupOptions & { retries: number } = { ui: 'tdd', useColors: true, - timeout: 35000, + timeout: 25000, retries: 3, grep }; diff --git a/src/test/linters/lint.test.ts b/src/test/linters/lint.test.ts index 950ab8263e15..307a850cd61a 100644 --- a/src/test/linters/lint.test.ts +++ b/src/test/linters/lint.test.ts @@ -8,6 +8,7 @@ import { Product } from '../../client/common/installer/productInstaller'; import { IConfigurationService, IOutputChannel } from '../../client/common/types'; import { LinterManager } from '../../client/linters/linterManager'; import { ILinterManager, ILintMessage, LintMessageSeverity } from '../../client/linters/types'; +import { StopWatch } from '../../client/telemetry/stopWatch'; import { deleteFile, PythonSettingKeys, rootWorkspaceUri } from '../common'; import { closeActiveWindows, initialize, initializeTest, IS_MULTI_ROOT_TEST } from '../initialize'; import { MockOutputChannel } from '../mockClasses'; @@ -251,19 +252,41 @@ suite('Linting', () => { await testEnablingDisablingOfLinter(Product.pylint, true, file); }); test('Multiple linters', async () => { + // tslint:disable-next-line:no-invalid-this + this.timeout(40000); + + const sw = new StopWatch(); await closeActiveWindows(); + // tslint:disable-next-line:no-console + console.log(`Closed active windows ${sw.elapsedTime}`); const document = await vscode.workspace.openTextDocument(path.join(pythoFilesPath, 'print.py')); + // tslint:disable-next-line:no-console + console.log(`Opened document ${sw.elapsedTime}`); await vscode.window.showTextDocument(document); + // tslint:disable-next-line:no-console + console.log(`Shown document ${sw.elapsedTime}`); await configService.updateSettingAsync('linting.enabled', true, workspaceUri); + // tslint:disable-next-line:no-console + console.log(`Updated linting.enabled ${sw.elapsedTime}`); await configService.updateSettingAsync('linting.pylintUseMinimalCheckers', false, workspaceUri); + // tslint:disable-next-line:no-console + console.log(`Updated pylintUseMinimalCheckers ${sw.elapsedTime}`); await configService.updateSettingAsync('linting.pylintEnabled', true, workspaceUri); 
+ // tslint:disable-next-line:no-console + console.log(`Updated pylintEnabled ${sw.elapsedTime}`); await configService.updateSettingAsync('linting.flake8Enabled', true, workspaceUri); + // tslint:disable-next-line:no-console + console.log(`Updated flake8Enabled ${sw.elapsedTime}`); const collection = await vscode.commands.executeCommand('python.runLinting') as vscode.DiagnosticCollection; + // tslint:disable-next-line:no-console + console.log(`Executed command ${sw.elapsedTime}`); assert.notEqual(collection, undefined, 'python.runLinting did not return valid diagnostics collection.'); const messages = collection!.get(document.uri); + // tslint:disable-next-line:no-console + console.log(`Got messages ${sw.elapsedTime}`); assert.notEqual(messages!.length, 0, 'No diagnostic messages.'); assert.notEqual(messages!.filter(x => x.source === 'pylint').length, 0, 'No pylint messages.'); assert.notEqual(messages!.filter(x => x.source === 'flake8').length, 0, 'No flake8 messages.'); From 895418028528dd5a4ec0f81d27f93d1510e0be94 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 2 Mar 2018 10:07:01 -0800 Subject: [PATCH 079/103] Measure time --- src/test/linters/lint.test.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/test/linters/lint.test.ts b/src/test/linters/lint.test.ts index 307a850cd61a..b455ebe160e4 100644 --- a/src/test/linters/lint.test.ts +++ b/src/test/linters/lint.test.ts @@ -251,7 +251,8 @@ suite('Linting', () => { await configService.updateSettingAsync('linting.pylintUseMinimalCheckers', false, workspaceUri); await testEnablingDisablingOfLinter(Product.pylint, true, file); }); - test('Multiple linters', async () => { + // tslint:disable-next-line:no-function-expression + test('Multiple linters', async function () { // tslint:disable-next-line:no-invalid-this this.timeout(40000); From f6707c17c0f664f99186898b048d01a46c0d30fd Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 2 Mar 2018 11:09:59 -0800 Subject: [PATCH 080/103] Simplify --- src/test/linters/lint.test.ts | 31 ++++--------------------------- 1 file changed, 4 insertions(+), 27 deletions(-) diff --git a/src/test/linters/lint.test.ts b/src/test/linters/lint.test.ts index b455ebe160e4..4169e1ae2f1a 100644 --- a/src/test/linters/lint.test.ts +++ b/src/test/linters/lint.test.ts @@ -3,12 +3,12 @@ import * as fs from 'fs-extra'; import * as path from 'path'; import { Uri } from 'vscode'; import * as vscode from 'vscode'; +import { ICommandManager } from '../../client/common/application/types'; import { STANDARD_OUTPUT_CHANNEL } from '../../client/common/constants'; import { Product } from '../../client/common/installer/productInstaller'; import { IConfigurationService, IOutputChannel } from '../../client/common/types'; import { LinterManager } from '../../client/linters/linterManager'; import { ILinterManager, ILintMessage, LintMessageSeverity } from '../../client/linters/types'; -import { StopWatch } from '../../client/telemetry/stopWatch'; import { deleteFile, PythonSettingKeys, rootWorkspaceUri } from '../common'; import { closeActiveWindows, initialize, initializeTest, IS_MULTI_ROOT_TEST } from '../initialize'; import { MockOutputChannel } from '../mockClasses'; @@ -251,43 +251,20 @@ suite('Linting', () => { await configService.updateSettingAsync('linting.pylintUseMinimalCheckers', false, workspaceUri); await testEnablingDisablingOfLinter(Product.pylint, true, file); }); - // tslint:disable-next-line:no-function-expression - test('Multiple linters', async function () { - // 
tslint:disable-next-line:no-invalid-this - this.timeout(40000); - - const sw = new StopWatch(); + test('Multiple linters', async () => { await closeActiveWindows(); - // tslint:disable-next-line:no-console - console.log(`Closed active windows ${sw.elapsedTime}`); const document = await vscode.workspace.openTextDocument(path.join(pythoFilesPath, 'print.py')); - // tslint:disable-next-line:no-console - console.log(`Opened document ${sw.elapsedTime}`); await vscode.window.showTextDocument(document); - // tslint:disable-next-line:no-console - console.log(`Shown document ${sw.elapsedTime}`); - await configService.updateSettingAsync('linting.enabled', true, workspaceUri); - // tslint:disable-next-line:no-console - console.log(`Updated linting.enabled ${sw.elapsedTime}`); await configService.updateSettingAsync('linting.pylintUseMinimalCheckers', false, workspaceUri); - // tslint:disable-next-line:no-console - console.log(`Updated pylintUseMinimalCheckers ${sw.elapsedTime}`); await configService.updateSettingAsync('linting.pylintEnabled', true, workspaceUri); - // tslint:disable-next-line:no-console - console.log(`Updated pylintEnabled ${sw.elapsedTime}`); await configService.updateSettingAsync('linting.flake8Enabled', true, workspaceUri); - // tslint:disable-next-line:no-console - console.log(`Updated flake8Enabled ${sw.elapsedTime}`); - const collection = await vscode.commands.executeCommand('python.runLinting') as vscode.DiagnosticCollection; - // tslint:disable-next-line:no-console - console.log(`Executed command ${sw.elapsedTime}`); + const commands = ioc.serviceContainer.get(ICommandManager); + const collection = await commands.executeCommand('python.runLinting') as vscode.DiagnosticCollection; assert.notEqual(collection, undefined, 'python.runLinting did not return valid diagnostics collection.'); const messages = collection!.get(document.uri); - // tslint:disable-next-line:no-console - console.log(`Got messages ${sw.elapsedTime}`); assert.notEqual(messages!.length, 0, 'No diagnostic messages.'); assert.notEqual(messages!.filter(x => x.source === 'pylint').length, 0, 'No pylint messages.'); assert.notEqual(messages!.filter(x => x.source === 'flake8').length, 0, 'No flake8 messages.'); From 588313bcd14241db03eb774f7a90c984ae7dd606 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 2 Mar 2018 13:08:37 -0800 Subject: [PATCH 081/103] Set timeout --- src/test/linters/lint.test.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/test/linters/lint.test.ts b/src/test/linters/lint.test.ts index 4169e1ae2f1a..65b160da49a9 100644 --- a/src/test/linters/lint.test.ts +++ b/src/test/linters/lint.test.ts @@ -251,7 +251,11 @@ suite('Linting', () => { await configService.updateSettingAsync('linting.pylintUseMinimalCheckers', false, workspaceUri); await testEnablingDisablingOfLinter(Product.pylint, true, file); }); - test('Multiple linters', async () => { + // tslint:disable-next-line:no-function-expression + test('Multiple linters', async function () { + // tslint:disable-next-line:no-invalid-this + this.timeout(40000); + await closeActiveWindows(); const document = await vscode.workspace.openTextDocument(path.join(pythoFilesPath, 'print.py')); await vscode.window.showTextDocument(document); From 21924a0b190febda00fcb65e6534e22edf799da7 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 5 Mar 2018 12:15:35 -0800 Subject: [PATCH 082/103] Better venv detection --- src/client/interpreter/virtualEnvs/index.ts | 10 ++--- src/client/linters/pylint.ts | 3 +- 
.../interpreters/virtualEnvManager.test.ts | 42 +++++++++++++++++++ 3 files changed, 49 insertions(+), 6 deletions(-) create mode 100644 src/test/interpreters/virtualEnvManager.test.ts diff --git a/src/client/interpreter/virtualEnvs/index.ts b/src/client/interpreter/virtualEnvs/index.ts index af5545398696..216787a207bc 100644 --- a/src/client/interpreter/virtualEnvs/index.ts +++ b/src/client/interpreter/virtualEnvs/index.ts @@ -14,12 +14,12 @@ export class VirtualEnvironmentManager implements IVirtualEnvironmentManager { } public async getEnvironmentName(pythonPath: string): Promise { // https://stackoverflow.com/questions/1871549/determine-if-python-is-running-inside-virtualenv - const output = await this.processService.exec(pythonPath, ['-c', 'import sys;print(hasattr(sys, "real_prefix"))']); + // hasattr(sys, 'real_prefix') works for virtualenv while + // '(hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix))' works for venv + const code = 'import sys\nif hasattr(sys, "real_prefix"):\n print("virtualenv")\nif hasattr(sys, "base_prefix") and sys.base_prefix != sys.prefix:\n print("venv")'; + const output = await this.processService.exec(pythonPath, ['-c', code]); if (output.stdout.length > 0) { - const result = output.stdout.trim(); - if (result === 'True') { - return 'virtualenv'; - } + return output.stdout.trim(); } return ''; } diff --git a/src/client/linters/pylint.ts b/src/client/linters/pylint.ts index b5aaf43c3743..923068362be3 100644 --- a/src/client/linters/pylint.ts +++ b/src/client/linters/pylint.ts @@ -41,7 +41,8 @@ export class Pylint extends BaseLinter { && !await Pylint.hasConfigurationFile(this.fileSystem, this.getWorkspaceRootPath(document), this.platformService)) { minArgs = [ '--disable=all', - '--enable=F,E,unreachable,duplicate-key,unnecessary-semicolon,global-variable-not-assigned,unused-variable,unused-wildcard-import,binary-op-exception,bad-format-string,anomalous-backslash-in-string,bad-open-mode' + '--enable=F,E,unreachable,duplicate-key,unnecessary-semicolon,global-variable-not-assigned,unused-variable,unused-wildcard-import,binary-op-exception,bad-format-string,anomalous-backslash-in-string,bad-open-mode', + '–-disable-msg=E1601' ]; } const args = [ diff --git a/src/test/interpreters/virtualEnvManager.test.ts b/src/test/interpreters/virtualEnvManager.test.ts new file mode 100644 index 000000000000..c8a8816ee255 --- /dev/null +++ b/src/test/interpreters/virtualEnvManager.test.ts @@ -0,0 +1,42 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
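+// These tests feed canned stdout through a mocked IProcessService to verify that
+// getEnvironmentName maps the detection script's output ('', 'venv' or 'virtualenv')
+// straight to the environment name suffix.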
+
+import { expect } from 'chai';
+import { Container } from 'inversify';
+import * as TypeMoq from 'typemoq';
+import { IProcessService } from '../../client/common/process/types';
+import { VirtualEnvironmentManager } from '../../client/interpreter/virtualEnvs';
+import { ServiceContainer } from '../../client/ioc/container';
+import { ServiceManager } from '../../client/ioc/serviceManager';
+
+suite('Virtual environment manager', () => {
+    let serviceManager: ServiceManager;
+    let serviceContainer: ServiceContainer;
+    let process: TypeMoq.IMock;
+
+    setup(async () => {
+        const cont = new Container();
+        serviceManager = new ServiceManager(cont);
+        serviceContainer = new ServiceContainer(cont);
+
+        process = TypeMoq.Mock.ofType();
+        serviceManager.addSingletonInstance(IProcessService, process.object);
+    });
+
+    test('Plain Python environment suffix', async () => await testSuffix(''));
+    test('Venv environment suffix', async () => await testSuffix('venv'));
+    test('Virtualenv Python environment suffix', async () => await testSuffix('virtualenv'));
+
+    async function testSuffix(expectedName: string) {
+        const venvManager = new VirtualEnvironmentManager(serviceContainer);
+        process
+            .setup(x => x.exec('python', TypeMoq.It.isAny()))
+            .returns(() => Promise.resolve({
+                stdout: expectedName,
+                stderr: ''
+            }));
+
+        const name = await venvManager.getEnvironmentName('python');
+        expect(name).to.be.equal(expectedName, 'Virtual environment name suffix is incorrect.');
+    }
+});

From 22945063c944b0fa7ec85ac6483594aab23dc23c Mon Sep 17 00:00:00 2001
From: MikhailArkhipov
Date: Mon, 5 Mar 2018 12:20:57 -0800
Subject: [PATCH 083/103] Add test

---
 .../interpreters/virtualEnvManager.test.ts | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/src/test/interpreters/virtualEnvManager.test.ts b/src/test/interpreters/virtualEnvManager.test.ts
index c8a8816ee255..e9050a469885 100644
--- a/src/test/interpreters/virtualEnvManager.test.ts
+++ b/src/test/interpreters/virtualEnvManager.test.ts
@@ -4,7 +4,9 @@
 import { expect } from 'chai';
 import { Container } from 'inversify';
 import * as TypeMoq from 'typemoq';
-import { IProcessService } from '../../client/common/process/types';
+import { BufferDecoder } from '../../client/common/process/decoder';
+import { ProcessService } from '../../client/common/process/proc';
+import { IBufferDecoder, IProcessService } from '../../client/common/process/types';
 import { VirtualEnvironmentManager } from '../../client/interpreter/virtualEnvs';
 import { ServiceContainer } from '../../client/ioc/container';
 import { ServiceManager } from '../../client/ioc/serviceManager';
@@ -18,16 +20,24 @@ suite('Virtual environment manager', () => {
         const cont = new Container();
         serviceManager = new ServiceManager(cont);
         serviceContainer = new ServiceContainer(cont);
-
-        process = TypeMoq.Mock.ofType();
-        serviceManager.addSingletonInstance(IProcessService, process.object);
     });
 
     test('Plain Python environment suffix', async () => await testSuffix(''));
     test('Venv environment suffix', async () => await testSuffix('venv'));
     test('Virtualenv Python environment suffix', async () => await testSuffix('virtualenv'));
 
+    test('Run actual virtual env detection code', async () => {
+        serviceManager.addSingleton(IProcessService, ProcessService);
+        serviceManager.addSingleton(IBufferDecoder, BufferDecoder);
+        const venvManager = new VirtualEnvironmentManager(serviceContainer);
+        const name = await venvManager.getEnvironmentName('python');
+        expect(name).to.be.equal('',
'Running venv detection code failed.'); + }); + async function testSuffix(expectedName: string) { + process = TypeMoq.Mock.ofType(); + serviceManager.addSingletonInstance(IProcessService, process.object); + const venvManager = new VirtualEnvironmentManager(serviceContainer); process .setup(x => x.exec('python', TypeMoq.It.isAny())) From 4af3cb2d0ce9e8a5ca18710149029214965006e4 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 5 Mar 2018 13:17:58 -0800 Subject: [PATCH 084/103] More reliable check --- src/test/interpreters/virtualEnvManager.test.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/test/interpreters/virtualEnvManager.test.ts b/src/test/interpreters/virtualEnvManager.test.ts index e9050a469885..bfd498b55de3 100644 --- a/src/test/interpreters/virtualEnvManager.test.ts +++ b/src/test/interpreters/virtualEnvManager.test.ts @@ -31,7 +31,8 @@ suite('Virtual environment manager', () => { serviceManager.addSingleton(IBufferDecoder, BufferDecoder); const venvManager = new VirtualEnvironmentManager(serviceContainer); const name = await venvManager.getEnvironmentName('python'); - expect(name).to.be.equal('', 'Running venv detection code failed.'); + const result = name === '' || name === 'venv' || name === 'virtualenv'; + expect(result).to.be.equal(true, 'Running venv detection code failed.'); }); async function testSuffix(expectedName: string) { From 4b629f0d56230d7a6773e5735684ffec2c8e8597 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 5 Mar 2018 14:41:46 -0800 Subject: [PATCH 085/103] Fix pylint switch key --- src/client/linters/pylint.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/linters/pylint.ts b/src/client/linters/pylint.ts index 923068362be3..8674585bb91f 100644 --- a/src/client/linters/pylint.ts +++ b/src/client/linters/pylint.ts @@ -42,7 +42,7 @@ export class Pylint extends BaseLinter { minArgs = [ '--disable=all', '--enable=F,E,unreachable,duplicate-key,unnecessary-semicolon,global-variable-not-assigned,unused-variable,unused-wildcard-import,binary-op-exception,bad-format-string,anomalous-backslash-in-string,bad-open-mode', - '–-disable-msg=E1601' + '–-disable=E1601' ]; } const args = [ From 29b9dabb59c140364c44412d39fe36468ed8caf5 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 5 Mar 2018 14:57:21 -0800 Subject: [PATCH 086/103] Remove incorrect flag --- src/client/linters/pylint.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/client/linters/pylint.ts b/src/client/linters/pylint.ts index 8674585bb91f..b5aaf43c3743 100644 --- a/src/client/linters/pylint.ts +++ b/src/client/linters/pylint.ts @@ -41,8 +41,7 @@ export class Pylint extends BaseLinter { && !await Pylint.hasConfigurationFile(this.fileSystem, this.getWorkspaceRootPath(document), this.platformService)) { minArgs = [ '--disable=all', - '--enable=F,E,unreachable,duplicate-key,unnecessary-semicolon,global-variable-not-assigned,unused-variable,unused-wildcard-import,binary-op-exception,bad-format-string,anomalous-backslash-in-string,bad-open-mode', - '–-disable=E1601' + '--enable=F,E,unreachable,duplicate-key,unnecessary-semicolon,global-variable-not-assigned,unused-variable,unused-wildcard-import,binary-op-exception,bad-format-string,anomalous-backslash-in-string,bad-open-mode' ]; } const args = [ From 95ccb363d45ac196e235b5dc2cb0111223ff014c Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 5 Mar 2018 15:14:33 -0800 Subject: [PATCH 087/103] Disable print --- src/client/linters/pylint.ts | 3 ++- 1 file 
changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/client/linters/pylint.ts b/src/client/linters/pylint.ts
index b5aaf43c3743..196071a4d5ab 100644
--- a/src/client/linters/pylint.ts
+++ b/src/client/linters/pylint.ts
@@ -41,7 +41,8 @@ export class Pylint extends BaseLinter {
             && !await Pylint.hasConfigurationFile(this.fileSystem, this.getWorkspaceRootPath(document), this.platformService)) {
             minArgs = [
                 '--disable=all',
-                '--enable=F,E,unreachable,duplicate-key,unnecessary-semicolon,global-variable-not-assigned,unused-variable,unused-wildcard-import,binary-op-exception,bad-format-string,anomalous-backslash-in-string,bad-open-mode'
+                '--enable=F,E,unreachable,duplicate-key,unnecessary-semicolon,global-variable-not-assigned,unused-variable,unused-wildcard-import,binary-op-exception,bad-format-string,anomalous-backslash-in-string,bad-open-mode',
+                '--disable=print-statement'
             ];
         }
         const args = [

From b66c55e7ef26172400b1ac00b60b59550cb16e78 Mon Sep 17 00:00:00 2001
From: MikhailArkhipov
Date: Mon, 5 Mar 2018 15:25:20 -0800
Subject: [PATCH 088/103] Require pylint 1.8 on CI

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 0b17d7594a72..752eee82422f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
 autopep8==1.2.1
 yapf==0.6.2
-pylint==1.5.4
+pylint==1.8.2
 pep8==1.7.0
 prospector==0.11.7
 flake8==2.6.0

From fa3b5e6a41c01d46cb09140c1396ce9e22532804 Mon Sep 17 00:00:00 2001
From: MikhailArkhipov
Date: Mon, 5 Mar 2018 16:17:21 -0800
Subject: [PATCH 089/103] Fix working directory for standalone files

---
 src/client/linters/baseLinter.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/client/linters/baseLinter.ts b/src/client/linters/baseLinter.ts
index 742008ea7346..d941f73246b6 100644
--- a/src/client/linters/baseLinter.ts
+++ b/src/client/linters/baseLinter.ts
@@ -70,7 +70,7 @@ export abstract class BaseLinter implements ILinter {
     protected getWorkspaceRootPath(document: vscode.TextDocument): string {
         const workspaceFolder = this.workspace.getWorkspaceFolder(document.uri);
         const workspaceRootPath = (workspaceFolder && typeof workspaceFolder.uri.fsPath === 'string') ? workspaceFolder.uri.fsPath : undefined;
-        return typeof workspaceRootPath === 'string' ? workspaceRootPath : __dirname;
+        return typeof workspaceRootPath === 'string' ?
workspaceRootPath : path.dirname(document.uri.fsPath); } protected get logger(): ILogger { return this.serviceContainer.get(ILogger); From 707a8cab9a38abd04a23ffd9758974f6a3ecfe76 Mon Sep 17 00:00:00 2001 From: Brett Cannon Date: Mon, 5 Mar 2018 16:43:52 -0800 Subject: [PATCH 090/103] Use an 'elif' --- src/client/interpreter/virtualEnvs/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/interpreter/virtualEnvs/index.ts b/src/client/interpreter/virtualEnvs/index.ts index 216787a207bc..4f535cb99151 100644 --- a/src/client/interpreter/virtualEnvs/index.ts +++ b/src/client/interpreter/virtualEnvs/index.ts @@ -16,7 +16,7 @@ export class VirtualEnvironmentManager implements IVirtualEnvironmentManager { // https://stackoverflow.com/questions/1871549/determine-if-python-is-running-inside-virtualenv // hasattr(sys, 'real_prefix') works for virtualenv while // '(hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix))' works for venv - const code = 'import sys\nif hasattr(sys, "real_prefix"):\n print("virtualenv")\nif hasattr(sys, "base_prefix") and sys.base_prefix != sys.prefix:\n print("venv")'; + const code = 'import sys\nif hasattr(sys, "real_prefix"):\n print("virtualenv")\nelif hasattr(sys, "base_prefix") and sys.base_prefix != sys.prefix:\n print("venv")'; const output = await this.processService.exec(pythonPath, ['-c', code]); if (output.stdout.length > 0) { return output.stdout.trim(); From bd2e936ba5eb973cefdeadf1dbaaf5ea76018f85 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Mon, 5 Mar 2018 22:15:14 -0800 Subject: [PATCH 091/103] Separate file for pylint root config --- src/test/linters/lint.test.ts | 2 +- .../pythonFiles/linting/pylintconfig/file2.py | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 src/test/pythonFiles/linting/pylintconfig/file2.py diff --git a/src/test/linters/lint.test.ts b/src/test/linters/lint.test.ts index 65b160da49a9..8e73a2878abf 100644 --- a/src/test/linters/lint.test.ts +++ b/src/test/linters/lint.test.ts @@ -231,7 +231,7 @@ suite('Linting', () => { }); test('PyLint with config in root', async () => { await fs.copy(path.join(pylintConfigPath, '.pylintrc'), path.join(workspaceUri.fsPath, '.pylintrc')); - await testLinterMessages(Product.pylint, path.join(pylintConfigPath, 'file.py'), []); + await testLinterMessages(Product.pylint, path.join(pylintConfigPath, 'file2.py'), []); }); test('Flake8 with config in root', async () => { await testLinterMessages(Product.flake8, path.join(flake8ConfigPath, 'file.py'), filteredFlake8MessagesToBeReturned); diff --git a/src/test/pythonFiles/linting/pylintconfig/file2.py b/src/test/pythonFiles/linting/pylintconfig/file2.py new file mode 100644 index 000000000000..f375c984aa2e --- /dev/null +++ b/src/test/pythonFiles/linting/pylintconfig/file2.py @@ -0,0 +1,19 @@ +"""pylint option block-disable""" + +__revision__ = None + +class Foo(object): + """block-disable test""" + + def __init__(self): + pass + + def meth1(self, arg): + """meth1""" + print self.blop + + def meth2(self, arg): + """meth2""" + # pylint: disable=unused-argument + print self\ + + "foo" From af573be27372a79d5589e2134002cc753bb54f2a Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Mar 2018 16:26:53 -0800 Subject: [PATCH 092/103] Remove double event listening --- src/client/providers/linterProvider.ts | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/client/providers/linterProvider.ts b/src/client/providers/linterProvider.ts index 
fb66aab3971b..27aa85ffa61f 100644 --- a/src/client/providers/linterProvider.ts +++ b/src/client/providers/linterProvider.ts @@ -9,7 +9,6 @@ import { ConfigSettingMonitor } from '../common/configSettingMonitor'; import { isTestExecution } from '../common/constants'; import { IFileSystem } from '../common/platform/types'; import { IConfigurationService } from '../common/types'; -import { IInterpreterService } from '../interpreter/contracts'; import { IServiceContainer } from '../ioc/types'; import { ILinterManager, ILintingEngine } from '../linters/types'; @@ -17,7 +16,6 @@ export class LinterProvider implements vscode.Disposable { private context: vscode.ExtensionContext; private disposables: vscode.Disposable[]; private configMonitor: ConfigSettingMonitor; - private interpreterService: IInterpreterService; private documents: IDocumentManager; private configuration: IConfigurationService; private linterManager: ILinterManager; @@ -31,12 +29,9 @@ export class LinterProvider implements vscode.Disposable { this.fs = serviceContainer.get(IFileSystem); this.engine = serviceContainer.get(ILintingEngine); this.linterManager = serviceContainer.get(ILinterManager); - this.interpreterService = serviceContainer.get(IInterpreterService); this.documents = serviceContainer.get(IDocumentManager); this.configuration = serviceContainer.get(IConfigurationService); - this.disposables.push(this.interpreterService.onDidChangeInterpreter(() => this.engine.lintOpenPythonFiles())); - this.documents.onDidOpenTextDocument(e => this.onDocumentOpened(e), this.context.subscriptions); this.documents.onDidCloseTextDocument(e => this.onDocumentClosed(e), this.context.subscriptions); this.documents.onDidSaveTextDocument((e) => this.onDocumentSaved(e), this.context.subscriptions); From e240c3fd117c38b9e6fdcbdd1ba2715789fefe48 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Mar 2018 16:35:39 -0800 Subject: [PATCH 093/103] Remove test --- src/test/linters/lint.provider.test.ts | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/test/linters/lint.provider.test.ts b/src/test/linters/lint.provider.test.ts index 023ee86223be..51e49d3d35b9 100644 --- a/src/test/linters/lint.provider.test.ts +++ b/src/test/linters/lint.provider.test.ts @@ -113,16 +113,6 @@ suite('Linting - Provider', () => { engine.verify(x => x.lintDocument(document.object, 'save'), TypeMoq.Times.never()); }); - test('Lint on change interpreters', () => { - const e = new vscode.EventEmitter(); - interpreterService.setup(x => x.onDidChangeInterpreter).returns(() => e.event); - - // tslint:disable-next-line:no-unused-variable - const provider = new LinterProvider(context.object, serviceContainer); - e.fire(); - engine.verify(x => x.lintOpenPythonFiles(), TypeMoq.Times.once()); - }); - test('Lint on save pylintrc', async () => { docManager.setup(x => x.onDidSaveTextDocument).returns(() => emitter.event); document.setup(x => x.uri).returns(() => vscode.Uri.file('.pylintrc')); From d0af3d7b6d6283adea67ec7281743ddf60d70f8b Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Mar 2018 17:02:12 -0800 Subject: [PATCH 094/103] Revert "Remove test" This reverts commit e240c3fd117c38b9e6fdcbdd1ba2715789fefe48. 
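The pair of reverts here and below restores the interpreter-change wiring that the two commits above removed: when the active interpreter changes, the provider re-lints all open Python files through a subscription it keeps in its disposables list. A minimal sketch of that subscribe-and-dispose pattern follows; the class name RelintOnInterpreterChange and the relint parameter are illustrative, and the local type aliases only stand in for the VS Code Event/Disposable shapes.

type Disposable = { dispose(): void };
// The VS Code Event shape: calling the event registers a listener and
// returns a Disposable that unregisters it.
type Event<T> = (listener: (e: T) => void) => Disposable;

class RelintOnInterpreterChange {
    private readonly disposables: Disposable[] = [];

    constructor(onDidChangeInterpreter: Event<void>, relint: () => void) {
        // Keep the subscription so dispose() can unhook it later; this is the
        // line that 'Remove double event listening' deleted and these reverts restore.
        this.disposables.push(onDidChangeInterpreter(() => relint()));
    }

    public dispose(): void {
        this.disposables.forEach(d => d.dispose());
    }
}

Wired to a vscode.EventEmitter<void>, new RelintOnInterpreterChange(emitter.event, () => engine.lintOpenPythonFiles()) reacts once per emitter.fire(), which is essentially what the restored 'Lint on change interpreters' test verifies.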
--- src/test/linters/lint.provider.test.ts | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/test/linters/lint.provider.test.ts b/src/test/linters/lint.provider.test.ts index 51e49d3d35b9..023ee86223be 100644 --- a/src/test/linters/lint.provider.test.ts +++ b/src/test/linters/lint.provider.test.ts @@ -113,6 +113,16 @@ suite('Linting - Provider', () => { engine.verify(x => x.lintDocument(document.object, 'save'), TypeMoq.Times.never()); }); + test('Lint on change interpreters', () => { + const e = new vscode.EventEmitter(); + interpreterService.setup(x => x.onDidChangeInterpreter).returns(() => e.event); + + // tslint:disable-next-line:no-unused-variable + const provider = new LinterProvider(context.object, serviceContainer); + e.fire(); + engine.verify(x => x.lintOpenPythonFiles(), TypeMoq.Times.once()); + }); + test('Lint on save pylintrc', async () => { docManager.setup(x => x.onDidSaveTextDocument).returns(() => emitter.event); document.setup(x => x.uri).returns(() => vscode.Uri.file('.pylintrc')); From b0b878da0e4486cdf30a35321c2461e1347a7179 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Mar 2018 17:02:47 -0800 Subject: [PATCH 095/103] Revert "Remove double event listening" This reverts commit af573be27372a79d5589e2134002cc753bb54f2a. --- src/client/providers/linterProvider.ts | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/client/providers/linterProvider.ts b/src/client/providers/linterProvider.ts index 27aa85ffa61f..fb66aab3971b 100644 --- a/src/client/providers/linterProvider.ts +++ b/src/client/providers/linterProvider.ts @@ -9,6 +9,7 @@ import { ConfigSettingMonitor } from '../common/configSettingMonitor'; import { isTestExecution } from '../common/constants'; import { IFileSystem } from '../common/platform/types'; import { IConfigurationService } from '../common/types'; +import { IInterpreterService } from '../interpreter/contracts'; import { IServiceContainer } from '../ioc/types'; import { ILinterManager, ILintingEngine } from '../linters/types'; @@ -16,6 +17,7 @@ export class LinterProvider implements vscode.Disposable { private context: vscode.ExtensionContext; private disposables: vscode.Disposable[]; private configMonitor: ConfigSettingMonitor; + private interpreterService: IInterpreterService; private documents: IDocumentManager; private configuration: IConfigurationService; private linterManager: ILinterManager; @@ -29,9 +31,12 @@ export class LinterProvider implements vscode.Disposable { this.fs = serviceContainer.get(IFileSystem); this.engine = serviceContainer.get(ILintingEngine); this.linterManager = serviceContainer.get(ILinterManager); + this.interpreterService = serviceContainer.get(IInterpreterService); this.documents = serviceContainer.get(IDocumentManager); this.configuration = serviceContainer.get(IConfigurationService); + this.disposables.push(this.interpreterService.onDidChangeInterpreter(() => this.engine.lintOpenPythonFiles())); + this.documents.onDidOpenTextDocument(e => this.onDocumentOpened(e), this.context.subscriptions); this.documents.onDidCloseTextDocument(e => this.onDocumentClosed(e), this.context.subscriptions); this.documents.onDidSaveTextDocument((e) => this.onDocumentSaved(e), this.context.subscriptions); From bce60e19344246fdf0b76ba43688dacea650abdd Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 7 Mar 2018 09:39:59 -0800 Subject: [PATCH 096/103] Explicitly disable linter --- src/client/common/installer/productInstaller.ts | 8 ++++---- src/client/linters/linterManager.ts | 12 ------------ 
src/client/linters/types.ts | 1 - 3 files changed, 4 insertions(+), 17 deletions(-) diff --git a/src/client/common/installer/productInstaller.ts b/src/client/common/installer/productInstaller.ts index 1d6f34637ea8..7dfb47686985 100644 --- a/src/client/common/installer/productInstaller.ts +++ b/src/client/common/installer/productInstaller.ts @@ -154,18 +154,18 @@ class LinterInstaller extends BaseInstaller { public async promptToInstall(product: Product, resource?: Uri): Promise { const productName = ProductNames.get(product)!; const install = 'Install'; - const disable = 'Disable linting'; + const disableLinting = 'Disable linting'; const response = await this.appShell - .showErrorMessage(`Linter ${productName} is not installed.`, install, disable); + .showErrorMessage(`Linter ${productName} is not installed.`, install, `Disable ${productName}`, disableLinting); if (response === install) { return this.install(product, resource); } const lm = this.serviceContainer.get(ILinterManager); - if (response === disable) { + if (response === disableLinting) { await lm.enableLintingAsync(false); } else { - lm.disableSessionLinting(); + await lm.getLinterInfo(product).enableAsync(false); } return InstallerResponse.Ignore; } diff --git a/src/client/linters/linterManager.ts b/src/client/linters/linterManager.ts index 9b8a9a13d430..4158d8210ebc 100644 --- a/src/client/linters/linterManager.ts +++ b/src/client/linters/linterManager.ts @@ -30,7 +30,6 @@ export class LinterManager implements ILinterManager { private lintingEnabledSettingName = 'enabled'; private linters: ILinterInfo[]; private configService: IConfigurationService; - private disabledForCurrentSession = false; constructor(@inject(IServiceContainer) serviceContainer: IServiceContainer) { this.configService = serviceContainer.get(IConfigurationService); @@ -58,18 +57,11 @@ export class LinterManager implements ILinterManager { } public isLintingEnabled(resource?: Uri): boolean { - if (this.disabledForCurrentSession) { - return false; - } const settings = this.configService.getSettings(resource); return (settings.linting[this.lintingEnabledSettingName] as boolean) && this.getActiveLinters(resource).length > 0; } public async enableLintingAsync(enable: boolean, resource?: Uri): Promise { - if (enable) { - this.disabledForCurrentSession = false; - } - await this.configService.updateSettingAsync(`linting.${this.lintingEnabledSettingName}`, enable, resource); // If nothing is enabled, fix it up to PyLint (default). 
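// A minimal sketch (illustrative names; LinterInfoLike stands in for the real
// ILinterInfo) of the enablement rule the hunk above leaves in place once the
// session-wide disable flag is removed: linting is on only when the master
// setting is true and at least one linter is active.
interface LinterInfoLike { isEnabled(): boolean; }

function lintingEnabled(masterSetting: boolean, linters: LinterInfoLike[]): boolean {
    // Mirrors isLintingEnabled above: linters.some(...) plays the role of
    // getActiveLinters(resource).length > 0, so an empty active-linter list
    // turns linting off even when 'linting.enabled' is true.
    return masterSetting && linters.some(l => l.isEnabled());
}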
@@ -78,10 +70,6 @@ export class LinterManager implements ILinterManager { } } - public disableSessionLinting(): void { - this.disabledForCurrentSession = true; - } - public getActiveLinters(resource?: Uri): ILinterInfo[] { return this.linters.filter(x => x.isEnabled(resource)); } diff --git a/src/client/linters/types.ts b/src/client/linters/types.ts index bbd89558fa47..6a7755eec2ee 100644 --- a/src/client/linters/types.ts +++ b/src/client/linters/types.ts @@ -38,7 +38,6 @@ export interface ILinterManager { getActiveLinters(resource?: vscode.Uri): ILinterInfo[]; isLintingEnabled(resource?: vscode.Uri): boolean; enableLintingAsync(enable: boolean, resource?: vscode.Uri): Promise; - disableSessionLinting(): void; setActiveLintersAsync(products: Product[], resource?: vscode.Uri): Promise; createLinter(product: Product, outputChannel: vscode.OutputChannel, serviceContainer: IServiceContainer, resource?: vscode.Uri): ILinter; } From 3a2e2b71e2d8648250cba6ee7c92a79437f28d08 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 7 Mar 2018 09:57:53 -0800 Subject: [PATCH 097/103] New buttons --- src/client/common/installer/productInstaller.ts | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/client/common/installer/productInstaller.ts b/src/client/common/installer/productInstaller.ts index 7dfb47686985..c75be6451bc8 100644 --- a/src/client/common/installer/productInstaller.ts +++ b/src/client/common/installer/productInstaller.ts @@ -154,17 +154,18 @@ class LinterInstaller extends BaseInstaller { public async promptToInstall(product: Product, resource?: Uri): Promise { const productName = ProductNames.get(product)!; const install = 'Install'; - const disableLinting = 'Disable linting'; + const disableAllLinting = 'Disable linting'; + const disableThisLinter = `Disable ${productName}`; const response = await this.appShell - .showErrorMessage(`Linter ${productName} is not installed.`, install, `Disable ${productName}`, disableLinting); + .showErrorMessage(`Linter ${productName} is not installed.`, install, disableThisLinter, disableAllLinting); if (response === install) { return this.install(product, resource); } const lm = this.serviceContainer.get(ILinterManager); - if (response === disableLinting) { + if (response === disableAllLinting) { await lm.enableLintingAsync(false); - } else { + } else if (response === disableThisLinter) { await lm.getLinterInfo(product).enableAsync(false); } return InstallerResponse.Ignore; From d0c7ab5da3de35a1febeed153db21531ee7c18b6 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 7 Mar 2018 10:54:11 -0800 Subject: [PATCH 098/103] PR feedback --- ptvs/Microsoft.PythonTools.VsCode.deps.json | 784 ++++++++++++++++++ ...soft.PythonTools.VsCode.runtimeconfig.json | 9 + .../common/installer/productInstaller.ts | 2 + src/test/.vscode/settings.json | 3 +- .../workspace1/.vscode/settings.json | 12 +- 5 files changed, 806 insertions(+), 4 deletions(-) create mode 100644 ptvs/Microsoft.PythonTools.VsCode.deps.json create mode 100644 ptvs/Microsoft.PythonTools.VsCode.runtimeconfig.json diff --git a/ptvs/Microsoft.PythonTools.VsCode.deps.json b/ptvs/Microsoft.PythonTools.VsCode.deps.json new file mode 100644 index 000000000000..705ca73ed550 --- /dev/null +++ b/ptvs/Microsoft.PythonTools.VsCode.deps.json @@ -0,0 +1,784 @@ +{ + "runtimeTarget": { + "name": ".NETCoreApp,Version=v2.0", + "signature": "e55460407c60b885fc4f3279d5bf56b22b15c0a1" + }, + "compilationOptions": {}, + "targets": { + ".NETCoreApp,Version=v2.0": { + 
"Microsoft.PythonTools.VsCode/1.0.0": { + "dependencies": { + "MicroBuild.Core": "0.2.0", + "Microsoft.Python.Analysis.Engine": "1.0.0", + "StreamJsonRpc": "1.3.23" + }, + "runtime": { + "Microsoft.PythonTools.VsCode.dll": {} + } + }, + "MicroBuild.Core/0.2.0": {}, + "Microsoft.CSharp/4.0.1": { + "dependencies": { + "System.Collections": "4.0.11", + "System.Diagnostics.Debug": "4.0.11", + "System.Dynamic.Runtime": "4.0.11", + "System.Globalization": "4.0.11", + "System.Linq": "4.1.0", + "System.Linq.Expressions": "4.1.0", + "System.ObjectModel": "4.0.12", + "System.Reflection": "4.1.0", + "System.Reflection.Extensions": "4.0.1", + "System.Reflection.Primitives": "4.0.1", + "System.Reflection.TypeExtensions": "4.1.0", + "System.Resources.ResourceManager": "4.0.1", + "System.Runtime": "4.1.0", + "System.Runtime.Extensions": "4.1.0", + "System.Runtime.InteropServices": "4.1.0", + "System.Threading": "4.0.11" + } + }, + "Microsoft.NETCore.Targets/1.0.1": {}, + "Microsoft.VisualStudio.Threading/15.3.20": { + "dependencies": { + "Microsoft.VisualStudio.Validation": "15.3.15" + }, + "runtime": { + "lib/netstandard1.1/Microsoft.VisualStudio.Threading.dll": {} + }, + "resources": { + "lib/netstandard1.1/cs/Microsoft.VisualStudio.Threading.resources.dll": { + "locale": "cs" + }, + "lib/netstandard1.1/de/Microsoft.VisualStudio.Threading.resources.dll": { + "locale": "de" + }, + "lib/netstandard1.1/es/Microsoft.VisualStudio.Threading.resources.dll": { + "locale": "es" + }, + "lib/netstandard1.1/fr/Microsoft.VisualStudio.Threading.resources.dll": { + "locale": "fr" + }, + "lib/netstandard1.1/it/Microsoft.VisualStudio.Threading.resources.dll": { + "locale": "it" + }, + "lib/netstandard1.1/ja/Microsoft.VisualStudio.Threading.resources.dll": { + "locale": "ja" + }, + "lib/netstandard1.1/ko/Microsoft.VisualStudio.Threading.resources.dll": { + "locale": "ko" + }, + "lib/netstandard1.1/pl/Microsoft.VisualStudio.Threading.resources.dll": { + "locale": "pl" + }, + "lib/netstandard1.1/pt-BR/Microsoft.VisualStudio.Threading.resources.dll": { + "locale": "pt-BR" + }, + "lib/netstandard1.1/ru/Microsoft.VisualStudio.Threading.resources.dll": { + "locale": "ru" + }, + "lib/netstandard1.1/tr/Microsoft.VisualStudio.Threading.resources.dll": { + "locale": "tr" + }, + "lib/netstandard1.1/zh-Hans/Microsoft.VisualStudio.Threading.resources.dll": { + "locale": "zh-Hans" + }, + "lib/netstandard1.1/zh-Hant/Microsoft.VisualStudio.Threading.resources.dll": { + "locale": "zh-Hant" + } + } + }, + "Microsoft.VisualStudio.Validation/15.3.15": { + "runtime": { + "lib/netstandard1.0/Microsoft.VisualStudio.Validation.dll": {} + }, + "resources": { + "lib/netstandard1.0/cs/Microsoft.VisualStudio.Validation.resources.dll": { + "locale": "cs" + }, + "lib/netstandard1.0/de/Microsoft.VisualStudio.Validation.resources.dll": { + "locale": "de" + }, + "lib/netstandard1.0/es/Microsoft.VisualStudio.Validation.resources.dll": { + "locale": "es" + }, + "lib/netstandard1.0/fr/Microsoft.VisualStudio.Validation.resources.dll": { + "locale": "fr" + }, + "lib/netstandard1.0/it/Microsoft.VisualStudio.Validation.resources.dll": { + "locale": "it" + }, + "lib/netstandard1.0/ja/Microsoft.VisualStudio.Validation.resources.dll": { + "locale": "ja" + }, + "lib/netstandard1.0/ko/Microsoft.VisualStudio.Validation.resources.dll": { + "locale": "ko" + }, + "lib/netstandard1.0/pl/Microsoft.VisualStudio.Validation.resources.dll": { + "locale": "pl" + }, + "lib/netstandard1.0/pt-BR/Microsoft.VisualStudio.Validation.resources.dll": { + "locale": "pt-BR" + }, + 
"lib/netstandard1.0/ru/Microsoft.VisualStudio.Validation.resources.dll": { + "locale": "ru" + }, + "lib/netstandard1.0/tr/Microsoft.VisualStudio.Validation.resources.dll": { + "locale": "tr" + }, + "lib/netstandard1.0/zh-Hans/Microsoft.VisualStudio.Validation.resources.dll": { + "locale": "zh-Hans" + }, + "lib/netstandard1.0/zh-Hant/Microsoft.VisualStudio.Validation.resources.dll": { + "locale": "zh-Hant" + } + } + }, + "Newtonsoft.Json/9.0.1": { + "dependencies": { + "Microsoft.CSharp": "4.0.1", + "System.Collections": "4.0.11", + "System.Diagnostics.Debug": "4.0.11", + "System.Dynamic.Runtime": "4.0.11", + "System.Globalization": "4.0.11", + "System.IO": "4.1.0", + "System.Linq": "4.1.0", + "System.Linq.Expressions": "4.1.0", + "System.ObjectModel": "4.0.12", + "System.Reflection": "4.1.0", + "System.Reflection.Extensions": "4.0.1", + "System.Resources.ResourceManager": "4.0.1", + "System.Runtime": "4.1.0", + "System.Runtime.Extensions": "4.1.0", + "System.Runtime.Serialization.Primitives": "4.1.1", + "System.Text.Encoding": "4.0.11", + "System.Text.Encoding.Extensions": "4.0.11", + "System.Text.RegularExpressions": "4.1.0", + "System.Threading": "4.0.11", + "System.Threading.Tasks": "4.0.11", + "System.Xml.ReaderWriter": "4.0.11", + "System.Xml.XDocument": "4.0.11" + }, + "runtime": { + "lib/netstandard1.0/Newtonsoft.Json.dll": {} + } + }, + "StreamJsonRpc/1.3.23": { + "dependencies": { + "Microsoft.VisualStudio.Threading": "15.3.20", + "Newtonsoft.Json": "9.0.1" + }, + "runtime": { + "lib/netstandard1.1/StreamJsonRpc.dll": {} + }, + "resources": { + "lib/netstandard1.1/cs/StreamJsonRpc.resources.dll": { + "locale": "cs" + }, + "lib/netstandard1.1/de/StreamJsonRpc.resources.dll": { + "locale": "de" + }, + "lib/netstandard1.1/es/StreamJsonRpc.resources.dll": { + "locale": "es" + }, + "lib/netstandard1.1/fr/StreamJsonRpc.resources.dll": { + "locale": "fr" + }, + "lib/netstandard1.1/it/StreamJsonRpc.resources.dll": { + "locale": "it" + }, + "lib/netstandard1.1/ja/StreamJsonRpc.resources.dll": { + "locale": "ja" + }, + "lib/netstandard1.1/ko/StreamJsonRpc.resources.dll": { + "locale": "ko" + }, + "lib/netstandard1.1/pl/StreamJsonRpc.resources.dll": { + "locale": "pl" + }, + "lib/netstandard1.1/pt-BR/StreamJsonRpc.resources.dll": { + "locale": "pt-BR" + }, + "lib/netstandard1.1/ru/StreamJsonRpc.resources.dll": { + "locale": "ru" + }, + "lib/netstandard1.1/tr/StreamJsonRpc.resources.dll": { + "locale": "tr" + }, + "lib/netstandard1.1/zh-Hans/StreamJsonRpc.resources.dll": { + "locale": "zh-Hans" + }, + "lib/netstandard1.1/zh-Hant/StreamJsonRpc.resources.dll": { + "locale": "zh-Hant" + } + } + }, + "System.Collections/4.0.11": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.Runtime": "4.1.0" + } + }, + "System.Diagnostics.Debug/4.0.11": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.Runtime": "4.1.0" + } + }, + "System.Diagnostics.Tools/4.0.1": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.Runtime": "4.1.0" + } + }, + "System.Dynamic.Runtime/4.0.11": { + "dependencies": { + "System.Collections": "4.0.11", + "System.Diagnostics.Debug": "4.0.11", + "System.Globalization": "4.0.11", + "System.Linq": "4.1.0", + "System.Linq.Expressions": "4.1.0", + "System.ObjectModel": "4.0.12", + "System.Reflection": "4.1.0", + "System.Reflection.Emit": "4.0.1", + "System.Reflection.Emit.ILGeneration": "4.0.1", + "System.Reflection.Primitives": "4.0.1", + "System.Reflection.TypeExtensions": "4.1.0", + "System.Resources.ResourceManager": 
"4.0.1", + "System.Runtime": "4.1.0", + "System.Runtime.Extensions": "4.1.0", + "System.Threading": "4.0.11" + } + }, + "System.Globalization/4.0.11": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.Runtime": "4.1.0" + } + }, + "System.IO/4.1.0": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.Runtime": "4.1.0", + "System.Text.Encoding": "4.0.11", + "System.Threading.Tasks": "4.0.11" + } + }, + "System.IO.FileSystem/4.0.1": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.IO": "4.1.0", + "System.IO.FileSystem.Primitives": "4.0.1", + "System.Runtime": "4.1.0", + "System.Runtime.Handles": "4.0.1", + "System.Text.Encoding": "4.0.11", + "System.Threading.Tasks": "4.0.11" + } + }, + "System.IO.FileSystem.Primitives/4.0.1": { + "dependencies": { + "System.Runtime": "4.1.0" + } + }, + "System.Linq/4.1.0": { + "dependencies": { + "System.Collections": "4.0.11", + "System.Diagnostics.Debug": "4.0.11", + "System.Resources.ResourceManager": "4.0.1", + "System.Runtime": "4.1.0", + "System.Runtime.Extensions": "4.1.0" + } + }, + "System.Linq.Expressions/4.1.0": { + "dependencies": { + "System.Collections": "4.0.11", + "System.Diagnostics.Debug": "4.0.11", + "System.Globalization": "4.0.11", + "System.IO": "4.1.0", + "System.Linq": "4.1.0", + "System.ObjectModel": "4.0.12", + "System.Reflection": "4.1.0", + "System.Reflection.Emit": "4.0.1", + "System.Reflection.Emit.ILGeneration": "4.0.1", + "System.Reflection.Emit.Lightweight": "4.0.1", + "System.Reflection.Extensions": "4.0.1", + "System.Reflection.Primitives": "4.0.1", + "System.Reflection.TypeExtensions": "4.1.0", + "System.Resources.ResourceManager": "4.0.1", + "System.Runtime": "4.1.0", + "System.Runtime.Extensions": "4.1.0", + "System.Threading": "4.0.11" + } + }, + "System.ObjectModel/4.0.12": { + "dependencies": { + "System.Collections": "4.0.11", + "System.Diagnostics.Debug": "4.0.11", + "System.Resources.ResourceManager": "4.0.1", + "System.Runtime": "4.1.0", + "System.Threading": "4.0.11" + } + }, + "System.Reflection/4.1.0": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.IO": "4.1.0", + "System.Reflection.Primitives": "4.0.1", + "System.Runtime": "4.1.0" + } + }, + "System.Reflection.Emit/4.0.1": { + "dependencies": { + "System.IO": "4.1.0", + "System.Reflection": "4.1.0", + "System.Reflection.Emit.ILGeneration": "4.0.1", + "System.Reflection.Primitives": "4.0.1", + "System.Runtime": "4.1.0" + } + }, + "System.Reflection.Emit.ILGeneration/4.0.1": { + "dependencies": { + "System.Reflection": "4.1.0", + "System.Reflection.Primitives": "4.0.1", + "System.Runtime": "4.1.0" + } + }, + "System.Reflection.Emit.Lightweight/4.0.1": { + "dependencies": { + "System.Reflection": "4.1.0", + "System.Reflection.Emit.ILGeneration": "4.0.1", + "System.Reflection.Primitives": "4.0.1", + "System.Runtime": "4.1.0" + } + }, + "System.Reflection.Extensions/4.0.1": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.Reflection": "4.1.0", + "System.Runtime": "4.1.0" + } + }, + "System.Reflection.Primitives/4.0.1": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.Runtime": "4.1.0" + } + }, + "System.Reflection.TypeExtensions/4.1.0": { + "dependencies": { + "System.Reflection": "4.1.0", + "System.Runtime": "4.1.0" + } + }, + "System.Resources.ResourceManager/4.0.1": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.Globalization": "4.0.11", + "System.Reflection": "4.1.0", + "System.Runtime": "4.1.0" + } + 
}, + "System.Runtime/4.1.0": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1" + } + }, + "System.Runtime.Extensions/4.1.0": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.Runtime": "4.1.0" + } + }, + "System.Runtime.Handles/4.0.1": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.Runtime": "4.1.0" + } + }, + "System.Runtime.InteropServices/4.1.0": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.Reflection": "4.1.0", + "System.Reflection.Primitives": "4.0.1", + "System.Runtime": "4.1.0", + "System.Runtime.Handles": "4.0.1" + } + }, + "System.Runtime.Serialization.Primitives/4.1.1": { + "dependencies": { + "System.Resources.ResourceManager": "4.0.1", + "System.Runtime": "4.1.0" + } + }, + "System.Text.Encoding/4.0.11": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.Runtime": "4.1.0" + } + }, + "System.Text.Encoding.Extensions/4.0.11": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.Runtime": "4.1.0", + "System.Text.Encoding": "4.0.11" + } + }, + "System.Text.RegularExpressions/4.1.0": { + "dependencies": { + "System.Collections": "4.0.11", + "System.Globalization": "4.0.11", + "System.Resources.ResourceManager": "4.0.1", + "System.Runtime": "4.1.0", + "System.Runtime.Extensions": "4.1.0", + "System.Threading": "4.0.11" + } + }, + "System.Threading/4.0.11": { + "dependencies": { + "System.Runtime": "4.1.0", + "System.Threading.Tasks": "4.0.11" + } + }, + "System.Threading.Tasks/4.0.11": { + "dependencies": { + "Microsoft.NETCore.Targets": "1.0.1", + "System.Runtime": "4.1.0" + } + }, + "System.Threading.Tasks.Extensions/4.0.0": { + "dependencies": { + "System.Collections": "4.0.11", + "System.Runtime": "4.1.0", + "System.Threading.Tasks": "4.0.11" + } + }, + "System.Xml.ReaderWriter/4.0.11": { + "dependencies": { + "System.Collections": "4.0.11", + "System.Diagnostics.Debug": "4.0.11", + "System.Globalization": "4.0.11", + "System.IO": "4.1.0", + "System.IO.FileSystem": "4.0.1", + "System.IO.FileSystem.Primitives": "4.0.1", + "System.Resources.ResourceManager": "4.0.1", + "System.Runtime": "4.1.0", + "System.Runtime.Extensions": "4.1.0", + "System.Runtime.InteropServices": "4.1.0", + "System.Text.Encoding": "4.0.11", + "System.Text.Encoding.Extensions": "4.0.11", + "System.Text.RegularExpressions": "4.1.0", + "System.Threading.Tasks": "4.0.11", + "System.Threading.Tasks.Extensions": "4.0.0" + } + }, + "System.Xml.XDocument/4.0.11": { + "dependencies": { + "System.Collections": "4.0.11", + "System.Diagnostics.Debug": "4.0.11", + "System.Diagnostics.Tools": "4.0.1", + "System.Globalization": "4.0.11", + "System.IO": "4.1.0", + "System.Reflection": "4.1.0", + "System.Resources.ResourceManager": "4.0.1", + "System.Runtime": "4.1.0", + "System.Runtime.Extensions": "4.1.0", + "System.Text.Encoding": "4.0.11", + "System.Threading": "4.0.11", + "System.Xml.ReaderWriter": "4.0.11" + } + }, + "Microsoft.Python.Analysis.Engine/1.0.0": { + "dependencies": { + "MicroBuild.Core": "0.2.0" + }, + "runtime": { + "Microsoft.Python.Analysis.Engine.dll": {} + } + } + } + }, + "libraries": { + "Microsoft.PythonTools.VsCode/1.0.0": { + "type": "project", + "serviceable": false, + "sha512": "" + }, + "MicroBuild.Core/0.2.0": { + "type": "package", + "serviceable": true, + "sha512": "sha512-7AwhhKIApmsf7He0m9t49i/0s42YYIu9sf4kHsl3zbzhWyUUwS27cmUKo+Zp6wZ7iGDGoGInDrupUqJI1kaMEw==", + "path": "microbuild.core/0.2.0", + "hashPath": "microbuild.core.0.2.0.nupkg.sha512" + }, + 
"Microsoft.CSharp/4.0.1": { + "type": "package", + "serviceable": true, + "sha512": "sha512-17h8b5mXa87XYKrrVqdgZ38JefSUqLChUQpXgSnpzsM0nDOhE40FTeNWOJ/YmySGV6tG6T8+hjz6vxbknHJr6A==", + "path": "microsoft.csharp/4.0.1", + "hashPath": "microsoft.csharp.4.0.1.nupkg.sha512" + }, + "Microsoft.NETCore.Targets/1.0.1": { + "type": "package", + "serviceable": true, + "sha512": "sha512-rkn+fKobF/cbWfnnfBOQHKVKIOpxMZBvlSHkqDWgBpwGDcLRduvs3D9OLGeV6GWGvVwNlVi2CBbTjuPmtHvyNw==", + "path": "microsoft.netcore.targets/1.0.1", + "hashPath": "microsoft.netcore.targets.1.0.1.nupkg.sha512" + }, + "Microsoft.VisualStudio.Threading/15.3.20": { + "type": "package", + "serviceable": true, + "sha512": "sha512-h/EPnlSJIKBcIPQVTzbWvviKE/avZY5LI5+SiLY5kwQJ+MXyPEoK7ACSFhTMJB7hwGrwPFNXWSc4e9+5Pi3WQQ==", + "path": "microsoft.visualstudio.threading/15.3.20", + "hashPath": "microsoft.visualstudio.threading.15.3.20.nupkg.sha512" + }, + "Microsoft.VisualStudio.Validation/15.3.15": { + "type": "package", + "serviceable": true, + "sha512": "sha512-jRr/QTclHcJmMm1JXr9yYI0NDKvDT7+ciQeNYdAo6SMSk0eH/RJY/JTgPqnRYyCfG9Te1CuRktCcUrV4OGmBYA==", + "path": "microsoft.visualstudio.validation/15.3.15", + "hashPath": "microsoft.visualstudio.validation.15.3.15.nupkg.sha512" + }, + "Newtonsoft.Json/9.0.1": { + "type": "package", + "serviceable": true, + "sha512": "sha512-U82mHQSKaIk+lpSVCbWYKNavmNH1i5xrExDEquU1i6I5pV6UMOqRnJRSlKO3cMPfcpp0RgDY+8jUXHdQ4IfXvw==", + "path": "newtonsoft.json/9.0.1", + "hashPath": "newtonsoft.json.9.0.1.nupkg.sha512" + }, + "StreamJsonRpc/1.3.23": { + "type": "package", + "serviceable": true, + "sha512": "sha512-15yPYvp0HFnBMJbo2fLon+Ssa7VaUSeNpI2nwNCZ5zxZ4Jb4qsHghfwo9O2GdK9LBu037+KJHTK8LPRt0fxDOg==", + "path": "streamjsonrpc/1.3.23", + "hashPath": "streamjsonrpc.1.3.23.nupkg.sha512" + }, + "System.Collections/4.0.11": { + "type": "package", + "serviceable": true, + "sha512": "sha512-YUJGz6eFKqS0V//mLt25vFGrrCvOnsXjlvFQs+KimpwNxug9x0Pzy4PlFMU3Q2IzqAa9G2L4LsK3+9vCBK7oTg==", + "path": "system.collections/4.0.11", + "hashPath": "system.collections.4.0.11.nupkg.sha512" + }, + "System.Diagnostics.Debug/4.0.11": { + "type": "package", + "serviceable": true, + "sha512": "sha512-w5U95fVKHY4G8ASs/K5iK3J5LY+/dLFd4vKejsnI/ZhBsWS9hQakfx3Zr7lRWKg4tAw9r4iktyvsTagWkqYCiw==", + "path": "system.diagnostics.debug/4.0.11", + "hashPath": "system.diagnostics.debug.4.0.11.nupkg.sha512" + }, + "System.Diagnostics.Tools/4.0.1": { + "type": "package", + "serviceable": true, + "sha512": "sha512-xBfJ8pnd4C17dWaC9FM6aShzbJcRNMChUMD42I6772KGGrqaFdumwhn9OdM68erj1ueNo3xdQ1EwiFjK5k8p0g==", + "path": "system.diagnostics.tools/4.0.1", + "hashPath": "system.diagnostics.tools.4.0.1.nupkg.sha512" + }, + "System.Dynamic.Runtime/4.0.11": { + "type": "package", + "serviceable": true, + "sha512": "sha512-db34f6LHYM0U0JpE+sOmjar27BnqTVkbLJhgfwMpTdgTigG/Hna3m2MYVwnFzGGKnEJk2UXFuoVTr8WUbU91/A==", + "path": "system.dynamic.runtime/4.0.11", + "hashPath": "system.dynamic.runtime.4.0.11.nupkg.sha512" + }, + "System.Globalization/4.0.11": { + "type": "package", + "serviceable": true, + "sha512": "sha512-B95h0YLEL2oSnwF/XjqSWKnwKOy/01VWkNlsCeMTFJLLabflpGV26nK164eRs5GiaRSBGpOxQ3pKoSnnyZN5pg==", + "path": "system.globalization/4.0.11", + "hashPath": "system.globalization.4.0.11.nupkg.sha512" + }, + "System.IO/4.1.0": { + "type": "package", + "serviceable": true, + "sha512": "sha512-3KlTJceQc3gnGIaHZ7UBZO26SHL1SHE4ddrmiwumFnId+CEHP+O8r386tZKaE6zlk5/mF8vifMBzHj9SaXN+mQ==", + "path": "system.io/4.1.0", + "hashPath": "system.io.4.1.0.nupkg.sha512" + }, + 
"System.IO.FileSystem/4.0.1": { + "type": "package", + "serviceable": true, + "sha512": "sha512-IBErlVq5jOggAD69bg1t0pJcHaDbJbWNUZTPI96fkYWzwYbN6D9wRHMULLDd9dHsl7C2YsxXL31LMfPI1SWt8w==", + "path": "system.io.filesystem/4.0.1", + "hashPath": "system.io.filesystem.4.0.1.nupkg.sha512" + }, + "System.IO.FileSystem.Primitives/4.0.1": { + "type": "package", + "serviceable": true, + "sha512": "sha512-kWkKD203JJKxJeE74p8aF8y4Qc9r9WQx4C0cHzHPrY3fv/L/IhWnyCHaFJ3H1QPOH6A93whlQ2vG5nHlBDvzWQ==", + "path": "system.io.filesystem.primitives/4.0.1", + "hashPath": "system.io.filesystem.primitives.4.0.1.nupkg.sha512" + }, + "System.Linq/4.1.0": { + "type": "package", + "serviceable": true, + "sha512": "sha512-bQ0iYFOQI0nuTnt+NQADns6ucV4DUvMdwN6CbkB1yj8i7arTGiTN5eok1kQwdnnNWSDZfIUySQY+J3d5KjWn0g==", + "path": "system.linq/4.1.0", + "hashPath": "system.linq.4.1.0.nupkg.sha512" + }, + "System.Linq.Expressions/4.1.0": { + "type": "package", + "serviceable": true, + "sha512": "sha512-I+y02iqkgmCAyfbqOmSDOgqdZQ5tTj80Akm5BPSS8EeB0VGWdy6X1KCoYe8Pk6pwDoAKZUOdLVxnTJcExiv5zw==", + "path": "system.linq.expressions/4.1.0", + "hashPath": "system.linq.expressions.4.1.0.nupkg.sha512" + }, + "System.ObjectModel/4.0.12": { + "type": "package", + "serviceable": true, + "sha512": "sha512-tAgJM1xt3ytyMoW4qn4wIqgJYm7L7TShRZG4+Q4Qsi2PCcj96pXN7nRywS9KkB3p/xDUjc2HSwP9SROyPYDYKQ==", + "path": "system.objectmodel/4.0.12", + "hashPath": "system.objectmodel.4.0.12.nupkg.sha512" + }, + "System.Reflection/4.1.0": { + "type": "package", + "serviceable": true, + "sha512": "sha512-JCKANJ0TI7kzoQzuwB/OoJANy1Lg338B6+JVacPl4TpUwi3cReg3nMLplMq2uqYfHFQpKIlHAUVAJlImZz/4ng==", + "path": "system.reflection/4.1.0", + "hashPath": "system.reflection.4.1.0.nupkg.sha512" + }, + "System.Reflection.Emit/4.0.1": { + "type": "package", + "serviceable": true, + "sha512": "sha512-P2wqAj72fFjpP6wb9nSfDqNBMab+2ovzSDzUZK7MVIm54tBJEPr9jWfSjjoTpPwj1LeKcmX3vr0ttyjSSFM47g==", + "path": "system.reflection.emit/4.0.1", + "hashPath": "system.reflection.emit.4.0.1.nupkg.sha512" + }, + "System.Reflection.Emit.ILGeneration/4.0.1": { + "type": "package", + "serviceable": true, + "sha512": "sha512-Ov6dU8Bu15Bc7zuqttgHF12J5lwSWyTf1S+FJouUXVMSqImLZzYaQ+vRr1rQ0OZ0HqsrwWl4dsKHELckQkVpgA==", + "path": "system.reflection.emit.ilgeneration/4.0.1", + "hashPath": "system.reflection.emit.ilgeneration.4.0.1.nupkg.sha512" + }, + "System.Reflection.Emit.Lightweight/4.0.1": { + "type": "package", + "serviceable": true, + "sha512": "sha512-sSzHHXueZ5Uh0OLpUQprhr+ZYJrLPA2Cmr4gn0wj9+FftNKXx8RIMKvO9qnjk2ebPYUjZ+F2ulGdPOsvj+MEjA==", + "path": "system.reflection.emit.lightweight/4.0.1", + "hashPath": "system.reflection.emit.lightweight.4.0.1.nupkg.sha512" + }, + "System.Reflection.Extensions/4.0.1": { + "type": "package", + "serviceable": true, + "sha512": "sha512-GYrtRsZcMuHF3sbmRHfMYpvxZoIN2bQGrYGerUiWLEkqdEUQZhH3TRSaC/oI4wO0II1RKBPlpIa1TOMxIcOOzQ==", + "path": "system.reflection.extensions/4.0.1", + "hashPath": "system.reflection.extensions.4.0.1.nupkg.sha512" + }, + "System.Reflection.Primitives/4.0.1": { + "type": "package", + "serviceable": true, + "sha512": "sha512-4inTox4wTBaDhB7V3mPvp9XlCbeGYWVEM9/fXALd52vNEAVisc1BoVWQPuUuD0Ga//dNbA/WeMy9u9mzLxGTHQ==", + "path": "system.reflection.primitives/4.0.1", + "hashPath": "system.reflection.primitives.4.0.1.nupkg.sha512" + }, + "System.Reflection.TypeExtensions/4.1.0": { + "type": "package", + "serviceable": true, + "sha512": "sha512-tsQ/ptQ3H5FYfON8lL4MxRk/8kFyE0A+tGPXmVP967cT/gzLHYxIejIYSxp4JmIeFHVP78g/F2FE1mUUTbDtrg==", + "path": 
"system.reflection.typeextensions/4.1.0", + "hashPath": "system.reflection.typeextensions.4.1.0.nupkg.sha512" + }, + "System.Resources.ResourceManager/4.0.1": { + "type": "package", + "serviceable": true, + "sha512": "sha512-TxwVeUNoTgUOdQ09gfTjvW411MF+w9MBYL7AtNVc+HtBCFlutPLhUCdZjNkjbhj3bNQWMdHboF0KIWEOjJssbA==", + "path": "system.resources.resourcemanager/4.0.1", + "hashPath": "system.resources.resourcemanager.4.0.1.nupkg.sha512" + }, + "System.Runtime/4.1.0": { + "type": "package", + "serviceable": true, + "sha512": "sha512-v6c/4Yaa9uWsq+JMhnOFewrYkgdNHNG2eMKuNqRn8P733rNXeRCGvV5FkkjBXn2dbVkPXOsO0xjsEeM1q2zC0g==", + "path": "system.runtime/4.1.0", + "hashPath": "system.runtime.4.1.0.nupkg.sha512" + }, + "System.Runtime.Extensions/4.1.0": { + "type": "package", + "serviceable": true, + "sha512": "sha512-CUOHjTT/vgP0qGW22U4/hDlOqXmcPq5YicBaXdUR2UiUoLwBT+olO6we4DVbq57jeX5uXH2uerVZhf0qGj+sVQ==", + "path": "system.runtime.extensions/4.1.0", + "hashPath": "system.runtime.extensions.4.1.0.nupkg.sha512" + }, + "System.Runtime.Handles/4.0.1": { + "type": "package", + "serviceable": true, + "sha512": "sha512-nCJvEKguXEvk2ymk1gqj625vVnlK3/xdGzx0vOKicQkoquaTBJTP13AIYkocSUwHCLNBwUbXTqTWGDxBTWpt7g==", + "path": "system.runtime.handles/4.0.1", + "hashPath": "system.runtime.handles.4.0.1.nupkg.sha512" + }, + "System.Runtime.InteropServices/4.1.0": { + "type": "package", + "serviceable": true, + "sha512": "sha512-16eu3kjHS633yYdkjwShDHZLRNMKVi/s0bY8ODiqJ2RfMhDMAwxZaUaWVnZ2P71kr/or+X9o/xFWtNqz8ivieQ==", + "path": "system.runtime.interopservices/4.1.0", + "hashPath": "system.runtime.interopservices.4.1.0.nupkg.sha512" + }, + "System.Runtime.Serialization.Primitives/4.1.1": { + "type": "package", + "serviceable": true, + "sha512": "sha512-HZ6Du5QrTG8MNJbf4e4qMO3JRAkIboGT5Fk804uZtg3Gq516S7hAqTm2UZKUHa7/6HUGdVy3AqMQKbns06G/cg==", + "path": "system.runtime.serialization.primitives/4.1.1", + "hashPath": "system.runtime.serialization.primitives.4.1.1.nupkg.sha512" + }, + "System.Text.Encoding/4.0.11": { + "type": "package", + "serviceable": true, + "sha512": "sha512-U3gGeMlDZXxCEiY4DwVLSacg+DFWCvoiX+JThA/rvw37Sqrku7sEFeVBBBMBnfB6FeZHsyDx85HlKL19x0HtZA==", + "path": "system.text.encoding/4.0.11", + "hashPath": "system.text.encoding.4.0.11.nupkg.sha512" + }, + "System.Text.Encoding.Extensions/4.0.11": { + "type": "package", + "serviceable": true, + "sha512": "sha512-jtbiTDtvfLYgXn8PTfWI+SiBs51rrmO4AAckx4KR6vFK9Wzf6tI8kcRdsYQNwriUeQ1+CtQbM1W4cMbLXnj/OQ==", + "path": "system.text.encoding.extensions/4.0.11", + "hashPath": "system.text.encoding.extensions.4.0.11.nupkg.sha512" + }, + "System.Text.RegularExpressions/4.1.0": { + "type": "package", + "serviceable": true, + "sha512": "sha512-i88YCXpRTjCnoSQZtdlHkAOx4KNNik4hMy83n0+Ftlb7jvV6ZiZWMpnEZHhjBp6hQVh8gWd/iKNPzlPF7iyA2g==", + "path": "system.text.regularexpressions/4.1.0", + "hashPath": "system.text.regularexpressions.4.1.0.nupkg.sha512" + }, + "System.Threading/4.0.11": { + "type": "package", + "serviceable": true, + "sha512": "sha512-N+3xqIcg3VDKyjwwCGaZ9HawG9aC6cSDI+s7ROma310GQo8vilFZa86hqKppwTHleR/G0sfOzhvgnUxWCR/DrQ==", + "path": "system.threading/4.0.11", + "hashPath": "system.threading.4.0.11.nupkg.sha512" + }, + "System.Threading.Tasks/4.0.11": { + "type": "package", + "serviceable": true, + "sha512": "sha512-k1S4Gc6IGwtHGT8188RSeGaX86Qw/wnrgNLshJvsdNUOPP9etMmo8S07c+UlOAx4K/xLuN9ivA1bD0LVurtIxQ==", + "path": "system.threading.tasks/4.0.11", + "hashPath": "system.threading.tasks.4.0.11.nupkg.sha512" + }, + "System.Threading.Tasks.Extensions/4.0.0": { + 
"type": "package", + "serviceable": true, + "sha512": "sha512-pH4FZDsZQ/WmgJtN4LWYmRdJAEeVkyriSwrv2Teoe5FOU0Yxlb6II6GL8dBPOfRmutHGATduj3ooMt7dJ2+i+w==", + "path": "system.threading.tasks.extensions/4.0.0", + "hashPath": "system.threading.tasks.extensions.4.0.0.nupkg.sha512" + }, + "System.Xml.ReaderWriter/4.0.11": { + "type": "package", + "serviceable": true, + "sha512": "sha512-ZIiLPsf67YZ9zgr31vzrFaYQqxRPX9cVHjtPSnmx4eN6lbS/yEyYNr2vs1doGDEscF0tjCZFsk9yUg1sC9e8tg==", + "path": "system.xml.readerwriter/4.0.11", + "hashPath": "system.xml.readerwriter.4.0.11.nupkg.sha512" + }, + "System.Xml.XDocument/4.0.11": { + "type": "package", + "serviceable": true, + "sha512": "sha512-Mk2mKmPi0nWaoiYeotq1dgeNK1fqWh61+EK+w4Wu8SWuTYLzpUnschb59bJtGywaPq7SmTuPf44wrXRwbIrukg==", + "path": "system.xml.xdocument/4.0.11", + "hashPath": "system.xml.xdocument.4.0.11.nupkg.sha512" + }, + "Microsoft.Python.Analysis.Engine/1.0.0": { + "type": "project", + "serviceable": false, + "sha512": "" + } + } +} \ No newline at end of file diff --git a/ptvs/Microsoft.PythonTools.VsCode.runtimeconfig.json b/ptvs/Microsoft.PythonTools.VsCode.runtimeconfig.json new file mode 100644 index 000000000000..7539019b10dd --- /dev/null +++ b/ptvs/Microsoft.PythonTools.VsCode.runtimeconfig.json @@ -0,0 +1,9 @@ +{ + "runtimeOptions": { + "tfm": "netcoreapp2.0", + "framework": { + "name": "Microsoft.NETCore.App", + "version": "2.0.0" + } + } +} \ No newline at end of file diff --git a/src/client/common/installer/productInstaller.ts b/src/client/common/installer/productInstaller.ts index c75be6451bc8..c404906d6cb5 100644 --- a/src/client/common/installer/productInstaller.ts +++ b/src/client/common/installer/productInstaller.ts @@ -165,8 +165,10 @@ class LinterInstaller extends BaseInstaller { const lm = this.serviceContainer.get(ILinterManager); if (response === disableAllLinting) { await lm.enableLintingAsync(false); + return InstallerResponse.Disabled; } else if (response === disableThisLinter) { await lm.getLinterInfo(product).enableAsync(false); + return InstallerResponse.Disabled; } return InstallerResponse.Ignore; } diff --git a/src/test/.vscode/settings.json b/src/test/.vscode/settings.json index d0a948e74069..245e42af2f13 100644 --- a/src/test/.vscode/settings.json +++ b/src/test/.vscode/settings.json @@ -22,5 +22,6 @@ "python.linting.pylamaEnabled": false, "python.linting.mypyEnabled": false, "python.formatting.provider": "yapf", - "python.linting.pylintUseMinimalCheckers": false + "python.linting.pylintUseMinimalCheckers": false, + "python.pythonPath": "python" } \ No newline at end of file diff --git a/src/testMultiRootWkspc/workspace1/.vscode/settings.json b/src/testMultiRootWkspc/workspace1/.vscode/settings.json index f4d89e3bc0e4..b7686c301c3f 100644 --- a/src/testMultiRootWkspc/workspace1/.vscode/settings.json +++ b/src/testMultiRootWkspc/workspace1/.vscode/settings.json @@ -1,5 +1,11 @@ { - "python.linting.enabled": false, - "python.linting.flake8Enabled": true, - "python.linting.pylintEnabled": false + "python.linting.enabled": true, + "python.linting.flake8Enabled": false, + "python.linting.pylintEnabled": true, + "python.linting.pylintUseMinimalCheckers": false, + "python.linting.pylamaEnabled": false, + "python.linting.pydocstyleEnabled": false, + "python.linting.mypyEnabled": false, + "python.linting.pep8Enabled": false, + "python.linting.prospectorEnabled": false } From 18930ea5c417111f296beb598ce9cc5cbcccfd71 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 7 Mar 2018 10:54:39 -0800 Subject: [PATCH 099/103] 
Revert "PR feedback" This reverts commit d0c7ab5da3de35a1febeed153db21531ee7c18b6. --- ptvs/Microsoft.PythonTools.VsCode.deps.json | 784 ------------------ ...soft.PythonTools.VsCode.runtimeconfig.json | 9 - .../common/installer/productInstaller.ts | 2 - src/test/.vscode/settings.json | 3 +- .../workspace1/.vscode/settings.json | 12 +- 5 files changed, 4 insertions(+), 806 deletions(-) delete mode 100644 ptvs/Microsoft.PythonTools.VsCode.deps.json delete mode 100644 ptvs/Microsoft.PythonTools.VsCode.runtimeconfig.json diff --git a/ptvs/Microsoft.PythonTools.VsCode.deps.json b/ptvs/Microsoft.PythonTools.VsCode.deps.json deleted file mode 100644 index 705ca73ed550..000000000000 --- a/ptvs/Microsoft.PythonTools.VsCode.deps.json +++ /dev/null @@ -1,784 +0,0 @@ -{ - "runtimeTarget": { - "name": ".NETCoreApp,Version=v2.0", - "signature": "e55460407c60b885fc4f3279d5bf56b22b15c0a1" - }, - "compilationOptions": {}, - "targets": { - ".NETCoreApp,Version=v2.0": { - "Microsoft.PythonTools.VsCode/1.0.0": { - "dependencies": { - "MicroBuild.Core": "0.2.0", - "Microsoft.Python.Analysis.Engine": "1.0.0", - "StreamJsonRpc": "1.3.23" - }, - "runtime": { - "Microsoft.PythonTools.VsCode.dll": {} - } - }, - "MicroBuild.Core/0.2.0": {}, - "Microsoft.CSharp/4.0.1": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Dynamic.Runtime": "4.0.11", - "System.Globalization": "4.0.11", - "System.Linq": "4.1.0", - "System.Linq.Expressions": "4.1.0", - "System.ObjectModel": "4.0.12", - "System.Reflection": "4.1.0", - "System.Reflection.Extensions": "4.0.1", - "System.Reflection.Primitives": "4.0.1", - "System.Reflection.TypeExtensions": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Runtime.InteropServices": "4.1.0", - "System.Threading": "4.0.11" - } - }, - "Microsoft.NETCore.Targets/1.0.1": {}, - "Microsoft.VisualStudio.Threading/15.3.20": { - "dependencies": { - "Microsoft.VisualStudio.Validation": "15.3.15" - }, - "runtime": { - "lib/netstandard1.1/Microsoft.VisualStudio.Threading.dll": {} - }, - "resources": { - "lib/netstandard1.1/cs/Microsoft.VisualStudio.Threading.resources.dll": { - "locale": "cs" - }, - "lib/netstandard1.1/de/Microsoft.VisualStudio.Threading.resources.dll": { - "locale": "de" - }, - "lib/netstandard1.1/es/Microsoft.VisualStudio.Threading.resources.dll": { - "locale": "es" - }, - "lib/netstandard1.1/fr/Microsoft.VisualStudio.Threading.resources.dll": { - "locale": "fr" - }, - "lib/netstandard1.1/it/Microsoft.VisualStudio.Threading.resources.dll": { - "locale": "it" - }, - "lib/netstandard1.1/ja/Microsoft.VisualStudio.Threading.resources.dll": { - "locale": "ja" - }, - "lib/netstandard1.1/ko/Microsoft.VisualStudio.Threading.resources.dll": { - "locale": "ko" - }, - "lib/netstandard1.1/pl/Microsoft.VisualStudio.Threading.resources.dll": { - "locale": "pl" - }, - "lib/netstandard1.1/pt-BR/Microsoft.VisualStudio.Threading.resources.dll": { - "locale": "pt-BR" - }, - "lib/netstandard1.1/ru/Microsoft.VisualStudio.Threading.resources.dll": { - "locale": "ru" - }, - "lib/netstandard1.1/tr/Microsoft.VisualStudio.Threading.resources.dll": { - "locale": "tr" - }, - "lib/netstandard1.1/zh-Hans/Microsoft.VisualStudio.Threading.resources.dll": { - "locale": "zh-Hans" - }, - "lib/netstandard1.1/zh-Hant/Microsoft.VisualStudio.Threading.resources.dll": { - "locale": "zh-Hant" - } - } - }, - "Microsoft.VisualStudio.Validation/15.3.15": { - "runtime": { - 
"lib/netstandard1.0/Microsoft.VisualStudio.Validation.dll": {} - }, - "resources": { - "lib/netstandard1.0/cs/Microsoft.VisualStudio.Validation.resources.dll": { - "locale": "cs" - }, - "lib/netstandard1.0/de/Microsoft.VisualStudio.Validation.resources.dll": { - "locale": "de" - }, - "lib/netstandard1.0/es/Microsoft.VisualStudio.Validation.resources.dll": { - "locale": "es" - }, - "lib/netstandard1.0/fr/Microsoft.VisualStudio.Validation.resources.dll": { - "locale": "fr" - }, - "lib/netstandard1.0/it/Microsoft.VisualStudio.Validation.resources.dll": { - "locale": "it" - }, - "lib/netstandard1.0/ja/Microsoft.VisualStudio.Validation.resources.dll": { - "locale": "ja" - }, - "lib/netstandard1.0/ko/Microsoft.VisualStudio.Validation.resources.dll": { - "locale": "ko" - }, - "lib/netstandard1.0/pl/Microsoft.VisualStudio.Validation.resources.dll": { - "locale": "pl" - }, - "lib/netstandard1.0/pt-BR/Microsoft.VisualStudio.Validation.resources.dll": { - "locale": "pt-BR" - }, - "lib/netstandard1.0/ru/Microsoft.VisualStudio.Validation.resources.dll": { - "locale": "ru" - }, - "lib/netstandard1.0/tr/Microsoft.VisualStudio.Validation.resources.dll": { - "locale": "tr" - }, - "lib/netstandard1.0/zh-Hans/Microsoft.VisualStudio.Validation.resources.dll": { - "locale": "zh-Hans" - }, - "lib/netstandard1.0/zh-Hant/Microsoft.VisualStudio.Validation.resources.dll": { - "locale": "zh-Hant" - } - } - }, - "Newtonsoft.Json/9.0.1": { - "dependencies": { - "Microsoft.CSharp": "4.0.1", - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Dynamic.Runtime": "4.0.11", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.Linq": "4.1.0", - "System.Linq.Expressions": "4.1.0", - "System.ObjectModel": "4.0.12", - "System.Reflection": "4.1.0", - "System.Reflection.Extensions": "4.0.1", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Runtime.Serialization.Primitives": "4.1.1", - "System.Text.Encoding": "4.0.11", - "System.Text.Encoding.Extensions": "4.0.11", - "System.Text.RegularExpressions": "4.1.0", - "System.Threading": "4.0.11", - "System.Threading.Tasks": "4.0.11", - "System.Xml.ReaderWriter": "4.0.11", - "System.Xml.XDocument": "4.0.11" - }, - "runtime": { - "lib/netstandard1.0/Newtonsoft.Json.dll": {} - } - }, - "StreamJsonRpc/1.3.23": { - "dependencies": { - "Microsoft.VisualStudio.Threading": "15.3.20", - "Newtonsoft.Json": "9.0.1" - }, - "runtime": { - "lib/netstandard1.1/StreamJsonRpc.dll": {} - }, - "resources": { - "lib/netstandard1.1/cs/StreamJsonRpc.resources.dll": { - "locale": "cs" - }, - "lib/netstandard1.1/de/StreamJsonRpc.resources.dll": { - "locale": "de" - }, - "lib/netstandard1.1/es/StreamJsonRpc.resources.dll": { - "locale": "es" - }, - "lib/netstandard1.1/fr/StreamJsonRpc.resources.dll": { - "locale": "fr" - }, - "lib/netstandard1.1/it/StreamJsonRpc.resources.dll": { - "locale": "it" - }, - "lib/netstandard1.1/ja/StreamJsonRpc.resources.dll": { - "locale": "ja" - }, - "lib/netstandard1.1/ko/StreamJsonRpc.resources.dll": { - "locale": "ko" - }, - "lib/netstandard1.1/pl/StreamJsonRpc.resources.dll": { - "locale": "pl" - }, - "lib/netstandard1.1/pt-BR/StreamJsonRpc.resources.dll": { - "locale": "pt-BR" - }, - "lib/netstandard1.1/ru/StreamJsonRpc.resources.dll": { - "locale": "ru" - }, - "lib/netstandard1.1/tr/StreamJsonRpc.resources.dll": { - "locale": "tr" - }, - "lib/netstandard1.1/zh-Hans/StreamJsonRpc.resources.dll": { - "locale": "zh-Hans" - }, - 
"lib/netstandard1.1/zh-Hant/StreamJsonRpc.resources.dll": { - "locale": "zh-Hant" - } - } - }, - "System.Collections/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Diagnostics.Debug/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Diagnostics.Tools/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Dynamic.Runtime/4.0.11": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Globalization": "4.0.11", - "System.Linq": "4.1.0", - "System.Linq.Expressions": "4.1.0", - "System.ObjectModel": "4.0.12", - "System.Reflection": "4.1.0", - "System.Reflection.Emit": "4.0.1", - "System.Reflection.Emit.ILGeneration": "4.0.1", - "System.Reflection.Primitives": "4.0.1", - "System.Reflection.TypeExtensions": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Threading": "4.0.11" - } - }, - "System.Globalization/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.IO/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.IO.FileSystem/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.IO": "4.1.0", - "System.IO.FileSystem.Primitives": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Handles": "4.0.1", - "System.Text.Encoding": "4.0.11", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.IO.FileSystem.Primitives/4.0.1": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Linq/4.1.0": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0" - } - }, - "System.Linq.Expressions/4.1.0": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.Linq": "4.1.0", - "System.ObjectModel": "4.0.12", - "System.Reflection": "4.1.0", - "System.Reflection.Emit": "4.0.1", - "System.Reflection.Emit.ILGeneration": "4.0.1", - "System.Reflection.Emit.Lightweight": "4.0.1", - "System.Reflection.Extensions": "4.0.1", - "System.Reflection.Primitives": "4.0.1", - "System.Reflection.TypeExtensions": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Threading": "4.0.11" - } - }, - "System.ObjectModel/4.0.12": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Threading": "4.0.11" - } - }, - "System.Reflection/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.IO": "4.1.0", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Emit/4.0.1": { - "dependencies": { - "System.IO": "4.1.0", - "System.Reflection": "4.1.0", - "System.Reflection.Emit.ILGeneration": "4.0.1", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Emit.ILGeneration/4.0.1": { - 
"dependencies": { - "System.Reflection": "4.1.0", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Emit.Lightweight/4.0.1": { - "dependencies": { - "System.Reflection": "4.1.0", - "System.Reflection.Emit.ILGeneration": "4.0.1", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Extensions/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Primitives/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.TypeExtensions/4.1.0": { - "dependencies": { - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Resources.ResourceManager/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.Globalization": "4.0.11", - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Runtime/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1" - } - }, - "System.Runtime.Extensions/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Runtime.Handles/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Runtime.InteropServices/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.Reflection": "4.1.0", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Handles": "4.0.1" - } - }, - "System.Runtime.Serialization.Primitives/4.1.1": { - "dependencies": { - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Text.Encoding/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Text.Encoding.Extensions/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11" - } - }, - "System.Text.RegularExpressions/4.1.0": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Globalization": "4.0.11", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Threading": "4.0.11" - } - }, - "System.Threading/4.0.11": { - "dependencies": { - "System.Runtime": "4.1.0", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.Threading.Tasks/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Threading.Tasks.Extensions/4.0.0": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Runtime": "4.1.0", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.Xml.ReaderWriter/4.0.11": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.IO.FileSystem": "4.0.1", - "System.IO.FileSystem.Primitives": "4.0.1", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Runtime.InteropServices": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Text.Encoding.Extensions": "4.0.11", - "System.Text.RegularExpressions": "4.1.0", - "System.Threading.Tasks": "4.0.11", - "System.Threading.Tasks.Extensions": "4.0.0" - } - }, - "System.Xml.XDocument/4.0.11": { - "dependencies": { - 
"System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Diagnostics.Tools": "4.0.1", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.Reflection": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Threading": "4.0.11", - "System.Xml.ReaderWriter": "4.0.11" - } - }, - "Microsoft.Python.Analysis.Engine/1.0.0": { - "dependencies": { - "MicroBuild.Core": "0.2.0" - }, - "runtime": { - "Microsoft.Python.Analysis.Engine.dll": {} - } - } - } - }, - "libraries": { - "Microsoft.PythonTools.VsCode/1.0.0": { - "type": "project", - "serviceable": false, - "sha512": "" - }, - "MicroBuild.Core/0.2.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-7AwhhKIApmsf7He0m9t49i/0s42YYIu9sf4kHsl3zbzhWyUUwS27cmUKo+Zp6wZ7iGDGoGInDrupUqJI1kaMEw==", - "path": "microbuild.core/0.2.0", - "hashPath": "microbuild.core.0.2.0.nupkg.sha512" - }, - "Microsoft.CSharp/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-17h8b5mXa87XYKrrVqdgZ38JefSUqLChUQpXgSnpzsM0nDOhE40FTeNWOJ/YmySGV6tG6T8+hjz6vxbknHJr6A==", - "path": "microsoft.csharp/4.0.1", - "hashPath": "microsoft.csharp.4.0.1.nupkg.sha512" - }, - "Microsoft.NETCore.Targets/1.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-rkn+fKobF/cbWfnnfBOQHKVKIOpxMZBvlSHkqDWgBpwGDcLRduvs3D9OLGeV6GWGvVwNlVi2CBbTjuPmtHvyNw==", - "path": "microsoft.netcore.targets/1.0.1", - "hashPath": "microsoft.netcore.targets.1.0.1.nupkg.sha512" - }, - "Microsoft.VisualStudio.Threading/15.3.20": { - "type": "package", - "serviceable": true, - "sha512": "sha512-h/EPnlSJIKBcIPQVTzbWvviKE/avZY5LI5+SiLY5kwQJ+MXyPEoK7ACSFhTMJB7hwGrwPFNXWSc4e9+5Pi3WQQ==", - "path": "microsoft.visualstudio.threading/15.3.20", - "hashPath": "microsoft.visualstudio.threading.15.3.20.nupkg.sha512" - }, - "Microsoft.VisualStudio.Validation/15.3.15": { - "type": "package", - "serviceable": true, - "sha512": "sha512-jRr/QTclHcJmMm1JXr9yYI0NDKvDT7+ciQeNYdAo6SMSk0eH/RJY/JTgPqnRYyCfG9Te1CuRktCcUrV4OGmBYA==", - "path": "microsoft.visualstudio.validation/15.3.15", - "hashPath": "microsoft.visualstudio.validation.15.3.15.nupkg.sha512" - }, - "Newtonsoft.Json/9.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-U82mHQSKaIk+lpSVCbWYKNavmNH1i5xrExDEquU1i6I5pV6UMOqRnJRSlKO3cMPfcpp0RgDY+8jUXHdQ4IfXvw==", - "path": "newtonsoft.json/9.0.1", - "hashPath": "newtonsoft.json.9.0.1.nupkg.sha512" - }, - "StreamJsonRpc/1.3.23": { - "type": "package", - "serviceable": true, - "sha512": "sha512-15yPYvp0HFnBMJbo2fLon+Ssa7VaUSeNpI2nwNCZ5zxZ4Jb4qsHghfwo9O2GdK9LBu037+KJHTK8LPRt0fxDOg==", - "path": "streamjsonrpc/1.3.23", - "hashPath": "streamjsonrpc.1.3.23.nupkg.sha512" - }, - "System.Collections/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-YUJGz6eFKqS0V//mLt25vFGrrCvOnsXjlvFQs+KimpwNxug9x0Pzy4PlFMU3Q2IzqAa9G2L4LsK3+9vCBK7oTg==", - "path": "system.collections/4.0.11", - "hashPath": "system.collections.4.0.11.nupkg.sha512" - }, - "System.Diagnostics.Debug/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-w5U95fVKHY4G8ASs/K5iK3J5LY+/dLFd4vKejsnI/ZhBsWS9hQakfx3Zr7lRWKg4tAw9r4iktyvsTagWkqYCiw==", - "path": "system.diagnostics.debug/4.0.11", - "hashPath": "system.diagnostics.debug.4.0.11.nupkg.sha512" - }, - "System.Diagnostics.Tools/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": 
"sha512-xBfJ8pnd4C17dWaC9FM6aShzbJcRNMChUMD42I6772KGGrqaFdumwhn9OdM68erj1ueNo3xdQ1EwiFjK5k8p0g==", - "path": "system.diagnostics.tools/4.0.1", - "hashPath": "system.diagnostics.tools.4.0.1.nupkg.sha512" - }, - "System.Dynamic.Runtime/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-db34f6LHYM0U0JpE+sOmjar27BnqTVkbLJhgfwMpTdgTigG/Hna3m2MYVwnFzGGKnEJk2UXFuoVTr8WUbU91/A==", - "path": "system.dynamic.runtime/4.0.11", - "hashPath": "system.dynamic.runtime.4.0.11.nupkg.sha512" - }, - "System.Globalization/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-B95h0YLEL2oSnwF/XjqSWKnwKOy/01VWkNlsCeMTFJLLabflpGV26nK164eRs5GiaRSBGpOxQ3pKoSnnyZN5pg==", - "path": "system.globalization/4.0.11", - "hashPath": "system.globalization.4.0.11.nupkg.sha512" - }, - "System.IO/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-3KlTJceQc3gnGIaHZ7UBZO26SHL1SHE4ddrmiwumFnId+CEHP+O8r386tZKaE6zlk5/mF8vifMBzHj9SaXN+mQ==", - "path": "system.io/4.1.0", - "hashPath": "system.io.4.1.0.nupkg.sha512" - }, - "System.IO.FileSystem/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-IBErlVq5jOggAD69bg1t0pJcHaDbJbWNUZTPI96fkYWzwYbN6D9wRHMULLDd9dHsl7C2YsxXL31LMfPI1SWt8w==", - "path": "system.io.filesystem/4.0.1", - "hashPath": "system.io.filesystem.4.0.1.nupkg.sha512" - }, - "System.IO.FileSystem.Primitives/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-kWkKD203JJKxJeE74p8aF8y4Qc9r9WQx4C0cHzHPrY3fv/L/IhWnyCHaFJ3H1QPOH6A93whlQ2vG5nHlBDvzWQ==", - "path": "system.io.filesystem.primitives/4.0.1", - "hashPath": "system.io.filesystem.primitives.4.0.1.nupkg.sha512" - }, - "System.Linq/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-bQ0iYFOQI0nuTnt+NQADns6ucV4DUvMdwN6CbkB1yj8i7arTGiTN5eok1kQwdnnNWSDZfIUySQY+J3d5KjWn0g==", - "path": "system.linq/4.1.0", - "hashPath": "system.linq.4.1.0.nupkg.sha512" - }, - "System.Linq.Expressions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-I+y02iqkgmCAyfbqOmSDOgqdZQ5tTj80Akm5BPSS8EeB0VGWdy6X1KCoYe8Pk6pwDoAKZUOdLVxnTJcExiv5zw==", - "path": "system.linq.expressions/4.1.0", - "hashPath": "system.linq.expressions.4.1.0.nupkg.sha512" - }, - "System.ObjectModel/4.0.12": { - "type": "package", - "serviceable": true, - "sha512": "sha512-tAgJM1xt3ytyMoW4qn4wIqgJYm7L7TShRZG4+Q4Qsi2PCcj96pXN7nRywS9KkB3p/xDUjc2HSwP9SROyPYDYKQ==", - "path": "system.objectmodel/4.0.12", - "hashPath": "system.objectmodel.4.0.12.nupkg.sha512" - }, - "System.Reflection/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-JCKANJ0TI7kzoQzuwB/OoJANy1Lg338B6+JVacPl4TpUwi3cReg3nMLplMq2uqYfHFQpKIlHAUVAJlImZz/4ng==", - "path": "system.reflection/4.1.0", - "hashPath": "system.reflection.4.1.0.nupkg.sha512" - }, - "System.Reflection.Emit/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-P2wqAj72fFjpP6wb9nSfDqNBMab+2ovzSDzUZK7MVIm54tBJEPr9jWfSjjoTpPwj1LeKcmX3vr0ttyjSSFM47g==", - "path": "system.reflection.emit/4.0.1", - "hashPath": "system.reflection.emit.4.0.1.nupkg.sha512" - }, - "System.Reflection.Emit.ILGeneration/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-Ov6dU8Bu15Bc7zuqttgHF12J5lwSWyTf1S+FJouUXVMSqImLZzYaQ+vRr1rQ0OZ0HqsrwWl4dsKHELckQkVpgA==", - "path": "system.reflection.emit.ilgeneration/4.0.1", - "hashPath": "system.reflection.emit.ilgeneration.4.0.1.nupkg.sha512" - }, - "System.Reflection.Emit.Lightweight/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": 
"sha512-sSzHHXueZ5Uh0OLpUQprhr+ZYJrLPA2Cmr4gn0wj9+FftNKXx8RIMKvO9qnjk2ebPYUjZ+F2ulGdPOsvj+MEjA==", - "path": "system.reflection.emit.lightweight/4.0.1", - "hashPath": "system.reflection.emit.lightweight.4.0.1.nupkg.sha512" - }, - "System.Reflection.Extensions/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-GYrtRsZcMuHF3sbmRHfMYpvxZoIN2bQGrYGerUiWLEkqdEUQZhH3TRSaC/oI4wO0II1RKBPlpIa1TOMxIcOOzQ==", - "path": "system.reflection.extensions/4.0.1", - "hashPath": "system.reflection.extensions.4.0.1.nupkg.sha512" - }, - "System.Reflection.Primitives/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-4inTox4wTBaDhB7V3mPvp9XlCbeGYWVEM9/fXALd52vNEAVisc1BoVWQPuUuD0Ga//dNbA/WeMy9u9mzLxGTHQ==", - "path": "system.reflection.primitives/4.0.1", - "hashPath": "system.reflection.primitives.4.0.1.nupkg.sha512" - }, - "System.Reflection.TypeExtensions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-tsQ/ptQ3H5FYfON8lL4MxRk/8kFyE0A+tGPXmVP967cT/gzLHYxIejIYSxp4JmIeFHVP78g/F2FE1mUUTbDtrg==", - "path": "system.reflection.typeextensions/4.1.0", - "hashPath": "system.reflection.typeextensions.4.1.0.nupkg.sha512" - }, - "System.Resources.ResourceManager/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-TxwVeUNoTgUOdQ09gfTjvW411MF+w9MBYL7AtNVc+HtBCFlutPLhUCdZjNkjbhj3bNQWMdHboF0KIWEOjJssbA==", - "path": "system.resources.resourcemanager/4.0.1", - "hashPath": "system.resources.resourcemanager.4.0.1.nupkg.sha512" - }, - "System.Runtime/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-v6c/4Yaa9uWsq+JMhnOFewrYkgdNHNG2eMKuNqRn8P733rNXeRCGvV5FkkjBXn2dbVkPXOsO0xjsEeM1q2zC0g==", - "path": "system.runtime/4.1.0", - "hashPath": "system.runtime.4.1.0.nupkg.sha512" - }, - "System.Runtime.Extensions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-CUOHjTT/vgP0qGW22U4/hDlOqXmcPq5YicBaXdUR2UiUoLwBT+olO6we4DVbq57jeX5uXH2uerVZhf0qGj+sVQ==", - "path": "system.runtime.extensions/4.1.0", - "hashPath": "system.runtime.extensions.4.1.0.nupkg.sha512" - }, - "System.Runtime.Handles/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-nCJvEKguXEvk2ymk1gqj625vVnlK3/xdGzx0vOKicQkoquaTBJTP13AIYkocSUwHCLNBwUbXTqTWGDxBTWpt7g==", - "path": "system.runtime.handles/4.0.1", - "hashPath": "system.runtime.handles.4.0.1.nupkg.sha512" - }, - "System.Runtime.InteropServices/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-16eu3kjHS633yYdkjwShDHZLRNMKVi/s0bY8ODiqJ2RfMhDMAwxZaUaWVnZ2P71kr/or+X9o/xFWtNqz8ivieQ==", - "path": "system.runtime.interopservices/4.1.0", - "hashPath": "system.runtime.interopservices.4.1.0.nupkg.sha512" - }, - "System.Runtime.Serialization.Primitives/4.1.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-HZ6Du5QrTG8MNJbf4e4qMO3JRAkIboGT5Fk804uZtg3Gq516S7hAqTm2UZKUHa7/6HUGdVy3AqMQKbns06G/cg==", - "path": "system.runtime.serialization.primitives/4.1.1", - "hashPath": "system.runtime.serialization.primitives.4.1.1.nupkg.sha512" - }, - "System.Text.Encoding/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-U3gGeMlDZXxCEiY4DwVLSacg+DFWCvoiX+JThA/rvw37Sqrku7sEFeVBBBMBnfB6FeZHsyDx85HlKL19x0HtZA==", - "path": "system.text.encoding/4.0.11", - "hashPath": "system.text.encoding.4.0.11.nupkg.sha512" - }, - "System.Text.Encoding.Extensions/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-jtbiTDtvfLYgXn8PTfWI+SiBs51rrmO4AAckx4KR6vFK9Wzf6tI8kcRdsYQNwriUeQ1+CtQbM1W4cMbLXnj/OQ==", - "path": 
"system.text.encoding.extensions/4.0.11", - "hashPath": "system.text.encoding.extensions.4.0.11.nupkg.sha512" - }, - "System.Text.RegularExpressions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-i88YCXpRTjCnoSQZtdlHkAOx4KNNik4hMy83n0+Ftlb7jvV6ZiZWMpnEZHhjBp6hQVh8gWd/iKNPzlPF7iyA2g==", - "path": "system.text.regularexpressions/4.1.0", - "hashPath": "system.text.regularexpressions.4.1.0.nupkg.sha512" - }, - "System.Threading/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-N+3xqIcg3VDKyjwwCGaZ9HawG9aC6cSDI+s7ROma310GQo8vilFZa86hqKppwTHleR/G0sfOzhvgnUxWCR/DrQ==", - "path": "system.threading/4.0.11", - "hashPath": "system.threading.4.0.11.nupkg.sha512" - }, - "System.Threading.Tasks/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-k1S4Gc6IGwtHGT8188RSeGaX86Qw/wnrgNLshJvsdNUOPP9etMmo8S07c+UlOAx4K/xLuN9ivA1bD0LVurtIxQ==", - "path": "system.threading.tasks/4.0.11", - "hashPath": "system.threading.tasks.4.0.11.nupkg.sha512" - }, - "System.Threading.Tasks.Extensions/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-pH4FZDsZQ/WmgJtN4LWYmRdJAEeVkyriSwrv2Teoe5FOU0Yxlb6II6GL8dBPOfRmutHGATduj3ooMt7dJ2+i+w==", - "path": "system.threading.tasks.extensions/4.0.0", - "hashPath": "system.threading.tasks.extensions.4.0.0.nupkg.sha512" - }, - "System.Xml.ReaderWriter/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-ZIiLPsf67YZ9zgr31vzrFaYQqxRPX9cVHjtPSnmx4eN6lbS/yEyYNr2vs1doGDEscF0tjCZFsk9yUg1sC9e8tg==", - "path": "system.xml.readerwriter/4.0.11", - "hashPath": "system.xml.readerwriter.4.0.11.nupkg.sha512" - }, - "System.Xml.XDocument/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-Mk2mKmPi0nWaoiYeotq1dgeNK1fqWh61+EK+w4Wu8SWuTYLzpUnschb59bJtGywaPq7SmTuPf44wrXRwbIrukg==", - "path": "system.xml.xdocument/4.0.11", - "hashPath": "system.xml.xdocument.4.0.11.nupkg.sha512" - }, - "Microsoft.Python.Analysis.Engine/1.0.0": { - "type": "project", - "serviceable": false, - "sha512": "" - } - } -} \ No newline at end of file diff --git a/ptvs/Microsoft.PythonTools.VsCode.runtimeconfig.json b/ptvs/Microsoft.PythonTools.VsCode.runtimeconfig.json deleted file mode 100644 index 7539019b10dd..000000000000 --- a/ptvs/Microsoft.PythonTools.VsCode.runtimeconfig.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "runtimeOptions": { - "tfm": "netcoreapp2.0", - "framework": { - "name": "Microsoft.NETCore.App", - "version": "2.0.0" - } - } -} \ No newline at end of file diff --git a/src/client/common/installer/productInstaller.ts b/src/client/common/installer/productInstaller.ts index c404906d6cb5..c75be6451bc8 100644 --- a/src/client/common/installer/productInstaller.ts +++ b/src/client/common/installer/productInstaller.ts @@ -165,10 +165,8 @@ class LinterInstaller extends BaseInstaller { const lm = this.serviceContainer.get(ILinterManager); if (response === disableAllLinting) { await lm.enableLintingAsync(false); - return InstallerResponse.Disabled; } else if (response === disableThisLinter) { await lm.getLinterInfo(product).enableAsync(false); - return InstallerResponse.Disabled; } return InstallerResponse.Ignore; } diff --git a/src/test/.vscode/settings.json b/src/test/.vscode/settings.json index 245e42af2f13..d0a948e74069 100644 --- a/src/test/.vscode/settings.json +++ b/src/test/.vscode/settings.json @@ -22,6 +22,5 @@ "python.linting.pylamaEnabled": false, "python.linting.mypyEnabled": false, "python.formatting.provider": "yapf", - "python.linting.pylintUseMinimalCheckers": false, - 
"python.pythonPath": "python" + "python.linting.pylintUseMinimalCheckers": false } \ No newline at end of file diff --git a/src/testMultiRootWkspc/workspace1/.vscode/settings.json b/src/testMultiRootWkspc/workspace1/.vscode/settings.json index b7686c301c3f..f4d89e3bc0e4 100644 --- a/src/testMultiRootWkspc/workspace1/.vscode/settings.json +++ b/src/testMultiRootWkspc/workspace1/.vscode/settings.json @@ -1,11 +1,5 @@ { - "python.linting.enabled": true, - "python.linting.flake8Enabled": false, - "python.linting.pylintEnabled": true, - "python.linting.pylintUseMinimalCheckers": false, - "python.linting.pylamaEnabled": false, - "python.linting.pydocstyleEnabled": false, - "python.linting.mypyEnabled": false, - "python.linting.pep8Enabled": false, - "python.linting.prospectorEnabled": false + "python.linting.enabled": false, + "python.linting.flake8Enabled": true, + "python.linting.pylintEnabled": false } From 27fced6e72aba3ef7e00d6950b6468c19648d856 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 7 Mar 2018 10:55:16 -0800 Subject: [PATCH 100/103] PR feedback --- src/client/common/installer/productInstaller.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/client/common/installer/productInstaller.ts b/src/client/common/installer/productInstaller.ts index c75be6451bc8..c404906d6cb5 100644 --- a/src/client/common/installer/productInstaller.ts +++ b/src/client/common/installer/productInstaller.ts @@ -165,8 +165,10 @@ class LinterInstaller extends BaseInstaller { const lm = this.serviceContainer.get(ILinterManager); if (response === disableAllLinting) { await lm.enableLintingAsync(false); + return InstallerResponse.Disabled; } else if (response === disableThisLinter) { await lm.getLinterInfo(product).enableAsync(false); + return InstallerResponse.Disabled; } return InstallerResponse.Ignore; } From 51136004079bcbd8671f82563d19decd3b958ceb Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 20 Mar 2018 13:15:13 -0700 Subject: [PATCH 101/103] Fix end of comment completion and operator handling --- .gitignore | 1 + src/client/extension.ts | 50 ++++++++++++----------- src/client/language/tokenizer.ts | 8 ++-- src/client/providers/providerUtilities.ts | 15 ++++++- src/test/autocomplete/base.test.ts | 5 ++- src/test/language/tokenizer.test.ts | 4 +- tslint.json | 5 ++- 7 files changed, 53 insertions(+), 35 deletions(-) diff --git a/.gitignore b/.gitignore index 052afbc54035..cc941e968ccb 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,4 @@ coverage/ .venv pythonFiles/experimental/ptvsd/** debug_coverage*/** +analysis/** diff --git a/src/client/extension.ts b/src/client/extension.ts index 4788030e98de..1a2eb6b7cb19 100644 --- a/src/client/extension.ts +++ b/src/client/extension.ts @@ -6,10 +6,12 @@ if ((Reflect as any).metadata === undefined) { require('reflect-metadata'); } import { Container } from 'inversify'; -import * as vscode from 'vscode'; -import { Disposable, Memento, OutputChannel, window } from 'vscode'; +import { + debug, Disposable, DocumentFilter, ExtensionContext, + extensions, IndentAction, languages, Memento, + OutputChannel, window +} from 'vscode'; import { PythonSettings } from './common/configSettings'; -import * as settings from './common/configSettings'; import { STANDARD_OUTPUT_CHANNEL } from './common/constants'; import { FeatureDeprecationManager } from './common/featureDeprecationManager'; import { createDeferred } from './common/helpers'; @@ -61,12 +63,12 @@ import * as tests from './unittests/main'; import { registerTypes as unitTestsRegisterTypes 
} from './unittests/serviceRegistry'; import { WorkspaceSymbols } from './workspaceSymbols/main'; -const PYTHON: vscode.DocumentFilter = { language: 'python' }; +const PYTHON: DocumentFilter = { language: 'python' }; const activationDeferred = createDeferred(); export const activated = activationDeferred.promise; // tslint:disable-next-line:max-func-body-length -export async function activate(context: vscode.ExtensionContext) { +export async function activate(context: ExtensionContext) { const cont = new Container(); const serviceManager = new ServiceManager(cont); const serviceContainer = new ServiceContainer(cont); @@ -95,7 +97,7 @@ export async function activate(context: vscode.ExtensionContext) { serviceManager.get(ICodeExecutionManager).registerCommands(); const persistentStateFactory = serviceManager.get(IPersistentStateFactory); - const pythonSettings = settings.PythonSettings.getInstance(); + const pythonSettings = PythonSettings.getInstance(); // tslint:disable-next-line:no-floating-promises sendStartupTelemetry(activated, serviceContainer); @@ -125,60 +127,60 @@ export async function activate(context: vscode.ExtensionContext) { // Enable indentAction // tslint:disable-next-line:no-non-null-assertion - vscode.languages.setLanguageConfiguration(PYTHON.language!, { + languages.setLanguageConfiguration(PYTHON.language!, { onEnterRules: [ { beforeText: /^\s*(?:def|class|for|if|elif|else|while|try|with|finally|except|async)\b.*/, - action: { indentAction: vscode.IndentAction.Indent } + action: { indentAction: IndentAction.Indent } }, { beforeText: /^\s*#.*/, afterText: /.+$/, - action: { indentAction: vscode.IndentAction.None, appendText: '# ' } + action: { indentAction: IndentAction.None, appendText: '# ' } }, { beforeText: /^\s+(continue|break|return)\b.*/, afterText: /\s+$/, - action: { indentAction: vscode.IndentAction.Outdent } + action: { indentAction: IndentAction.Outdent } } ] }); context.subscriptions.push(jediFactory); - context.subscriptions.push(vscode.languages.registerRenameProvider(PYTHON, new PythonRenameProvider(serviceContainer))); + context.subscriptions.push(languages.registerRenameProvider(PYTHON, new PythonRenameProvider(serviceContainer))); const definitionProvider = new PythonDefinitionProvider(jediFactory); - context.subscriptions.push(vscode.languages.registerDefinitionProvider(PYTHON, definitionProvider)); - context.subscriptions.push(vscode.languages.registerHoverProvider(PYTHON, new PythonHoverProvider(jediFactory))); - context.subscriptions.push(vscode.languages.registerReferenceProvider(PYTHON, new PythonReferenceProvider(jediFactory))); - context.subscriptions.push(vscode.languages.registerCompletionItemProvider(PYTHON, new PythonCompletionItemProvider(jediFactory, serviceContainer), '.')); - context.subscriptions.push(vscode.languages.registerCodeLensProvider(PYTHON, serviceContainer.get(IShebangCodeLensProvider))); + context.subscriptions.push(languages.registerDefinitionProvider(PYTHON, definitionProvider)); + context.subscriptions.push(languages.registerHoverProvider(PYTHON, new PythonHoverProvider(jediFactory))); + context.subscriptions.push(languages.registerReferenceProvider(PYTHON, new PythonReferenceProvider(jediFactory))); + context.subscriptions.push(languages.registerCompletionItemProvider(PYTHON, new PythonCompletionItemProvider(jediFactory, serviceContainer), '.')); + context.subscriptions.push(languages.registerCodeLensProvider(PYTHON, serviceContainer.get(IShebangCodeLensProvider))); const symbolProvider = new 
PythonSymbolProvider(jediFactory); - context.subscriptions.push(vscode.languages.registerDocumentSymbolProvider(PYTHON, symbolProvider)); + context.subscriptions.push(languages.registerDocumentSymbolProvider(PYTHON, symbolProvider)); if (pythonSettings.devOptions.indexOf('DISABLE_SIGNATURE') === -1) { - context.subscriptions.push(vscode.languages.registerSignatureHelpProvider(PYTHON, new PythonSignatureProvider(jediFactory), '(', ',')); + context.subscriptions.push(languages.registerSignatureHelpProvider(PYTHON, new PythonSignatureProvider(jediFactory), '(', ',')); } if (pythonSettings.formatting.provider !== 'none') { const formatProvider = new PythonFormattingEditProvider(context, serviceContainer); - context.subscriptions.push(vscode.languages.registerDocumentFormattingEditProvider(PYTHON, formatProvider)); - context.subscriptions.push(vscode.languages.registerDocumentRangeFormattingEditProvider(PYTHON, formatProvider)); + context.subscriptions.push(languages.registerDocumentFormattingEditProvider(PYTHON, formatProvider)); + context.subscriptions.push(languages.registerDocumentRangeFormattingEditProvider(PYTHON, formatProvider)); } const linterProvider = new LinterProvider(context, serviceContainer); context.subscriptions.push(linterProvider); - const jupyterExtension = vscode.extensions.getExtension('donjayamanne.jupyter'); + const jupyterExtension = extensions.getExtension('donjayamanne.jupyter'); const lintingEngine = serviceContainer.get(ILintingEngine); lintingEngine.linkJupiterExtension(jupyterExtension).ignoreErrors(); tests.activate(context, unitTestOutChannel, symbolProvider, serviceContainer); context.subscriptions.push(new WorkspaceSymbols(serviceContainer)); - context.subscriptions.push(vscode.languages.registerOnTypeFormattingEditProvider(PYTHON, new BlockFormatProviders(), ':')); - context.subscriptions.push(vscode.languages.registerOnTypeFormattingEditProvider(PYTHON, new OnEnterFormatter(), '\n')); + context.subscriptions.push(languages.registerOnTypeFormattingEditProvider(PYTHON, new BlockFormatProviders(), ':')); + context.subscriptions.push(languages.registerOnTypeFormattingEditProvider(PYTHON, new OnEnterFormatter(), '\n')); serviceContainer.getAll(IDebugConfigurationProvider).forEach(debugConfig => { - context.subscriptions.push(vscode.debug.registerDebugConfigurationProvider(debugConfig.debugType, debugConfig)); + context.subscriptions.push(debug.registerDebugConfigurationProvider(debugConfig.debugType, debugConfig)); }); activationDeferred.resolve(); diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index ecd382d96541..c481c4201ac0 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -34,10 +34,10 @@ export class Tokenizer implements ITokenizer { // 'not', 'or', 'pass', 'print', 'raise', 'return', 'True', 'try', // 'while', 'with', 'yield' // ]; - private cs: ICharacterStream; + private cs: ICharacterStream = new CharacterStream(''); private tokens: IToken[] = []; private floatRegex = /[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?/; - private mode: TokenizerMode; + private mode = TokenizerMode.Full; constructor() { //this.floatRegex.compile(); @@ -287,7 +287,7 @@ export class Tokenizer implements ITokenizer { } else if (nextChar === Char.Less) { length = this.cs.lookAhead(2) === Char.Equal ? 3 : 2; } else { - length = 1; + length = nextChar === Char.Equal ? 
2 : 1; } break; @@ -295,7 +295,7 @@ if (nextChar === Char.Greater) { length = this.cs.lookAhead(2) === Char.Equal ? 3 : 2; } else { - length = 1; + length = nextChar === Char.Equal ? 2 : 1; } break; diff --git a/src/client/providers/providerUtilities.ts b/src/client/providers/providerUtilities.ts index 4b113c61eceb..49c6148c0856 100644 --- a/src/client/providers/providerUtilities.ts +++ b/src/client/providers/providerUtilities.ts @@ -13,10 +13,23 @@ export function getDocumentTokens(document: vscode.TextDocument, tokenizeTo: vsc export function isPositionInsideStringOrComment(document: vscode.TextDocument, position: vscode.Position): boolean { const tokenizeTo = position.translate(1, 0); const tokens = getDocumentTokens(document, tokenizeTo, TokenizerMode.CommentsAndStrings); - const index = tokens.getItemContaining(document.offsetAt(position)); + const offset = document.offsetAt(position); + let index = tokens.getItemContaining(offset); if (index >= 0) { const token = tokens.getItemAt(index); return token.type === TokenType.String || token.type === TokenType.Comment; } + if (offset > 0) { + // In case the position is at the very end of a comment or unterminated string + index = tokens.getItemContaining(offset - 1); + if (index >= 0) { + const token = tokens.getItemAt(index); + if (token.end === offset) { + if (token.type === TokenType.Comment) { + return true; + } + } + } + } return false; } diff --git a/src/test/autocomplete/base.test.ts b/src/test/autocomplete/base.test.ts index bf07ac4fd783..80bbf755eb5e 100644 --- a/src/test/autocomplete/base.test.ts +++ b/src/test/autocomplete/base.test.ts @@ -195,10 +195,11 @@ suite('Autocomplete', () => { new vscode.Position(3, 0), // false new vscode.Position(4, 2), // false new vscode.Position(4, 8), // false - new vscode.Position(5, 4) // false + new vscode.Position(5, 4), // false + new vscode.Position(6, 10) // false ]; const expected = [ - false, true, false, false, false, false, false, false, false, false + false, true, false, false, false, false, false, false, false, false, false ]; const textDocument = await vscode.workspace.openTextDocument(fileSuppress); await vscode.window.showTextDocument(textDocument); diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts index e2a5b3f6defb..1d2bf15d2b7b 100644 --- a/src/test/language/tokenizer.test.ts +++ b/src/test/language/tokenizer.test.ts @@ -180,7 +180,7 @@ suite('Language.Tokenizer', () => { }); test('Operators', async () => { const text = '< <> << <<= ' + - '== != > >> >>= ' + + '== != > >> >>= >= <=' + '+ -' + '* ** / /= //=' + '*= += -= **= ' + const tokens = new Tokenizer().tokenize(text); const lengths = [ 1, 2, 2, 3, - 2, 2, 1, 2, 3, + 2, 2, 1, 2, 3, 2, 2, 1, 1, 1, 2, 1, 2, 3, 2, 2, 2, 3, diff --git a/tslint.json b/tslint.json index 2746486ce48a..600e28f64075 100644 --- a/tslint.json +++ b/tslint.json @@ -59,6 +59,7 @@ ], "no-unnecessary-type-assertion": false, "no-submodule-imports": false, - "no-redundant-jsdoc": false + "no-redundant-jsdoc": false, + "binary-expression-operand-order": false } -} +} \ No newline at end of file From c7330e376b375e55ea3284cbf2f63df539a81e11 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 20 Mar 2018 13:57:11 -0700 Subject: [PATCH 102/103] Simplify --- src/client/providers/providerUtilities.ts | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/client/providers/providerUtilities.ts 
b/src/client/providers/providerUtilities.ts index 49c6148c0856..0a4f8274144d 100644 --- a/src/client/providers/providerUtilities.ts +++ b/src/client/providers/providerUtilities.ts @@ -24,11 +24,7 @@ export function isPositionInsideStringOrComment(document: vscode.TextDocument, p index = tokens.getItemContaining(offset - 1); if (index >= 0) { const token = tokens.getItemAt(index); - if (token.end === offset) { - if (token.type === TokenType.Comment) { - return true; - } - } + return token.end === offset && token.type === TokenType.Comment; } } return false; From 01b4bea318113d4764c8599655816915b19f6beb Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 20 Mar 2018 15:12:21 -0700 Subject: [PATCH 103/103] Test typo fix --- src/client/providers/completionSource.ts | 4 ++-- src/test/autocomplete/base.test.ts | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client/providers/completionSource.ts b/src/client/providers/completionSource.ts index 3496f38a6f99..5a2064c9338c 100644 --- a/src/client/providers/completionSource.ts +++ b/src/client/providers/completionSource.ts @@ -61,7 +61,7 @@ export class CompletionSource { const sourceText = `${document.getText(leadingRange)}${itemString}`; const range = new vscode.Range(leadingRange.end, leadingRange.end.translate(0, itemString.length)); - return await this.itemInfoSource.getItemInfoFromText(document.uri, document.fileName, range, sourceText, token); + return this.itemInfoSource.getItemInfoFromText(document.uri, document.fileName, range, sourceText, token); } private async getCompletionResult(document: vscode.TextDocument, position: vscode.Position, token: vscode.CancellationToken) @@ -90,7 +90,7 @@ export class CompletionSource { source: source }; - return await this.jediFactory.getJediProxyHandler(document.uri).sendCommand(cmd, token); + return this.jediFactory.getJediProxyHandler(document.uri).sendCommand(cmd, token); } private toVsCodeCompletions(documentPosition: DocumentPosition, data: proxy.ICompletionResult, resource: vscode.Uri): vscode.CompletionItem[] { diff --git a/src/test/autocomplete/base.test.ts b/src/test/autocomplete/base.test.ts index 80bbf755eb5e..4c4b8fd65992 100644 --- a/src/test/autocomplete/base.test.ts +++ b/src/test/autocomplete/base.test.ts @@ -196,7 +196,7 @@ suite('Autocomplete', () => { new vscode.Position(4, 2), // false new vscode.Position(4, 8), // false new vscode.Position(5, 4), // false - new vscode.Position(6, 10) // false + new vscode.Position(5, 10) // false ]; const expected = [ false, true, false, false, false, false, false, false, false, false, false