diff --git a/.gitignore b/.gitignore
index 9ed7cca..a4e00f0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,7 +19,7 @@ pyShellType.js.map
 *.d.ts
 .pytest_cache
 .vscode/settings.json
-
+python/.vscode/settings.json
 
 ##################################################
 # Default python ignore
diff --git a/index.test.ts b/index.test.ts
index b78a49a..02e1bd5 100644
--- a/index.test.ts
+++ b/index.test.ts
@@ -6,7 +6,8 @@
 // The module 'assert' provides assertion methods from node
 import * as assert from 'assert'
 
-import {PythonEvaluator} from './index'
+import { PythonEvaluator } from './index'
+import { EOL } from 'os';
 
 function isEmpty(obj) {
     return Object.keys(obj).length === 0;
@@ -15,58 +16,70 @@ function isEmpty(obj) {
 suite("python_evaluator Tests", () => {
     let pyEvaluator = new PythonEvaluator()
     let input = {
-      evalCode:"",
-      savedCode: "",
-      filePath: "",
-      usePreviousVariables: false,
-      showGlobalVars: true,
-      default_filter_vars: [],
-      default_filter_types: ["<class 'module'>", "<class 'function'>"]
+        evalCode: "",
+        savedCode: "",
+        filePath: "",
+        usePreviousVariables: false,
+        show_global_vars: true,
+        default_filter_vars: [],
+        default_filter_types: ["<class 'module'>", "<class 'function'>"]
     }
     const pythonStartupTime = 3500
     // python 3.7 has much faster startup time
     // when we drop support for 3.6 we can decrease this
 
-    suiteSetup(function(done){
-        this.timeout(pythonStartupTime+500)
+    suiteSetup(function (done) {
+        this.timeout(pythonStartupTime + 500)
         pyEvaluator.start()
         // wait for python to start
-        setTimeout(()=>done(), pythonStartupTime)
+        setTimeout(() => done(), pythonStartupTime)
+    })
+
+    setup(function () {
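+        // reset handlers so callbacks left over from a previous test don't fire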
+        pyEvaluator.onPrint = () => { }
+        pyEvaluator.onStderr = () => { }
+        pyEvaluator.onResult = () => { }
     })
 
     test("sanity check: 1+1=2", () => {
-        assert.equal(1+1,2)
+        assert.equal(1 + 1, 2)
     })
 
-    test("returns result", function(done){
-        pyEvaluator.onResult = (result)=>{
+    test("returns result", function (done) {
+        pyEvaluator.onResult = (result) => {
             assert.notEqual(result, null)
             done()
         }
+        pyEvaluator.onStderr = (err: string) => {
+            done(err)
+        }
+        pyEvaluator.onPrint = (msg: string) => {
+            done(msg)
+        }
         input.evalCode = "x"
         pyEvaluator.execCode(input)
     })
 
-    test("arepl_store works", function(done){
-        pyEvaluator.onPrint = (result)=>{
-            assert.strictEqual(result, "3")
+    test("arepl_store works", function (done) {
+        pyEvaluator.onPrint = (result) => {
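+            // stdout now comes through raw, so the print's trailing newline is included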
+            assert.strictEqual(result, "3" + EOL)
         }
 
         input.evalCode = "arepl_store=3"
-        pyEvaluator.onResult = ()=>{}
+        pyEvaluator.onResult = () => { }
         pyEvaluator.execCode(input)
 
         let onSecondRun = false
-        pyEvaluator.onResult = (result)=>{
-            if(result.userErrorMsg){
+        pyEvaluator.onResult = (result) => {
+            if (result.userErrorMsg) {
                 done(result.userErrorMsg)
             }
-            else if(!onSecondRun){
+            else if (!onSecondRun) {
                 input.evalCode = "print(arepl_store)"
                 pyEvaluator.execCode(input)
                 onSecondRun = true
             }
-            else{
+            else {
                 done()
             }
         }
@@ -83,10 +96,10 @@ suite("python_evaluator Tests", () => {
     //     pyEvaluator.execCode(input)
     // })
 
-    test("dump returns result", function(done){
+    test("dump returns result", function (done) {
         let gotDump = false
-        pyEvaluator.onResult = (result)=>{
-            if(gotDump) return
+        pyEvaluator.onResult = (result) => {
+            if (gotDump) return
             assert.notEqual(result, null)
             assert.equal(isEmpty(result.userError), true)
             assert.equal(result.internalError, null)
@@ -100,8 +113,25 @@ suite("python_evaluator Tests", () => {
         pyEvaluator.execCode(input)
     })
 
-    test("returns syntax error when incorrect syntax", function(done){
-        pyEvaluator.onResult = (result)=>{ 
+    test("nothing funky happens if dump called again", function (done) {
+        let gotDump = false
+        pyEvaluator.onResult = (result) => {
+            if (gotDump) return
+            assert.notEqual(result, null)
+            assert.equal(isEmpty(result.userError), true)
+            assert.equal(result.internalError, null)
+            assert.equal(result.userVariables['dump output'], 4)
+            assert.equal(result.caller, '<module>')
+            assert.equal(result.lineno, 1)
+            gotDump = true
+            done()
+        }
+        input.evalCode = "from arepl_dump import dump;dump(4)"
+        pyEvaluator.execCode(input)
+    })
+
+    test("returns syntax error when incorrect syntax", function (done) {
+        pyEvaluator.onResult = (result) => {
             assert.notEqual(result.userError, null)
             assert.equal(result.userError.filename, '<string>')
             assert.equal(result.userError.lineno, '1')
@@ -112,8 +142,8 @@ suite("python_evaluator Tests", () => {
         pyEvaluator.execCode(input)
     })
 
-    test("returns user variables", function(done){
-        pyEvaluator.onResult = (result)=>{ 
+    test("returns user variables", function (done) {
+        pyEvaluator.onResult = (result) => {
             assert.equal(result.userVariables['x'], 1)
             done()
         }
@@ -121,8 +151,8 @@ suite("python_evaluator Tests", () => {
         pyEvaluator.execCode(input)
     })
 
-    test("uses previousRun variables asked", function(done){
-        pyEvaluator.onResult = (result)=>{ 
+    test("uses previousRun variables if asked", function (done) {
+        pyEvaluator.onResult = (result) => {
             assert.equal(result.userVariables['y'], 1)
             done()
         }
@@ -132,15 +162,15 @@ suite("python_evaluator Tests", () => {
         input.usePreviousVariables = false
     })
 
-    test("can print stdout", function(done){
+    test("can print stdout", function (done) {
         let hasPrinted = false
-        pyEvaluator.onPrint = (stdout)=>{ 
-            assert.equal(stdout, "hello world")
+        pyEvaluator.onPrint = (stdout) => {
+            assert.equal(stdout, "hello world" + EOL)
             hasPrinted = true
         }
 
         pyEvaluator.onResult = () => {
-            if(!hasPrinted) assert.fail("program has returned result","program should still be printing")
+            if (!hasPrinted) assert.fail("program has returned result", "program should still be printing")
             else done()
         }
 
@@ -148,41 +178,52 @@ suite("python_evaluator Tests", () => {
         pyEvaluator.execCode(input)
     })
 
-    test("can print stderr", function(done){
+    test("can print stdout if no newline", function (done) {
+        let hasPrinted = false
+        pyEvaluator.onPrint = (stdout) => {
+            assert.equal(stdout, "hello world")
+            hasPrinted = true
+        }
+
+        pyEvaluator.onResult = () => {
+            if (!hasPrinted) assert.fail("program has returned result", "program should still be printing")
+            else done()
+        }
+
+        input.evalCode = "print('hello world', end='')"
+        pyEvaluator.execCode(input)
+    })
+
+    test("can print stderr", function (done) {
         let hasLogged = false
-        pyEvaluator.onStderr = (stderr)=>{ 
-            assert.equal(stderr, "hello world\r")
-            // I have nooo clue why the \r is at the end
-            // for some reason python-shell recieves hello world\r\r\n
+        pyEvaluator.onStderr = (stderr) => {
+            assert.equal(stderr, "hello world")
             hasLogged = true
+            done()
         }
 
         pyEvaluator.onResult = (result) => {
-            if(!hasLogged) assert.fail("program has returned result","program should still be logging")
-            else done()
+            setTimeout(() => {
+                if (!hasLogged) assert.fail("program has returned result " + JSON.stringify(result), "program should still be logging")
+            }, 100); // to avoid race conditions, wait a bit in case stderr arrives later
         }
 
-        input.evalCode = "import sys;sys.stderr.write('hello world\\r\\n')"
+        input.evalCode = "import sys;sys.stderr.write('hello world')"
         pyEvaluator.execCode(input)
     })
 
-    test("can print multiple lines", function(done){
+    test("can print multiple lines", function (done) {
         let firstPrint = false
-        let secondPrint = false
 
-        pyEvaluator.onPrint = (stdout)=>{ 
-            if(firstPrint){
-                assert.equal(stdout, '2')
-                secondPrint = true
-            }
-            else{
-                assert.equal(stdout, "1")
-                firstPrint = true
-            }
+        pyEvaluator.onPrint = (stdout) => {
+            // not sure why it is doing this... stdout should be line buffered,
+            // so we should get 1 and 2 separately
+            assert.equal(stdout, '1' + EOL + '2' + EOL)
+            firstPrint = true
         }
 
         pyEvaluator.onResult = () => {
-            if(!secondPrint) assert.fail("program has returned result","program should still be printing")
+            if (!firstPrint) assert.fail("program has returned result", "program should still be printing")
             else done()
         }
 
@@ -190,9 +231,9 @@ suite("python_evaluator Tests", () => {
         pyEvaluator.execCode(input)
     })
 
-    test("returns result after print", function(done){
-        pyEvaluator.onPrint = (stdout)=>{ 
-            assert.equal(stdout, "hello world")
+    test("returns result after print", function (done) {
+        pyEvaluator.onPrint = (stdout) => {
+            assert.equal(stdout, "hello world" + EOL)
             assert.equal(pyEvaluator.executing, true)
         }
 
@@ -205,29 +246,29 @@ suite("python_evaluator Tests", () => {
         pyEvaluator.execCode(input)
     })
 
-    test("can restart", function(done){
+    test("can restart", function (done) {
 
-        this.timeout(this.timeout()+pythonStartupTime)
+        this.timeout(this.timeout() + pythonStartupTime)
 
         assert.equal(pyEvaluator.running, true)
         assert.equal(pyEvaluator.restarting, false)
         assert.equal(pyEvaluator.executing, false)
 
-        pyEvaluator.restart(()=>{
+        pyEvaluator.restart(() => {
             assert.equal(pyEvaluator.running, true)
             assert.equal(pyEvaluator.executing, false)
 
-            setTimeout(()=>{
+            setTimeout(() => {
                 // by now python should be restarted and accepting input
-                pyEvaluator.onResult = ()=>done()
+                pyEvaluator.onResult = () => done()
                 input.evalCode = "x"
                 pyEvaluator.execCode(input)
-            },1500)
+            }, 1500)
         })
     })
 
-    test("strips out unnecessary error info", function(done){
-        pyEvaluator.onResult = (result)=>{
+    test("strips out unnecessary error info", function (done) {
+        pyEvaluator.onResult = (result) => {
             assert.equal(result.userErrorMsg, "Traceback (most recent call last):\n  line 1, in <module>\nNameError: name 'x' is not defined\n")
             done()
         }
@@ -235,8 +276,8 @@ suite("python_evaluator Tests", () => {
         pyEvaluator.execCode(input)
     })
 
-    test("strips out unnecessary error info even with long tracebacks", function(done){
-        pyEvaluator.onResult = (result)=>{
+    test("strips out unnecessary error info even with long tracebacks", function (done) {
+        pyEvaluator.onResult = (result) => {
             // asserting the exact string would result in flaky tests
             // because internal python code could change & the traceback would be different
             // so we just do some generic checks
@@ -250,8 +291,8 @@ suite("python_evaluator Tests", () => {
         pyEvaluator.execCode(input)
     })
 
-    test("strips out unnecessary error info even with multiple tracebacks", function(done){
-        pyEvaluator.onResult = (result)=>{ 
+    test("strips out unnecessary error info even with multiple tracebacks", function (done) {
+        pyEvaluator.onResult = (result) => {
             assert.equal(result.userErrorMsg, `Traceback (most recent call last):
   line 6, in <module>
   line 3, in foo
@@ -278,26 +319,26 @@ except Exception as e:
         pyEvaluator.execCode(input)
     })
 
-    test("prints in real-time", function(done){
+    test("prints in real-time", function (done) {
         let printed = false
 
-        pyEvaluator.onPrint = (stdout)=>{ printed = true }
+        pyEvaluator.onPrint = (stdout) => { printed = true }
         pyEvaluator.onResult = () => { done() }
 
-        setTimeout(()=>{ if(!printed) assert.fail("") }, 25)
+        setTimeout(() => { if (!printed) assert.fail("") }, 25)
 
         input.evalCode = "from time import sleep\nprint('a')\nsleep(.05)\nprint(b)"
         pyEvaluator.execCode(input)
     })
 
-    test("checks syntax", function(done){
-        pyEvaluator.checkSyntax("x=").then(()=>{
+    test("checks syntax", function (done) {
+        pyEvaluator.checkSyntax("x=").then(() => {
             assert.fail("promise should have been rejected")
-        }).catch(()=>{})
+        }).catch(() => { })
 
-        pyEvaluator.checkSyntax("x=1").then(()=>{
+        pyEvaluator.checkSyntax("x=1").then(() => {
             done()
-        }).catch((err)=>{
+        }).catch((err) => {
             assert.fail("syntax was correct there should not have been an error")
         })
     })
diff --git a/index.ts b/index.ts
index 3cbed29..82bcd66 100644
--- a/index.ts
+++ b/index.ts
@@ -1,4 +1,6 @@
-import {PythonShell, Options} from 'python-shell' 
+import { PythonShell, Options } from 'python-shell'
+import { EOL } from 'os'
+import { Readable } from 'stream'
 
 export interface FrameSummary {
 	_line: string
@@ -8,7 +10,7 @@ export interface FrameSummary {
 	name: string
 }
 
-export interface UserError{
+export interface UserError {
 	__cause__: UserError
 	__context__: UserError
 	_str: string
@@ -29,85 +31,85 @@ export interface UserError{
 	text?: string
 }
 
-export interface ExecArgs{
-	evalCode:string,
-	savedCode:string,
-	filePath:string,
-	usePreviousVariables?:boolean,
-	showGlobalVars?:boolean,
-	default_filter_vars:string[],
-	default_filter_types:string[]
+export interface ExecArgs {
+	evalCode: string,
+	savedCode: string,
+	filePath: string,
+	usePreviousVariables?: boolean,
+	show_global_vars?: boolean,
+	default_filter_vars: string[],
+	default_filter_types: string[]
 }
 
-export interface PythonResult{
+export interface PythonResult {
 	userError: UserError,
 	userErrorMsg?: string,
-	userVariables:object,
-	execTime:number,
-	totalPyTime:number,
-	totalTime:number,
-	internalError:string,
+	userVariables: object,
+	execTime: number,
+	totalPyTime: number,
+	totalTime: number,
+	internalError: string,
 	caller: string,
-	lineno:number,
+	lineno: number,
 	done: boolean
 }
 
-export class PythonEvaluator{
-    
-    private static readonly identifier = "6q3co7"
+export class PythonEvaluator {
 	private static readonly areplPythonBackendFolderPath = __dirname + '/python/'
 
     /**
      * whether python is busy executing inputted code
      */
-    executing = false
+	executing = false
 
     /**
      * whether python backend process is running / not running
      */
-    running = false
+	running = false
 
-    restarting = false
-    private startTime:number
+	restarting = false
+	private startTime: number
 
     /**
      * an instance of python-shell. See https://github.com/extrabacon/python-shell
      */
-    pyshell:PythonShell
+	pyshell: PythonShell
 
 	/**
 	 * starts python_evaluator.py 
 	 * @param options Process / Python options. If not specified sensible defaults are inferred. 
 	 */
-	constructor(private options: Options = {}){
+	constructor(private options: Options = {}) {
 
-		if(process.platform == "darwin"){
+		if (process.platform == "darwin") {
 			//needed for Mac to prevent ENOENT
 			process.env.PATH = ["/usr/local/bin", process.env.PATH].join(":")
 		}
 
-		// we want unbuffered mode by default because it can be frustrating to the user
-		// if they run the program but don't see any print output immediately.
-		if(!options.pythonOptions) this.options.pythonOptions = ['-u']
-		if(!options.pythonPath) this.options.pythonPath = PythonShell.defaultPythonPath
-		if(!options.scriptPath) this.options.scriptPath = PythonEvaluator.areplPythonBackendFolderPath
+		// python-shell buffers until a newline is reached in text mode,
+		// so we use binary mode instead to skip python-shell's buffering.
+		// This lets the user flush without a newline.
+		this.options.mode = 'binary'
+		this.options.stdio = ['pipe', 'pipe', 'pipe', 'pipe']
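+		// the extra 4th pipe (fd 3) is a dedicated result channel, read back in start() below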
+		if (!options.pythonPath) this.options.pythonPath = PythonShell.defaultPythonPath
+		if (!options.scriptPath) this.options.scriptPath = PythonEvaluator.areplPythonBackendFolderPath
 	}
 
-	
+
 	/**
 	 * does not do anything if program is currently executing code 
 	 */
-	execCode(code:ExecArgs){
-		if(this.executing) return
+	execCode(code: ExecArgs) {
+		if (this.executing) return
 		this.executing = true
 		this.startTime = Date.now()
-		this.pyshell.send(JSON.stringify(code))
+		this.pyshell.send(JSON.stringify(code) + EOL)
 	}
 
 	/**
 	 * @param {string} message
 	 */
-	sendStdin(message:string){
+	sendStdin(message: string) {
 		this.pyshell.send(message)
 	}
 
@@ -115,14 +117,14 @@ export class PythonEvaluator{
 	 * kills python process and restarts.  Force-kills if necessary after 50ms. 
 	 * After process restarts the callback passed in is invoked
 	 */
-	restart(callback=()=>{}){
+	restart(callback = () => { }) {
 
 		this.restarting = false
 
 		// register callback for restart
 		// using childProcess callback instead of pyshell callback
 		// (pyshell callback only happens when process exits voluntarily)
-		this.pyshell.childProcess.on('exit',()=>{
+		this.pyshell.childProcess.on('exit', () => {
 			this.restarting = true
 			this.executing = false
 			this.start()
@@ -136,18 +138,18 @@ export class PythonEvaluator{
 	 * kills python process.  force-kills if necessary after 50ms.
 	 * you can check python_evaluator.running to see if process is dead yet
 	 */
-	stop(){
+	stop() {
 		// pyshell has 50 ms to die gracefully
 		this.pyshell.childProcess.kill()
 		this.running = !this.pyshell.childProcess.killed
-		if(this.running) console.info("pyshell refused to die")
+		if (this.running) console.info("pyshell refused to die")
 		else this.executing = false
 
-		setTimeout(()=>{
-			if(this.running && !this.restarting){
+		setTimeout(() => {
+			if (this.running && !this.restarting) {
 				// murder the process with extreme prejudice
 				this.pyshell.childProcess.kill('SIGKILL')
-				if(this.pyshell.childProcess.killed){
+				if (this.pyshell.childProcess.killed) {
 					console.error("the python process simply cannot be killed!")
 				}
 				else this.executing = false
@@ -158,14 +160,21 @@ export class PythonEvaluator{
 	/**
 	 * starts python_evaluator.py. Will NOT WORK with python 2
 	 */
-	start(){
+	start() {
 		console.log("Starting Python...")
 		this.pyshell = new PythonShell('arepl_python_evaluator.py', this.options)
-		this.pyshell.on('message', message => {
-			this.handleResult(message)
+
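+		// results arrive on fd 3, the extra pipe configured in the constructor,
+		// so user prints on stdout never get mixed in with the JSON results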
+		// @ts-ignore node is badly typed, stdio can have more than 3 pipes
+		const resultPipe: Readable = this.pyshell.childProcess.stdio[3]
+		resultPipe.on('data', this.handleResult.bind(this))
+
+		// wrap onPrint/onStderr in lambdas so the current handler is looked up at call time
+		// (consumers, including the tests, reassign these callbacks after start() is called)
+		this.pyshell.stdout.on('data', (message: Buffer) => {
+			this.onPrint(message.toString())
 		})
-		this.pyshell.on('stderr', (log)=>{
-			this.onStderr(log)
+		this.pyshell.stderr.on('data', (log: Buffer) => {
+			this.onStderr(log.toString())
 		})
 		this.running = true
 	}
@@ -174,70 +183,63 @@ export class PythonEvaluator{
 	 * Overwrite this with your own handler.
 	 * is called when program fails or completes
 	 */
-	onResult(foo: PythonResult){}
+	onResult(foo: PythonResult) { }
 
 	/**
 	 * Overwrite this with your own handler.
 	 * Is called when program prints
 	 * @param {string} foo
 	 */
-	onPrint(foo: string){}
+	onPrint(foo: string) { }
 
 	/**
 	 * Overwrite this with your own handler. 
 	 * Is called when program logs stderr
 	 * @param {string} foo
 	 */
-	onStderr(foo: string){}
+	onStderr(foo: string) { }
 
 	/**
 	 * handles pyshell results and calls onResult / onPrint
 	 * @param {string} results 
 	 */
-	handleResult(results:string) {
-		let pyResult:PythonResult = {
-			userError:null,
+	handleResult(results: string) {
+		let pyResult: PythonResult = {
+			userError: null,
 			userErrorMsg: "",
 			userVariables: {},
-            execTime:0,
-            totalTime:0,
-			totalPyTime:0,
-			internalError:"",
-			caller:"",
-			lineno:-1,
-			done:true
+			execTime: 0,
+			totalTime: 0,
+			totalPyTime: 0,
+			internalError: "",
+			caller: "",
+			lineno: -1,
+			done: true
 		}
 
-        //result should have identifier, otherwise it is just a printout from users code
-        if(results.startsWith(PythonEvaluator.identifier)){
-			try {
-				results = results.replace(PythonEvaluator.identifier,"")
-				pyResult = JSON.parse(results)
-				this.executing = !pyResult['done']
-				
-				pyResult.execTime = pyResult.execTime*1000 // convert into ms
-				pyResult.totalPyTime = pyResult.totalPyTime*1000
-				
-				//@ts-ignore pyResult.userVariables is sent to as string, we convert to object
-				pyResult.userVariables = JSON.parse(pyResult.userVariables)
-				//@ts-ignore pyResult.userError is sent to as string, we convert to object
-				pyResult.userError = pyResult.userError ? JSON.parse(pyResult.userError) : {}
-
-				if(pyResult.userErrorMsg){
-					pyResult.userErrorMsg = this.formatPythonException(pyResult.userErrorMsg)
-				}
-				pyResult.totalTime = Date.now()-this.startTime
-				this.onResult(pyResult)
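+		// results now arrive on the dedicated result pipe, so every message is a
+		// JSON payload and the old identifier-prefix check is no longer needed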
+		try {
+			pyResult = JSON.parse(results)
+			this.executing = !pyResult['done']
 
-			} catch (err) {
-				if (err instanceof Error){
-					err.message = err.message+"\nresults: "+results
-				}
-				throw err
+			pyResult.execTime = pyResult.execTime * 1000 // convert into ms
+			pyResult.totalPyTime = pyResult.totalPyTime * 1000
+
+			//@ts-ignore pyResult.userVariables is sent as a string, we convert it to an object
+			pyResult.userVariables = JSON.parse(pyResult.userVariables)
+			//@ts-ignore pyResult.userError is sent as a string, we convert it to an object
+			pyResult.userError = pyResult.userError ? JSON.parse(pyResult.userError) : {}
+
+			if (pyResult.userErrorMsg) {
+				pyResult.userErrorMsg = this.formatPythonException(pyResult.userErrorMsg)
 			}
-		}
-        else{
-            this.onPrint(results)
+			pyResult.totalTime = Date.now() - this.startTime
+			this.onResult(pyResult)
+
+		} catch (err) {
+			if (err instanceof Error) {
+				err.message = err.message + "\nresults: " + results
+			}
+			throw err
 		}
 	}
 
@@ -246,7 +248,7 @@ export class PythonEvaluator{
 	 * @param {string} code
 	 * @returns {Promise} rejects w/ stderr if syntax failure
 	 */
-	async checkSyntax(code:string){
+	async checkSyntax(code: string) {
 		return PythonShell.checkSyntax(code);
 	}
 
@@ -255,7 +257,7 @@ export class PythonEvaluator{
 	 * @param {string} filePath
 	 * @returns {Promise} rejects w/ stderr if syntax failure
 	 */
-	async checkSyntaxFile(filePath:string){
+	async checkSyntaxFile(filePath: string) {
 		// note that this should really be done in python_evaluator.py
 		// but communication with that happens through just one channel (stdin/stdout)
 		// so for now I prefer to keep this separate
@@ -268,7 +270,7 @@ export class PythonEvaluator{
 	 * @example err:
 	 * Traceback (most recent call last):\n  File "<string>", line 1, in <module>\nNameError: name \'x\' is not defined\n
 	 */
-	formatPythonException(err:string){
+	formatPythonException(err: string) {
 		//replace File "<string>" (pointless)
 		err = err.replace(/File \"<string>\", /g, "")
 		return err
@@ -279,9 +281,9 @@ export class PythonEvaluator{
 	 * Useful for real-time execution so execCode doesn't get called too often
 	 * thanks to https://stackoverflow.com/a/1909508/6629672
 	 */
-	debounce = (function(){
-		let timer:any = 0;
-		return function(callback, ms: number, ...args: any[]){
+	debounce = (function () {
+		let timer: any = 0;
+		return function (callback, ms: number, ...args: any[]) {
 			clearTimeout(timer);
 			timer = setTimeout(callback, ms, args);
 		};
diff --git a/package-lock.json b/package-lock.json
index 0f48edf..5e95bfa 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -2201,9 +2201,9 @@
       "dev": true
     },
     "@types/node": {
-      "version": "10.14.7",
-      "resolved": "https://registry.npmjs.org/@types/node/-/node-10.14.7.tgz",
-      "integrity": "sha512-on4MmIDgHXiuJDELPk1NFaKVUxxCFr37tm8E9yN6rAiF5Pzp/9bBfBHkoexqRiY+hk/Z04EJU9kKEb59YqJ82A==",
+      "version": "12.12.37",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-12.12.37.tgz",
+      "integrity": "sha512-4mXKoDptrXAwZErQHrLzpe0FN/0Wmf5JRniSVIdwUrtDf9wnmEV1teCNLBo/TwuXhkK/bVegoEn/wmb+x0AuPg==",
       "dev": true
     },
     "@types/normalize-package-data": {
@@ -11131,9 +11131,9 @@
       "dev": true
     },
     "python-shell": {
-      "version": "1.0.8",
-      "resolved": "https://registry.npmjs.org/python-shell/-/python-shell-1.0.8.tgz",
-      "integrity": "sha512-jMKagerg3alm6j+Prq5t/M3dTgEppy5vC6ns+LqAjfuHiT8olfK3PMokpqpeEcWEqvDnUcAOhp6SQzaLBtTzRw=="
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/python-shell/-/python-shell-2.0.0.tgz",
+      "integrity": "sha512-0FCKvCPs0Bl09AGfuzd5kHVdg/SYtkVPt5sILpuJWNJ7JQ+QKAdd93TYg/AfLR6/uldvZopSk3F2vUQ4IY50gg=="
     },
     "q": {
       "version": "1.5.1",
diff --git a/package.json b/package.json
index 1ff8b8d..a5a9c7d 100644
--- a/package.json
+++ b/package.json
@@ -7,7 +7,7 @@
     "compile": "tsc -watch -p ./",
     "compileOnce": "tsc",
     "declaration": "tsc --target es6 --declaration index.ts",
-    "test": "npm run compileOnce && mocha --ui tdd *.test.js --exit",
+    "test": "mocha -r ts-node/register --ui tdd *.test.ts --exit",
     "appveyorTest": "mocha --ui tdd --reporter mocha-appveyor-reporter *.test.js --exit",
     "document": "documentation readme index.js --section=API",
     "prePublish": "npm run compileOnce && npm run test && npm run document",
@@ -36,7 +36,7 @@
   "license": "MIT",
   "devDependencies": {
     "@types/mocha": "^5.2.5",
-    "@types/node": "^10.5.2",
+    "@types/node": "^12.11.7",
     "documentation": "^12.1.4",
     "mocha": "^5.2.0",
     "mocha-appveyor-reporter": "^0.4.0",
@@ -46,6 +46,6 @@
     "typescript": "^3.6.3"
   },
   "dependencies": {
-    "python-shell": "^1.0.8"
+    "python-shell": "^2.0.0"
   }
 }
diff --git a/python/arepl_module_logic.py b/python/arepl_module_logic.py
index ad93ad3..e6e3d34 100644
--- a/python/arepl_module_logic.py
+++ b/python/arepl_module_logic.py
@@ -3,7 +3,8 @@
 from sys import modules
 from pkgutil import iter_modules
 from arepl_stdlib_list import stdlib_list
-from typing import List,Set
+from typing import List, Set
+
 
 def get_non_user_modules() -> Set[str]:
     """returns a set of all modules not written by the user (aka all builtin and pip modules)
@@ -20,6 +21,4 @@ def get_non_user_modules() -> Set[str]:
     even_more_builtin_modules = [k for k in modules]
     # how many damn modules are there???
 
-    return set(
-        pip_modules + list(builtin_module_names) + more_builtin_modules + even_more_builtin_modules
-    )
+    return set(pip_modules + list(builtin_module_names) + more_builtin_modules + even_more_builtin_modules)
diff --git a/python/arepl_python_evaluator.py b/python/arepl_python_evaluator.py
index 37fd9e4..77fb36a 100644
--- a/python/arepl_python_evaluator.py
+++ b/python/arepl_python_evaluator.py
@@ -6,7 +6,9 @@
 import traceback
 from time import time
 import asyncio
+from io import TextIOWrapper
 import os
+import sys
 from sys import path, modules, argv, version_info, exc_info
 from typing import Any, Dict, FrozenSet, Set
 from contextlib import contextmanager
@@ -19,6 +21,7 @@
 import arepl_saved as saved
 from arepl_settings import get_settings, update_settings
 from arepl_user_error import UserError
+import arepl_result_stream
 
 if util.find_spec("howdoi") is not None:
     from howdoi import howdoi  # pylint: disable=import-error
@@ -43,10 +46,10 @@ def __init__(
         execTime: float,
         totalPyTime: float,
         internalError: str = None,
-        caller = "<module>",
-        lineno = -1,
-        done = True,
-        count = -1,
+        caller="<module>",
+        lineno=-1,
+        done=True,
+        count=-1,
         *args,
         **kwargs
     ):
@@ -65,18 +68,19 @@ def __init__(
         self.count = count
 
 
-if version_info[0] < 3 or (version_info[0] == 3 and version_info[1] < 5):
-    # need at least 3.5 for typing
-    exMsg = "Must be using python 3.5 or later. You are using " + str(version_info)
-    print(ReturnInfo("", "{}", None, None, exMsg))
-    raise Exception(exMsg)
-
-
 class ExecArgs(object):
 
     # HALT! do NOT change this without changing corresponding type in the frontend! <----
     # Also note that this uses camelCase because that is standard in JS frontend
-    def __init__(self, evalCode: str, savedCode="", filePath="", usePreviousVariables=False, *args, **kwargs):
+    def __init__(
+        self,
+        evalCode: str,
+        savedCode="",
+        filePath="",
+        usePreviousVariables=False,
+        *args,
+        **kwargs
+    ):
         self.savedCode = savedCode
         self.evalCode = evalCode
         self.filePath = filePath
@@ -84,16 +88,6 @@ def __init__(self, evalCode: str, savedCode="", filePath="", usePreviousVariable
         # HALT! do NOT change this without changing corresponding type in the frontend! <----
 
 
-nonUserModules = get_non_user_modules()
-origModules = frozenset(modules)
-
-saved.starting_locals["help"] = arepl_overloads.help_overload
-saved.starting_locals["input"] = arepl_overloads.input_overload
-saved.starting_locals["howdoi"] = arepl_overloads.howdoi_wrapper
-
-eval_locals = deepcopy(saved.starting_locals)
-
-
 @contextmanager
 def script_path(script_dir: str):
     """
@@ -121,7 +115,7 @@ def script_path(script_dir: str):
         try:
             yield
         finally:
-            if(path[-1] == arepl_dir):
+            if path[-1] == arepl_dir:
                 path.pop()
             path[0] = arepl_dir
             try:
@@ -130,6 +124,15 @@ def script_path(script_dir: str):
                 pass
 
 
+nonUserModules = get_non_user_modules()
+origModules = frozenset(modules)
+
+saved.starting_locals["help"] = arepl_overloads.help_overload
+saved.starting_locals["input"] = arepl_overloads.input_overload
+saved.starting_locals["howdoi"] = arepl_overloads.howdoi_wrapper
+
+eval_locals = deepcopy(saved.starting_locals)
+
 noGlobalVarsMsg = {"zz status": "AREPL is configured to not show global vars"}
 
 
@@ -140,16 +143,19 @@ def exec_input(exec_args: ExecArgs):
     """
     global eval_locals
 
-    argv[0] = exec_args.filePath  # see https://docs.python.org/3/library/sys.html#sys.argv
+    argv[0] = exec_args.filePath
+    # see https://docs.python.org/3/library/sys.html#sys.argv
     saved.starting_locals["__file__"] = exec_args.filePath
-    if(exec_args.filePath):
+    if exec_args.filePath:
         saved.starting_locals["__loader__"].path = os.path.basename(exec_args.filePath)
 
     if not exec_args.usePreviousVariables:
         eval_locals = saved.get_eval_locals(exec_args.savedCode)
 
     # re-import imports. (pickling imports from saved code was unfortunately not possible)
-    exec_args.evalCode = saved.copy_saved_imports_to_exec(exec_args.evalCode, exec_args.savedCode)
+    exec_args.evalCode = saved.copy_saved_imports_to_exec(
+        exec_args.evalCode, exec_args.savedCode
+    )
 
     # reopen event loop in case user closed it in the last run
     asyncio.set_event_loop(asyncio.new_event_loop())
@@ -162,13 +168,18 @@ def exec_input(exec_args: ExecArgs):
         except BaseException:
             execTime = time() - start
             _, exc_obj, exc_tb = exc_info()
-            if not get_settings().showGlobalVars:
+            if not get_settings().show_global_vars:
                 raise UserError(exc_obj, exc_tb, noGlobalVarsMsg, execTime)
             else:
                 raise UserError(exc_obj, exc_tb, eval_locals, execTime)
 
         finally:
 
+            if sys.stdout.flush and callable(sys.stdout.flush):
+                # a normal program will flush at the end of the run
+                # arepl never stops so we have to do it manually
+                sys.stdout.flush()
+
             saved.arepl_store = eval_locals.get("arepl_store")
 
             try:
@@ -200,13 +211,17 @@ def exec_input(exec_args: ExecArgs):
             # clear mock stdin for next run
             arepl_overloads.arepl_input_iterator = None
 
-    if get_settings().showGlobalVars:
+    if get_settings().show_global_vars:
         userVariables = pickle_user_vars(
-            eval_locals, get_settings().default_filter_vars, get_settings().default_filter_types
+            eval_locals,
+            get_settings().default_filter_vars,
+            get_settings().default_filter_types,
         )
     else:
         userVariables = pickle_user_vars(
-            noGlobalVarsMsg, get_settings().default_filter_vars, get_settings().default_filter_types
+            noGlobalVarsMsg,
+            get_settings().default_filter_vars,
+            get_settings().default_filter_types,
         )
 
     return ReturnInfo("", userVariables, execTime, None)
@@ -214,10 +229,12 @@ def exec_input(exec_args: ExecArgs):
 
 def print_output(output: object):
     """
-    turns output into JSON and prints it
+    turns output into JSON and sends it to the result stream
     """
-    # 6q3co7 signifies to frontend that stdout is not due to a print in user's code
-    print("6q3co7" + json.dumps(output, default=lambda x: x.__dict__))
+    # We use the result stream because the user might print to stdout and we don't want to conflict
+    print(
+        json.dumps(output, default=lambda x: x.__dict__), file=arepl_result_stream.get_result_stream(), flush=True
+    )
 
 
 def main(json_input: str):
@@ -238,7 +255,9 @@ def main(json_input: str):
         return_info.userVariables = e.varsSoFar
         return_info.execTime = e.execTime
     except Exception as e:
-        return_info.internalError = "Sorry, AREPL has ran into an error\n\n" + traceback.format_exc()
+        return_info.internalError = (
+            "Sorry, AREPL has run into an error\n\n" + traceback.format_exc()
+        )
 
     return_info.totalPyTime = time() - start
 
@@ -247,5 +266,12 @@ def main(json_input: str):
 
 
 if __name__ == "__main__":
+    # arepl is run via node so python thinks stdout is not a tty device and uses full buffering
+    # We want users to see output in real time so we change to line buffering
+    # todo: once python 3.7 is the minimum supported version, use sys.stdout.reconfigure() instead (added in 3.7)
+    sys.stdout = TextIOWrapper(open(sys.stdout.fileno(), "wb"), line_buffering=True)
+    # Arepl node code will spawn the process with an extra pipe for results
+    # This is to avoid results conflicting with user writes to stdout
+    arepl_result_stream.open_result_stream()
     while True:
         main(input())
diff --git a/python/arepl_result_stream.py b/python/arepl_result_stream.py
new file mode 100644
index 0000000..3c78ded
--- /dev/null
+++ b/python/arepl_result_stream.py
@@ -0,0 +1,15 @@
+"""
+File for storing the result stream so it can be accessed by dump.
+Once you close a stream you can't reopen it, hence why this file only has an open method
+"""
+
+result_stream = None
+
+
+def get_result_stream():
+    return result_stream
+
+
+def open_result_stream():
+    global result_stream
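+    # fd 3 is the extra pipe the node frontend opens when spawning this process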
+    result_stream = open(3, "w")
diff --git a/python/arepl_settings.py b/python/arepl_settings.py
index f0203e2..aa896e1 100644
--- a/python/arepl_settings.py
+++ b/python/arepl_settings.py
@@ -1,11 +1,18 @@
 from typing import List
 
+
 class Settings(object):
 
     # HALT! do NOT change this without changing corresponding type in the frontend! <----
-    # Also note that this uses camelCase because that is standard in JS frontend
-    def __init__(self, showGlobalVars=True, default_filter_vars: List[str] = [], default_filter_types: List[str] = [], *args, **kwargs):
-        self.showGlobalVars = showGlobalVars
+    def __init__(
+        self,
+        show_global_vars=True,
+        default_filter_vars: List[str] = [],
+        default_filter_types: List[str] = [],
+        *args,
+        **kwargs
+    ):
+        self.show_global_vars = show_global_vars
         self.default_filter_vars = default_filter_vars
         self.default_filter_types = default_filter_types
         # HALT! do NOT change this without changing corresponding type in the frontend! <----
diff --git a/python/test_python_evaluator.py b/python/test_python_evaluator.py
index 3307bb8..ab7bd23 100644
--- a/python/test_python_evaluator.py
+++ b/python/test_python_evaluator.py
@@ -12,7 +12,7 @@
 python_ignore_path = path.join(path.dirname(path.abspath(__file__)), "testDataFiles")
 # The frontend will pass in below settings as default
 default_settings = {
-    "showGlobalVars": True,
+    "show_global_vars": True,
     "default_filter_vars": [],
     "default_filter_types": ["<class 'module'>", "<class 'function'>"],
 }
@@ -55,8 +55,10 @@ def foo():
 
 def test_dict_unpack_error():
     with pytest.raises(python_evaluator.UserError):
-        python_evaluator.exec_input(python_evaluator.ExecArgs("[(k,v) for (k,v) in {'a': 1}]"))
-        
+        python_evaluator.exec_input(
+            python_evaluator.ExecArgs("[(k,v) for (k,v) in {'a': 1}]")
+        )
+
 
 def test_main_returns_var():
     mock_stdin = """{
@@ -64,7 +66,7 @@ def test_main_returns_var():
         "evalCode": "x=1",
         "filePath": "",
         "usePreviousVariables": false,
-        "showGlobalVars": true
+        "show_global_vars": true
     }"""
     return_info = python_evaluator.main(mock_stdin)
     assert jsonpickle.decode(return_info.userVariables)["x"] == 1
@@ -76,7 +78,7 @@ def test_main_returns_var_even_when_error():
         "evalCode": "y=1;x",
         "filePath": "",
         "usePreviousVariables": false,
-        "showGlobalVars": true
+        "show_global_vars": true
     }"""
     return_info = python_evaluator.main(mock_stdin)
     assert jsonpickle.decode(return_info.userVariables)["y"] == 1
@@ -96,9 +98,12 @@ def test_infinite_generator():
 
 
 def test_dont_show_global_vars():
-    update_settings({"showGlobalVars": False})
+    update_settings({"show_global_vars": False})
     return_info = python_evaluator.exec_input(python_evaluator.ExecArgs("x = 1"))
-    assert jsonpickle.decode(return_info.userVariables)["zz status"] == "AREPL is configured to not show global vars"
+    assert (
+        jsonpickle.decode(return_info.userVariables)["zz status"]
+        == "AREPL is configured to not show global vars"
+    )
 
 
 def test_argv0_should_be_file_path():
@@ -106,7 +111,9 @@ def test_argv0_should_be_file_path():
     return_info = python_evaluator.exec_input(python_evaluator.ExecArgs(code))
     assert jsonpickle.decode(return_info.userVariables)["args"][0] == ""
 
-    return_info = python_evaluator.exec_input(python_evaluator.ExecArgs(code, "", filePath="test path"))
+    return_info = python_evaluator.exec_input(
+        python_evaluator.ExecArgs(code, "", filePath="test path")
+    )
     assert jsonpickle.decode(return_info.userVariables)["args"][0] == "test path"
 
 
@@ -114,7 +121,9 @@ def test_syspath0_should_be_file_path():
     code = "from sys import path;first_path=path[0]"
     temp_dir = tempfile.gettempdir()
     fake_temp_file = path.join(temp_dir, "foo.py")
-    return_info = python_evaluator.exec_input(python_evaluator.ExecArgs(code, "", filePath=fake_temp_file))
+    return_info = python_evaluator.exec_input(
+        python_evaluator.ExecArgs(code, "", filePath=fake_temp_file)
+    )
     assert jsonpickle.decode(return_info.userVariables)["first_path"] == temp_dir
 
 
@@ -123,23 +132,38 @@ def test_starting_dunders_should_be_correct():
     return_info = python_evaluator.exec_input(python_evaluator.ExecArgs(code))
     assert jsonpickle.decode(return_info.userVariables)["file_dunder"] == ""
 
-    return_info = python_evaluator.exec_input(python_evaluator.ExecArgs(code, "", filePath="test path"))
+    return_info = python_evaluator.exec_input(
+        python_evaluator.ExecArgs(code, "", filePath="test path")
+    )
     assert jsonpickle.decode(return_info.userVariables)["file_dunder"] == "test path"
 
-    return_info = python_evaluator.exec_input(python_evaluator.ExecArgs("name_dunder=__name__"))
+    return_info = python_evaluator.exec_input(
+        python_evaluator.ExecArgs("name_dunder=__name__")
+    )
     assert jsonpickle.decode(return_info.userVariables)["name_dunder"] == "__main__"
 
-    return_info = python_evaluator.exec_input(python_evaluator.ExecArgs("loader_dunder=__loader__", filePath="test path"))
-    assert jsonpickle.decode(return_info.userVariables)["loader_dunder"].name == "__main__"
+    return_info = python_evaluator.exec_input(
+        python_evaluator.ExecArgs("loader_dunder=__loader__", filePath="test path")
+    )
+    assert (
+        jsonpickle.decode(return_info.userVariables)["loader_dunder"].name == "__main__"
+    )
 
-    return_info = python_evaluator.exec_input(python_evaluator.ExecArgs("loader_dunder=__loader__", filePath="test path"))
-    assert jsonpickle.decode(return_info.userVariables)["loader_dunder"].path == "test path"
+    return_info = python_evaluator.exec_input(
+        python_evaluator.ExecArgs("loader_dunder=__loader__", filePath="test path")
+    )
+    assert (
+        jsonpickle.decode(return_info.userVariables)["loader_dunder"].path
+        == "test path"
+    )
 
 
 def test_relative_import():
     file_path = path.join(python_ignore_path, "foo2.py")
     with open(file_path) as f:
-        return_info = python_evaluator.exec_input(python_evaluator.ExecArgs(f.read(), "", file_path))
+        return_info = python_evaluator.exec_input(
+            python_evaluator.ExecArgs(f.read(), "", file_path)
+        )
     assert jsonpickle.decode(return_info.userVariables)["x"] == 2
 
 
@@ -155,16 +179,23 @@ def test_dump_when_exception():
     # and it causes dump to not work properly second time around (see https://github.com/Almenon/AREPL-vscode/issues/91)
     try:
         python_evaluator.exec_input(
-            python_evaluator.ExecArgs("from arepl_dump import dump;dumpOut = dump('dump worked');x=1;raise Exception()")
+            python_evaluator.ExecArgs(
+                "from arepl_dump import dump;dumpOut = dump('dump worked');x=1;raise Exception()"
+            )
         )
     except Exception as e:
         assert "dumpOut" in jsonpickle.decode(e.varsSoFar)
     try:
         python_evaluator.exec_input(
-            python_evaluator.ExecArgs("from arepl_dump import dump;dumpOut = dump('dump worked');raise Exception()")
+            python_evaluator.ExecArgs(
+                "from arepl_dump import dump;dumpOut = dump('dump worked');raise Exception()"
+            )
         )
     except Exception as e:
-        assert "dumpOut" in jsonpickle.decode(e.varsSoFar) and jsonpickle.decode(e.varsSoFar)["dumpOut"] is not None
+        assert (
+            "dumpOut" in jsonpickle.decode(e.varsSoFar)
+            and jsonpickle.decode(e.varsSoFar)["dumpOut"] is not None
+        )
 
 
 def test_import_does_not_show():
@@ -185,7 +216,9 @@ def test_save():
 
 
 def test_save_import():  # imports in saved section should be able to be referenced in exec section
-    return_info = python_evaluator.exec_input(python_evaluator.ExecArgs("z=math.sin(0)", "import math#$save"))
+    return_info = python_evaluator.exec_input(
+        python_evaluator.ExecArgs("z=math.sin(0)", "import math#$save")
+    )
     assert jsonpickle.decode(return_info.userVariables)["z"] == 0
 
 
@@ -279,7 +312,9 @@ def hello_world():
     """
 
     python_evaluator.exec_input(python_evaluator.ExecArgs(event_loop_code))
-    return_info = python_evaluator.exec_input(python_evaluator.ExecArgs(event_loop_code))
+    return_info = python_evaluator.exec_input(
+        python_evaluator.ExecArgs(event_loop_code)
+    )
     vars = jsonpickle.decode(return_info.userVariables)
     assert "x" in vars
 
@@ -313,8 +348,12 @@ def test_user_import_deleted():
 
     try:
         with open(file_path2) as f:
-            return_info = python_evaluator.exec_input(python_evaluator.ExecArgs(f.read(), "", file_path2))
-        assert jsonpickle.decode(return_info.userVariables)["x"] == 2  # just checking this for later on
+            return_info = python_evaluator.exec_input(
+                python_evaluator.ExecArgs(f.read(), "", file_path2)
+            )
+        assert (
+            jsonpickle.decode(return_info.userVariables)["x"] == 2
+        )  # just checking this for later on
         assert "foo" not in modules  # user import should be deleted!
 
         # now that import is uncached i should be able to change code, rerun & get different result
@@ -322,7 +361,9 @@ def test_user_import_deleted():
             f.write("def foo():\n    return 3")
 
         with open(file_path2) as f:
-            return_info = python_evaluator.exec_input(python_evaluator.ExecArgs(f.read(), "", file_path2))
+            return_info = python_evaluator.exec_input(
+                python_evaluator.ExecArgs(f.read(), "", file_path2)
+            )
         assert jsonpickle.decode(return_info.userVariables)["x"] == 3
 
     finally:
@@ -346,8 +387,12 @@ def test_user_var_import_deleted():
 
     try:
         with open(importVarFile_path) as f:
-            return_info = python_evaluator.exec_input(python_evaluator.ExecArgs(f.read(), "", importVarFile_path))
-        assert jsonpickle.decode(return_info.userVariables)["myVar"] == 5  # just checking this for later on
+            return_info = python_evaluator.exec_input(
+                python_evaluator.ExecArgs(f.read(), "", importVarFile_path)
+            )
+        assert (
+            jsonpickle.decode(return_info.userVariables)["myVar"] == 5
+        )  # just checking this for later on
         assert "varToImport" not in modules  # user import should be deleted!
 
         # now that import is uncached i should be able to change code, rerun & get different result
@@ -355,7 +400,9 @@ def test_user_var_import_deleted():
             f.write("varToImport = 3")
 
         with open(importVarFile_path) as f:
-            return_info = python_evaluator.exec_input(python_evaluator.ExecArgs(f.read(), "", importVarFile_path))
+            return_info = python_evaluator.exec_input(
+                python_evaluator.ExecArgs(f.read(), "", importVarFile_path)
+            )
         assert jsonpickle.decode(return_info.userVariables)["myVar"] == 3
 
     finally:
@@ -366,12 +413,16 @@ def test_user_var_import_deleted():
 
 def test_arepl_store():
     python_evaluator.exec_input(python_evaluator.ExecArgs("arepl_store=5"))
-    return_info = python_evaluator.exec_input(python_evaluator.ExecArgs("x=arepl_store"))
+    return_info = python_evaluator.exec_input(
+        python_evaluator.ExecArgs("x=arepl_store")
+    )
     assert jsonpickle.decode(return_info.userVariables)["x"] == 5
 
 
 def test_howdoiArepl():
-    return_info = python_evaluator.exec_input(python_evaluator.ExecArgs("x=howdoi('use arepl')"))
+    return_info = python_evaluator.exec_input(
+        python_evaluator.ExecArgs("x=howdoi('use arepl')")
+    )
     assert (
         jsonpickle.decode(return_info.userVariables)["x"]
         == "using AREPL is simple - just start coding and arepl will show you the final state of your variables. For more help see https://github.com/Almenon/AREPL-vscode/wiki"
@@ -380,40 +431,61 @@ def test_howdoiArepl():
 
 def test_script_path_should_work_regardless_of_user_errors():
     try:
-        python_evaluator.exec_input(python_evaluator.ExecArgs("from sys import path;x", filePath=python_ignore_path))
+        python_evaluator.exec_input(
+            python_evaluator.ExecArgs(
+                "from sys import path;x", filePath=python_ignore_path
+            )
+        )
     except python_evaluator.UserError as e:
         return_info = e.varsSoFar
     try:
-        python_evaluator.exec_input(python_evaluator.ExecArgs("from sys import path;x", filePath=python_ignore_path))
+        python_evaluator.exec_input(
+            python_evaluator.ExecArgs(
+                "from sys import path;x", filePath=python_ignore_path
+            )
+        )
     except python_evaluator.UserError as e:
         secondreturn_info = e.varsSoFar
 
     # script_path should restore the sys path back to original state after execution
     # so each run should have same path
-    assert jsonpickle.decode(return_info)["path"] == jsonpickle.decode(secondreturn_info)["path"]
+    assert (
+        jsonpickle.decode(return_info)["path"]
+        == jsonpickle.decode(secondreturn_info)["path"]
+    )
 
 
 def test_mock_stdin():
     return_info = python_evaluator.exec_input(
-        python_evaluator.ExecArgs("standard_input = 'hello\\nworld';x=input();y=input()")
+        python_evaluator.ExecArgs(
+            "standard_input = 'hello\\nworld';x=input();y=input()"
+        )
     )
     assert jsonpickle.decode(return_info.userVariables)["x"] == "hello"
     assert jsonpickle.decode(return_info.userVariables)["y"] == "world"
 
     return_info = python_evaluator.exec_input(
-        python_evaluator.ExecArgs("standard_input = ['hello', 'world'];x=input();y=input()")
+        python_evaluator.ExecArgs(
+            "standard_input = ['hello', 'world'];x=input();y=input()"
+        )
     )
     assert jsonpickle.decode(return_info.userVariables)["x"] == "hello"
     assert jsonpickle.decode(return_info.userVariables)["y"] == "world"
 
     with pytest.raises(python_evaluator.UserError):
-        python_evaluator.exec_input(python_evaluator.ExecArgs("standard_input = ['hello'];x=input();y=input()"))
+        python_evaluator.exec_input(
+            python_evaluator.ExecArgs("standard_input = ['hello'];x=input();y=input()")
+        )
 
 
 def integration_test_howdoi():
     # this requires internet access so it is not official test
-    return_info = python_evaluator.exec_input(python_evaluator.ExecArgs("x=howdoi('eat a apple')"))
-    print(jsonpickle.decode(return_info.userVariables)["x"])  # this should print out howdoi results
+    return_info = python_evaluator.exec_input(
+        python_evaluator.ExecArgs("x=howdoi('eat a apple')")
+    )
+    print(
+        jsonpickle.decode(return_info.userVariables)["x"]
+    )  # this should print out howdoi results
 
 
 ###########################