
Commit

fixing leftover prints
eleanorjboyd committed Oct 30, 2024
1 parent a23f8fa commit 36cac6f
Showing 4 changed files with 78 additions and 87 deletions.
@@ -45,11 +45,8 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter {
this.resultResolver?.resolveDiscovery(data);
});

- try {
- await this.runPytestDiscovery(uri, name, executionFactory, interpreter);
- } finally {
- traceVerbose('donee');
- }
+ await this.runPytestDiscovery(uri, name, executionFactory, interpreter);

// this is only a placeholder to handle function overloading until rewrite is finished
const discoveryPayload: DiscoveredTestPayload = { cwd: uri.fsPath, status: 'success' };
return discoveryPayload;
@@ -232,9 +232,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
runInstance,
);
}
- // this doesn't work, it instead directs us to the noop one which is defined first
- // potentially this is due to the server already being close, if this is the case?
- console.log('right before serverDispose');
}

// deferredTillEOT is resolved when all data sent on stdout and stderr is received, close event is only called when this occurs
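The comment just above describes deferredTillEOT: a promise that resolves only once all data sent on stdout and stderr has been received, which is when the child process emits its close event. A minimal sketch of that pattern follows, assuming a small createDeferred helper and generic child-process handling; the names are illustrative, not the extension's actual API.

// Illustrative sketch only: wait for end-of-transmission on a test subprocess.
import { ChildProcess } from 'child_process';

interface Deferred<T> {
    promise: Promise<T>;
    resolve: (value: T) => void;
}

// Tiny stand-in for the deferred helper assumed by this sketch.
function createDeferred<T>(): Deferred<T> {
    let resolve!: (value: T) => void;
    const promise = new Promise<T>((res) => {
        resolve = res;
    });
    return { promise, resolve };
}

function waitForEndOfTransmission(proc: ChildProcess): Promise<void> {
    const deferredTillEOT = createDeferred<void>();
    proc.stdout?.on('data', (chunk: Buffer) => {
        // Handle streamed test output here.
        process.stdout.write(chunk);
    });
    // 'close' fires only after stdout and stderr have both ended, so resolving
    // here guarantees that all data sent on those streams has been received.
    proc.on('close', () => deferredTillEOT.resolve());
    return deferredTillEOT.promise;
}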
@@ -63,11 +63,8 @@ export class UnittestTestDiscoveryAdapter implements ITestDiscoveryAdapter {
outChannel: this.outputChannel,
};

- try {
- await this.runDiscovery(uri, options, name, cwd, executionFactory);
- } finally {
- // none
- }
+ await this.runDiscovery(uri, options, name, cwd, executionFactory);

// placeholder until after the rewrite is adopted
// TODO: remove after adoption.
const discoveryPayload: DiscoveredTestPayload = { cwd, status: 'success' };
148 changes: 74 additions & 74 deletions src/test/testing/common/testingAdapter.test.ts
@@ -1061,83 +1061,83 @@ suite('End to End Tests: test adapters', () => {
assert.strictEqual(failureOccurred, false, failureMsg);
});
});
- // test('unittest execution adapter seg fault error handling', async () => {
- // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri);
- // let callCount = 0;
- // let failureOccurred = false;
- // let failureMsg = '';
- // resultResolver._resolveExecution = async (data, _token?) => {
- // // do the following asserts for each time resolveExecution is called, should be called once per test.
- // callCount = callCount + 1;
- // traceLog(`unittest execution adapter seg fault error handling \n ${JSON.stringify(data)}`);
- // try {
- // if (data.status === 'error') {
- // if (data.error === undefined) {
- // // Dereference a NULL pointer
- // const indexOfTest = JSON.stringify(data).search('Dereference a NULL pointer');
- // if (indexOfTest === -1) {
- // failureOccurred = true;
- // failureMsg = 'Expected test to have a null pointer';
- // }
- // } else if (data.error.length === 0) {
- // failureOccurred = true;
- // failureMsg = "Expected errors in 'error' field";
- // }
- // } else {
- // const indexOfTest = JSON.stringify(data.result).search('error');
- // if (indexOfTest === -1) {
- // failureOccurred = true;
- // failureMsg =
- // 'If payload status is not error then the individual tests should be marked as errors. This should occur on windows machines.';
- // }
- // }
- // if (data.result === undefined) {
- // failureOccurred = true;
- // failureMsg = 'Expected results to be present';
- // }
- // // make sure the testID is found in the results
- // const indexOfTest = JSON.stringify(data).search('test_seg_fault.TestSegmentationFault.test_segfault');
- // if (indexOfTest === -1) {
- // failureOccurred = true;
- // failureMsg = 'Expected testId to be present';
- // }
- // } catch (err) {
- // failureMsg = err ? (err as Error).toString() : '';
- // failureOccurred = true;
- // }
- // return Promise.resolve();
- // };
+ test('unittest execution adapter seg fault error handling', async () => {
+ resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri);
+ let callCount = 0;
+ let failureOccurred = false;
+ let failureMsg = '';
+ resultResolver._resolveExecution = async (data, _token?) => {
+ // do the following asserts for each time resolveExecution is called, should be called once per test.
+ callCount = callCount + 1;
+ traceLog(`unittest execution adapter seg fault error handling \n ${JSON.stringify(data)}`);
+ try {
+ if (data.status === 'error') {
+ if (data.error === undefined) {
+ // Dereference a NULL pointer
+ const indexOfTest = JSON.stringify(data).search('Dereference a NULL pointer');
+ if (indexOfTest === -1) {
+ failureOccurred = true;
+ failureMsg = 'Expected test to have a null pointer';
+ }
+ } else if (data.error.length === 0) {
+ failureOccurred = true;
+ failureMsg = "Expected errors in 'error' field";
+ }
+ } else {
+ const indexOfTest = JSON.stringify(data.result).search('error');
+ if (indexOfTest === -1) {
+ failureOccurred = true;
+ failureMsg =
+ 'If payload status is not error then the individual tests should be marked as errors. This should occur on windows machines.';
+ }
+ }
+ if (data.result === undefined) {
+ failureOccurred = true;
+ failureMsg = 'Expected results to be present';
+ }
+ // make sure the testID is found in the results
+ const indexOfTest = JSON.stringify(data).search('test_seg_fault.TestSegmentationFault.test_segfault');
+ if (indexOfTest === -1) {
+ failureOccurred = true;
+ failureMsg = 'Expected testId to be present';
+ }
+ } catch (err) {
+ failureMsg = err ? (err as Error).toString() : '';
+ failureOccurred = true;
+ }
+ return Promise.resolve();
+ };

- // const testId = `test_seg_fault.TestSegmentationFault.test_segfault`;
- // const testIds: string[] = [testId];
+ const testId = `test_seg_fault.TestSegmentationFault.test_segfault`;
+ const testIds: string[] = [testId];

- // // set workspace to test workspace folder
- // workspaceUri = Uri.parse(rootPathErrorWorkspace);
- // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py'];
+ // set workspace to test workspace folder
+ workspaceUri = Uri.parse(rootPathErrorWorkspace);
+ configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py'];

- // // run pytest execution
- // const executionAdapter = new UnittestTestExecutionAdapter(
- // configService,
- // testOutputChannel.object,
- // resultResolver,
- // envVarsService,
- // );
- // const testRun = typeMoq.Mock.ofType<TestRun>();
- // testRun
- // .setup((t) => t.token)
- // .returns(
- // () =>
- // ({
- // onCancellationRequested: () => undefined,
- // } as any),
- // );
- // await executionAdapter
- // .runTests(workspaceUri, testIds, TestRunProfileKind.Run, testRun.object, pythonExecFactory)
- // .finally(() => {
- // assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once');
- // assert.strictEqual(failureOccurred, false, failureMsg);
- // });
- // });
+ // run pytest execution
+ const executionAdapter = new UnittestTestExecutionAdapter(
+ configService,
+ testOutputChannel.object,
+ resultResolver,
+ envVarsService,
+ );
+ const testRun = typeMoq.Mock.ofType<TestRun>();
+ testRun
+ .setup((t) => t.token)
+ .returns(
+ () =>
+ ({
+ onCancellationRequested: () => undefined,
+ } as any),
+ );
+ await executionAdapter
+ .runTests(workspaceUri, testIds, TestRunProfileKind.Run, testRun.object, pythonExecFactory)
+ .finally(() => {
+ assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once');
+ assert.strictEqual(failureOccurred, false, failureMsg);
+ });
+ });
test('pytest execution adapter seg fault error handling', async () => {
resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri);
let callCount = 0;
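As context for the re-enabled test above: these end-to-end suites stub the resolver callback so it only records what it observed, and the hard assertions run once in .finally() after the adapter completes. A stripped-down sketch of that record-then-assert shape follows; the function and payload type are simplified for illustration and are not the suite's actual helpers.

// Simplified sketch of the record-then-assert pattern used by the E2E tests.
import * as assert from 'assert';

type ExecutionPayload = { status: string; result?: unknown; error?: string[] };

async function runRecordThenAssert(
    runTests: (onResolve: (data: ExecutionPayload) => Promise<void>) => Promise<void>,
): Promise<void> {
    let callCount = 0;
    let failureOccurred = false;
    let failureMsg = '';

    // The stub never throws mid-run; it only records problems so the test can
    // fail with a readable message after the whole run has finished.
    const resolveExecution = async (data: ExecutionPayload): Promise<void> => {
        callCount += 1;
        if (data.result === undefined) {
            failureOccurred = true;
            failureMsg = 'Expected results to be present';
        }
        if (data.status === 'error' && (data.error ?? []).length === 0) {
            failureOccurred = true;
            failureMsg = "Expected errors in 'error' field";
        }
    };

    await runTests(resolveExecution).finally(() => {
        assert.strictEqual(callCount, 1, 'Expected the resolver to be called exactly once');
        assert.strictEqual(failureOccurred, false, failureMsg);
    });
}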
