diff --git a/smart_tests/commands/verify.py b/smart_tests/commands/verify.py index cb97cb55e..dde42b4c5 100644 --- a/smart_tests/commands/verify.py +++ b/smart_tests/commands/verify.py @@ -7,13 +7,13 @@ import click import smart_tests.args4p.typer as typer -from smart_tests.utils.commands import Command -from smart_tests.utils.env_keys import TOKEN_KEY from smart_tests.utils.tracking import Tracking, TrackingClient from .. import args4p from ..app import Application -from ..utils.authentication import get_org_workspace +from ..utils.authentication import ensure_org_workspace, get_org_workspace +from ..utils.commands import Command +from ..utils.env_keys import TOKEN_KEY from ..utils.java import get_java_command from ..utils.smart_tests_client import SmartTestsClient from ..utils.typer_types import emoji @@ -82,18 +82,8 @@ def verify(app_instance: Application): click.echo("Java command: " + repr(java)) click.echo("smart-tests version: " + repr(version)) - if org is None or workspace is None: - msg = ( - "Could not identify Smart Tests organization/workspace. " - "Please confirm if you set SMART_TESTS_TOKEN or SMART_TESTS_ORGANIZATION and SMART_TESTS_WORKSPACE " - "environment variables" - ) - tracking_client.send_error_event( - event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR, - stack_trace=msg - ) - click.secho(msg, fg='red', err=True) - raise typer.Exit(1) + # raise an error here after we print out the basic diagnostics if LAUNCHABLE_TOKEN is not set. + ensure_org_workspace() try: res = client.request("get", "verification") diff --git a/smart_tests/test_runners/file.py b/smart_tests/test_runners/file.py index 530028f2a..8e99a204f 100644 --- a/smart_tests/test_runners/file.py +++ b/smart_tests/test_runners/file.py @@ -2,19 +2,9 @@ # The most bare-bone versions of the test runner support # -from ..commands.subset import Subset from . 
import smart_tests - -@smart_tests.subset -def subset(client: Subset): - # read lines as test file names - for t in client.stdin(): - client.test_path(t.rstrip("\n")) - - client.run() - - +subset = smart_tests.CommonSubsetImpls(__name__).scan_stdin() record_tests = smart_tests.CommonRecordTestImpls(__name__).file_profile_report_files() smart_tests.CommonDetectFlakesImpls(__name__).detect_flakes() # split_subset = launchable.CommonSplitSubsetImpls(__name__).split_subset() diff --git a/smart_tests/test_runners/jasmine.py b/smart_tests/test_runners/jasmine.py index 390f993cd..7de003bba 100644 --- a/smart_tests/test_runners/jasmine.py +++ b/smart_tests/test_runners/jasmine.py @@ -27,13 +27,7 @@ def record_tests( client.run() -@smart_tests.subset -def subset(client): - # read lines as test file names - for t in client.stdin(): - client.test_path(t.rstrip("\n")) - - client.run() +subset = smart_tests.CommonSubsetImpls(__name__).scan_stdin() class JSONReportParser: diff --git a/smart_tests/test_runners/karma.py b/smart_tests/test_runners/karma.py new file mode 100644 index 000000000..9c9a35e7f --- /dev/null +++ b/smart_tests/test_runners/karma.py @@ -0,0 +1,167 @@ +# This runner only supports recording tests +# For subsetting, use 'ng' test runner instead +# It's possible to use 'karma' runner for recording, and 'ng' runner for subsetting, for the same test session +import json +from typing import Annotated, Dict, Generator, List + +import click + +from ..args4p import typer +from ..commands.record.case_event import CaseEvent +from ..testpath import TestPath +from . 
import smart_tests + + +@smart_tests.subset +def subset(client, _with: Annotated[str | None, typer.Option( + '--with', help="Specify 'ng' to use the Angular test runner for subsetting")] = None, ): + # TODO: implement the --with ng option + + # read lines as test file names + for t in client.stdin(): + client.test_path(t.rstrip("\n")) + + client.run() + + +@smart_tests.record.tests +def record_tests(client, + reports: Annotated[List[str], typer.Argument(multiple=True, help="Test report files to process")], + ): + client.parse_func = JSONReportParser(client).parse_func + + for r in reports: + client.report(r) + + client.run() + + +class JSONReportParser: + """ + Sample Karma report format: + { + "browsers": {...}, + "result": { + "24461741": [ + { + "fullName": "path/to/spec.ts should do something", + "description": "should do something", + "id": "spec0", + "log": [], + "skipped": false, + "disabled": false, + "pending": false, + "success": true, + "suite": [ + "path/to/spec.ts" + ], + "time": 92, + "executedExpectationsCount": 1, + "passedExpectations": [...], + "properties": null + } + ] + }, + "summary": {...} + } + """ + + def __init__(self, client): + self.client = client + + def parse_func(self, report_file: str) -> Generator[Dict, None, None]: # type: ignore + data: Dict + with open(report_file, 'r') as json_file: + try: + data = json.load(json_file) + except Exception: + click.echo( + click.style("Error: Failed to load Json report file: {}".format(report_file), fg='red'), err=True) + return + + if not self._validate_report_format(data): + click.echo( + "Error: {} does not appear to be valid Karma report format. 
" + "Make sure you are using karma-json-reporter or a compatible reporter.".format( + report_file), err=True) + return + + results = data.get("result", {}) + for browser_id, specs in results.items(): + if isinstance(specs, list): + for event in self._parse_specs(specs): + yield event + + def _validate_report_format(self, data: Dict) -> bool: + if not isinstance(data, dict): + return False + + if "result" not in data: + return False + + results = data.get("result", {}) + if not isinstance(results, dict): + return False + + for browser_id, specs in results.items(): + if not isinstance(specs, list): + return False + + for spec in specs: + if not isinstance(spec, dict): + return False + # Check for required fields + if "suite" not in spec or "time" not in spec: + return False + # Field suite should have at least one element (filename) + suite = spec.get("suite", []) + if not isinstance(suite, list) or len(suite) == 0: + return False + + return True + + def _parse_specs(self, specs: List[Dict]) -> List[Dict]: + events: List[Dict] = [] + + for spec in specs: + # TODO: + # In NextWorld, test filepaths are included in the suite tag + # But generally in a Karma test report, a suite tag can be any string + # For the time being let's get filepaths from the suite tag, + # until we find a standard way to include filepaths in the test reports + suite = spec.get("suite", []) + filename = suite[0] if suite else "" + + test_path: TestPath = [ + self.client.make_file_path_component(filename), + {"type": "testcase", "name": spec.get("fullName", spec.get("description", ""))} + ] + + duration_msec = spec.get("time", 0) + status = self._case_event_status_from_spec(spec) + stderr = self._parse_stderr(spec) + + events.append(CaseEvent.create( + test_path=test_path, + duration_secs=duration_msec / 1000 if duration_msec else 0, + status=status, + stderr=stderr + )) + + return events + + def _case_event_status_from_spec(self, spec: Dict) -> int: + if spec.get("skipped", False) or 
spec.get("disabled", False) or spec.get("pending", False): + return CaseEvent.TEST_SKIPPED + + if spec.get("success", False): + return CaseEvent.TEST_PASSED + else: + return CaseEvent.TEST_FAILED + + def _parse_stderr(self, spec: Dict) -> str: + log_messages = spec.get("log", []) + if not log_messages: + return "" + + return "\n".join(str(msg) for msg in log_messages if msg) diff --git a/smart_tests/test_runners/ng.py b/smart_tests/test_runners/ng.py new file mode 100644 index 000000000..fcdee0665 --- /dev/null +++ b/smart_tests/test_runners/ng.py @@ -0,0 +1,22 @@ +from . import smart_tests + + +@smart_tests.subset +def subset(client): + """ + Input format example: + src/app/feature/feature.component.spec.ts + src/app/service/service.service.spec.ts + + Output format: --include= format that can be passed to ng test + Example: + --include=src/app/feature/feature.component.spec.ts --include=src/app/service/service.service.spec.ts + """ + for t in client.stdin(): + path = t.strip() + if path: + client.test_path(path) + + client.formatter = lambda x: "--include={}".format(x[0]['name']) + client.separator = " " + client.run() diff --git a/smart_tests/test_runners/playwright.py b/smart_tests/test_runners/playwright.py index fa5671983..ddc857dee 100644 --- a/smart_tests/test_runners/playwright.py +++ b/smart_tests/test_runners/playwright.py @@ -13,7 +13,6 @@ from ..args4p.exceptions import BadCmdLineException from ..commands.record.case_event import CaseEvent, CaseEventGenerator from ..commands.record.tests import RecordTests -from ..commands.subset import Subset from ..testpath import TestPath from . 
import smart_tests @@ -63,13 +62,7 @@ def path_builder(case: TestCase, suite: TestSuite, client.run() -@smart_tests.subset -def subset(client: Subset): - # read lines as test file names - for t in client.stdin(): - client.test_path(t.rstrip("\n")) - - client.run() +subset = smart_tests.CommonSubsetImpls(__name__).scan_stdin() class JSONReportParser: diff --git a/smart_tests/test_runners/prove.py b/smart_tests/test_runners/prove.py index 9a645f3f7..151dfdea0 100644 --- a/smart_tests/test_runners/prove.py +++ b/smart_tests/test_runners/prove.py @@ -7,7 +7,6 @@ from ..args4p.exceptions import BadCmdLineException from ..commands.record.tests import RecordTests -from ..commands.subset import Subset from ..testpath import TestPath from . import smart_tests @@ -19,13 +18,7 @@ def remove_leading_number_and_dash(input_string: str) -> str: return result -@smart_tests.subset -def subset(client: Subset): - # read lines as test file names - for t in client.stdin(): - client.test_path(t.rstrip("\n")) - - client.run() +subset = smart_tests.CommonSubsetImpls(__name__).scan_stdin() @smart_tests.record.tests diff --git a/smart_tests/test_runners/smart_tests.py b/smart_tests/test_runners/smart_tests.py index e5db72d30..6d4b72e88 100644 --- a/smart_tests/test_runners/smart_tests.py +++ b/smart_tests/test_runners/smart_tests.py @@ -75,11 +75,27 @@ class CommonSubsetImpls: def __init__(self, module_name): self.cmdname = cmdname(module_name) - def scan_files(self, pattern): + def scan_stdin(self): + """ + Historical implementation of the files profile that's also used elsewhere. + Reads test one line at a time from stdin. Consider this implementation deprecated. + Newer test runners are advised to use scan_files() without the pattern argument. 
+ """ + def subset(client): + # read lines as test file names + for t in client.stdin(): + client.test_path(t.rstrip("\n")) + client.run() + + return wrap(subset, subset_cmd, self.cmdname) + + def scan_files(self, pattern=None): """ Suitable for test runners that use files as unit of tests where file names follow a naming pattern. :param pattern: file masks that identify test files, such as '*_spec.rb' + for test runners that do not have natural file naming conventions, pass in None, + so that the implementation will refuse to accept directories. """ def subset( client: Subset, @@ -92,6 +108,8 @@ def subset( # client type: Optimize in def lauchable.commands.subset.subset def parse(fname: str): if os.path.isdir(fname): + if pattern is None: + raise click.UsageError(f'{fname} is a directory, but expecting a file or GLOB') client.scan(fname, '**/' + pattern) elif fname == '@-': # read stdin diff --git a/smart_tests/test_runners/vitest.py b/smart_tests/test_runners/vitest.py index 767cfa696..5d7bc4cde 100644 --- a/smart_tests/test_runners/vitest.py +++ b/smart_tests/test_runners/vitest.py @@ -4,7 +4,6 @@ import smart_tests.args4p.typer as typer from ..commands.record.tests import RecordTests -from ..commands.subset import Subset from . import smart_tests @@ -40,10 +39,4 @@ def parse_func(report: str) -> ET.ElementTree: smart_tests.CommonRecordTestImpls.load_report_files(client=client, source_roots=reports) -@smart_tests.subset -def subset(client: Subset): - # read lines as test file names - for t in client.stdin(): - client.test_path(t.rstrip("\n")) - - client.run() +subset = smart_tests.CommonSubsetImpls(__name__).scan_stdin() diff --git a/smart_tests/utils/authentication.py b/smart_tests/utils/authentication.py index 6de6f7513..ff4395e7e 100644 --- a/smart_tests/utils/authentication.py +++ b/smart_tests/utils/authentication.py @@ -10,6 +10,10 @@ def get_org_workspace(): + ''' + Returns (org,ws) tuple from LAUNCHABLE_TOKEN, or (None,None) if not found. 
+    Use ensure_org_workspace() if this is supposed to be an error condition
+    ''' token = get_token() if token: try: @@ -17,7 +21,8 @@ def get_org_workspace(): org, workspace = user.split("/", 1) return org, workspace except ValueError: - return None, None + click.secho("Invalid value in SMART_TESTS_TOKEN environment variable.", fg="red") + raise typer.Exit(1) return os.getenv(ORGANIZATION_KEY), os.getenv(WORKSPACE_KEY) diff --git a/smart_tests/utils/smart_tests_client.py b/smart_tests/utils/smart_tests_client.py index a3dd4b332..2949cd525 100644 --- a/smart_tests/utils/smart_tests_client.py +++ b/smart_tests/utils/smart_tests_client.py @@ -9,8 +9,7 @@ from smart_tests.utils.tracking import Tracking, TrackingClient # type: ignore from ..app import Application -from ..args4p.exceptions import BadCmdLineException -from .authentication import get_org_workspace +from .authentication import ensure_org_workspace from .env_keys import REPORT_ERROR_KEY @@ -23,13 +22,7 @@ def __init__(self, tracking_client: TrackingClient | None = None, base_url: str app=app ) self.tracking_client = tracking_client - self.organization, self.workspace = get_org_workspace() - if self.organization is None or self.workspace is None: - raise BadCmdLineException( - "Could not identify a Smart Tests organization/workspace. 
" - "Confirm that you set SMART_TESTS_TOKEN " - "(or SMART_TESTS_ORGANIZATION and SMART_TESTS_WORKSPACE) environment variable(s)\n" - "See https://help.launchableinc.com/sending-data-to-launchable/using-the-launchable-cli/getting-started/") + self.organization, self.workspace = ensure_org_workspace() self._workspace_state_cache: Dict[str, str | bool] | None = None def request( diff --git a/src/main/java/com/launchableinc/ingest/commits/CommitIngester.java b/src/main/java/com/launchableinc/ingest/commits/Main.java similarity index 97% rename from src/main/java/com/launchableinc/ingest/commits/CommitIngester.java rename to src/main/java/com/launchableinc/ingest/commits/Main.java index 302b30759..a093edd75 100644 --- a/src/main/java/com/launchableinc/ingest/commits/CommitIngester.java +++ b/src/main/java/com/launchableinc/ingest/commits/Main.java @@ -17,7 +17,7 @@ import org.kohsuke.args4j.Option; /** Driver for {@link CommitGraphCollector}. */ -public class CommitIngester { +public class Main { @Argument(required = true, metaVar = "NAME", usage = "Uniquely identifies this repository within the workspace", index = 0) public String name; @@ -60,10 +60,10 @@ public class CommitIngester { @VisibleForTesting String launchableToken = null; - public CommitIngester() throws CmdLineException, MalformedURLException {} + public Main() throws CmdLineException, MalformedURLException {} public static void main(String[] args) throws Exception { - CommitIngester ingester = new CommitIngester(); + Main ingester = new Main(); CmdLineParser parser = new CmdLineParser(ingester); try { parser.parseArgument(args); diff --git a/src/test/java/com/launchableinc/ingest/commits/AllTests.java b/src/test/java/com/launchableinc/ingest/commits/AllTests.java index ab9b02858..dc1315cab 100644 --- a/src/test/java/com/launchableinc/ingest/commits/AllTests.java +++ b/src/test/java/com/launchableinc/ingest/commits/AllTests.java @@ -7,7 +7,7 @@ @RunWith(Suite.class) @SuiteClasses({ 
CommitGraphCollectorTest.class, - CommitIngesterTest.class, + MainTest.class, FileChunkStreamerTest.class, SSLBypassTest.class, ProgressReportingConsumerTest.class diff --git a/src/test/java/com/launchableinc/ingest/commits/CommitIngesterTest.java b/src/test/java/com/launchableinc/ingest/commits/MainTest.java similarity index 91% rename from src/test/java/com/launchableinc/ingest/commits/CommitIngesterTest.java rename to src/test/java/com/launchableinc/ingest/commits/MainTest.java index fccd4fe2f..b0702bdb1 100644 --- a/src/test/java/com/launchableinc/ingest/commits/CommitIngesterTest.java +++ b/src/test/java/com/launchableinc/ingest/commits/MainTest.java @@ -19,7 +19,7 @@ import org.mockserver.junit.MockServerRule; @RunWith(JUnit4.class) -public class CommitIngesterTest { +public class MainTest { @Rule public TemporaryFolder tmp = new TemporaryFolder(); @Rule public MockServerRule mockServerRule = new MockServerRule(this); private MockServerClient mockServerClient; @@ -63,14 +63,14 @@ public void specifySubmodule() throws Exception { return response().withBody("OK"); }); - CommitIngester commitIngester = new CommitIngester(); + Main main = new Main(); InetSocketAddress addr = mockServerClient.remoteAddress(); // Specify submodule as the repository. JGit cannot open this directly, so the code should open // the main repository first, then open the submodule. 
- commitIngester.repo = new File(mainrepoDir, "sub"); - commitIngester.url = + main.repo = new File(mainrepoDir, "sub"); + main.url = new URL(String.format("http://%s:%s/intake/", addr.getHostString(), addr.getPort())); - commitIngester.launchableToken = "v1:testorg/testws:dummy-token"; - commitIngester.run(); + main.launchableToken = "v1:testorg/testws:dummy-token"; + main.run(); } } diff --git a/tests/data/jasmine/README.md b/tests/data/jasmine/README.md new file mode 100644 index 000000000..a7e67a966 --- /dev/null +++ b/tests/data/jasmine/README.md @@ -0,0 +1,41 @@ +Jasmine Runner Usage +===================== + +If you want to test this runner from scratch, start by creating a new jasmine project: + +``` +% npm install --save-dev jasmine jasmine-json-test-reporter +% npx jasmine init +% npx jasmine example +% git add . && git commit -m "Initial commit" +``` + +Create spec/helpers/jasmine-json-test-reporter.js +``` +var JSONReporter = require('jasmine-json-test-reporter'); +jasmine.getEnv().addReporter(new JSONReporter({ + file: 'jasmine-report.json' +})); +``` + +Record tests +``` +BUILD_NAME=jasmine_build +launchable record build --name ${BUILD_NAME} +launchable record session --build ${BUILD_NAME} > session.txt + +# Write all tests to a file +find spec/jasmine_examples -type f > test_list.txt + +# Run all tests +npx jasmine $(cat test_list.txt) + +launchable record tests --base $(pwd) jasmine jasmine-report.json +``` + +Request subset +``` +cat test_list.txt | launchable subset --target 25% jasmine > subset.txt +npx jasmine $(cat subset.txt) +``` + diff --git a/tests/data/jasmine/jasmine-test-results-v3.99.0.json b/tests/data/jasmine/jasmine-test-results-v3.99.0.json new file mode 100644 index 000000000..af251ead3 --- /dev/null +++ b/tests/data/jasmine/jasmine-test-results-v3.99.0.json @@ -0,0 +1,150 @@ +{ + "suite2": { + "id": "suite2", + "description": "when song has been paused", + "fullName": "Player when song has been paused", + "failedExpectations": [], 
+ "deprecationWarnings": [], + "duration": 2, + "properties": null, + "status": "passed", + "specs": [ + { + "id": "spec0", + "description": "should be able to play a Song", + "fullName": "Player should be able to play a Song", + "failedExpectations": [], + "passedExpectations": [ + { + "matcherName": "toEqual", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toBePlaying", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "deprecationWarnings": [], + "pendingReason": "", + "duration": 0, + "properties": null, + "status": "passed" + }, + { + "id": "spec2", + "description": "should be possible to resume", + "fullName": "Player when song has been paused should be possible to resume", + "failedExpectations": [], + "passedExpectations": [ + { + "matcherName": "toBeTruthy", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toEqual", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "deprecationWarnings": [], + "pendingReason": "", + "duration": 1, + "properties": null, + "status": "passed" + }, + { + "id": "spec1", + "description": "should indicate that the song is currently paused", + "fullName": "Player when song has been paused should indicate that the song is currently paused", + "failedExpectations": [], + "passedExpectations": [ + { + "matcherName": "toBeFalsy", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toBePlaying", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "deprecationWarnings": [], + "pendingReason": "", + "duration": 1, + "properties": null, + "status": "passed" + } + ] + }, + "suite3": { + "id": "suite3", + "description": "#resume", + "fullName": "Player #resume", + "failedExpectations": [], + "deprecationWarnings": [], + "duration": 1, + "properties": null, + "status": "passed", + "specs": [ + { + "id": "spec3", + "description": "tells the current song if the user has made it a favorite", + 
"fullName": "Player tells the current song if the user has made it a favorite", + "failedExpectations": [], + "passedExpectations": [ + { + "matcherName": "toHaveBeenCalledWith", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "deprecationWarnings": [], + "pendingReason": "", + "duration": 1, + "properties": null, + "status": "passed" + }, + { + "id": "spec4", + "description": "should throw an exception if song is already playing", + "fullName": "Player #resume should throw an exception if song is already playing", + "failedExpectations": [], + "passedExpectations": [ + { + "matcherName": "toThrowError", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "deprecationWarnings": [], + "pendingReason": "", + "duration": 0, + "properties": null, + "status": "passed" + } + ] + }, + "suite1": { + "id": "suite1", + "description": "Player", + "fullName": "Player", + "failedExpectations": [], + "deprecationWarnings": [], + "duration": 4, + "properties": null, + "status": "passed", + "specs": [] + } +} diff --git a/tests/data/karma/record_test_result.json b/tests/data/karma/record_test_result.json new file mode 100644 index 000000000..d0972d26d --- /dev/null +++ b/tests/data/karma/record_test_result.json @@ -0,0 +1,189 @@ +{ + "events": [ + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "foo/bar/zot.spec.ts" + }, + { + "type": "testcase", + "name": "foo/bar/zot.spec.ts should feed the monkey" + } + ], + "duration": 0.092, + "status": 1, + "stdout": "", + "stderr": "", + "data": null + }, + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "foo/bar/zot.spec.ts" + }, + { + "type": "testcase", + "name": "foo/bar/zot.spec.ts should fetch the toy" + } + ], + "duration": 0.027, + "status": 1, + "stdout": "", + "stderr": "", + "data": null + }, + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "foo/bar/zot.spec.ts" + }, + { + "type": "testcase", + "name": "foo/bar/zot.spec.ts should fetch 
the toyForRecord - record found" + } + ], + "duration": 0.028, + "status": 1, + "stdout": "", + "stderr": "", + "data": null + }, + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "foo/bar/zot.spec.ts" + }, + { + "type": "testcase", + "name": "foo/bar/zot.spec.ts should fetch the toyForRecord - record not found" + } + ], + "duration": 0.027, + "status": 1, + "stdout": "", + "stderr": "", + "data": null + }, + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "foo/bar/zot.spec.ts" + }, + { + "type": "testcase", + "name": "foo/bar/zot.spec.ts should fetch the toyForRecord - error in response" + } + ], + "duration": 0.033, + "status": 1, + "stdout": "", + "stderr": "", + "data": null + }, + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "foo/bar/zot.spec.ts" + }, + { + "type": "testcase", + "name": "foo/bar/zot.spec.ts should throw a ball" + } + ], + "duration": 0.026, + "status": 1, + "stdout": "", + "stderr": "", + "data": null + }, + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "foo/bar/zot.spec.ts" + }, + { + "type": "testcase", + "name": "foo/bar/zot.spec.ts should be nice to the zookeeper" + } + ], + "duration": 0.024, + "status": 1, + "stdout": "", + "stderr": "", + "data": null + }, + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "foo/bar/zot.spec.ts" + }, + { + "type": "testcase", + "name": "foo/bar/zot.spec.ts should like a banana or two" + } + ], + "duration": 0.024, + "status": 1, + "stdout": "", + "stderr": "", + "data": null + }, + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "foo/bar/zot.spec.ts" + }, + { + "type": "testcase", + "name": "foo/bar/zot.spec.ts should fall back to an apple if banana is not available" + } + ], + "duration": 0.025, + "status": 1, + "stdout": "", + "stderr": "", + "data": null + }, + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "foo/bar/zot.spec.ts" + }, + { + "type": "testcase", 
+ "name": "foo/bar/zot.spec.ts should accept oranges if apple is not available" + } + ], + "duration": 0.032, + "status": 1, + "stdout": "", + "stderr": "", + "data": null + } + ], + "testRunner": "karma", + "group": "", + "noBuild": false, + "flavors": [], + "testSuite": "" +} diff --git a/tests/data/karma/sample-report.json b/tests/data/karma/sample-report.json new file mode 100644 index 000000000..ada7cea12 --- /dev/null +++ b/tests/data/karma/sample-report.json @@ -0,0 +1,360 @@ +{ + "browsers": { + "123456": { + "id": "123456", + "fullName": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) HeadlessChrome/131.0.0.0 Safari/537.36", + "name": "Chrome Headless 131.0.0.0 (Mac OS 10.15.7)", + "state": "DISCONNECTED", + "lastResult": { + "startTime": 1763588893304, + "total": 10, + "success": 10, + "failed": 0, + "skipped": 0, + "totalTime": 358, + "netTime": 338, + "error": true, + "disconnected": false + }, + "disconnectsCount": 0, + "noActivityTimeout": 30000, + "disconnectDelay": 2000 + } + }, + "result": { + "123456": [ + { + "fullName": "foo/bar/zot.spec.ts should feed the monkey", + "description": "should feed the monkey", + "id": "spec0", + "log": [], + "skipped": false, + "disabled": false, + "pending": false, + "success": true, + "suite": [ + "foo/bar/zot.spec.ts" + ], + "time": 92, + "executedExpectationsCount": 1, + "passedExpectations": [ + { + "matcherName": "toBeTruthy", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "properties": null + }, + { + "fullName": "foo/bar/zot.spec.ts should fetch the toy", + "description": "should fetch the toy", + "id": "spec1", + "log": [], + "skipped": false, + "disabled": false, + "pending": false, + "success": true, + "suite": [ + "foo/bar/zot.spec.ts" + ], + "time": 27, + "executedExpectationsCount": 7, + "passedExpectations": [ + { + "matcherName": "toHaveBeenCalled", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": 
"toHaveBeenCalled", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toHaveBeenCalled", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toHaveBeenCalled", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toHaveBeenCalled", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toHaveBeenCalledTimes", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toEqual", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "properties": null + }, + { + "fullName": "foo/bar/zot.spec.ts should fetch the toyForRecord - record found", + "description": "should fetch the toyForRecord - record found", + "id": "spec2", + "log": [], + "skipped": false, + "disabled": false, + "pending": false, + "success": true, + "suite": [ + "foo/bar/zot.spec.ts" + ], + "time": 28, + "executedExpectationsCount": 2, + "passedExpectations": [ + { + "matcherName": "toHaveBeenCalledOnceWith", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toEqual", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "properties": null + }, + { + "fullName": "foo/bar/zot.spec.ts should fetch the toyForRecord - record not found", + "description": "should fetch the toyForRecord - record not found", + "id": "spec3", + "log": [], + "skipped": false, + "disabled": false, + "pending": false, + "success": true, + "suite": [ + "foo/bar/zot.spec.ts" + ], + "time": 27, + "executedExpectationsCount": 2, + "passedExpectations": [ + { + "matcherName": "toHaveBeenCalledOnceWith", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toEqual", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "properties": null + }, + { + "fullName": "foo/bar/zot.spec.ts should fetch the toyForRecord - error in response", + "description": "should fetch the toyForRecord - error in 
response", + "id": "spec4", + "log": [], + "skipped": false, + "disabled": false, + "pending": false, + "success": true, + "suite": [ + "foo/bar/zot.spec.ts" + ], + "time": 33, + "executedExpectationsCount": 2, + "passedExpectations": [ + { + "matcherName": "toHaveBeenCalledOnceWith", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toEqual", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "properties": null + }, + { + "fullName": "foo/bar/zot.spec.ts should throw a ball", + "description": "should throw a ball", + "id": "spec5", + "log": [], + "skipped": false, + "disabled": false, + "pending": false, + "success": true, + "suite": [ + "foo/bar/zot.spec.ts" + ], + "time": 26, + "executedExpectationsCount": 2, + "passedExpectations": [ + { + "matcherName": "toEqual", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toHaveBeenCalledOnceWith", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "properties": null + }, + { + "fullName": "foo/bar/zot.spec.ts should be nice to the zookeeper", + "description": "should be nice to the zookeeper", + "id": "spec6", + "log": [], + "skipped": false, + "disabled": false, + "pending": false, + "success": true, + "suite": [ + "foo/bar/zot.spec.ts" + ], + "time": 24, + "executedExpectationsCount": 2, + "passedExpectations": [ + { + "matcherName": "toEqual", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toHaveBeenCalledOnceWith", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "properties": null + }, + { + "fullName": "foo/bar/zot.spec.ts should like a banana or two", + "description": "should like a banana or two", + "id": "spec7", + "log": [], + "skipped": false, + "disabled": false, + "pending": false, + "success": true, + "suite": [ + "foo/bar/zot.spec.ts" + ], + "time": 24, + "executedExpectationsCount": 2, + "passedExpectations": [ + { + "matcherName": "toEqual", + 
"message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toHaveBeenCalledOnceWith", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "properties": null + }, + { + "fullName": "foo/bar/zot.spec.ts should fall back to an apple if banana is not available", + "description": "should fall back to an apple if banana is not available", + "id": "spec8", + "log": [], + "skipped": false, + "disabled": false, + "pending": false, + "success": true, + "suite": [ + "foo/bar/zot.spec.ts" + ], + "time": 25, + "executedExpectationsCount": 2, + "passedExpectations": [ + { + "matcherName": "toBeNull", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toHaveBeenCalledOnceWith", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "properties": null + }, + { + "fullName": "foo/bar/zot.spec.ts should accept oranges if apple is not available", + "description": "should accept oranges if apple is not available", + "id": "spec9", + "log": [], + "skipped": false, + "disabled": false, + "pending": false, + "success": true, + "suite": [ + "foo/bar/zot.spec.ts" + ], + "time": 32, + "executedExpectationsCount": 2, + "passedExpectations": [ + { + "matcherName": "toEqual", + "message": "Passed.", + "stack": "", + "passed": true + }, + { + "matcherName": "toHaveBeenCalledOnceWith", + "message": "Passed.", + "stack": "", + "passed": true + } + ], + "properties": null + } + ] + }, + "summary": { + "success": 10, + "failed": 0, + "skipped": 0, + "error": true, + "disconnected": false, + "exitCode": 1 + } +} diff --git a/tests/data/karma/subset_result.json b/tests/data/karma/subset_result.json new file mode 100644 index 000000000..d5642af31 --- /dev/null +++ b/tests/data/karma/subset_result.json @@ -0,0 +1,22 @@ +{ + "testPaths": [ + [ + { + "type": "file", + "name": "a.ts" + } + ], + [ + { + "type": "file", + "name": "b.ts" + } + ] + ], + "testRunner": "karma", + "session": { "id": "16" }, + "goal": {"type": 
"subset-by-percentage", "percentage": 0.1}, + "ignoreNewTests": false, + "getTestsFromGuess": false, + "getTestsFromPreviousSessions": false +} diff --git a/tests/data/ng/subset_payload.json b/tests/data/ng/subset_payload.json new file mode 100644 index 000000000..d0b15b816 --- /dev/null +++ b/tests/data/ng/subset_payload.json @@ -0,0 +1,16 @@ +{ + "testPaths": [ + [ + { "type": "file", "name": "foo/bar/zot.spec.ts" } + ], + [ + { "type": "file", "name": "client-source/src/app/shared/other-test.spec.ts" } + ] + ], + "testRunner": "ng", + "goal": {"type": "subset-by-percentage", "percentage": 0.1}, + "ignoreNewTests": false, + "session": { "id": "16" }, + "getTestsFromGuess": false, + "getTestsFromPreviousSessions": false +} diff --git a/tests/test_runners/test_jasmine.py b/tests/test_runners/test_jasmine.py index 6122f7171..49f18db7a 100644 --- a/tests/test_runners/test_jasmine.py +++ b/tests/test_runners/test_jasmine.py @@ -16,6 +16,18 @@ def test_record_test_json(self): self.assert_success(result) self.assert_record_tests_payload('record_test_result.json') + @responses.activate + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) + def test_record_tests_without_filename(self): + result = self.cli('record', 'tests', '--session', self.session, + 'jasmine', str(self.test_files_dir.joinpath("jasmine-test-results-v3.99.0.json"))) + + self.assertIn( + "does not appear to be valid format. 
" + "Make sure you are using Jasmine >= v4.6.0 and jasmine-json-test-reporter as the reporter.", + result.output + ) + @responses.activate @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): diff --git a/tests/test_runners/test_karma.py b/tests/test_runners/test_karma.py new file mode 100644 index 000000000..c237c835d --- /dev/null +++ b/tests/test_runners/test_karma.py @@ -0,0 +1,26 @@ +import os +from unittest import mock + +import responses # type: ignore + +from tests.cli_test_case import CliTestCase + + +class KarmaTest(CliTestCase): + @responses.activate + @mock.patch.dict(os.environ, + {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) + def test_record_tests_json(self): + result = self.cli('record', 'tests', '--session', self.session, + 'karma', str(self.test_files_dir.joinpath("sample-report.json"))) + + self.assert_success(result) + self.assert_record_tests_payload('record_test_result.json') + + @responses.activate + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) + def test_subset(self): + result = self.cli('subset', 'karma', '--session', self.session, '--target', '10%', '--base', + os.getcwd(), '--with', 'ng', input="a.ts\nb.ts") + self.assert_success(result) + self.assert_subset_payload('subset_result.json') diff --git a/tests/test_runners/test_ng.py b/tests/test_runners/test_ng.py new file mode 100644 index 000000000..18b0838b2 --- /dev/null +++ b/tests/test_runners/test_ng.py @@ -0,0 +1,18 @@ +import os +from unittest import mock + +import responses # type: ignore + +from tests.cli_test_case import CliTestCase + + +class NgTest(CliTestCase): + @responses.activate + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) + def test_subset(self): + subset_input = """foo/bar/zot.spec.ts +client-source/src/app/shared/other-test.spec.ts +""" + result = self.cli('subset', 'ng', '--session', self.session, '--target', '10%', 
input=subset_input) + self.assert_success(result) + self.assert_subset_payload('subset_payload.json') diff --git a/tests/utils/test_authentication.py b/tests/utils/test_authentication.py index a060d976d..f7923580a 100644 --- a/tests/utils/test_authentication.py +++ b/tests/utils/test_authentication.py @@ -11,12 +11,6 @@ def test_get_org_workspace_no_environment_variables(self): self.assertIsNone(org) self.assertIsNone(workspace) - @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": "invalid"}) - def test_get_org_workspace_invalid_SMART_TESTS_TOKEN(self): - org, workspace = get_org_workspace() - self.assertIsNone(org) - self.assertIsNone(workspace) - @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": "v1:launchableinc/test:token"}) def test_get_org_workspace_valid_SMART_TESTS_TOKEN(self):