From a362c88d54fd67ab5dd8c6be27618f8a700a3376 Mon Sep 17 00:00:00 2001 From: Onsi Fakhouri Date: Sat, 21 Apr 2018 19:55:00 -0400 Subject: [PATCH] Add -showDebugAddress to enable debugging hanging parallel tests --- CHANGELOG.md | 3 + ginkgo/build_command.go | 2 +- ginkgo/run_command.go | 2 +- ginkgo/run_watch_and_build_command_flags.go | 16 +- ginkgo/testrunner/test_runner.go | 49 ++-- ginkgo/testrunner/test_runner_test.go | 2 +- ginkgo/watch_command.go | 2 +- ginkgo_dsl.go | 2 +- .../slow_parallel_suite_test.go | 13 + .../slow_parallel_test/slow_parallel_test.go | 16 ++ integration/run_test.go | 45 ++- internal/remote/aggregator.go | 227 ++++++++------- internal/remote/aggregator_test.go | 8 +- .../remote/fake_output_interceptor_test.go | 6 + internal/remote/forwarding_reporter.go | 37 ++- internal/remote/forwarding_reporter_test.go | 2 +- internal/remote/output_interceptor.go | 1 + internal/remote/output_interceptor_unix.go | 9 + internal/remote/output_interceptor_win.go | 8 +- internal/remote/server.go | 64 +++-- internal/remote/server_test.go | 269 ------------------ types/types.go | 9 + 22 files changed, 364 insertions(+), 428 deletions(-) create mode 100644 integration/_fixtures/slow_parallel_test/slow_parallel_suite_test.go create mode 100644 integration/_fixtures/slow_parallel_test/slow_parallel_test.go delete mode 100644 internal/remote/server_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index b79232f6f..4112793ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,8 @@ ## HEAD +- Running parallel tests with `-showDebugAddress` now prints out a Debug URL when the test begins. http GETting from this URL returns a JSON payload that describes the currently running spec for each parallel node. This payload will include any output the spec has emitted (either to stdout or the GinkgoWriter). + +The intent behind `-showDebugAddress` is to enable users to debug parallel test suites that are hanging. We recommend enabling `-showDebugAddress` in your CI setup. - When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their customer reporters. [#365] - When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. 
This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) [#446] diff --git a/ginkgo/build_command.go b/ginkgo/build_command.go index f0eb375c3..0d70e09a6 100644 --- a/ginkgo/build_command.go +++ b/ginkgo/build_command.go @@ -46,7 +46,7 @@ func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) { passed := true for _, suite := range suites { - runner := testrunner.New(suite, 1, false, 0, r.commandFlags.GoOpts, nil) + runner := testrunner.New(suite, 1, false, false, 0, r.commandFlags.GoOpts, nil) fmt.Printf("Compiling %s...\n", suite.PackageName) path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName))) diff --git a/ginkgo/run_command.go b/ginkgo/run_command.go index 569b6a29c..a532ff394 100644 --- a/ginkgo/run_command.go +++ b/ginkgo/run_command.go @@ -75,7 +75,7 @@ func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { runners := []*testrunner.TestRunner{} for _, suite := range suites { - runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Timeout, r.commandFlags.GoOpts, additionalArgs)) + runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.ShowParallelDebugAddress, r.commandFlags.Timeout, r.commandFlags.GoOpts, additionalArgs)) } numSuites := 0 diff --git a/ginkgo/run_watch_and_build_command_flags.go b/ginkgo/run_watch_and_build_command_flags.go index b7cb7f566..4d58d07d1 100644 --- a/ginkgo/run_watch_and_build_command_flags.go +++ b/ginkgo/run_watch_and_build_command_flags.go @@ -15,13 +15,14 @@ type RunWatchAndBuildCommandFlags struct { GoOpts map[string]interface{} //for run and watch commands - NumCPU int - NumCompilers int - ParallelStream bool - Notify bool - AfterSuiteHook string - AutoNodes bool - Timeout time.Duration + NumCPU int + NumCompilers int + ParallelStream bool + ShowParallelDebugAddress bool + Notify bool + AfterSuiteHook string + AutoNodes bool + Timeout time.Duration //only for run command KeepGoing bool @@ -147,6 +148,7 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) { c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)") c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes") c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging") + c.FlagSet.BoolVar(&(c.ShowParallelDebugAddress), "showDebugAddress", false, "show parallel test debug address") if !onWindows { c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes") } diff --git a/ginkgo/testrunner/test_runner.go b/ginkgo/testrunner/test_runner.go index e12913668..7cf77b9a7 100644 --- a/ginkgo/testrunner/test_runner.go +++ b/ginkgo/testrunner/test_runner.go @@ -26,25 +26,27 @@ type TestRunner struct { compiled bool compilationTargetPath string - numCPU int - parallelStream bool - timeout time.Duration - goOpts map[string]interface{} - additionalArgs []string - stderr *bytes.Buffer + numCPU int + parallelStream bool + timeout time.Duration + goOpts map[string]interface{} + additionalArgs []string + stderr *bytes.Buffer + showDebugAddress bool CoverageFile string } -func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, timeout time.Duration, goOpts map[string]interface{}, additionalArgs []string) *TestRunner { +func New(suite 
testsuite.TestSuite, numCPU int, parallelStream bool, showDebugAddress bool, timeout time.Duration, goOpts map[string]interface{}, additionalArgs []string) *TestRunner { runner := &TestRunner{ - Suite: suite, - numCPU: numCPU, - parallelStream: parallelStream, - goOpts: goOpts, - additionalArgs: additionalArgs, - timeout: timeout, - stderr: new(bytes.Buffer), + Suite: suite, + numCPU: numCPU, + parallelStream: parallelStream, + showDebugAddress: showDebugAddress, + goOpts: goOpts, + additionalArgs: additionalArgs, + timeout: timeout, + stderr: new(bytes.Buffer), } if !suite.Precompiled { @@ -259,7 +261,7 @@ func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult { completions := make(chan RunResult) writers := make([]*logWriter, t.numCPU) - server, err := remote.NewServer(t.numCPU) + server, err := remote.NewServer(t.numCPU, nil) if err != nil { panic("Failed to start parallel spec server") } @@ -308,7 +310,7 @@ func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult { } func (t *TestRunner) runParallelGinkgoSuite() RunResult { - result := make(chan bool) + result := make(chan bool, 1) completions := make(chan RunResult) writers := make([]*logWriter, t.numCPU) reports := make([]*bytes.Buffer, t.numCPU) @@ -316,11 +318,15 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult { stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1) aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer) - server, err := remote.NewServer(t.numCPU) + server, err := remote.NewServer(t.numCPU, aggregator) if err != nil { panic("Failed to start parallel spec server") } - server.RegisterReporters(aggregator) + + if t.showDebugAddress { + fmt.Println(t.Suite.PackageName, "Debug URL: ", server.Address()+"/debug") + } + server.Start() defer server.Close() @@ -344,6 +350,13 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult { return !cmd.ProcessState.Exited() }) + server.RegisterSignalDebug(cpu+1, func() { + if cmd.Process == nil { + return + } + cmd.Process.Signal(syscall.SIGUSR1) + }) + go t.run(cmd, completions) } diff --git a/ginkgo/testrunner/test_runner_test.go b/ginkgo/testrunner/test_runner_test.go index b6f556770..d0bd6b293 100644 --- a/ginkgo/testrunner/test_runner_test.go +++ b/ginkgo/testrunner/test_runner_test.go @@ -33,7 +33,7 @@ var _ = Describe("TestRunner", func() { "cover": boolAddr(false), "blockprofilerate": intAddr(100), } - tr := testrunner.New(testsuite.TestSuite{}, 1, false, 0, opts, []string{}) + tr := testrunner.New(testsuite.TestSuite{}, 1, false, false, 0, opts, []string{}) args := tr.BuildArgs(".") Ω(args).Should(Equal([]string{ diff --git a/ginkgo/watch_command.go b/ginkgo/watch_command.go index a6ef053c8..4d81316f6 100644 --- a/ginkgo/watch_command.go +++ b/ginkgo/watch_command.go @@ -59,7 +59,7 @@ func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalA runners := []*testrunner.TestRunner{} for _, suite := range suites { - runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Timeout, w.commandFlags.GoOpts, additionalArgs)) + runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.ShowParallelDebugAddress, w.commandFlags.Timeout, w.commandFlags.GoOpts, additionalArgs)) } return runners diff --git a/ginkgo_dsl.go b/ginkgo_dsl.go index ea5b7ccde..76375e797 100644 --- a/ginkgo_dsl.go +++ b/ginkgo_dsl.go @@ -231,7 
+231,7 @@ func buildDefaultReporter() Reporter { stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1) return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer) } else { - return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor()) + return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), config.GinkgoConfig.ParallelNode, GinkgoWriter.(*writer.Writer)) } } diff --git a/integration/_fixtures/slow_parallel_test/slow_parallel_suite_test.go b/integration/_fixtures/slow_parallel_test/slow_parallel_suite_test.go new file mode 100644 index 000000000..e90a55e22 --- /dev/null +++ b/integration/_fixtures/slow_parallel_test/slow_parallel_suite_test.go @@ -0,0 +1,13 @@ +package slow_parallel_suite_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestHangingSuite(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "SlowParallelSuite Suite") +} diff --git a/integration/_fixtures/slow_parallel_test/slow_parallel_test.go b/integration/_fixtures/slow_parallel_test/slow_parallel_test.go new file mode 100644 index 000000000..729b74198 --- /dev/null +++ b/integration/_fixtures/slow_parallel_test/slow_parallel_test.go @@ -0,0 +1,16 @@ +package slow_parallel_suite_test + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo" +) + +var _ = Describe("SlowParallelSuite", func() { + It("should hang out for a while", func() { + fmt.Fprintln(GinkgoWriter, "Slow Test is Hanging Out") + fmt.Println("Slow Test is Sleeping...") + time.Sleep(5 * time.Second) + }) +}) diff --git a/integration/run_test.go b/integration/run_test.go index 871ba7ecb..a3b7582c2 100644 --- a/integration/run_test.go +++ b/integration/run_test.go @@ -2,6 +2,8 @@ package integration_test import ( "fmt" + "io/ioutil" + "net/http" "os" "regexp" "runtime" @@ -257,12 +259,12 @@ var _ = Describe("Running Specs", func() { }) Context("when running in parallel", func() { - BeforeEach(func() { - pathToTest = tmpPath("ginkgo") - copyIn("passing_ginkgo_tests", pathToTest) - }) - Context("with a specific number of -nodes", func() { + BeforeEach(func() { + pathToTest = tmpPath("ginkgo") + copyIn("passing_ginkgo_tests", pathToTest) + }) + It("should use the specified number of nodes", func() { session := startGinkgo(pathToTest, "--noColor", "-succinct", "-nodes=2") Eventually(session).Should(gexec.Exit(0)) @@ -274,6 +276,11 @@ var _ = Describe("Running Specs", func() { }) Context("with -p", func() { + BeforeEach(func() { + pathToTest = tmpPath("ginkgo") + copyIn("passing_ginkgo_tests", pathToTest) + }) + It("it should autocompute the number of nodes", func() { session := startGinkgo(pathToTest, "--noColor", "-succinct", "-p") Eventually(session).Should(gexec.Exit(0)) @@ -290,6 +297,34 @@ var _ = Describe("Running Specs", func() { Ω(output).Should(ContainSubstring("Test Suite Passed")) }) }) + + Context("with -showDebugAddress", func() { + BeforeEach(func() { + pathToTest = tmpPath("slow") + copyIn("slow_parallel_test", pathToTest) + }) + + It("enables the user to debug the suite", func() { + session := startGinkgo(pathToTest, "--noColor", "-nodes=2", "-showDebugAddress") + Eventually(session).Should(gbytes.Say("Debug URL:")) + re := regexp.MustCompile(`Debug URL:\s+(http://127.0.0.1:\d*/debug)`) + url := re.FindStringSubmatch(string(session.Out.Contents()))[1] + + getDebugInfo := func() string { + resp, err := http.Get(url) + 
Ω(err).ShouldNot(HaveOccurred()) + content, err := ioutil.ReadAll(resp.Body) + Ω(err).ShouldNot(HaveOccurred()) + resp.Body.Close() + return string(content) + } + + Eventually(getDebugInfo).Should(ContainSubstring("Slow Test is Hanging Out"), "Debug endpoint captures GinkgoWriter output while the test is running") + Eventually(getDebugInfo).Should(ContainSubstring("Slow Test is Sleeping..."), "Debug endpoint captures output while the test is running") + + Eventually(session).Should(gexec.Exit(0)) + }) + }) }) Context("when streaming in parallel", func() { diff --git a/internal/remote/aggregator.go b/internal/remote/aggregator.go index 6b54afe01..e6c985fb5 100644 --- a/internal/remote/aggregator.go +++ b/internal/remote/aggregator.go @@ -10,6 +10,8 @@ where N is the number of nodes you desire. package remote import ( + "encoding/json" + "sync" "time" "github.com/onsi/ginkgo/config" @@ -22,27 +24,53 @@ type configAndSuite struct { summary *types.SuiteSummary } +type currentRunningSpec struct { + Spec *types.SpecSummary `json:"spec"` + StartTime time.Time `json:"startTime"` + CapturedOutput string `json:"output"` + GinkgoWriterOutput string `json:"ginkgoWriterOutput"` +} + +type nodeStatus struct { + Node int `json:"node"` + IsRunning bool `json:"isRunning"` + StartTime time.Time `json:"startTime"` + Spec *currentRunningSpec `json:"spec"` +} + +func (node *nodeStatus) startSpec(specSummary *types.SpecSummary) { + node.Spec = &currentRunningSpec{ + Spec: specSummary, + StartTime: time.Now(), + } +} + +func (node *nodeStatus) updateSpecDebugOutput(debugOutput *types.ParallelSpecDebugOutput) { + if node.Spec != nil { + node.Spec.CapturedOutput = debugOutput.CapturedOutput + node.Spec.GinkgoWriterOutput = debugOutput.GinkgoWriterOutput + } +} + +func (node *nodeStatus) lastSpecIsDone() { + node.Spec = nil +} + type Aggregator struct { nodeCount int config config.DefaultReporterConfigType stenographer stenographer.Stenographer result chan bool - suiteBeginnings chan configAndSuite aggregatedSuiteBeginnings []configAndSuite + aggregatedBeforeSuites []*types.SetupSummary + aggregatedAfterSuites []*types.SetupSummary + completedSpecs []*types.SpecSummary + aggregatedSuiteEndings []*types.SuiteSummary + specs []*types.SpecSummary + nodeStatus map[int]*nodeStatus - beforeSuites chan *types.SetupSummary - aggregatedBeforeSuites []*types.SetupSummary - - afterSuites chan *types.SetupSummary - aggregatedAfterSuites []*types.SetupSummary - - specCompletions chan *types.SpecSummary - completedSpecs []*types.SpecSummary - - suiteEndings chan *types.SuiteSummary - aggregatedSuiteEndings []*types.SuiteSummary - specs []*types.SpecSummary + lock *sync.Mutex startTime time.Time } @@ -54,66 +82,26 @@ func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporte config: config, stenographer: stenographer, - suiteBeginnings: make(chan configAndSuite, 0), - beforeSuites: make(chan *types.SetupSummary, 0), - afterSuites: make(chan *types.SetupSummary, 0), - specCompletions: make(chan *types.SpecSummary, 0), - suiteEndings: make(chan *types.SuiteSummary, 0), + nodeStatus: map[int]*nodeStatus{}, + lock: &sync.Mutex{}, } - go aggregator.mux() - return aggregator } func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { - aggregator.suiteBeginnings <- configAndSuite{config, summary} -} - -func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - aggregator.beforeSuites <- setupSummary -} - -func (aggregator *Aggregator)
AfterSuiteDidRun(setupSummary *types.SetupSummary) { - aggregator.afterSuites <- setupSummary -} + aggregator.lock.Lock() + defer aggregator.lock.Unlock() -func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) { - //noop -} - -func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) { - aggregator.specCompletions <- specSummary -} - -func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) { - aggregator.suiteEndings <- summary -} - -func (aggregator *Aggregator) mux() { -loop: - for { - select { - case configAndSuite := <-aggregator.suiteBeginnings: - aggregator.registerSuiteBeginning(configAndSuite) - case setupSummary := <-aggregator.beforeSuites: - aggregator.registerBeforeSuite(setupSummary) - case setupSummary := <-aggregator.afterSuites: - aggregator.registerAfterSuite(setupSummary) - case specSummary := <-aggregator.specCompletions: - aggregator.registerSpecCompletion(specSummary) - case suite := <-aggregator.suiteEndings: - finished, passed := aggregator.registerSuiteEnding(suite) - if finished { - aggregator.result <- passed - break loop - } - } + aggregator.nodeStatus[summary.GinkgoNode] = &nodeStatus{ + Node: summary.GinkgoNode, + IsRunning: true, + StartTime: time.Now(), } -} -func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) { - aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite) + cs := configAndSuite{config, summary} + + aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, cs) if len(aggregator.aggregatedSuiteBeginnings) == 1 { aggregator.startTime = time.Now() @@ -123,34 +111,104 @@ func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSui return } - aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct) + aggregator.stenographer.AnnounceSuite(cs.summary.SuiteDescription, cs.config.RandomSeed, cs.config.RandomizeAllSpecs, aggregator.config.Succinct) totalNumberOfSpecs := 0 if len(aggregator.aggregatedSuiteBeginnings) > 0 { - totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization + totalNumberOfSpecs = cs.summary.NumberOfSpecsBeforeParallelization } aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct) aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct) aggregator.flushCompletedSpecs() + } -func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) { +func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + aggregator.lock.Lock() + defer aggregator.lock.Unlock() + aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary) aggregator.flushCompletedSpecs() } -func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) { +func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + aggregator.lock.Lock() + defer aggregator.lock.Unlock() + aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary) aggregator.flushCompletedSpecs() } -func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) { +func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) { + aggregator.lock.Lock() + defer aggregator.lock.Unlock() + + 
aggregator.nodeStatus[specSummary.GinkgoNode].startSpec(specSummary) +} + +func (aggregator *Aggregator) UpdateSpecDebugOutput(debugOutput *types.ParallelSpecDebugOutput) { + aggregator.lock.Lock() + defer aggregator.lock.Unlock() + + aggregator.nodeStatus[debugOutput.GinkgoNode].updateSpecDebugOutput(debugOutput) +} + +func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) { + aggregator.lock.Lock() + defer aggregator.lock.Unlock() + + aggregator.nodeStatus[specSummary.GinkgoNode].lastSpecIsDone() + aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary) aggregator.specs = append(aggregator.specs, specSummary) aggregator.flushCompletedSpecs() } +func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) { + aggregator.lock.Lock() + defer aggregator.lock.Unlock() + + aggregator.nodeStatus[summary.GinkgoNode].IsRunning = false + + aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, summary) + if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount { + return + } + + aggregatedSuiteSummary := &types.SuiteSummary{} + aggregatedSuiteSummary.SuiteSucceeded = true + + for _, suiteSummary := range aggregator.aggregatedSuiteEndings { + if suiteSummary.SuiteSucceeded == false { + aggregatedSuiteSummary.SuiteSucceeded = false + } + + aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun + aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs + aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs + aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs + aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs + aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs + aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs + } + + aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime) + + aggregator.stenographer.SummarizeFailures(aggregator.specs) + aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct) + + aggregator.result <- aggregatedSuiteSummary.SuiteSucceeded +} + +func (aggregator *Aggregator) DebugReport() []byte { + aggregator.lock.Lock() + defer aggregator.lock.Unlock() + + encoded, _ := json.Marshal(aggregator.nodeStatus) + return encoded +} + func (aggregator *Aggregator) flushCompletedSpecs() { if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { return @@ -216,34 +274,3 @@ func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) { aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) } } - -func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) { - aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite) - if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount { - return false, false - } - - aggregatedSuiteSummary := &types.SuiteSummary{} - aggregatedSuiteSummary.SuiteSucceeded = true - - for _, suiteSummary := range aggregator.aggregatedSuiteEndings { - if suiteSummary.SuiteSucceeded == false { - aggregatedSuiteSummary.SuiteSucceeded = false - } - - aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun - aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs - aggregatedSuiteSummary.NumberOfPassedSpecs += 
suiteSummary.NumberOfPassedSpecs - aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs - aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs - aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs - aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs - } - - aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime) - - aggregator.stenographer.SummarizeFailures(aggregator.specs) - aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct) - - return true, aggregatedSuiteSummary.SuiteSucceeded -} diff --git a/internal/remote/aggregator_test.go b/internal/remote/aggregator_test.go index aedf93927..0a436b1ab 100644 --- a/internal/remote/aggregator_test.go +++ b/internal/remote/aggregator_test.go @@ -66,6 +66,7 @@ var _ = Describe("Aggregator", func() { suiteSummary1 = &types.SuiteSummary{ SuiteDescription: suiteDescription, + GinkgoNode: 1, NumberOfSpecsBeforeParallelization: 30, NumberOfTotalSpecs: 17, @@ -76,6 +77,7 @@ var _ = Describe("Aggregator", func() { suiteSummary2 = &types.SuiteSummary{ SuiteDescription: suiteDescription, + GinkgoNode: 2, NumberOfSpecsBeforeParallelization: 30, NumberOfTotalSpecs: 13, @@ -97,6 +99,7 @@ var _ = Describe("Aggregator", func() { specSummary = &types.SpecSummary{ State: types.SpecStatePassed, CapturedOutput: "SpecOutput", + GinkgoNode: 1, } }) @@ -145,8 +148,10 @@ var _ = Describe("Aggregator", func() { Describe("Announcing specs and before suites", func() { Context("when the parallel-suites have not all started", func() { BeforeEach(func() { + aggregator.SpecSuiteWillBegin(ginkgoConfig1, suiteSummary1) aggregator.BeforeSuiteDidRun(beforeSummary) aggregator.AfterSuiteDidRun(afterSummary) + aggregator.SpecWillRun(specSummary) aggregator.SpecDidComplete(specSummary) }) @@ -156,7 +161,7 @@ var _ = Describe("Aggregator", func() { Context("when the parallel-suites subsequently start", func() { BeforeEach(func() { - beginSuite() + aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2) }) It("should announce the specs, the before suites and the after suites", func() { @@ -179,6 +184,7 @@ var _ = Describe("Aggregator", func() { Context("When a spec completes", func() { BeforeEach(func() { aggregator.BeforeSuiteDidRun(beforeSummary) + aggregator.SpecWillRun(specSummary) aggregator.SpecDidComplete(specSummary) aggregator.AfterSuiteDidRun(afterSummary) Eventually(func() interface{} { diff --git a/internal/remote/fake_output_interceptor_test.go b/internal/remote/fake_output_interceptor_test.go index a928f93d3..27dca942d 100644 --- a/internal/remote/fake_output_interceptor_test.go +++ b/internal/remote/fake_output_interceptor_test.go @@ -4,6 +4,7 @@ type fakeOutputInterceptor struct { DidStartInterceptingOutput bool DidStopInterceptingOutput bool InterceptedOutput string + ReturnedOutput bool } func (interceptor *fakeOutputInterceptor) StartInterceptingOutput() error { @@ -15,3 +16,8 @@ func (interceptor *fakeOutputInterceptor) StopInterceptingAndReturnOutput() (str interceptor.DidStopInterceptingOutput = true return interceptor.InterceptedOutput, nil } + +func (interceptor *fakeOutputInterceptor) Output() (string, error) { + interceptor.ReturnedOutput = true + return interceptor.InterceptedOutput, nil +} diff --git a/internal/remote/forwarding_reporter.go b/internal/remote/forwarding_reporter.go index 025eb5064..16d6535cc 100644 --- a/internal/remote/forwarding_reporter.go +++ 
b/internal/remote/forwarding_reporter.go @@ -5,6 +5,11 @@ import ( "encoding/json" "io" "net/http" + "os" + "os/signal" + "syscall" + + "github.com/onsi/ginkgo/internal/writer" "github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/types" @@ -30,14 +35,36 @@ type ForwardingReporter struct { serverHost string poster Poster outputInterceptor OutputInterceptor + ginkgoNode int } -func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor) *ForwardingReporter { - return &ForwardingReporter{ +func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoNode int, ginkgoWriter *writer.Writer) *ForwardingReporter { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGUSR1) + + reporter := &ForwardingReporter{ serverHost: serverHost, poster: poster, outputInterceptor: outputInterceptor, + ginkgoNode: ginkgoNode, } + + go func() { + for { + <-c + capturedOutput, _ := outputInterceptor.Output() + + debugOutput := types.ParallelSpecDebugOutput{ + GinkgoNode: ginkgoNode, + CapturedOutput: capturedOutput, + GinkgoWriterOutput: string(ginkgoWriter.Bytes()), + } + + reporter.post("/UpdateSpecDebugOutput", debugOutput) + } + }() + + return reporter } func (reporter *ForwardingReporter) post(path string, data interface{}) { @@ -47,6 +74,7 @@ func (reporter *ForwardingReporter) post(path string, data interface{}) { } func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) { + summary.GinkgoNode = reporter.ginkgoNode data := struct { Config config.GinkgoConfigType `json:"config"` Summary *types.SuiteSummary `json:"suite-summary"` @@ -62,17 +90,20 @@ func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigT func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() reporter.outputInterceptor.StartInterceptingOutput() + setupSummary.GinkgoNode = reporter.ginkgoNode setupSummary.CapturedOutput = output reporter.post("/BeforeSuiteDidRun", setupSummary) } func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) { + specSummary.GinkgoNode = reporter.ginkgoNode reporter.post("/SpecWillRun", specSummary) } func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) { output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() reporter.outputInterceptor.StartInterceptingOutput() + specSummary.GinkgoNode = reporter.ginkgoNode specSummary.CapturedOutput = output reporter.post("/SpecDidComplete", specSummary) } @@ -80,11 +111,13 @@ func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSumma func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() reporter.outputInterceptor.StartInterceptingOutput() + setupSummary.GinkgoNode = reporter.ginkgoNode setupSummary.CapturedOutput = output reporter.post("/AfterSuiteDidRun", setupSummary) } func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { reporter.outputInterceptor.StopInterceptingAndReturnOutput() + summary.GinkgoNode = reporter.ginkgoNode reporter.post("/SpecSuiteDidEnd", summary) } diff --git a/internal/remote/forwarding_reporter_test.go b/internal/remote/forwarding_reporter_test.go index be7768e70..5eaa91134 100644 --- a/internal/remote/forwarding_reporter_test.go +++ 
b/internal/remote/forwarding_reporter_test.go @@ -30,7 +30,7 @@ var _ = Describe("ForwardingReporter", func() { InterceptedOutput: "The intercepted output!", } - reporter = NewForwardingReporter(serverHost, poster, interceptor) + reporter = NewForwardingReporter(serverHost, poster, interceptor, 2, nil) suiteSummary = &types.SuiteSummary{ SuiteDescription: "My Test Suite", diff --git a/internal/remote/output_interceptor.go b/internal/remote/output_interceptor.go index 093f4513c..a140a99a7 100644 --- a/internal/remote/output_interceptor.go +++ b/internal/remote/output_interceptor.go @@ -7,4 +7,5 @@ intercept and capture all stdin and stderr output during a test run. type OutputInterceptor interface { StartInterceptingOutput() error StopInterceptingAndReturnOutput() (string, error) + Output() (string, error) } diff --git a/internal/remote/output_interceptor_unix.go b/internal/remote/output_interceptor_unix.go index 980065da5..adb559a17 100644 --- a/internal/remote/output_interceptor_unix.go +++ b/internal/remote/output_interceptor_unix.go @@ -40,6 +40,15 @@ func (interceptor *outputInterceptor) StartInterceptingOutput() error { return nil } +func (interceptor *outputInterceptor) Output() (string, error) { + if !interceptor.intercepting { + return "", errors.New("Not intercepting output!") + } + + output, err := ioutil.ReadFile(interceptor.redirectFile.Name()) + return string(output), err +} + func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { if !interceptor.intercepting { return "", errors.New("Not intercepting output!") diff --git a/internal/remote/output_interceptor_win.go b/internal/remote/output_interceptor_win.go index c8f97d97f..bf718d17c 100644 --- a/internal/remote/output_interceptor_win.go +++ b/internal/remote/output_interceptor_win.go @@ -2,9 +2,7 @@ package remote -import ( - "errors" -) +import "errors" func NewOutputInterceptor() OutputInterceptor { return &outputInterceptor{} @@ -31,3 +29,7 @@ func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, return "", nil } + +func (interceptor *outputInterceptor) Output() (string, error) { + return "", nil +} diff --git a/internal/remote/server.go b/internal/remote/server.go index 367c54daf..c647a1844 100644 --- a/internal/remote/server.go +++ b/internal/remote/server.go @@ -13,11 +13,11 @@ import ( "net" "net/http" "sync" + "time" "github.com/onsi/ginkgo/internal/spec_iterator" "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/reporters" "github.com/onsi/ginkgo/types" ) @@ -27,8 +27,9 @@ It then forwards that communication to attached reporters. 
*/ type Server struct { listener net.Listener - reporters []reporters.Reporter + aggregator *Aggregator alives []func() bool + signalDebug []func() lock *sync.Mutex beforeSuiteData types.RemoteBeforeSuiteData parallelTotal int @@ -36,15 +37,17 @@ type Server struct { } //Create a new server, automatically selecting a port -func NewServer(parallelTotal int) (*Server, error) { +func NewServer(parallelTotal int, aggregator *Aggregator) (*Server, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return nil, err } return &Server{ + aggregator: aggregator, listener: listener, lock: &sync.Mutex{}, alives: make([]func() bool, parallelTotal), + signalDebug: make([]func(), parallelTotal), beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}, parallelTotal: parallelTotal, }, nil @@ -57,10 +60,12 @@ func (server *Server) Start() { httpServer.Handler = mux //streaming endpoints + mux.HandleFunc("/debug", server.debug) mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin) mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun) mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun) mux.HandleFunc("/SpecWillRun", server.specWillRun) + mux.HandleFunc("/UpdateSpecDebugOutput", server.updateSpecDebugOutput) mux.HandleFunc("/SpecDidComplete", server.specDidComplete) mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd) @@ -94,8 +99,23 @@ func (server *Server) readAll(request *http.Request) []byte { return body } -func (server *Server) RegisterReporters(reporters ...reporters.Reporter) { - server.reporters = reporters +func (server *Server) RegisterSignalDebug(node int, debug func()) { + server.lock.Lock() + defer server.lock.Unlock() + server.signalDebug[node-1] = debug +} + +func (server *Server) debug(writer http.ResponseWriter, request *http.Request) { + server.lock.Lock() + for cpu := 1; cpu <= server.parallelTotal; cpu++ { + server.signalDebug[cpu-1]() + } + server.lock.Unlock() + + //Give nodes a chance to report back + time.Sleep(time.Second) + + writer.Write(server.aggregator.DebugReport()) } func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { @@ -108,8 +128,8 @@ func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *ht json.Unmarshal(body, &data) - for _, reporter := range server.reporters { - reporter.SpecSuiteWillBegin(data.Config, data.Summary) + if server.aggregator != nil { + server.aggregator.SpecSuiteWillBegin(data.Config, data.Summary) } } @@ -118,8 +138,8 @@ func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *htt var setupSummary *types.SetupSummary json.Unmarshal(body, &setupSummary) - for _, reporter := range server.reporters { - reporter.BeforeSuiteDidRun(setupSummary) + if server.aggregator != nil { + server.aggregator.BeforeSuiteDidRun(setupSummary) } } @@ -128,8 +148,8 @@ func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http var setupSummary *types.SetupSummary json.Unmarshal(body, &setupSummary) - for _, reporter := range server.reporters { - reporter.AfterSuiteDidRun(setupSummary) + if server.aggregator != nil { + server.aggregator.AfterSuiteDidRun(setupSummary) } } @@ -138,8 +158,18 @@ func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Requ var specSummary *types.SpecSummary json.Unmarshal(body, &specSummary) - for _, reporter := range server.reporters { - reporter.SpecWillRun(specSummary) + if server.aggregator != nil { + 
server.aggregator.SpecWillRun(specSummary) + } +} + +func (server *Server) updateSpecDebugOutput(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var debugOutput *types.ParallelSpecDebugOutput + json.Unmarshal(body, &debugOutput) + + if server.aggregator != nil { + server.aggregator.UpdateSpecDebugOutput(debugOutput) } } @@ -148,8 +178,8 @@ func (server *Server) specDidComplete(writer http.ResponseWriter, request *http. var specSummary *types.SpecSummary json.Unmarshal(body, &specSummary) - for _, reporter := range server.reporters { - reporter.SpecDidComplete(specSummary) + if server.aggregator != nil { + server.aggregator.SpecDidComplete(specSummary) } } @@ -158,8 +188,8 @@ func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http. var suiteSummary *types.SuiteSummary json.Unmarshal(body, &suiteSummary) - for _, reporter := range server.reporters { - reporter.SpecSuiteDidEnd(suiteSummary) + if server.aggregator != nil { + server.aggregator.SpecSuiteDidEnd(suiteSummary) } } diff --git a/internal/remote/server_test.go b/internal/remote/server_test.go deleted file mode 100644 index 569fafae0..000000000 --- a/internal/remote/server_test.go +++ /dev/null @@ -1,269 +0,0 @@ -package remote_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/ginkgo/internal/remote" - . "github.com/onsi/gomega" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/reporters" - "github.com/onsi/ginkgo/types" - - "bytes" - "encoding/json" - "net/http" -) - -var _ = Describe("Server", func() { - var ( - server *Server - ) - - BeforeEach(func() { - var err error - server, err = NewServer(3) - Ω(err).ShouldNot(HaveOccurred()) - - server.Start() - }) - - AfterEach(func() { - server.Close() - }) - - Describe("Streaming endpoints", func() { - var ( - reporterA, reporterB *reporters.FakeReporter - forwardingReporter *ForwardingReporter - - suiteSummary *types.SuiteSummary - setupSummary *types.SetupSummary - specSummary *types.SpecSummary - ) - - BeforeEach(func() { - reporterA = reporters.NewFakeReporter() - reporterB = reporters.NewFakeReporter() - - server.RegisterReporters(reporterA, reporterB) - - forwardingReporter = NewForwardingReporter(server.Address(), &http.Client{}, &fakeOutputInterceptor{}) - - suiteSummary = &types.SuiteSummary{ - SuiteDescription: "My Test Suite", - } - - setupSummary = &types.SetupSummary{ - State: types.SpecStatePassed, - } - - specSummary = &types.SpecSummary{ - ComponentTexts: []string{"My", "Spec"}, - State: types.SpecStatePassed, - } - }) - - It("should make its address available", func() { - Ω(server.Address()).Should(MatchRegexp(`http://127.0.0.1:\d{2,}`)) - }) - - Describe("/SpecSuiteWillBegin", func() { - It("should decode and forward the Ginkgo config and suite summary", func(done Done) { - forwardingReporter.SpecSuiteWillBegin(config.GinkgoConfig, suiteSummary) - Ω(reporterA.Config).Should(Equal(config.GinkgoConfig)) - Ω(reporterB.Config).Should(Equal(config.GinkgoConfig)) - Ω(reporterA.BeginSummary).Should(Equal(suiteSummary)) - Ω(reporterB.BeginSummary).Should(Equal(suiteSummary)) - close(done) - }) - }) - - Describe("/BeforeSuiteDidRun", func() { - It("should decode and forward the setup summary", func() { - forwardingReporter.BeforeSuiteDidRun(setupSummary) - Ω(reporterA.BeforeSuiteSummary).Should(Equal(setupSummary)) - Ω(reporterB.BeforeSuiteSummary).Should(Equal(setupSummary)) - }) - }) - - Describe("/AfterSuiteDidRun", func() { - It("should decode and forward the setup summary", func() { 
- forwardingReporter.AfterSuiteDidRun(setupSummary) - Ω(reporterA.AfterSuiteSummary).Should(Equal(setupSummary)) - Ω(reporterB.AfterSuiteSummary).Should(Equal(setupSummary)) - }) - }) - - Describe("/SpecWillRun", func() { - It("should decode and forward the spec summary", func(done Done) { - forwardingReporter.SpecWillRun(specSummary) - Ω(reporterA.SpecWillRunSummaries[0]).Should(Equal(specSummary)) - Ω(reporterB.SpecWillRunSummaries[0]).Should(Equal(specSummary)) - close(done) - }) - }) - - Describe("/SpecDidComplete", func() { - It("should decode and forward the spec summary", func(done Done) { - forwardingReporter.SpecDidComplete(specSummary) - Ω(reporterA.SpecSummaries[0]).Should(Equal(specSummary)) - Ω(reporterB.SpecSummaries[0]).Should(Equal(specSummary)) - close(done) - }) - }) - - Describe("/SpecSuiteDidEnd", func() { - It("should decode and forward the suite summary", func(done Done) { - forwardingReporter.SpecSuiteDidEnd(suiteSummary) - Ω(reporterA.EndSummary).Should(Equal(suiteSummary)) - Ω(reporterB.EndSummary).Should(Equal(suiteSummary)) - close(done) - }) - }) - }) - - Describe("Synchronization endpoints", func() { - Describe("GETting and POSTing BeforeSuiteState", func() { - getBeforeSuite := func() types.RemoteBeforeSuiteData { - resp, err := http.Get(server.Address() + "/BeforeSuiteState") - Ω(err).ShouldNot(HaveOccurred()) - Ω(resp.StatusCode).Should(Equal(http.StatusOK)) - - r := types.RemoteBeforeSuiteData{} - decoder := json.NewDecoder(resp.Body) - err = decoder.Decode(&r) - Ω(err).ShouldNot(HaveOccurred()) - - return r - } - - postBeforeSuite := func(r types.RemoteBeforeSuiteData) { - resp, err := http.Post(server.Address()+"/BeforeSuiteState", "application/json", bytes.NewReader(r.ToJSON())) - Ω(err).ShouldNot(HaveOccurred()) - Ω(resp.StatusCode).Should(Equal(http.StatusOK)) - } - - Context("when the first node's Alive has not been registered yet", func() { - It("should return pending", func() { - state := getBeforeSuite() - Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending})) - - state = getBeforeSuite() - Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending})) - }) - }) - - Context("when the first node is Alive but has not responded yet", func() { - BeforeEach(func() { - server.RegisterAlive(1, func() bool { - return true - }) - }) - - It("should return pending", func() { - state := getBeforeSuite() - Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending})) - - state = getBeforeSuite() - Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending})) - }) - }) - - Context("when the first node has responded", func() { - var state types.RemoteBeforeSuiteData - BeforeEach(func() { - server.RegisterAlive(1, func() bool { - return false - }) - - state = types.RemoteBeforeSuiteData{ - Data: []byte("my data"), - State: types.RemoteBeforeSuiteStatePassed, - } - postBeforeSuite(state) - }) - - It("should return the passed in state", func() { - returnedState := getBeforeSuite() - Ω(returnedState).Should(Equal(state)) - }) - }) - - Context("when the first node is no longer Alive and has not responded yet", func() { - BeforeEach(func() { - server.RegisterAlive(1, func() bool { - return false - }) - }) - - It("should return disappeared", func() { - state := getBeforeSuite() - Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: 
types.RemoteBeforeSuiteStateDisappeared})) - - state = getBeforeSuite() - Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStateDisappeared})) - }) - }) - }) - - Describe("GETting RemoteAfterSuiteData", func() { - getRemoteAfterSuiteData := func() bool { - resp, err := http.Get(server.Address() + "/RemoteAfterSuiteData") - Ω(err).ShouldNot(HaveOccurred()) - Ω(resp.StatusCode).Should(Equal(http.StatusOK)) - - a := types.RemoteAfterSuiteData{} - decoder := json.NewDecoder(resp.Body) - err = decoder.Decode(&a) - Ω(err).ShouldNot(HaveOccurred()) - - return a.CanRun - } - - Context("when there are unregistered nodes", func() { - BeforeEach(func() { - server.RegisterAlive(2, func() bool { - return false - }) - }) - - It("should return false", func() { - Ω(getRemoteAfterSuiteData()).Should(BeFalse()) - }) - }) - - Context("when all none-node-1 nodes are still running", func() { - BeforeEach(func() { - server.RegisterAlive(2, func() bool { - return true - }) - - server.RegisterAlive(3, func() bool { - return false - }) - }) - - It("should return false", func() { - Ω(getRemoteAfterSuiteData()).Should(BeFalse()) - }) - }) - - Context("when all none-1 nodes are done", func() { - BeforeEach(func() { - server.RegisterAlive(2, func() bool { - return false - }) - - server.RegisterAlive(3, func() bool { - return false - }) - }) - - It("should return true", func() { - Ω(getRemoteAfterSuiteData()).Should(BeTrue()) - }) - - }) - }) - }) -}) diff --git a/types/types.go b/types/types.go index baf1bd1c4..84ef2a8f0 100644 --- a/types/types.go +++ b/types/types.go @@ -24,6 +24,7 @@ type SuiteSummary struct { SuiteDescription string SuiteSucceeded bool SuiteID string + GinkgoNode int NumberOfSpecsBeforeParallelization int NumberOfTotalSpecs int @@ -51,6 +52,13 @@ type SpecSummary struct { CapturedOutput string SuiteID string + GinkgoNode int +} + +type ParallelSpecDebugOutput struct { + GinkgoNode int + CapturedOutput string + GinkgoWriterOutput string } func (s SpecSummary) HasFailureState() bool { @@ -91,6 +99,7 @@ type SetupSummary struct { CapturedOutput string SuiteID string + GinkgoNode int } type SpecFailure struct {
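
Not part of the patch: a minimal, hypothetical sketch of a client for the /debug endpoint this change adds, to illustrate the JSON payload. The URL is a placeholder (use the "Debug URL:" line ginkgo prints when run with -showDebugAddress), and the struct fields simply mirror the json tags on nodeStatus and currentRunningSpec in internal/remote/aggregator.go; unknown fields (such as the embedded spec summary) are ignored by the decoder.

// debugpoll: sketch of a consumer of the /debug endpoint (not part of the patch).
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"time"
)

// Mirrors the json tags on currentRunningSpec in internal/remote/aggregator.go.
type runningSpec struct {
	StartTime          time.Time `json:"startTime"`
	CapturedOutput     string    `json:"output"`
	GinkgoWriterOutput string    `json:"ginkgoWriterOutput"`
}

// Mirrors the json tags on nodeStatus in internal/remote/aggregator.go.
type nodeStatus struct {
	Node      int          `json:"node"`
	IsRunning bool         `json:"isRunning"`
	StartTime time.Time    `json:"startTime"`
	Spec      *runningSpec `json:"spec"`
}

func main() {
	url := "http://127.0.0.1:12345/debug" // placeholder; copy from ginkgo's "Debug URL:" output

	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The aggregator marshals a map[int]*nodeStatus, so keys arrive as strings.
	nodes := map[string]nodeStatus{}
	if err := json.NewDecoder(resp.Body).Decode(&nodes); err != nil {
		log.Fatal(err)
	}

	for _, n := range nodes {
		if n.Spec == nil {
			fmt.Printf("node %d: no spec currently running (suite running: %v)\n", n.Node, n.IsRunning)
			continue
		}
		fmt.Printf("node %d: spec running for %s\n", n.Node, time.Since(n.Spec.StartTime))
		fmt.Printf("  captured output: %q\n", n.Spec.CapturedOutput)
		fmt.Printf("  GinkgoWriter:    %q\n", n.Spec.GinkgoWriterOutput)
	}
}

Polling this endpoint (or simply fetching it with curl) while a node appears stuck shows which spec each node is running, how long it has been running, and whatever it has written so far to stdout/stderr and the GinkgoWriter.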