Let me start off by saying that I agree a test SHOULD be stable and SHOULD never need a retry. However, we do not live in an ideal world, and in some very specific scenarios retrying a test can be a valid use case.
I am running UI tests (using Selenium against an Angular app) where the chromedriver sometimes becomes unresponsive for unclear reasons. This behavior is entirely out of my control, and no working fix exists. I cannot retry at the level of a single SpecFlow step, because my "Given" steps log in to the application: when a "When" step fails, I need to rerun the "Given" steps as well. In this scenario I want to close the driver, start it again, and rerun all previous steps. As a last resort, I wrote a custom test runner for SpecFlow that can recover from such an error:
Disclaimer: this is not intended usage, and it may break with any new version of SpecFlow. If you are a testing purist, do not read any further.
First, we create a wrapper class that makes it easy to build a custom ITestRunner (all methods are virtual so they can be overridden):
using TechTalk.SpecFlow;

public class OverrideableTestRunner : ITestRunner
{
    private readonly ITestRunner _runner;

    public OverrideableTestRunner(ITestRunner runner)
    {
        _runner = runner;
    }

    public int ThreadId => _runner.ThreadId;
    public FeatureContext FeatureContext => _runner.FeatureContext;
    public ScenarioContext ScenarioContext => _runner.ScenarioContext;

    public virtual void And(string text, string multilineTextArg, Table tableArg, string keyword = null)
    {
        _runner.And(text, multilineTextArg, tableArg, keyword);
    }

    public virtual void But(string text, string multilineTextArg, Table tableArg, string keyword = null)
    {
        _runner.But(text, multilineTextArg, tableArg, keyword);
    }

    public virtual void CollectScenarioErrors()
    {
        _runner.CollectScenarioErrors();
    }

    public virtual void Given(string text, string multilineTextArg, Table tableArg, string keyword = null)
    {
        _runner.Given(text, multilineTextArg, tableArg, keyword);
    }

    public virtual void InitializeTestRunner(int threadId)
    {
        _runner.InitializeTestRunner(threadId);
    }

    public virtual void OnFeatureEnd()
    {
        _runner.OnFeatureEnd();
    }

    public virtual void OnFeatureStart(FeatureInfo featureInfo)
    {
        _runner.OnFeatureStart(featureInfo);
    }

    public virtual void OnScenarioEnd()
    {
        _runner.OnScenarioEnd();
    }

    public virtual void OnScenarioInitialize(ScenarioInfo scenarioInfo)
    {
        _runner.OnScenarioInitialize(scenarioInfo);
    }

    public virtual void OnScenarioStart()
    {
        _runner.OnScenarioStart();
    }

    public virtual void OnTestRunEnd()
    {
        _runner.OnTestRunEnd();
    }

    public virtual void OnTestRunStart()
    {
        _runner.OnTestRunStart();
    }

    public virtual void Pending()
    {
        _runner.Pending();
    }

    public virtual void SkipScenario()
    {
        _runner.SkipScenario();
    }

    public virtual void Then(string text, string multilineTextArg, Table tableArg, string keyword = null)
    {
        _runner.Then(text, multilineTextArg, tableArg, keyword);
    }

    public virtual void When(string text, string multilineTextArg, Table tableArg, string keyword = null)
    {
        _runner.When(text, multilineTextArg, tableArg, keyword);
    }
}
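For context: the code-behind that SpecFlow generates for each .feature file drives the scenario by calling exactly these ITestRunner step methods, which is why intercepting Given/When/Then/And/But is enough to capture every executed step. Roughly (a simplified illustration, not actual generated code):

// What the generated .feature.cs does for each scenario step, in
// simplified form; the real generated code passes explicit casts
// and keyword strings
testRunner.Given("I am logged in", null, null, "Given ");
testRunner.When("I save the form", null, null, "When ");
testRunner.Then("a confirmation appears", null, null, "Then ");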
Next, we create the custom test runner that records the step calls made for a scenario and can replay the previous steps after recovery:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Reflection;
using System.Runtime.CompilerServices;
using TechTalk.SpecFlow;
using TechTalk.SpecFlow.Infrastructure;

public class RetryTestRunner : OverrideableTestRunner
{
    /// <summary>
    /// Which exceptions to handle (default: all)
    /// </summary>
    public Predicate<Exception> HandleExceptionFilter { private get; set; } = _ => true;

    /// <summary>
    /// The action that is executed to recover
    /// </summary>
    public Action RecoverAction { private get; set; } = () => { };

    /// <summary>
    /// The maximum number of retries
    /// </summary>
    public int MaxRetries { private get; set; } = 10;

    /// <summary>
    /// The steps executed so far in this scenario; these need to be replayed in case of an error
    /// </summary>
    private readonly List<(MethodInfo method, object[] args)> _previousSteps = new List<(MethodInfo method, object[] args)>();

    /// <summary>
    /// The number of the current try (to make sure we don't go over the specified limit)
    /// </summary>
    private int _currentTryNumber = 0;

    public RetryTestRunner(ITestExecutionEngine engine) : base(new TestRunner(engine))
    {
    }

    public override void OnScenarioStart()
    {
        base.OnScenarioStart();
        _previousSteps.Clear();
        _currentTryNumber = 0;
    }

    public override void Given(string text, string multilineTextArg, Table tableArg, string keyword = null)
    {
        base.Given(text, multilineTextArg, tableArg, keyword);
        Checker()(text, multilineTextArg, tableArg, keyword);
    }

    public override void But(string text, string multilineTextArg, Table tableArg, string keyword = null)
    {
        base.But(text, multilineTextArg, tableArg, keyword);
        Checker()(text, multilineTextArg, tableArg, keyword);
    }

    public override void And(string text, string multilineTextArg, Table tableArg, string keyword = null)
    {
        base.And(text, multilineTextArg, tableArg, keyword);
        Checker()(text, multilineTextArg, tableArg, keyword);
    }

    public override void Then(string text, string multilineTextArg, Table tableArg, string keyword = null)
    {
        base.Then(text, multilineTextArg, tableArg, keyword);
        Checker()(text, multilineTextArg, tableArg, keyword);
    }

    public override void When(string text, string multilineTextArg, Table tableArg, string keyword = null)
    {
        base.When(text, multilineTextArg, tableArg, keyword);
        Checker()(text, multilineTextArg, tableArg, keyword);
    }

    // Use this delegate to make a params call possible.
    // It is not possible to combine a params argument with CallerMemberName
    // in one method, so we curry the method instead. #functionalprogramming
    public delegate void ParamsFunc(params object[] args);

    private ParamsFunc Checker([CallerMemberName] string method = null)
    {
        return args =>
        {
            // Record this step so it can be replayed
            _previousSteps.Add((GetType().GetMethod(method), args));

            // Determine if we should retry
            if (ScenarioContext.ScenarioExecutionStatus != ScenarioExecutionStatus.TestError
                || !HandleExceptionFilter(ScenarioContext.TestError)
                || _currentTryNumber >= MaxRetries)
            {
                return;
            }

            // HACKY: Reset the test state to a non-error state
            typeof(ScenarioContext).GetProperty(nameof(ScenarioContext.ScenarioExecutionStatus)).SetValue(ScenarioContext, ScenarioExecutionStatus.OK);
            typeof(ScenarioContext).GetProperty(nameof(ScenarioContext.TestError)).SetValue(ScenarioContext, null);

            // Trigger the recovery action
            RecoverAction.Invoke();

            // Retry the steps
            _currentTryNumber++;
            var stepsToPlay = _previousSteps.ToList();
            _previousSteps.Clear();
            stepsToPlay.ForEach(s => s.method.Invoke(this, s.args));
        };
    }
}
Next, configure SpecFlow to use our own test runner (this could also be packaged as a runtime plugin; see the sketch after the .csproj snippet below).
using BoDi;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using TechTalk.SpecFlow;
using TechTalk.SpecFlow.Infrastructure;

/// <summary>
/// We need this because it is the only way to configure SpecFlow before it starts
/// </summary>
[TestClass]
public class CustomDependencyProvider : DefaultDependencyProvider
{
    [AssemblyInitialize]
    public static void AssemblyInitialize(TestContext testContext)
    {
        // Override the dependency provider of SpecFlow
        ContainerBuilder.DefaultDependencyProvider = new CustomDependencyProvider();
        TestRunnerManager.OnTestRunStart(typeof(CustomDependencyProvider).Assembly);
    }

    [AssemblyCleanup]
    public static void AssemblyCleanup()
    {
        TestRunnerManager.OnTestRunEnd(typeof(CustomDependencyProvider).Assembly);
    }

    public override void RegisterTestThreadContainerDefaults(ObjectContainer testThreadContainer)
    {
        base.RegisterTestThreadContainerDefaults(testThreadContainer);
        // Use our own test runner instead of the default one
        testThreadContainer.RegisterTypeAs<RetryTestRunner, ITestRunner>();
    }
}
Also, add this to your .csproj so SpecFlow does not generate its own assembly hooks file (which would clash with the AssemblyInitialize/AssemblyCleanup above):
<PropertyGroup>
  <GenerateSpecFlowAssemblyHooksFile>false</GenerateSpecFlowAssemblyHooksFile>
</PropertyGroup>
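As mentioned above, the registration could alternatively be packaged as a SpecFlow runtime plugin. A rough, untested sketch (the RetryPlugin name is my own, and the usual plugin discovery rules, such as the *.SpecFlowPlugin.dll naming convention for external plugin assemblies, still apply):

using TechTalk.SpecFlow;
using TechTalk.SpecFlow.Plugins;
using TechTalk.SpecFlow.UnitTestProvider;

[assembly: RuntimePlugin(typeof(RetryPlugin))]

public class RetryPlugin : IRuntimePlugin
{
    public void Initialize(RuntimePluginEvents runtimePluginEvents,
        RuntimePluginParameters runtimePluginParameters,
        UnitTestProviderConfiguration unitTestProviderConfiguration)
    {
        // Swap in our test runner on every test thread container
        runtimePluginEvents.CustomizeTestThreadDependencies += (sender, args) =>
            args.ObjectContainer.RegisterTypeAs<RetryTestRunner, ITestRunner>();
    }
}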
Now we can use the test runner to recover from errors:
using OpenQA.Selenium;
using TechTalk.SpecFlow;

[Binding]
public class TestInitialize
{
    private readonly RetryTestRunner _testRunner;

    public TestInitialize(ITestRunner testRunner)
    {
        // SpecFlow injects the ITestRunner we registered above
        _testRunner = (RetryTestRunner)testRunner;
    }

    [BeforeScenario]
    public void TestInit()
    {
        // Restart the browser when the driver has become unresponsive
        _testRunner.RecoverAction = () =>
        {
            StopDriver();
            StartDriver();
        };
        // Only retry on WebDriver errors
        _testRunner.HandleExceptionFilter = ex => ex is WebDriverException;
    }
}
To trigger a retry from an AfterScenario hook instead, you could add a public RetryScenario() method to the test runner and call that.
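A minimal sketch of such a method (my own addition, mirroring what Checker() already does after a failed step) could look like this inside RetryTestRunner:

// Hypothetical addition to RetryTestRunner: lets an [AfterScenario] hook
// trigger the same reset-recover-replay cycle manually
public void RetryScenario()
{
    if (ScenarioContext.ScenarioExecutionStatus != ScenarioExecutionStatus.TestError
        || !HandleExceptionFilter(ScenarioContext.TestError)
        || _currentTryNumber >= MaxRetries)
    {
        return;
    }

    // HACKY: Reset the test state to a non-error state, as in Checker()
    typeof(ScenarioContext).GetProperty(nameof(ScenarioContext.ScenarioExecutionStatus)).SetValue(ScenarioContext, ScenarioExecutionStatus.OK);
    typeof(ScenarioContext).GetProperty(nameof(ScenarioContext.TestError)).SetValue(ScenarioContext, null);
    RecoverAction.Invoke();

    // Replay everything recorded so far
    _currentTryNumber++;
    var stepsToPlay = _previousSteps.ToList();
    _previousSteps.Clear();
    stepsToPlay.ForEach(s => s.method.Invoke(this, s.args));
}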
One final note: use this only as a last resort, when there is truly nothing you can do about the underlying problem. Running flaky tests is still better than running no tests at all.