diff --git a/src/main/scala/leon/repair/Repairman.scala b/src/main/scala/leon/repair/Repairman.scala
index 867456ba511a0bca026edf8e3978ccae608803e7..a6ebf87a3206de2c003f5aa859ebf021ad38cb2c 100644
--- a/src/main/scala/leon/repair/Repairman.scala
+++ b/src/main/scala/leon/repair/Repairman.scala
@@ -31,14 +31,13 @@ class Repairman(ctx: LeonContext, initProgram: Program, fd: FunDef, verifTimeout
 
   def repair() = {
     reporter.info(ASCIIHelpers.title("1. Discovering tests for "+fd.id))
-    val (tests, isVerified) = discoverTests
+    val (passingTests, failingTests) = discoverTests
 
-    if (isVerified) {
-      reporter.info("Program verifies!")
-    }
+    reporter.info(f" - Passing: ${passingTests.size}%3d")
+    reporter.info(f" - Failing: ${failingTests.size}%3d")
 
     reporter.info(ASCIIHelpers.title("2. Locating/Focusing synthesis problem"))
-    val synth = getSynthesizer(tests)
+    val synth = getSynthesizer(passingTests, failingTests)
     val p     = synth.problem
 
     var solutions = List[Solution]()
@@ -87,7 +86,7 @@ class Repairman(ctx: LeonContext, initProgram: Program, fd: FunDef, verifTimeout
       }
   }
 
-  def getSynthesizer(tests: List[Example]): Synthesizer = {
+  def getSynthesizer(passingTests: List[Example], failingTests: List[Example]): Synthesizer = {
     // Create a fresh function
     val nid = FreshIdentifier(fd.id.name+"_repair").copiedFrom(fd.id)
     val nfd = new FunDef(nid, fd.tparams, fd.returnType, fd.params, fd.defType)
@@ -104,7 +103,7 @@ class Repairman(ctx: LeonContext, initProgram: Program, fd: FunDef, verifTimeout
 
     val body = nfd.body.get;
 
-    val (newBody, replacedExpr) = focusRepair(program, nfd, tests)
+    val (newBody, replacedExpr) = focusRepair(program, nfd, passingTests, failingTests)
     nfd.body = Some(newBody)
 
     val guide = guideOf(replacedExpr)
@@ -138,16 +137,11 @@ class Repairman(ctx: LeonContext, initProgram: Program, fd: FunDef, verifTimeout
     FunctionInvocation(gfd, Seq(expr))
   }
 
-  private def focusRepair(program: Program, fd: FunDef, tests: List[Example]): (Expr, Expr) = {
-    // Compute tests
-    val failingTests = tests.collect {
-      case InExample(ins) => ins
-    }
-
+  private def focusRepair(program: Program, fd: FunDef, passingTests: List[Example], failingTests: List[Example]): (Expr, Expr) = {
     reporter.ifDebug { printer =>
       printer("Tests failing are: ")
-      failingTests.foreach { ins =>
-        printer(ins.mkString(", "))
+      failingTests.foreach { ex =>
+        printer(ex.ins.mkString(", "))
       }
     }
 
@@ -171,7 +165,8 @@ class Repairman(ctx: LeonContext, initProgram: Program, fd: FunDef, verifTimeout
     //  - returns Some(false) if for all tests e evaluates to false
     //  - returns None otherwise
     def forAllTests(e: Expr, env: Map[Identifier, Expr]): Option[Boolean] = {
-      val results = failingTests.map { ins =>
+      val results = failingTests.map { ex =>
+        val ins = ex.ins
         evaluator.eval(e, env ++ (args zip ins)) match {
           case EvaluationResults.Successful(BooleanLiteral(true))  => Some(true)
           case EvaluationResults.Successful(BooleanLiteral(false)) => Some(false)
@@ -271,71 +266,63 @@ class Repairman(ctx: LeonContext, initProgram: Program, fd: FunDef, verifTimeout
   }
 
 
-  def discoverTests: (List[Example], Boolean) = {
-    
+  def discoverTests: (List[Example], List[Example]) = {
+
     import bonsai._
     import bonsai.enumerators._
     import utils.ExpressionGrammars.ValueGrammar
     import purescala.Extractors.UnwrapTuple
-    
-    val maxExamples = 1000
-    // A heuristic on how many valid examples we may wish to generate,
-    // according to ValueGrammar
-    def typeComplexity(tp : TypeTree)(implicit seen : Set[ClassType]) : Int = tp match {    
-      case TypeParameter(_) => 3
-      case Int32Type => 3
-      case UnitType => 1
-      case BooleanType => 2 
-      case ab : AbstractClassType => 
-        if (seen contains ab) 
-          ab.knownCCDescendents.size
-        else 
-          ab.knownCCDescendents.map(typeComplexity(_)(seen + ab)).sum
-      case cc : CaseClassType => 
-        if (seen contains cc) 
-          cc.fields.size
-        else
-          cc.fields.map{f => typeComplexity(f.getType)(seen + cc)}.product
-      case SetType(base) => 
-        val forBase = typeComplexity(base)
-        forBase + forBase * forBase
-      case Untyped => 0
-      case _ => 1
-    }
-    val maxValid = scala.math.min(100, fd.params.map { p => typeComplexity(p.getType)(Set()) }.product)
-    
+
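+    // Bounds for generated tests: enumerate at most 1000 candidate inputs,
+    // and keep at most 100 of those that satisfy the precondition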
+    val maxEnumerated = 1000
+    val maxValid      = 100
+
     val evaluator = new CodeGenEvaluator(ctx, program, CodeGenParams(checkContracts = true))
+    val enum      = new MemoizedEnumerator[TypeTree, Expr](ValueGrammar.getProductions _)
+
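+    // Candidate argument tuples enumerated from the value grammar, unwrapped into argument sequences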
+    val inputs = enum.iterator(tupleTypeWrap(fd.params map { _.getType})).map{ case UnwrapTuple(is) => is }
 
-    val enum = new MemoizedEnumerator[TypeTree, Expr](ValueGrammar.getProductions _)
-    val inputs = enum.iterator(tupleTypeWrap(fd.params map { _.getType})).take(maxExamples)
-    
-    val filtering : Seq[Expr] => Boolean = fd.precondition match {
-      case None => 
-        _ => true 
-      case Some(pre) => 
+    val filtering: Seq[Expr] => Boolean = fd.precondition match {
+      case None =>
+        _ => true
+      case Some(pre) =>
         evaluator.compile(pre, fd.params map { _.id }) match {
           case Some(evalFun) =>
-            val sat = EvaluationResults.Successful(BooleanLiteral(true))
-            e : Seq[Expr] => evalFun(e) == sat
+            val sat = EvaluationResults.Successful(BooleanLiteral(true));
+            { (e: Seq[Expr]) => evalFun(e) == sat }
           case None =>
-            _ => false
+            { _ => false }
         }
     }
 
-    val tests = inputs.map{case UnwrapTuple(is) => is}.filter(filtering).map { i =>
-      evaluator.eval(FunctionInvocation(fd.typed(fd.tparams.map(_.tp)), i)) match {
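+    // Run fd on a candidate input: a successful evaluation yields an in/out example,
+    // a failing one (e.g. a contract violation) only an input example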
+    val inputsToExample: Seq[Expr] => Example = { ins =>
+      evaluator.eval(FunctionInvocation(fd.typed(fd.tparams.map(_.tp)), ins)) match {
         case EvaluationResults.Successful(res) =>
-          new InOutExample(i, List(res))
-
+          new InOutExample(ins, List(res))
         case _ =>
-          new InExample(i)
+          new InExample(ins)
       }
-    }.take(maxValid).toList
-  
+    }
+
+    val generatedTests = inputs
+      .take(maxEnumerated)
+      .filter(filtering)
+      .take(maxValid)
+      .map(inputsToExample)
+      .toList
+
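+    // Generated tests that evaluated successfully (i.e. through the checked contracts) are passing,
+    // the remaining ones are failing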
+    val (generatedPassing, generatedFailing) = generatedTests.partition {
+      case _: InOutExample => true
+      case _               => false
+    }
+
     // Try to verify, if it fails, we have at least one CE
     val ces = getVerificationCounterExamples(fd, program) getOrElse Nil 
 
-    (tests ++ ces, ces.isEmpty)
+    // Extract user-provided passing/failing tests from the passes constructs in the postcondition
+    val ef = new ExamplesFinder(ctx, program)
+    val (userPassing, userFailing) = ef.extractTests(fd)
+
+    (generatedPassing ++ userPassing, generatedFailing ++ ces ++ userFailing)
   }
 
 
@@ -344,7 +331,7 @@ class Repairman(ctx: LeonContext, initProgram: Program, fd: FunDef, verifTimeout
     val s1 = sol1.toSimplifiedExpr(ctx, program)
     val s2 = sol2.toSimplifiedExpr(ctx, program)
 
-    val e = new DefaultEvaluator(ctx, program);
+    val e = new DefaultEvaluator(ctx, program)
 
     def unwrap(e: Expr) = if (p.xs.size > 1) {
       val Tuple(es) = e
diff --git a/src/main/scala/leon/synthesis/ExamplesFinder.scala b/src/main/scala/leon/synthesis/ExamplesFinder.scala
new file mode 100644
index 0000000000000000000000000000000000000000..be7a8c6d14cfdfb71b9a2e3c69b8a6f8823ee85a
--- /dev/null
+++ b/src/main/scala/leon/synthesis/ExamplesFinder.scala
@@ -0,0 +1,258 @@
+/* Copyright 2009-2014 EPFL, Lausanne */
+
+package leon
+package synthesis
+
+import purescala.Trees._
+import purescala.Definitions._
+import purescala.TreeOps._
+import purescala.TypeTrees.TypeTree
+import purescala.Common._
+import purescala.Constructors._
+import purescala.Extractors._
+import evaluators._
+import utils.ExpressionGrammars.ValueGrammar
+import leon.utils.StreamUtils.cartesianProduct
+import bonsai._
+import bonsai.enumerators._
+
+class ExamplesFinder(ctx: LeonContext, program: Program) {
+
+  lazy val evaluator = new DefaultEvaluator(ctx, program)
+
+  val reporter = ctx.reporter
+
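+  /**
+   * Extract in/out tests from the passes constructs in fd's postcondition, and
+   * partition them into tests that fd already satisfies and tests it does not
+   * (including input-only tests).
+   */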
+  def extractTests(fd: FunDef): (Seq[Example], Seq[Example]) = fd.postcondition match {
+    case Some((id, post)) =>
+      val tests = extractTestsOf(post)
+
+      val insIds  = fd.params.map(_.id).toSet
+      val outsIds = Set(id)
+      val allIds  = insIds ++ outsIds
+
+      val examples = tests.toSeq.flatMap { t =>
+        val ids = t.keySet
+        if ((ids & allIds) == allIds) {
+          Some(InOutExample(fd.params.map(p => t(p.id)), Seq(t(id))))
+        } else if ((ids & insIds) == insIds) {
+          Some(InExample(fd.params.map(p => t(p.id))))
+        } else {
+          None
+        }
+      }
+
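+      // A test is valid iff running fd on its inputs reproduces the expected output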
+      def isValidTest(e: Example): Boolean = {
+        e match {
+          case InOutExample(ins, outs) =>
+            evaluator.eval(Equals(outs.head, FunctionInvocation(fd.typed(fd.tparams.map(_.tp)), ins))) match {
+              case EvaluationResults.Successful(BooleanLiteral(true)) => true
+              case _                                                  => false
+            }
+
+          case _ => false
+        }
+      }
+
+      examples.partition(isValidTest)
+
+    case None =>
+      (Nil, Nil)
+  }
+
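+  /**
+   * Extract examples from the passes constructs found in the problem's path
+   * condition and specification, keeping only examples consistent with them.
+   */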
+  def extractTests(p: Problem): Seq[Example] = {
+
+    val testClusters = extractTestsOf(and(p.pc, p.phi))
+
+    // Keep tests covering all of as ++ xs (in/out examples), or at least all inputs (input-only examples)
+    val allIds  = (p.as ++ p.xs).toSet
+    val insIds  = p.as.toSet
+    val outsIds = p.xs.toSet
+
+    val examples = testClusters.toSeq.flatMap { t =>
+      val ids = t.keySet
+      if ((ids & allIds) == allIds) {
+        Some(InOutExample(p.as.map(t), p.xs.map(t)))
+      } else if ((ids & insIds) == insIds) {
+        Some(InExample(p.as.map(t)))
+      } else {
+        None
+      }
+    }
+
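+    // An example is valid if it satisfies the path condition,
+    // and also the specification when outputs are provided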
+    def isValidExample(ex: Example): Boolean = {
+      val (mapping, cond) = ex match {
+        case io: InOutExample =>
+          (Map((p.as zip io.ins) ++ (p.xs zip io.outs): _*), And(p.pc, p.phi))
+        case i =>
+          ((p.as zip i.ins).toMap, p.pc)
+      }
+
+      evaluator.eval(cond, mapping) match {
+        case EvaluationResults.Successful(BooleanLiteral(true)) => true
+        case _ => false
+      }
+    }
+
+    examples.filter(isValidExample)
+  }
+
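+  /**
+   * Collect value assignments from every passes construct in e and
+   * consolidate compatible assignments into larger tests.
+   */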
+  private def extractTestsOf(e: Expr): Set[Map[Identifier, Expr]] = {
+    val allTests = collect[Map[Identifier, Expr]] {
+      case Passes(ins, outs, cases) =>
+        val infos   = extractIds(Tuple(Seq(ins, outs)))
+        val ioPairs = cases.flatMap(caseToExamples(ins, outs, _))
+        
+        val exs     = ioPairs.map{ case (i, o) =>
+          val test = Tuple(Seq(i, o))
+          val ids  = variablesOf(test)
+
+          // Test could contain expressions, we evaluate
+          evaluator.eval(test, ids.map { (i: Identifier) => i -> i.toVariable }.toMap) match {
+            case EvaluationResults.Successful(res) => res
+            case _                                 => test
+          }
+        }
+
+        // Keep only tests from which all ids can be extracted
+        val results = exs.collect { case e if infos.forall(_._2.isDefinedAt(e)) =>
+          infos.map{ case (id, f) => id -> f(e) }.toMap
+        }
+
+        results.toSet
+
+      case _ =>
+        Set()
+    }(e)
+
+
+    consolidateTests(allTests)
+  }
+
+
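+  /**
+   * Turn one match case of a passes construct into concrete (input, output) pairs,
+   * instantiating free pattern variables with enumerated values when necessary.
+   */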
+  private def caseToExamples(in: Expr, out: Expr, cs: MatchCase, examplesPerCase: Int = 5): Seq[(Expr,Expr)] = {
+
+    def doSubstitute(subs: Seq[(Identifier, Expr)], e: Expr) =
+      subs.foldLeft(e) {
+        case (from, (id, to)) => replaceFromIDs(Map(id -> to), from)
+      }
+
+    if (cs.rhs == out) {
+      // The trivial case: the rhs is just out, so it yields no concrete example
+      Seq()
+    } else {
+      // The pattern as an expression (the input expression; may contain free variables)
+      val (pattExpr, ieMap) = patternToExpression(cs.pattern, in.getType)
+      val freeVars = variablesOf(pattExpr).toSeq
+      if (freeVars.isEmpty) {
+        // The input contains no free vars. Trivially return input-output pair
+        Seq((pattExpr, doSubstitute(ieMap,cs.rhs)))
+      } else {
+        // The input contains free variables, so it does not directly provide concrete examples.
+        // Instantiate them with values enumerated from a simple grammar.
+        val enum = new MemoizedEnumerator[TypeTree, Expr](ValueGrammar.getProductions _)
+        val values = enum.iterator(tupleTypeWrap(freeVars.map{ _.getType }))
+        val instantiations = values map {
+          case UnwrapTuple(ins) =>
+            (freeVars zip ins).toMap
+        }
+        
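+        // Keep an instantiation only if the case's guard (if any) evaluates to true under it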
+        def filterGuard(e: Expr, mapping: Map[Identifier, Expr]): Boolean = cs.optGuard match {
+          case Some(guard) => 
+            // in -> e should be enough. We shouldn't find any subexpressions of in.
+            evaluator.eval(replace(Map(in -> e), guard), mapping) match {
+              case EvaluationResults.Successful(BooleanLiteral(true)) => true
+              case _ => false
+            }
+
+          case None =>
+            true
+        }
+
+        (for {
+          inst <- instantiations.toSeq
+          inR = replaceFromIDs(inst, pattExpr)
+          outR = replaceFromIDs(inst, doSubstitute(ieMap, cs.rhs))
+          if filterGuard(inR, inst)
+        } yield (inR,outR) ).take(examplesPerCase)
+      }
+    }
+  }
+
+  /**
+   * Check whether two tests are compatible:
+   * compatible tests evaluate to the same value for every identifier they have in common.
+   */
+  private def isCompatible(m1: Map[Identifier, Expr], m2: Map[Identifier, Expr]) = {
+    val ks = m1.keySet & m2.keySet
+    ks.nonEmpty && ks.map(m1) == ks.map(m2)
+  }
+
+  /**
+   * Merge tests m1 and m2 if they are compatible; return m1 unchanged otherwise.
+   */
+  private def mergeTest(m1: Map[Identifier, Expr], m2: Map[Identifier, Expr]) = {
+    if (!isCompatible(m1, m2)) {
+      m1
+    } else {
+      m1 ++ m2
+    }
+  }
+
+  /**
+   * Consolidate clusters of compatible tests together, e.g.:
+   * t1: a->1, c->3
+   * t2: a->1, b->4
+   *   => a->1, b->4, c->3
+   */
+  private def consolidateTests(ts: Set[Map[Identifier, Expr]]): Set[Map[Identifier, Expr]] = {
+
+    var consolidated = Set[Map[Identifier, Expr]]()
+    for (t <- ts) {
+      consolidated += t
+
+      consolidated = consolidated.map { c =>
+        mergeTest(c, t)
+      }
+    }
+    consolidated
+  }
+
+  /**
+   * Extract ids in ins/outs args, and compute corresponding extractors for values map
+   *
+   * Examples:
+   * (a,b) =>
+   *     a -> _.1
+   *     b -> _.2
+   *
+   * Cons(a, Cons(b, c)) =>
+   *     a -> _.head
+   *     b -> _.tail.head
+   *     c -> _.tail.tail
+   */
+  private def extractIds(e: Expr): Seq[(Identifier, PartialFunction[Expr, Expr])] = e match {
+    case Variable(id) =>
+      List((id, { case e => e }))
+    case Tuple(vs) =>
+      vs.map(extractIds).zipWithIndex.flatMap{ case (ids, i) =>
+        ids.map{ case (id, e) =>
+          (id, andThen({ case Tuple(vs) => vs(i) }, e))
+        }
+      }
+    case CaseClass(cct, args) =>
+      args.map(extractIds).zipWithIndex.flatMap { case (ids, i) =>
+        ids.map{ case (id, e) =>
+          (id, andThen({ case CaseClass(cct2, vs) if cct2 == cct => vs(i) }, e))
+        }
+      }
+
+    case _ =>
+      reporter.warning("Unnexpected pattern in test-ids extraction: "+e)
+      Nil
+  }
+
+  // Compose partial functions
+  private def andThen(pf1: PartialFunction[Expr, Expr], pf2: PartialFunction[Expr, Expr]): PartialFunction[Expr, Expr] = {
+    Function.unlift(pf1.lift(_) flatMap pf2.lift)
+  }
+}
diff --git a/src/main/scala/leon/synthesis/InOutExample.scala b/src/main/scala/leon/synthesis/InOutExample.scala
index 0e2e4faa84d51d59885b59814909428bcc30c57b..932801c899a64ac5ba18c96f151908b531fb212d 100644
--- a/src/main/scala/leon/synthesis/InOutExample.scala
+++ b/src/main/scala/leon/synthesis/InOutExample.scala
@@ -3,8 +3,9 @@
 package leon
 package synthesis
 
-import purescala.Trees.Expr
+import purescala.Trees._
 
 class Example(val ins: Seq[Expr])
 case class InOutExample(is: Seq[Expr], val outs: Seq[Expr]) extends Example(is)
 case class InExample(is: Seq[Expr]) extends Example(is)
+
diff --git a/src/main/scala/leon/synthesis/Problem.scala b/src/main/scala/leon/synthesis/Problem.scala
index 3c0d430e8d516fc0bf70b0175a7b2bc21c10fa73..468754c7c638347678d94d62ef0b716663dc5f04 100644
--- a/src/main/scala/leon/synthesis/Problem.scala
+++ b/src/main/scala/leon/synthesis/Problem.scala
@@ -20,205 +20,6 @@ case class Problem(as: List[Identifier], ws: Expr, pc: Expr, phi: Expr, xs: List
     "⟦ "+as.mkString(";")+", "+(if (pcws != BooleanLiteral(true)) pcws+" ≺ " else "")+" ⟨ "+phi+" ⟩ "+xs.mkString(";")+" ⟧ "
   }
 
-  def getTests(sctx: SynthesisContext): Seq[Example] = {
-    import purescala.Extractors._
-    import evaluators._
-
-    val predicates = and(pc, phi)
-
-    val ev = new DefaultEvaluator(sctx.context, sctx.program)
-
-    def isValidExample(ex: Example): Boolean = {
-      val (mapping, cond) = ex match {
-        case io: InOutExample =>
-          (Map((as zip io.ins) ++ (xs zip io.outs): _*), And(pc, phi))
-        case i =>
-          ((as zip i.ins).toMap, pc)
-      }
-
-      ev.eval(cond, mapping) match {
-        case EvaluationResults.Successful(BooleanLiteral(true)) => true
-        case _ => false
-      }
-    }
-
-    // Returns a list of identifiers, and extractors
-    def andThen(pf1: PartialFunction[Expr, Expr], pf2: PartialFunction[Expr, Expr]): PartialFunction[Expr, Expr] = {
-      Function.unlift(pf1.lift(_) flatMap pf2.lift)
-    }
-
-    /**
-     * Extract ids in ins/outs args, and compute corresponding extractors for values map
-     *
-     * Examples:
-     * (a,b) =>
-     *     a -> _.1
-     *     b -> _.2
-     *
-     * Cons(a, Cons(b, c)) =>
-     *     a -> _.head
-     *     b -> _.tail.head
-     *     c -> _.tail.tail
-     */
-    def extractIds(e: Expr): Seq[(Identifier, PartialFunction[Expr, Expr])] = e match {
-      case Variable(id) =>
-        List((id, { case e => e }))
-      case Tuple(vs) =>
-        vs.map(extractIds).zipWithIndex.flatMap{ case (ids, i) =>
-          ids.map{ case (id, e) =>
-            (id, andThen({ case Tuple(vs) => vs(i) }, e))
-          }
-        }
-      case CaseClass(cct, args) =>
-        args.map(extractIds).zipWithIndex.flatMap { case (ids, i) =>
-          ids.map{ case (id, e) =>
-            (id, andThen({ case CaseClass(cct2, vs) if cct2 == cct => vs(i) } ,e))
-          }
-        }
-
-      case _ =>
-        sctx.reporter.warning("Unnexpected pattern in test-ids extraction: "+e)
-        Nil
-    }
-
-    def exprToIds(e: Expr): List[Identifier] = e match {
-      case Variable(i) => List(i)
-      case Tuple(is) => is.collect { case Variable(i) => i }.toList
-      case _ => Nil
-    }
-  
-
-    def toIOExamples(in: Expr, out : Expr, cs : MatchCase) : Seq[(Expr,Expr)] = {
-      import utils.ExpressionGrammars.ValueGrammar
-      import leon.utils.StreamUtils.cartesianProduct
-      import bonsai._
-      import bonsai.enumerators._
-
-      val examplesPerCase = 5
-     
-      def doSubstitute(subs : Seq[(Identifier, Expr)], e : Expr) = 
-        subs.foldLeft(e) { 
-          case (from, (id, to)) => replaceFromIDs(Map(id -> to), from)
-        }
-
-      if (cs.rhs == out) {
-        // The trivial example
-        Seq()
-      } else {
-        // The pattern as expression (input expression)(may contain free variables)
-        val (pattExpr, ieMap) = patternToExpression(cs.pattern, in.getType)
-        val freeVars = variablesOf(pattExpr).toSeq
-        if (freeVars.isEmpty) {
-          // The input contains no free vars. Trivially return input-output pair
-          Seq((pattExpr, doSubstitute(ieMap,cs.rhs)))
-        } else {
-          // If the input contains free variables, it does not provide concrete examples. 
-          // We will instantiate them according to a simple grammar to get them.
-          val enum = new MemoizedEnumerator[TypeTree, Expr](ValueGrammar.getProductions _)
-          val types = freeVars.map{ _.getType }
-          val typesWithValues = types.map { tp => (tp, enum.iterator(tp).toStream) }.toMap
-          val values = freeVars map { v => typesWithValues(v.getType) }
-          val instantiations = cartesianProduct(values) map { freeVars.zip(_).toMap }
-          
-          def filterG(e : Expr, mapping: Map[Identifier, Expr]) : Boolean = cs.optGuard match {
-            // in -> e should be enough. We shouldn't find any subexpressions of in.
-            case Some(guard) => ev.eval(replace(Map(in -> e), guard), mapping) match {
-              case EvaluationResults.Successful(BooleanLiteral(true)) => true
-              case _ => false
-            }
-            case None => true
-          }
-
-          (for {
-            inst <- instantiations
-            inR = replaceFromIDs(inst, pattExpr)
-            outR = replaceFromIDs(inst, doSubstitute(ieMap, cs.rhs))
-            if filterG(inR, inst)
-          } yield (inR,outR) ).take(examplesPerCase)
-          
-          
-          
-        }
-        
-      }
-    }
-
-    val evaluator = new DefaultEvaluator(sctx.context, sctx.program)
-
-    val testClusters = collect[Map[Identifier, Expr]] {
-
-      case Passes(ins, out, cases) =>
-        
-        val ioPairs = cases flatMap { toIOExamples(ins,out,_) }
-        val infos = extractIds(Tuple(Seq(ins,out)))
-        val exs   = ioPairs.map{ case (i, o) =>
-          val test = Tuple(Seq(i, o))
-          val ids = variablesOf(test)
-          evaluator.eval(test, ids.map { (i: Identifier) => i -> i.toVariable }.toMap) match {
-            case EvaluationResults.Successful(res) => res
-            case _ =>
-              test
-          }
-        }
-
-        // Check whether we can extract all ids from example
-        val results = exs.collect { case e if infos.forall(_._2.isDefinedAt(e)) =>
-          infos.map{ case (id, f) => id -> f(e) }.toMap
-        }
-
-        results.toSet
-
-      case _ =>
-        Set()
-    }(predicates)
-
-    /**
-     * we now need to consolidate different clusters of compatible tests together
-     * t1: a->1, c->3
-     * t2: a->1, b->4
-     *   => a->1, b->4, c->3
-     */
-
-    def isCompatible(m1: Map[Identifier, Expr], m2: Map[Identifier, Expr]) = {
-      val ks = m1.keySet & m2.keySet
-      ks.nonEmpty && ks.map(m1) == ks.map(m2)
-    }
-
-    def mergeTest(m1: Map[Identifier, Expr], m2: Map[Identifier, Expr]) = {
-      if (!isCompatible(m1, m2)) {
-        m1
-      } else {
-        m1 ++ m2
-      }
-    }
-
-    var consolidated = Set[Map[Identifier, Expr]]()
-    for (t <- testClusters) {
-      consolidated += t
-
-      consolidated = consolidated.map { c =>
-        mergeTest(c, t)
-      }
-    }
-
-    // Finally, we keep complete tests covering all as++xs
-    val allIds  = (as ++ xs).toSet
-    val insIds  = as.toSet
-    val outsIds = xs.toSet
-
-    val examples = consolidated.toSeq.flatMap { t =>
-      val ids = t.keySet
-      if ((ids & allIds) == allIds) {
-        Some(InOutExample(as.map(t), xs.map(t)))
-      } else if ((ids & insIds) == insIds) {
-        Some(InExample(as.map(t)))
-      } else {
-        None
-      }
-    }
-
-    examples.filter(isValidExample)
-  }
 }
 
 object Problem {
diff --git a/src/main/scala/leon/synthesis/rules/BottomUpTegis.scala b/src/main/scala/leon/synthesis/rules/BottomUpTegis.scala
index e409b4d1b72fe0acbd3817693aecb3f7d9ed2a67..33947304acac376f1bae75db4df1e75be39aa2e0 100644
--- a/src/main/scala/leon/synthesis/rules/BottomUpTegis.scala
+++ b/src/main/scala/leon/synthesis/rules/BottomUpTegis.scala
@@ -42,7 +42,9 @@ abstract class BottomUpTEGISLike[T <% Typed](name: String) extends Rule(name) {
   def getRootLabel(tpe: TypeTree): T
 
   def instantiateOn(sctx: SynthesisContext, p: Problem): Traversable[RuleInstantiation] = {
-    var tests = p.getTests(sctx).collect {
+    val ef = new ExamplesFinder(sctx.context, sctx.program)
+
+    var tests = ef.extractTests(p).collect {
       case io: InOutExample => (io.ins, io.outs)
     }
 
diff --git a/src/main/scala/leon/synthesis/rules/CegisLike.scala b/src/main/scala/leon/synthesis/rules/CegisLike.scala
index d95ac13a3212a9f5e5ed579d15aa70f81a009b0f..a76666c3dd9a950517c6447118faa72471876f4a 100644
--- a/src/main/scala/leon/synthesis/rules/CegisLike.scala
+++ b/src/main/scala/leon/synthesis/rules/CegisLike.scala
@@ -437,7 +437,8 @@ abstract class CEGISLike[T <% Typed](name: String) extends Rule(name) {
         // We populate the list of examples with a predefined one
         sctx.reporter.debug("Acquiring list of examples")
 
-        baseExampleInputs ++= p.getTests(sctx).map(_.ins).toSet
+        val ef = new ExamplesFinder(sctx.context, sctx.program)
+        baseExampleInputs ++= ef.extractTests(p).map(_.ins).toSet
 
         val pc = p.pc
 
diff --git a/src/main/scala/leon/synthesis/rules/TegisLike.scala b/src/main/scala/leon/synthesis/rules/TegisLike.scala
index 3f55650e38646c654d788932cfcfbfae647744d3..96a5b242e857f7724a7c4c7451f7366b7a74370b 100644
--- a/src/main/scala/leon/synthesis/rules/TegisLike.scala
+++ b/src/main/scala/leon/synthesis/rules/TegisLike.scala
@@ -38,7 +38,8 @@ abstract class TEGISLike[T <% Typed](name: String) extends Rule(name) {
         val params = getParams(sctx, p)
         val grammar = params.grammar
 
-        var tests = p.getTests(sctx).map(_.ins).distinct
+        val ef = new ExamplesFinder(sctx.context, sctx.program)
+        var tests = ef.extractTests(p).map(_.ins).distinct
         if (tests.nonEmpty) {
 
           val evalParams            = CodeGenParams(maxFunctionInvocations = 2000, checkContracts = true)